Move V8 to external/v8

Change-Id: If68025d67453785a651c5dfb34fad298c16676a4
diff --git a/src/SConscript b/src/SConscript
new file mode 100755
index 0000000..b6c2b4d
--- /dev/null
+++ b/src/SConscript
@@ -0,0 +1,197 @@
+# Copyright 2008 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+from os.path import join, dirname, abspath
+root_dir = dirname(File('SConstruct').rfile().abspath)
+sys.path.append(join(root_dir, 'tools'))
+import js2c
+Import('context')
+
+
+SOURCES = {
+  'all': [
+    'accessors.cc', 'allocation.cc', 'api.cc', 'assembler.cc', 'ast.cc',
+    'bootstrapper.cc', 'builtins.cc', 'checks.cc', 'code-stubs.cc',
+    'codegen.cc', 'compilation-cache.cc', 'compiler.cc', 'contexts.cc',
+    'conversions.cc', 'counters.cc', 'dateparser.cc', 'debug.cc',
+    'debug-agent.cc', 'disassembler.cc', 'execution.cc', 'factory.cc',
+    'flags.cc', 'frame-element.cc', 'frames.cc', 'func-name-inferrer.cc',
+    'global-handles.cc', 'handles.cc', 'hashmap.cc', 'heap.cc',
+    'heap-profiler.cc', 'ic.cc', 'interpreter-irregexp.cc', 'jsregexp.cc',
+    'jump-target.cc', 'log.cc', 'log-utils.cc', 'mark-compact.cc',
+    'messages.cc', 'objects.cc', 'oprofile-agent.cc', 'parser.cc',
+    'property.cc', 'regexp-macro-assembler.cc',
+    'regexp-macro-assembler-irregexp.cc', 'regexp-stack.cc',
+    'register-allocator.cc', 'rewriter.cc', 'runtime.cc', 'scanner.cc',
+    'scopeinfo.cc', 'scopes.cc', 'serialize.cc', 'snapshot-common.cc',
+    'spaces.cc', 'string-stream.cc', 'stub-cache.cc', 'token.cc', 'top.cc',
+    'unicode.cc', 'usage-analyzer.cc', 'utils.cc', 'v8-counters.cc',
+    'v8.cc', 'v8threads.cc', 'variables.cc', 'version.cc',
+    'virtual-frame.cc', 'zone.cc'
+  ],
+  'arch:arm': [
+    'arm/assembler-arm.cc', 'arm/builtins-arm.cc', 'arm/codegen-arm.cc',
+    'arm/constants-arm.cc', 'arm/cpu-arm.cc', 'arm/disasm-arm.cc',
+    'arm/debug-arm.cc', 'arm/frames-arm.cc', 'arm/ic-arm.cc',
+    'arm/jump-target-arm.cc', 'arm/macro-assembler-arm.cc',
+    'arm/regexp-macro-assembler-arm.cc', 'arm/register-allocator-arm.cc',
+    'arm/stub-cache-arm.cc', 'arm/virtual-frame-arm.cc'
+  ],
+  'arch:ia32': [
+    'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc',
+    'ia32/codegen-ia32.cc', 'ia32/cpu-ia32.cc', 'ia32/disasm-ia32.cc',
+    'ia32/debug-ia32.cc', 'ia32/frames-ia32.cc', 'ia32/ic-ia32.cc',
+    'ia32/jump-target-ia32.cc', 'ia32/macro-assembler-ia32.cc',
+    'ia32/regexp-macro-assembler-ia32.cc',
+    'ia32/register-allocator-ia32.cc', 'ia32/stub-cache-ia32.cc',
+    'ia32/virtual-frame-ia32.cc'
+  ],
+  'arch:x64': [
+    'x64/assembler-x64.cc', 'x64/builtins-x64.cc', 'x64/codegen-x64.cc',
+    'x64/cpu-x64.cc', 'x64/disasm-x64.cc', 'x64/debug-x64.cc',
+    'x64/frames-x64.cc', 'x64/ic-x64.cc', 'x64/jump-target-x64.cc',
+    'x64/macro-assembler-x64.cc', 'x64/regexp-macro-assembler-x64.cc',
+    'x64/register-allocator-x64.cc', 'x64/stub-cache-x64.cc',
+    'x64/virtual-frame-x64.cc'
+  ],
+  'simulator:arm': ['arm/simulator-arm.cc'],
+  'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
+  'os:linux':   ['platform-linux.cc', 'platform-posix.cc'],
+  'os:android': ['platform-linux.cc', 'platform-posix.cc'],
+  'os:macos':   ['platform-macos.cc', 'platform-posix.cc'],
+  'os:nullos':  ['platform-nullos.cc'],
+  'os:win32':   ['platform-win32.cc'],
+  'mode:release': [],
+  'mode:debug': [
+    'objects-debug.cc', 'prettyprinter.cc', 'regexp-macro-assembler-tracer.cc'
+  ]
+}
+
+
+D8_FILES = {
+  'all': [
+    'd8.cc', 'd8-debug.cc'
+  ],
+  'os:linux': [
+    'd8-posix.cc'
+  ],
+  'os:macos': [
+    'd8-posix.cc'
+  ],
+  'os:android': [
+    'd8-posix.cc'
+  ],
+  'os:freebsd': [
+    'd8-posix.cc'
+  ],
+  'os:win32': [
+    'd8-windows.cc'
+  ],
+  'os:nullos': [
+    'd8-windows.cc'   # Empty implementation at the moment.
+  ],
+  'console:readline': [
+    'd8-readline.cc'
+  ]
+}
+
+
+LIBRARY_FILES = '''
+runtime.js
+v8natives.js
+array.js
+string.js
+uri.js
+math.js
+messages.js
+apinatives.js
+debug-delay.js
+mirror-delay.js
+date-delay.js
+regexp-delay.js
+json-delay.js
+'''.split()
+
+
+def Abort(message):
+  print message
+  sys.exit(1)
+
+
+def ConfigureObjectFiles():
+  env = Environment()
+  env.Replace(**context.flags['v8'])
+  context.ApplyEnvOverrides(env)
+  env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C)
+  env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET --logfile "$LOGFILE"')
+
+  # Build the standard platform-independent source files.
+  source_files = context.GetRelevantSources(SOURCES)
+
+  d8_files = context.GetRelevantSources(D8_FILES)
+  d8_js = env.JS2C('d8-js.cc', 'd8.js', TYPE='D8')
+  d8_js_obj = context.ConfigureObject(env, d8_js, CPPPATH=['.'])
+  d8_objs = [context.ConfigureObject(env, [d8_files]), d8_js_obj]
+
+  # Combine the JavaScript library files into a single C++ file and
+  # compile it.
+  library_files = [s for s in LIBRARY_FILES]
+  library_files.append('macros.py')
+  libraries_src, libraries_empty_src = env.JS2C(['libraries.cc', 'libraries-empty.cc'], library_files, TYPE='CORE')
+  libraries_obj = context.ConfigureObject(env, libraries_src, CPPPATH=['.'])
+
+  # Build dtoa.
+  dtoa_env = env.Copy()
+  dtoa_env.Replace(**context.flags['dtoa'])
+  dtoa_files = ['dtoa-config.c']
+  dtoa_obj = context.ConfigureObject(dtoa_env, dtoa_files)
+
+  source_objs = context.ConfigureObject(env, source_files)
+  non_snapshot_files = [dtoa_obj, source_objs]
+
+  # Create snapshot if necessary.
+  empty_snapshot_obj = context.ConfigureObject(env, 'snapshot-empty.cc')
+  mksnapshot_env = env.Copy()
+  mksnapshot_env.Replace(**context.flags['mksnapshot'])
+  mksnapshot_src = 'mksnapshot.cc'
+  mksnapshot = mksnapshot_env.Program('mksnapshot', [mksnapshot_src, libraries_obj, non_snapshot_files, empty_snapshot_obj], PDB='mksnapshot.exe.pdb')
+  if context.use_snapshot:
+    if context.build_snapshot:
+      snapshot_cc = env.Snapshot('snapshot.cc', mksnapshot, LOGFILE=File('snapshot.log').abspath)
+    else:
+      snapshot_cc = Command('snapshot.cc', [], [])
+    snapshot_obj = context.ConfigureObject(env, snapshot_cc, CPPPATH=['.'])
+    libraries_obj = context.ConfigureObject(env, libraries_empty_src, CPPPATH=['.'])
+  else:
+    snapshot_obj = empty_snapshot_obj
+  library_objs = [non_snapshot_files, libraries_obj, snapshot_obj]
+  return (library_objs, d8_objs, [mksnapshot])
+
+
+(library_objs, d8_objs, mksnapshot) = ConfigureObjectFiles()
+Return('library_objs d8_objs mksnapshot')
diff --git a/src/accessors.cc b/src/accessors.cc
new file mode 100644
index 0000000..82ae702
--- /dev/null
+++ b/src/accessors.cc
@@ -0,0 +1,654 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "accessors.h"
+#include "execution.h"
+#include "factory.h"
+#include "scopeinfo.h"
+#include "top.h"
+#include "zone-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+template <class C>
+static C* FindInPrototypeChain(Object* obj, bool* found_it) {
+  ASSERT(!*found_it);
+  while (!Is<C>(obj)) {
+    if (obj == Heap::null_value()) return NULL;
+    obj = obj->GetPrototype();
+  }
+  *found_it = true;
+  return C::cast(obj);
+}
+
+
+// Entry point that never should be called.
+Object* Accessors::IllegalSetter(JSObject*, Object*, void*) {
+  UNREACHABLE();
+  return NULL;
+}
+
+
+Object* Accessors::IllegalGetAccessor(Object* object, void*) {
+  UNREACHABLE();
+  return object;
+}
+
+
+Object* Accessors::ReadOnlySetAccessor(JSObject*, Object* value, void*) {
+  // According to ECMA-262, section 8.6.2.2, page 28, setting
+  // read-only properties must be silently ignored.
+  return value;
+}
+
+
+//
+// Accessors::ArrayLength
+//
+
+
+Object* Accessors::ArrayGetLength(Object* object, void*) {
+  // Traverse the prototype chain until we reach an array.
+  bool found_it = false;
+  JSArray* holder = FindInPrototypeChain<JSArray>(object, &found_it);
+  if (!found_it) return Smi::FromInt(0);
+  return holder->length();
+}
+
+
+// The helper function will 'flatten' Number objects.
+Object* Accessors::FlattenNumber(Object* value) {
+  if (value->IsNumber() || !value->IsJSValue()) return value;
+  JSValue* wrapper = JSValue::cast(value);
+  ASSERT(
+      Top::context()->global_context()->number_function()->has_initial_map());
+  Map* number_map =
+      Top::context()->global_context()->number_function()->initial_map();
+  if (wrapper->map() == number_map) return wrapper->value();
+  return value;
+}
+
+
+Object* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
+  value = FlattenNumber(value);
+
+  // Need to call methods that may trigger GC.
+  HandleScope scope;
+
+  // Protect raw pointers.
+  Handle<JSObject> object_handle(object);
+  Handle<Object> value_handle(value);
+
+  bool has_exception;
+  Handle<Object> uint32_v = Execution::ToUint32(value_handle, &has_exception);
+  if (has_exception) return Failure::Exception();
+  Handle<Object> number_v = Execution::ToNumber(value_handle, &has_exception);
+  if (has_exception) return Failure::Exception();
+
+  // Restore raw pointers.
+  object = *object_handle;
+  value = *value_handle;
+
+  if (uint32_v->Number() == number_v->Number()) {
+    if (object->IsJSArray()) {
+      return JSArray::cast(object)->SetElementsLength(*uint32_v);
+    } else {
+      // This means one of the object's prototypes is a JSArray and
+      // the object does not have a 'length' property.
+      // Calling SetProperty causes an infinite loop.
+      return object->IgnoreAttributesAndSetLocalProperty(Heap::length_symbol(),
+                                                         value, NONE);
+    }
+  }
+  return Top::Throw(*Factory::NewRangeError("invalid_array_length",
+                                            HandleVector<Object>(NULL, 0)));
+}
+
+
+const AccessorDescriptor Accessors::ArrayLength = {
+  ArrayGetLength,
+  ArraySetLength,
+  0
+};
+
+
+//
+// Accessors::StringLength
+//
+
+
+Object* Accessors::StringGetLength(Object* object, void*) {
+  Object* value = object;
+  if (object->IsJSValue()) value = JSValue::cast(object)->value();
+  if (value->IsString()) return Smi::FromInt(String::cast(value)->length());
+  // If object is not a string we return 0 to be compatible with WebKit.
+  // Note: Firefox returns the length of ToString(object).
+  return Smi::FromInt(0);
+}
+
+
+const AccessorDescriptor Accessors::StringLength = {
+  StringGetLength,
+  IllegalSetter,
+  0
+};
+
+
+//
+// Accessors::ScriptSource
+//
+
+
+Object* Accessors::ScriptGetSource(Object* object, void*) {
+  Object* script = JSValue::cast(object)->value();
+  return Script::cast(script)->source();
+}
+
+
+const AccessorDescriptor Accessors::ScriptSource = {
+  ScriptGetSource,
+  IllegalSetter,
+  0
+};
+
+
+//
+// Accessors::ScriptName
+//
+
+
+Object* Accessors::ScriptGetName(Object* object, void*) {
+  Object* script = JSValue::cast(object)->value();
+  return Script::cast(script)->name();
+}
+
+
+const AccessorDescriptor Accessors::ScriptName = {
+  ScriptGetName,
+  IllegalSetter,
+  0
+};
+
+
+//
+// Accessors::ScriptId
+//
+
+
+Object* Accessors::ScriptGetId(Object* object, void*) {
+  Object* script = JSValue::cast(object)->value();
+  return Script::cast(script)->id();
+}
+
+
+const AccessorDescriptor Accessors::ScriptId = {
+  ScriptGetId,
+  IllegalSetter,
+  0
+};
+
+
+//
+// Accessors::ScriptLineOffset
+//
+
+
+Object* Accessors::ScriptGetLineOffset(Object* object, void*) {
+  Object* script = JSValue::cast(object)->value();
+  return Script::cast(script)->line_offset();
+}
+
+
+const AccessorDescriptor Accessors::ScriptLineOffset = {
+  ScriptGetLineOffset,
+  IllegalSetter,
+  0
+};
+
+
+//
+// Accessors::ScriptColumnOffset
+//
+
+
+Object* Accessors::ScriptGetColumnOffset(Object* object, void*) {
+  Object* script = JSValue::cast(object)->value();
+  return Script::cast(script)->column_offset();
+}
+
+
+const AccessorDescriptor Accessors::ScriptColumnOffset = {
+  ScriptGetColumnOffset,
+  IllegalSetter,
+  0
+};
+
+
+//
+// Accessors::ScriptData
+//
+
+
+Object* Accessors::ScriptGetData(Object* object, void*) {
+  Object* script = JSValue::cast(object)->value();
+  return Script::cast(script)->data();
+}
+
+
+const AccessorDescriptor Accessors::ScriptData = {
+  ScriptGetData,
+  IllegalSetter,
+  0
+};
+
+
+//
+// Accessors::ScriptType
+//
+
+
+Object* Accessors::ScriptGetType(Object* object, void*) {
+  Object* script = JSValue::cast(object)->value();
+  return Script::cast(script)->type();
+}
+
+
+const AccessorDescriptor Accessors::ScriptType = {
+  ScriptGetType,
+  IllegalSetter,
+  0
+};
+
+
+//
+// Accessors::ScriptCompilationType
+//
+
+
+Object* Accessors::ScriptGetCompilationType(Object* object, void*) {
+  Object* script = JSValue::cast(object)->value();
+  return Script::cast(script)->compilation_type();
+}
+
+
+const AccessorDescriptor Accessors::ScriptCompilationType = {
+  ScriptGetCompilationType,
+  IllegalSetter,
+  0
+};
+
+
+//
+// Accessors::ScriptGetLineEnds
+//
+
+
+Object* Accessors::ScriptGetLineEnds(Object* object, void*) {
+  HandleScope scope;
+  Handle<Script> script(Script::cast(JSValue::cast(object)->value()));
+  InitScriptLineEnds(script);
+  return script->line_ends();
+}
+
+
+const AccessorDescriptor Accessors::ScriptLineEnds = {
+  ScriptGetLineEnds,
+  IllegalSetter,
+  0
+};
+
+
+//
+// Accessors::ScriptGetContextData
+//
+
+
+Object* Accessors::ScriptGetContextData(Object* object, void*) {
+  Object* script = JSValue::cast(object)->value();
+  return Script::cast(script)->context_data();
+}
+
+
+const AccessorDescriptor Accessors::ScriptContextData = {
+  ScriptGetContextData,
+  IllegalSetter,
+  0
+};
+
+
+//
+// Accessors::ScriptGetEvalFromFunction
+//
+
+
+Object* Accessors::ScriptGetEvalFromFunction(Object* object, void*) {
+  Object* script = JSValue::cast(object)->value();
+  return Script::cast(script)->eval_from_function();
+}
+
+
+const AccessorDescriptor Accessors::ScriptEvalFromFunction = {
+  ScriptGetEvalFromFunction,
+  IllegalSetter,
+  0
+};
+
+
+//
+// Accessors::ScriptGetEvalFromPosition
+//
+
+
+Object* Accessors::ScriptGetEvalFromPosition(Object* object, void*) {
+  HandleScope scope;
+  Handle<Script> script(Script::cast(JSValue::cast(object)->value()));
+
+  // If this is not a script compiled through eval there is no eval position.
+  int compilation_type = Smi::cast(script->compilation_type())->value();
+  if (compilation_type != Script::COMPILATION_TYPE_EVAL) {
+    return Heap::undefined_value();
+  }
+
+  // Get the function from where eval was called and find the source position
+  // from the instruction offset.
+  Handle<Code> code(JSFunction::cast(script->eval_from_function())->code());
+  return Smi::FromInt(code->SourcePosition(code->instruction_start() +
+                      script->eval_from_instructions_offset()->value()));
+}
+
+
+const AccessorDescriptor Accessors::ScriptEvalFromPosition = {
+  ScriptGetEvalFromPosition,
+  IllegalSetter,
+  0
+};
+
+
+//
+// Accessors::FunctionPrototype
+//
+
+
+Object* Accessors::FunctionGetPrototype(Object* object, void*) {
+  bool found_it = false;
+  JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
+  if (!found_it) return Heap::undefined_value();
+  if (!function->has_prototype()) {
+    Object* prototype = Heap::AllocateFunctionPrototype(function);
+    if (prototype->IsFailure()) return prototype;
+    Object* result = function->SetPrototype(prototype);
+    if (result->IsFailure()) return result;
+  }
+  return function->prototype();
+}
+
+
+Object* Accessors::FunctionSetPrototype(JSObject* object,
+                                        Object* value,
+                                        void*) {
+  bool found_it = false;
+  JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
+  if (!found_it) return Heap::undefined_value();
+  if (function->has_initial_map()) {
+    // If the function has allocated the initial map, replace it with a
+    // copy containing the new prototype.
+    Object* new_map = function->initial_map()->CopyDropTransitions();
+    if (new_map->IsFailure()) return new_map;
+    function->set_initial_map(Map::cast(new_map));
+  }
+  Object* prototype = function->SetPrototype(value);
+  if (prototype->IsFailure()) return prototype;
+  ASSERT(function->prototype() == value);
+  return function;
+}
+
+
+const AccessorDescriptor Accessors::FunctionPrototype = {
+  FunctionGetPrototype,
+  FunctionSetPrototype,
+  0
+};
+
+
+//
+// Accessors::FunctionLength
+//
+
+
+Object* Accessors::FunctionGetLength(Object* object, void*) {
+  bool found_it = false;
+  JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
+  if (!found_it) return Smi::FromInt(0);
+  // Check if already compiled.
+  if (!function->is_compiled()) {
+    // If the function hasn't been compiled, its length has not been computed
+    // correctly yet. Compile it now and return the right length.
+    HandleScope scope;
+    Handle<JSFunction> function_handle(function);
+    if (!CompileLazy(function_handle, KEEP_EXCEPTION)) {
+      return Failure::Exception();
+    }
+    return Smi::FromInt(function_handle->shared()->length());
+  } else {
+    return Smi::FromInt(function->shared()->length());
+  }
+}
+
+
+const AccessorDescriptor Accessors::FunctionLength = {
+  FunctionGetLength,
+  ReadOnlySetAccessor,
+  0
+};
+
+
+//
+// Accessors::FunctionName
+//
+
+
+Object* Accessors::FunctionGetName(Object* object, void*) {
+  bool found_it = false;
+  JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
+  if (!found_it) return Heap::undefined_value();
+  return holder->shared()->name();
+}
+
+
+const AccessorDescriptor Accessors::FunctionName = {
+  FunctionGetName,
+  ReadOnlySetAccessor,
+  0
+};
+
+
+//
+// Accessors::FunctionArguments
+//
+
+
+Object* Accessors::FunctionGetArguments(Object* object, void*) {
+  HandleScope scope;
+  bool found_it = false;
+  JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
+  if (!found_it) return Heap::undefined_value();
+  Handle<JSFunction> function(holder);
+
+  // Find the top invocation of the function by traversing frames.
+  for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
+    // Skip all frames that aren't invocations of the given function.
+    JavaScriptFrame* frame = it.frame();
+    if (frame->function() != *function) continue;
+
+    // If there is an arguments variable in the stack, we return that.
+    int index = ScopeInfo<>::StackSlotIndex(frame->code(),
+                                            Heap::arguments_symbol());
+    if (index >= 0) {
+      Handle<Object> arguments = Handle<Object>(frame->GetExpression(index));
+      if (!arguments->IsTheHole()) return *arguments;
+    }
+
+    // If there isn't an arguments variable in the stack, we need to
+    // find the frame that holds the actual arguments passed to the
+    // function on the stack.
+    it.AdvanceToArgumentsFrame();
+    frame = it.frame();
+
+    // Get the number of arguments and construct an arguments object
+    // mirror for the right frame.
+    const int length = frame->GetProvidedParametersCount();
+    Handle<JSObject> arguments = Factory::NewArgumentsObject(function, length);
+    Handle<FixedArray> array = Factory::NewFixedArray(length);
+
+    // Copy the parameters to the arguments object.
+    ASSERT(array->length() == length);
+    for (int i = 0; i < length; i++) array->set(i, frame->GetParameter(i));
+    arguments->set_elements(*array);
+
+    // Return the freshly allocated arguments object.
+    return *arguments;
+  }
+
+  // No frame corresponding to the given function found. Return null.
+  return Heap::null_value();
+}
+
+
+const AccessorDescriptor Accessors::FunctionArguments = {
+  FunctionGetArguments,
+  ReadOnlySetAccessor,
+  0
+};
+
+
+//
+// Accessors::FunctionCaller
+//
+
+
+Object* Accessors::FunctionGetCaller(Object* object, void*) {
+  HandleScope scope;
+  bool found_it = false;
+  JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
+  if (!found_it) return Heap::undefined_value();
+  Handle<JSFunction> function(holder);
+
+  // Find the top invocation of the function by traversing frames.
+  for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
+    // Skip all frames that aren't invocations of the given function.
+    if (it.frame()->function() != *function) continue;
+    // Once we have found the frame, we need to go to the caller
+    // frame. This may require skipping through a number of top-level
+    // frames, e.g. frames for scripts, not functions.
+    while (true) {
+      it.Advance();
+      if (it.done()) return Heap::null_value();
+      JSFunction* caller = JSFunction::cast(it.frame()->function());
+      if (!caller->shared()->is_toplevel()) return caller;
+    }
+  }
+
+  // No frame corresponding to the given function found. Return null.
+  return Heap::null_value();
+}
+
+
+const AccessorDescriptor Accessors::FunctionCaller = {
+  FunctionGetCaller,
+  ReadOnlySetAccessor,
+  0
+};
+
+
+//
+// Accessors::ObjectPrototype
+//
+
+
+Object* Accessors::ObjectGetPrototype(Object* receiver, void*) {
+  Object* current = receiver->GetPrototype();
+  while (current->IsJSObject() &&
+         JSObject::cast(current)->map()->is_hidden_prototype()) {
+    current = current->GetPrototype();
+  }
+  return current;
+}
+
+
+Object* Accessors::ObjectSetPrototype(JSObject* receiver,
+                                      Object* value,
+                                      void*) {
+  // Before we can set the prototype we need to be sure
+  // prototype cycles are prevented.
+  // It is sufficient to validate that the receiver is not in the new prototype
+  // chain.
+
+  // Silently ignore the change if value is not a JSObject or null.
+  // SpiderMonkey behaves this way.
+  if (!value->IsJSObject() && !value->IsNull()) return value;
+
+  for (Object* pt = value; pt != Heap::null_value(); pt = pt->GetPrototype()) {
+    if (JSObject::cast(pt) == receiver) {
+      // Cycle detected.
+      HandleScope scope;
+      return Top::Throw(*Factory::NewError("cyclic_proto",
+                                           HandleVector<Object>(NULL, 0)));
+    }
+  }
+
+  // Find the first object in the chain whose prototype object is not
+  // hidden and set the new prototype on that object.
+  JSObject* current = receiver;
+  Object* current_proto = receiver->GetPrototype();
+  while (current_proto->IsJSObject() &&
+         JSObject::cast(current_proto)->map()->is_hidden_prototype()) {
+    current = JSObject::cast(current_proto);
+    current_proto = current_proto->GetPrototype();
+  }
+
+  // Set the new prototype of the object.
+  Object* new_map = current->map()->CopyDropTransitions();
+  if (new_map->IsFailure()) return new_map;
+  Map::cast(new_map)->set_prototype(value);
+  current->set_map(Map::cast(new_map));
+
+  // To be consistent with other Set functions, return the value.
+  return value;
+}
+
+
+const AccessorDescriptor Accessors::ObjectPrototype = {
+  ObjectGetPrototype,
+  ObjectSetPrototype,
+  0
+};
+
+} }  // namespace v8::internal
diff --git a/src/accessors.h b/src/accessors.h
new file mode 100644
index 0000000..51d322e
--- /dev/null
+++ b/src/accessors.h
@@ -0,0 +1,112 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ACCESSORS_H_
+#define V8_ACCESSORS_H_
+
+namespace v8 {
+namespace internal {
+
+// The list of accessor descriptors. This is a second-order macro
+// taking a macro to be applied to all accessor descriptor names.
+#define ACCESSOR_DESCRIPTOR_LIST(V) \
+  V(FunctionPrototype)              \
+  V(FunctionLength)                 \
+  V(FunctionName)                   \
+  V(FunctionArguments)              \
+  V(FunctionCaller)                 \
+  V(ArrayLength)                    \
+  V(StringLength)                   \
+  V(ScriptSource)                   \
+  V(ScriptName)                     \
+  V(ScriptId)                       \
+  V(ScriptLineOffset)               \
+  V(ScriptColumnOffset)             \
+  V(ScriptData)                     \
+  V(ScriptType)                     \
+  V(ScriptCompilationType)          \
+  V(ScriptLineEnds)                 \
+  V(ScriptContextData)              \
+  V(ScriptEvalFromFunction)         \
+  V(ScriptEvalFromPosition)         \
+  V(ObjectPrototype)
+
+// Accessors contains all predefined proxy accessors.
+
+class Accessors : public AllStatic {
+ public:
+  // Accessor descriptors.
+#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \
+  static const AccessorDescriptor name;
+  ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION)
+#undef ACCESSOR_DESCRIPTOR_DECLARATION
+
+  enum DescriptorId {
+#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \
+    k##name,
+  ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION)
+#undef ACCESSOR_DESCRIPTOR_DECLARATION
+    descriptorCount
+  };
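ACCESSOR_DESCRIPTOR_LIST is a second-order (X-)macro: each use supplies a per-name macro that is applied to every accessor name in the list. Roughly, the two uses above expand as sketched here (abbreviated, for orientation only):

  //   static const AccessorDescriptor FunctionPrototype;
  //   static const AccessorDescriptor FunctionLength;
  //   ...one declaration per listed name...
  //
  //   enum DescriptorId { kFunctionPrototype, kFunctionLength, ...,
  //                       descriptorCount };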
+
+  // Accessor functions called directly from the runtime system.
+  static Object* FunctionGetPrototype(Object* object, void*);
+  static Object* FunctionSetPrototype(JSObject* object, Object* value, void*);
+ private:
+  // Accessor functions only used through the descriptor.
+  static Object* FunctionGetLength(Object* object, void*);
+  static Object* FunctionGetName(Object* object, void*);
+  static Object* FunctionGetArguments(Object* object, void*);
+  static Object* FunctionGetCaller(Object* object, void*);
+  static Object* ArraySetLength(JSObject* object, Object* value, void*);
+  static Object* ArrayGetLength(Object* object, void*);
+  static Object* StringGetLength(Object* object, void*);
+  static Object* ScriptGetName(Object* object, void*);
+  static Object* ScriptGetId(Object* object, void*);
+  static Object* ScriptGetSource(Object* object, void*);
+  static Object* ScriptGetLineOffset(Object* object, void*);
+  static Object* ScriptGetColumnOffset(Object* object, void*);
+  static Object* ScriptGetData(Object* object, void*);
+  static Object* ScriptGetType(Object* object, void*);
+  static Object* ScriptGetCompilationType(Object* object, void*);
+  static Object* ScriptGetLineEnds(Object* object, void*);
+  static Object* ScriptGetContextData(Object* object, void*);
+  static Object* ScriptGetEvalFromFunction(Object* object, void*);
+  static Object* ScriptGetEvalFromPosition(Object* object, void*);
+  static Object* ObjectGetPrototype(Object* receiver, void*);
+  static Object* ObjectSetPrototype(JSObject* receiver, Object* value, void*);
+
+  // Helper functions.
+  static Object* FlattenNumber(Object* value);
+  static Object* IllegalSetter(JSObject*, Object*, void*);
+  static Object* IllegalGetAccessor(Object* object, void*);
+  static Object* ReadOnlySetAccessor(JSObject*, Object* value, void*);
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_ACCESSORS_H_
diff --git a/src/allocation.cc b/src/allocation.cc
new file mode 100644
index 0000000..41724b6
--- /dev/null
+++ b/src/allocation.cc
@@ -0,0 +1,198 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+
+void* Malloced::New(size_t size) {
+  ASSERT(NativeAllocationChecker::allocation_allowed());
+  void* result = malloc(size);
+  if (result == NULL) V8::FatalProcessOutOfMemory("Malloced operator new");
+  return result;
+}
+
+
+void Malloced::Delete(void* p) {
+  free(p);
+}
+
+
+void Malloced::FatalProcessOutOfMemory() {
+  V8::FatalProcessOutOfMemory("Out of memory");
+}
+
+
+#ifdef DEBUG
+
+static void* invalid = static_cast<void*>(NULL);
+
+void* Embedded::operator new(size_t size) {
+  UNREACHABLE();
+  return invalid;
+}
+
+
+void Embedded::operator delete(void* p) {
+  UNREACHABLE();
+}
+
+
+void* AllStatic::operator new(size_t size) {
+  UNREACHABLE();
+  return invalid;
+}
+
+
+void AllStatic::operator delete(void* p) {
+  UNREACHABLE();
+}
+
+#endif
+
+
+char* StrDup(const char* str) {
+  int length = strlen(str);
+  char* result = NewArray<char>(length + 1);
+  memcpy(result, str, length * kCharSize);
+  result[length] = '\0';
+  return result;
+}
+
+
+char* StrNDup(const char* str, size_t n) {
+  size_t length = strlen(str);
+  if (n < length) length = n;
+  char* result = NewArray<char>(length + 1);
+  memcpy(result, str, length * kCharSize);
+  result[length] = '\0';
+  return result;
+}
+
+
+int NativeAllocationChecker::allocation_disallowed_ = 0;
+
+
+PreallocatedStorage PreallocatedStorage::in_use_list_(0);
+PreallocatedStorage PreallocatedStorage::free_list_(0);
+bool PreallocatedStorage::preallocated_ = false;
+
+
+void PreallocatedStorage::Init(size_t size) {
+  ASSERT(free_list_.next_ == &free_list_);
+  ASSERT(free_list_.previous_ == &free_list_);
+  PreallocatedStorage* free_chunk =
+      reinterpret_cast<PreallocatedStorage*>(new char[size]);
+  free_list_.next_ = free_list_.previous_ = free_chunk;
+  free_chunk->next_ = free_chunk->previous_ = &free_list_;
+  free_chunk->size_ = size - sizeof(PreallocatedStorage);
+  preallocated_ = true;
+}
+
+
+void* PreallocatedStorage::New(size_t size) {
+  if (!preallocated_) {
+    return FreeStoreAllocationPolicy::New(size);
+  }
+  ASSERT(free_list_.next_ != &free_list_);
+  ASSERT(free_list_.previous_ != &free_list_);
+  size = (size + kPointerSize - 1) & ~(kPointerSize - 1);
+  // Search for exact fit.
+  for (PreallocatedStorage* storage = free_list_.next_;
+       storage != &free_list_;
+       storage = storage->next_) {
+    if (storage->size_ == size) {
+      storage->Unlink();
+      storage->LinkTo(&in_use_list_);
+      return reinterpret_cast<void*>(storage + 1);
+    }
+  }
+  // Search for first fit.
+  for (PreallocatedStorage* storage = free_list_.next_;
+       storage != &free_list_;
+       storage = storage->next_) {
+    if (storage->size_ >= size + sizeof(PreallocatedStorage)) {
+      storage->Unlink();
+      storage->LinkTo(&in_use_list_);
+      PreallocatedStorage* left_over =
+          reinterpret_cast<PreallocatedStorage*>(
+              reinterpret_cast<char*>(storage + 1) + size);
+      left_over->size_ = storage->size_ - size - sizeof(PreallocatedStorage);
+      ASSERT(size + left_over->size_ + sizeof(PreallocatedStorage) ==
+             storage->size_);
+      storage->size_ = size;
+      left_over->LinkTo(&free_list_);
+      return reinterpret_cast<void*>(storage + 1);
+    }
+  }
+  // Allocation failure.
+  ASSERT(false);
+  return NULL;
+}
+
+
+// We don't attempt to coalesce.
+void PreallocatedStorage::Delete(void* p) {
+  if (p == NULL) {
+    return;
+  }
+  if (!preallocated_) {
+    FreeStoreAllocationPolicy::Delete(p);
+    return;
+  }
+  PreallocatedStorage* storage = reinterpret_cast<PreallocatedStorage*>(p) - 1;
+  ASSERT(storage->next_->previous_ == storage);
+  ASSERT(storage->previous_->next_ == storage);
+  storage->Unlink();
+  storage->LinkTo(&free_list_);
+}
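A minimal usage sketch, assuming a fixed arena is reserved once at start-up (the sizes here are illustrative):

  // Before Init() is called, New/Delete simply fall back to
  // FreeStoreAllocationPolicy (i.e. malloc/free).
  PreallocatedStorage::Init(32 * 1024);          // reserve a 32 KB arena
  void* block = PreallocatedStorage::New(128);   // exact fit, else first fit with a split
  PreallocatedStorage::Delete(block);            // back onto the free list; no coalescing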
+
+
+void PreallocatedStorage::LinkTo(PreallocatedStorage* other) {
+  next_ = other->next_;
+  other->next_->previous_ = this;
+  previous_ = other;
+  other->next_ = this;
+}
+
+
+void PreallocatedStorage::Unlink() {
+  next_->previous_ = previous_;
+  previous_->next_ = next_;
+}
+
+
+PreallocatedStorage::PreallocatedStorage(size_t size)
+  : size_(size) {
+  previous_ = next_ = this;
+}
+
+} }  // namespace v8::internal
diff --git a/src/allocation.h b/src/allocation.h
new file mode 100644
index 0000000..586c4fd
--- /dev/null
+++ b/src/allocation.h
@@ -0,0 +1,169 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ALLOCATION_H_
+#define V8_ALLOCATION_H_
+
+namespace v8 {
+namespace internal {
+
+
+// A class that controls whether allocation is allowed.  This is for
+// the C++ heap only!
+class NativeAllocationChecker {
+ public:
+  typedef enum { ALLOW, DISALLOW } NativeAllocationAllowed;
+  explicit inline NativeAllocationChecker(NativeAllocationAllowed allowed)
+      : allowed_(allowed) {
+#ifdef DEBUG
+    if (allowed == DISALLOW) {
+      allocation_disallowed_++;
+    }
+#endif
+  }
+  ~NativeAllocationChecker() {
+#ifdef DEBUG
+    if (allowed_ == DISALLOW) {
+      allocation_disallowed_--;
+    }
+#endif
+    ASSERT(allocation_disallowed_ >= 0);
+  }
+  static inline bool allocation_allowed() {
+    return allocation_disallowed_ == 0;
+  }
+ private:
+  // This static counter ensures that NativeAllocationCheckers can be nested.
+  static int allocation_disallowed_;
+  // This flag applies to this particular instance.
+  NativeAllocationAllowed allowed_;
+};
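The checker is meant to be used as a scoped guard; a minimal sketch, assuming it is placed on the stack around code that must not allocate on the C++ heap:

  {
    NativeAllocationChecker disallow(NativeAllocationChecker::DISALLOW);
    ASSERT(!NativeAllocationChecker::allocation_allowed());
    {
      // Checkers nest: the static counter only drops back to zero once every
      // DISALLOW instance has gone out of scope.
      NativeAllocationChecker inner(NativeAllocationChecker::DISALLOW);
    }
  }
  // Outside all DISALLOW scopes, Malloced::New() is permitted again.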
+
+
+// Superclass for classes managed with new & delete.
+class Malloced {
+ public:
+  void* operator new(size_t size) { return New(size); }
+  void  operator delete(void* p) { Delete(p); }
+
+  static void FatalProcessOutOfMemory();
+  static void* New(size_t size);
+  static void Delete(void* p);
+};
+
+
+// A macro is used for defining the base class used for embedded instances.
+// The reason is that some compilers allocate a minimum of one word for the
+// superclass. The macro prevents the use of new & delete in debug mode.
+// In release mode we are not willing to pay this overhead.
+
+#ifdef DEBUG
+// Superclass for classes with instances allocated inside stack
+// activations or inside other objects.
+class Embedded {
+ public:
+  void* operator new(size_t size);
+  void  operator delete(void* p);
+};
+#define BASE_EMBEDDED : public Embedded
+#else
+#define BASE_EMBEDDED
+#endif
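A brief sketch of how the macro is applied (the class name is illustrative):

  class SampleScope BASE_EMBEDDED {  // ": public Embedded" in debug, empty in release
   public:
    void Step() { /* ... */ }
  };
  // SampleScope scope;     // fine: lives inside a stack activation or another object
  // new SampleScope();     // debug builds abort in Embedded::operator new (UNREACHABLE)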
+
+
+// Superclass for classes only using statics.
+class AllStatic {
+#ifdef DEBUG
+ public:
+  void* operator new(size_t size);
+  void operator delete(void* p);
+#endif
+};
+
+
+template <typename T>
+static T* NewArray(int size) {
+  ASSERT(NativeAllocationChecker::allocation_allowed());
+  T* result = new T[size];
+  if (result == NULL) Malloced::FatalProcessOutOfMemory();
+  return result;
+}
+
+
+template <typename T>
+static void DeleteArray(T* array) {
+  delete[] array;
+}
+
+
+// The normal strdup functions use malloc.  These versions of StrDup
+// and StrNDup use new and call the FatalProcessOutOfMemory handler
+// if allocation fails.
+char* StrDup(const char* str);
+char* StrNDup(const char* str, size_t n);
+
+
+// Allocation policy for allocating in the C free store using malloc
+// and free. Used as the default policy for lists.
+class FreeStoreAllocationPolicy {
+ public:
+  INLINE(static void* New(size_t size)) { return Malloced::New(size); }
+  INLINE(static void Delete(void* p)) { Malloced::Delete(p); }
+};
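Such a policy is consumed as a template parameter by containers that choose their backing store at compile time; a hedged sketch of the pattern (the Buffer container below is illustrative, not an actual V8 class):

  template <typename T, class Policy = FreeStoreAllocationPolicy>
  class Buffer {
   public:
    explicit Buffer(size_t n)
        : data_(static_cast<T*>(Policy::New(n * sizeof(T)))) {}
    ~Buffer() { Policy::Delete(data_); }
    T* data() { return data_; }
   private:
    T* data_;
  };
  // Buffer<int> heap_backed(16);                    // Malloced::New / Malloced::Delete
  // Buffer<int, PreallocatedStorage> reserved(16);  // carved from the preallocated arena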
+
+
+// Allocation policy for allocating in preallocated space.
+// Used as an allocation policy for ScopeInfo when generating
+// stack traces.
+class PreallocatedStorage : public AllStatic {
+ public:
+  explicit PreallocatedStorage(size_t size);
+  size_t size() { return size_; }
+  static void* New(size_t size);
+  static void Delete(void* p);
+
+  // Preallocate a set number of bytes.
+  static void Init(size_t size);
+
+ private:
+  size_t size_;
+  PreallocatedStorage* previous_;
+  PreallocatedStorage* next_;
+  static bool preallocated_;
+
+  static PreallocatedStorage in_use_list_;
+  static PreallocatedStorage free_list_;
+
+  void LinkTo(PreallocatedStorage* other);
+  void Unlink();
+  DISALLOW_IMPLICIT_CONSTRUCTORS(PreallocatedStorage);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_ALLOCATION_H_
diff --git a/src/api.cc b/src/api.cc
new file mode 100644
index 0000000..fd3d921
--- /dev/null
+++ b/src/api.cc
@@ -0,0 +1,3775 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "api.h"
+#include "arguments.h"
+#include "bootstrapper.h"
+#include "compiler.h"
+#include "debug.h"
+#include "execution.h"
+#include "global-handles.h"
+#include "platform.h"
+#include "serialize.h"
+#include "snapshot.h"
+#include "v8threads.h"
+#include "version.h"
+
+
+#define LOG_API(expr) LOG(ApiEntryCall(expr))
+
+#ifdef ENABLE_HEAP_PROTECTION
+#define ENTER_V8 i::VMState __state__(i::OTHER)
+#define LEAVE_V8 i::VMState __state__(i::EXTERNAL)
+#else
+#define ENTER_V8 ((void) 0)
+#define LEAVE_V8 ((void) 0)
+#endif
+
+namespace v8 {
+
+
+#define ON_BAILOUT(location, code)              \
+  if (IsDeadCheck(location)) {                  \
+    code;                                       \
+    UNREACHABLE();                              \
+  }
+
+
+#define EXCEPTION_PREAMBLE()                                      \
+  thread_local.IncrementCallDepth();                              \
+  ASSERT(!i::Top::external_caught_exception());                   \
+  bool has_pending_exception = false
+
+
+#define EXCEPTION_BAILOUT_CHECK(value)                                         \
+  do {                                                                         \
+    thread_local.DecrementCallDepth();                                         \
+    if (has_pending_exception) {                                               \
+      if (thread_local.CallDepthIsZero() && i::Top::is_out_of_memory()) {      \
+        if (!thread_local.ignore_out_of_memory())                              \
+          i::V8::FatalProcessOutOfMemory(NULL);                                \
+      }                                                                        \
+      bool call_depth_is_zero = thread_local.CallDepthIsZero();                \
+      i::Top::OptionalRescheduleException(call_depth_is_zero);                 \
+      return value;                                                            \
+    }                                                                          \
+  } while (false)
+
+
+#define API_ENTRY_CHECK(msg)                                                   \
+  do {                                                                         \
+    if (v8::Locker::IsActive()) {                                              \
+      ApiCheck(i::ThreadManager::IsLockedByCurrentThread(),                    \
+               msg,                                                            \
+               "Entering the V8 API without proper locking in place");         \
+    }                                                                          \
+  } while (false)
+
+// --- D a t a   t h a t   i s   s p e c i f i c   t o   a   t h r e a d ---
+
+
+static i::HandleScopeImplementer thread_local;
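The bailout and exception macros above work together in the API entry points defined later in this file; an abbreviated, illustrative sketch of the calling pattern (the function and its body are hypothetical):

  Local<Value> ExampleApiCall() {
    ON_BAILOUT("v8::ExampleApiCall()", return Local<Value>());
    ENTER_V8;
    EXCEPTION_PREAMBLE();
    // ... do work that may leave an exception pending; real entry points get
    // has_pending_exception back from i::Execution helpers ...
    has_pending_exception = false;
    EXCEPTION_BAILOUT_CHECK(Local<Value>());
    return Local<Value>();
  }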
+
+
+// --- E x c e p t i o n   B e h a v i o r ---
+
+
+static FatalErrorCallback exception_behavior = NULL;
+int i::Internals::kJSObjectType = JS_OBJECT_TYPE;
+int i::Internals::kFirstNonstringType = FIRST_NONSTRING_TYPE;
+int i::Internals::kProxyType = PROXY_TYPE;
+
+static void DefaultFatalErrorHandler(const char* location,
+                                     const char* message) {
+  ENTER_V8;
+  API_Fatal(location, message);
+}
+
+
+
+static FatalErrorCallback& GetFatalErrorHandler() {
+  if (exception_behavior == NULL) {
+    exception_behavior = DefaultFatalErrorHandler;
+  }
+  return exception_behavior;
+}
+
+
+
+// When V8 cannot allocate memory, FatalProcessOutOfMemory is called.
+// The default fatal error handler is called and execution is stopped.
+void i::V8::FatalProcessOutOfMemory(const char* location) {
+  i::V8::SetFatalError();
+  FatalErrorCallback callback = GetFatalErrorHandler();
+  {
+    LEAVE_V8;
+    callback(location, "Allocation failed - process out of memory");
+  }
+  // If the callback returns, we stop execution.
+  UNREACHABLE();
+}
+
+
+void V8::SetFatalErrorHandler(FatalErrorCallback that) {
+  exception_behavior = that;
+}
+
+
+bool Utils::ReportApiFailure(const char* location, const char* message) {
+  FatalErrorCallback callback = GetFatalErrorHandler();
+  callback(location, message);
+  i::V8::SetFatalError();
+  return false;
+}
+
+
+bool V8::IsDead() {
+  return i::V8::IsDead();
+}
+
+
+static inline bool ApiCheck(bool condition,
+                            const char* location,
+                            const char* message) {
+  return condition ? true : Utils::ReportApiFailure(location, message);
+}
+
+
+static bool ReportV8Dead(const char* location) {
+  FatalErrorCallback callback = GetFatalErrorHandler();
+  callback(location, "V8 is no longer usable");
+  return true;
+}
+
+
+static bool ReportEmptyHandle(const char* location) {
+  FatalErrorCallback callback = GetFatalErrorHandler();
+  callback(location, "Reading from empty handle");
+  return true;
+}
+
+
+/**
+ * IsDeadCheck checks that the VM is usable.  If, for instance, the VM has been
+ * out of memory at some point, this check will fail.  It should be called on
+ * entry to all methods that touch anything in the heap, except destructors
+ * which you sometimes can't avoid calling after the VM has crashed.  Functions
+ * that call EnsureInitialized or ON_BAILOUT don't have to also call
+ * IsDeadCheck.  ON_BAILOUT has the advantage over EnsureInitialized that you
+ * can arrange to return if the VM is dead.  This is needed to ensure that no VM
+ * heap allocations are attempted on a dead VM.  EnsureInitialized has the
+ * advantage over ON_BAILOUT that it actually initializes the VM if this has not
+ * yet been done.
+ */
+static inline bool IsDeadCheck(const char* location) {
+  return !i::V8::IsRunning()
+      && i::V8::IsDead() ? ReportV8Dead(location) : false;
+}
+
+
+static inline bool EmptyCheck(const char* location, v8::Handle<v8::Data> obj) {
+  return obj.IsEmpty() ? ReportEmptyHandle(location) : false;
+}
+
+
+static inline bool EmptyCheck(const char* location, const v8::Data* obj) {
+  return (obj == 0) ? ReportEmptyHandle(location) : false;
+}
+
+// --- S t a t i c s ---
+
+
+static i::StringInputBuffer write_input_buffer;
+
+
+static inline bool EnsureInitialized(const char* location) {
+  if (i::V8::IsRunning()) {
+    return true;
+  }
+  if (IsDeadCheck(location)) {
+    return false;
+  }
+  return ApiCheck(v8::V8::Initialize(), location, "Error initializing V8");
+}
+
+
+ImplementationUtilities::HandleScopeData*
+    ImplementationUtilities::CurrentHandleScope() {
+  return &i::HandleScope::current_;
+}
+
+
+#ifdef DEBUG
+void ImplementationUtilities::ZapHandleRange(i::Object** begin,
+                                             i::Object** end) {
+  i::HandleScope::ZapRange(begin, end);
+}
+#endif
+
+
+v8::Handle<v8::Primitive> ImplementationUtilities::Undefined() {
+  if (!EnsureInitialized("v8::Undefined()")) return v8::Handle<v8::Primitive>();
+  return v8::Handle<Primitive>(ToApi<Primitive>(i::Factory::undefined_value()));
+}
+
+
+v8::Handle<v8::Primitive> ImplementationUtilities::Null() {
+  if (!EnsureInitialized("v8::Null()")) return v8::Handle<v8::Primitive>();
+  return v8::Handle<Primitive>(ToApi<Primitive>(i::Factory::null_value()));
+}
+
+
+v8::Handle<v8::Boolean> ImplementationUtilities::True() {
+  if (!EnsureInitialized("v8::True()")) return v8::Handle<v8::Boolean>();
+  return v8::Handle<v8::Boolean>(ToApi<Boolean>(i::Factory::true_value()));
+}
+
+
+v8::Handle<v8::Boolean> ImplementationUtilities::False() {
+  if (!EnsureInitialized("v8::False()")) return v8::Handle<v8::Boolean>();
+  return v8::Handle<v8::Boolean>(ToApi<Boolean>(i::Factory::false_value()));
+}
+
+
+void V8::SetFlagsFromString(const char* str, int length) {
+  i::FlagList::SetFlagsFromString(str, length);
+}
+
+
+void V8::SetFlagsFromCommandLine(int* argc, char** argv, bool remove_flags) {
+  i::FlagList::SetFlagsFromCommandLine(argc, argv, remove_flags);
+}
+
+
+v8::Handle<Value> ThrowException(v8::Handle<v8::Value> value) {
+  if (IsDeadCheck("v8::ThrowException()")) return v8::Handle<Value>();
+  ENTER_V8;
+  // If we're passed an empty handle, we throw an undefined exception
+  // to deal more gracefully with out of memory situations.
+  if (value.IsEmpty()) {
+    i::Top::ScheduleThrow(i::Heap::undefined_value());
+  } else {
+    i::Top::ScheduleThrow(*Utils::OpenHandle(*value));
+  }
+  return v8::Undefined();
+}
+
+
+RegisteredExtension* RegisteredExtension::first_extension_ = NULL;
+
+
+RegisteredExtension::RegisteredExtension(Extension* extension)
+    : extension_(extension), state_(UNVISITED) { }
+
+
+void RegisteredExtension::Register(RegisteredExtension* that) {
+  that->next_ = RegisteredExtension::first_extension_;
+  RegisteredExtension::first_extension_ = that;
+}
+
+
+void RegisterExtension(Extension* that) {
+  RegisteredExtension* extension = new RegisteredExtension(that);
+  RegisteredExtension::Register(extension);
+}
+
+
+Extension::Extension(const char* name,
+                     const char* source,
+                     int dep_count,
+                     const char** deps)
+    : name_(name),
+      source_(source),
+      dep_count_(dep_count),
+      deps_(deps),
+      auto_enable_(false) { }
+
+
+v8::Handle<Primitive> Undefined() {
+  LOG_API("Undefined");
+  return ImplementationUtilities::Undefined();
+}
+
+
+v8::Handle<Primitive> Null() {
+  LOG_API("Null");
+  return ImplementationUtilities::Null();
+}
+
+
+v8::Handle<Boolean> True() {
+  LOG_API("True");
+  return ImplementationUtilities::True();
+}
+
+
+v8::Handle<Boolean> False() {
+  LOG_API("False");
+  return ImplementationUtilities::False();
+}
+
+
+ResourceConstraints::ResourceConstraints()
+  : max_young_space_size_(0),
+    max_old_space_size_(0),
+    stack_limit_(NULL) { }
+
+
+bool SetResourceConstraints(ResourceConstraints* constraints) {
+  int semispace_size = constraints->max_young_space_size();
+  int old_gen_size = constraints->max_old_space_size();
+  if (semispace_size != 0 || old_gen_size != 0) {
+    bool result = i::Heap::ConfigureHeap(semispace_size, old_gen_size);
+    if (!result) return false;
+  }
+  if (constraints->stack_limit() != NULL) {
+    uintptr_t limit = reinterpret_cast<uintptr_t>(constraints->stack_limit());
+    i::StackGuard::SetStackLimit(limit);
+  }
+  return true;
+}
+
+
+i::Object** V8::GlobalizeReference(i::Object** obj) {
+  if (IsDeadCheck("V8::Persistent::New")) return NULL;
+  LOG_API("Persistent::New");
+  i::Handle<i::Object> result =
+      i::GlobalHandles::Create(*obj);
+  return result.location();
+}
+
+
+void V8::MakeWeak(i::Object** object, void* parameters,
+                  WeakReferenceCallback callback) {
+  LOG_API("MakeWeak");
+  i::GlobalHandles::MakeWeak(object, parameters, callback);
+}
+
+
+void V8::ClearWeak(i::Object** obj) {
+  LOG_API("ClearWeak");
+  i::GlobalHandles::ClearWeakness(obj);
+}
+
+
+bool V8::IsGlobalNearDeath(i::Object** obj) {
+  LOG_API("IsGlobalNearDeath");
+  if (!i::V8::IsRunning()) return false;
+  return i::GlobalHandles::IsNearDeath(obj);
+}
+
+
+bool V8::IsGlobalWeak(i::Object** obj) {
+  LOG_API("IsGlobalWeak");
+  if (!i::V8::IsRunning()) return false;
+  return i::GlobalHandles::IsWeak(obj);
+}
+
+
+void V8::DisposeGlobal(i::Object** obj) {
+  LOG_API("DisposeGlobal");
+  if (!i::V8::IsRunning()) return;
+  if ((*obj)->IsGlobalContext()) i::Heap::NotifyContextDisposed();
+  i::GlobalHandles::Destroy(obj);
+}
+
+// --- H a n d l e s ---
+
+
+HandleScope::HandleScope() : is_closed_(false) {
+  API_ENTRY_CHECK("HandleScope::HandleScope");
+  i::HandleScope::Enter(&previous_);
+}
+
+
+HandleScope::~HandleScope() {
+  if (!is_closed_) {
+    i::HandleScope::Leave(&previous_);
+  }
+}
+
+
+int HandleScope::NumberOfHandles() {
+  return i::HandleScope::NumberOfHandles();
+}
+
+
+i::Object** v8::HandleScope::CreateHandle(i::Object* value) {
+  return i::HandleScope::CreateHandle(value);
+}
+
+
+void Context::Enter() {
+  if (IsDeadCheck("v8::Context::Enter()")) return;
+  ENTER_V8;
+  i::Handle<i::Context> env = Utils::OpenHandle(this);
+  thread_local.EnterContext(env);
+
+  thread_local.SaveContext(i::Top::context());
+  i::Top::set_context(*env);
+}
+
+
+void Context::Exit() {
+  if (!i::V8::IsRunning()) return;
+  if (!ApiCheck(thread_local.LeaveLastContext(),
+                "v8::Context::Exit()",
+                "Cannot exit non-entered context")) {
+    return;
+  }
+
+  // Content of 'last_context' could be NULL.
+  i::Context* last_context = thread_local.RestoreContext();
+  i::Top::set_context(last_context);
+}
+
+
+void Context::SetData(v8::Handle<Value> data) {
+  if (IsDeadCheck("v8::Context::SetData()")) return;
+  ENTER_V8;
+  {
+    HandleScope scope;
+    i::Handle<i::Context> env = Utils::OpenHandle(this);
+    i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
+    ASSERT(env->IsGlobalContext());
+    if (env->IsGlobalContext()) {
+      env->set_data(*raw_data);
+    }
+  }
+}
+
+
+v8::Local<v8::Value> Context::GetData() {
+  if (IsDeadCheck("v8::Context::GetData()")) return v8::Local<Value>();
+  ENTER_V8;
+  i::Object* raw_result = NULL;
+  {
+    HandleScope scope;
+    i::Handle<i::Context> env = Utils::OpenHandle(this);
+    ASSERT(env->IsGlobalContext());
+    if (env->IsGlobalContext()) {
+      raw_result = env->data();
+    } else {
+      return Local<Value>();
+    }
+  }
+  i::Handle<i::Object> result(raw_result);
+  return Utils::ToLocal(result);
+}
+
+
+i::Object** v8::HandleScope::RawClose(i::Object** value) {
+  if (!ApiCheck(!is_closed_,
+                "v8::HandleScope::Close()",
+                "Local scope has already been closed")) {
+    return 0;
+  }
+  LOG_API("CloseHandleScope");
+
+  // Read the result before popping the handle block.
+  i::Object* result = *value;
+  is_closed_ = true;
+  i::HandleScope::Leave(&previous_);
+
+  // Allocate a new handle on the previous handle block.
+  i::Handle<i::Object> handle(result);
+  return handle.location();
+}
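+
+// Illustrative sketch (not part of this file) of how an embedder uses
+// HandleScope::Close(), which funnels into RawClose() above; the function and
+// variable names are hypothetical:
+//
+//   v8::Handle<v8::String> MakeGreeting() {
+//     v8::HandleScope scope;
+//     v8::Handle<v8::String> greeting = v8::String::New("hello");
+//     // Close() pops this scope but re-allocates 'greeting' in the previous
+//     // scope, exactly as RawClose() does with its result.
+//     return scope.Close(greeting);
+//   }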
+
+
+// --- N e a n d e r ---
+
+
+// A constructor cannot easily return an error value, therefore it is
+// necessary to check for a dead VM with ON_BAILOUT before constructing any
+// Neander objects.  As a reminder of this, there is no HandleScope in the
+// NeanderObject constructor.  When you add one at the site calling the
+// constructor, you should also check that the VM is not dead first.
+NeanderObject::NeanderObject(int size) {
+  EnsureInitialized("v8::Nowhere");
+  ENTER_V8;
+  value_ = i::Factory::NewNeanderObject();
+  i::Handle<i::FixedArray> elements = i::Factory::NewFixedArray(size);
+  value_->set_elements(*elements);
+}
+
+
+int NeanderObject::size() {
+  return i::FixedArray::cast(value_->elements())->length();
+}
+
+
+NeanderArray::NeanderArray() : obj_(2) {
+  obj_.set(0, i::Smi::FromInt(0));
+}
+
+
+int NeanderArray::length() {
+  return i::Smi::cast(obj_.get(0))->value();
+}
+
+
+i::Object* NeanderArray::get(int offset) {
+  ASSERT(0 <= offset);
+  ASSERT(offset < length());
+  return obj_.get(offset + 1);
+}
+
+
+// This method cannot easily return an error value, therefore it is necessary
+// to check for a dead VM with ON_BAILOUT before calling it.  As a reminder of
+// this, there is no HandleScope in this method.  When you add one at the site
+// calling this method, you should also check that the VM is not dead first.
+void NeanderArray::add(i::Handle<i::Object> value) {
+  int length = this->length();
+  int size = obj_.size();
+  if (length == size - 1) {
+    i::Handle<i::FixedArray> new_elms = i::Factory::NewFixedArray(2 * size);
+    for (int i = 0; i < length; i++)
+      new_elms->set(i + 1, get(i));
+    obj_.value()->set_elements(*new_elms);
+  }
+  obj_.set(length + 1, *value);
+  obj_.set(0, i::Smi::FromInt(length + 1));
+}
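+
+// Layout reminder for the code above: a NeanderArray is backed by a
+// NeanderObject whose element 0 holds the current length as a Smi and whose
+// elements 1..length hold the entries, so the backing store is doubled once
+// length reaches size - 1.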
+
+
+void NeanderArray::set(int index, i::Object* value) {
+  if (index < 0 || index >= this->length()) return;
+  obj_.set(index + 1, value);
+}
+
+
+// --- T e m p l a t e ---
+
+
+static void InitializeTemplate(i::Handle<i::TemplateInfo> that, int type) {
+  that->set_tag(i::Smi::FromInt(type));
+}
+
+
+void Template::Set(v8::Handle<String> name, v8::Handle<Data> value,
+                   v8::PropertyAttribute attribute) {
+  if (IsDeadCheck("v8::Template::SetProperty()")) return;
+  ENTER_V8;
+  HandleScope scope;
+  i::Handle<i::Object> list(Utils::OpenHandle(this)->property_list());
+  if (list->IsUndefined()) {
+    list = NeanderArray().value();
+    Utils::OpenHandle(this)->set_property_list(*list);
+  }
+  NeanderArray array(list);
+  array.add(Utils::OpenHandle(*name));
+  array.add(Utils::OpenHandle(*value));
+  array.add(Utils::OpenHandle(*v8::Integer::New(attribute)));
+}
+
+
+// --- F u n c t i o n   T e m p l a t e ---
+static void InitializeFunctionTemplate(
+      i::Handle<i::FunctionTemplateInfo> info) {
+  info->set_tag(i::Smi::FromInt(Consts::FUNCTION_TEMPLATE));
+  info->set_flag(0);
+}
+
+
+Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
+  if (IsDeadCheck("v8::FunctionTemplate::PrototypeTemplate()")) {
+    return Local<ObjectTemplate>();
+  }
+  ENTER_V8;
+  i::Handle<i::Object> result(Utils::OpenHandle(this)->prototype_template());
+  if (result->IsUndefined()) {
+    result = Utils::OpenHandle(*ObjectTemplate::New());
+    Utils::OpenHandle(this)->set_prototype_template(*result);
+  }
+  return Local<ObjectTemplate>(ToApi<ObjectTemplate>(result));
+}
+
+
+void FunctionTemplate::Inherit(v8::Handle<FunctionTemplate> value) {
+  if (IsDeadCheck("v8::FunctionTemplate::Inherit()")) return;
+  ENTER_V8;
+  Utils::OpenHandle(this)->set_parent_template(*Utils::OpenHandle(*value));
+}
+
+
+// Each function template gets a serial number to distinguish it, so that we
+// can find it in the function cache of the global context.
+static int next_serial_number = 0;
+
+
+Local<FunctionTemplate> FunctionTemplate::New(InvocationCallback callback,
+    v8::Handle<Value> data, v8::Handle<Signature> signature) {
+  EnsureInitialized("v8::FunctionTemplate::New()");
+  LOG_API("FunctionTemplate::New");
+  ENTER_V8;
+  i::Handle<i::Struct> struct_obj =
+      i::Factory::NewStruct(i::FUNCTION_TEMPLATE_INFO_TYPE);
+  i::Handle<i::FunctionTemplateInfo> obj =
+      i::Handle<i::FunctionTemplateInfo>::cast(struct_obj);
+  InitializeFunctionTemplate(obj);
+  obj->set_serial_number(i::Smi::FromInt(next_serial_number++));
+  if (callback != 0) {
+    if (data.IsEmpty()) data = v8::Undefined();
+    Utils::ToLocal(obj)->SetCallHandler(callback, data);
+  }
+  obj->set_undetectable(false);
+  obj->set_needs_access_check(false);
+
+  if (!signature.IsEmpty())
+    obj->set_signature(*Utils::OpenHandle(*signature));
+  return Utils::ToLocal(obj);
+}
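+
+// Illustrative embedder-side sketch (not part of this file) of the API
+// implemented above; the callback, the property name and 'global_template'
+// are hypothetical:
+//
+//   static v8::Handle<v8::Value> Version(const v8::Arguments& args) {
+//     return v8::String::New("1.0");
+//   }
+//
+//   v8::Handle<v8::FunctionTemplate> fun = v8::FunctionTemplate::New(Version);
+//   global_template->Set(v8::String::New("version"), fun);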
+
+
+Local<Signature> Signature::New(Handle<FunctionTemplate> receiver,
+      int argc, Handle<FunctionTemplate> argv[]) {
+  EnsureInitialized("v8::Signature::New()");
+  LOG_API("Signature::New");
+  ENTER_V8;
+  i::Handle<i::Struct> struct_obj =
+      i::Factory::NewStruct(i::SIGNATURE_INFO_TYPE);
+  i::Handle<i::SignatureInfo> obj =
+      i::Handle<i::SignatureInfo>::cast(struct_obj);
+  if (!receiver.IsEmpty()) obj->set_receiver(*Utils::OpenHandle(*receiver));
+  if (argc > 0) {
+    i::Handle<i::FixedArray> args = i::Factory::NewFixedArray(argc);
+    for (int i = 0; i < argc; i++) {
+      if (!argv[i].IsEmpty())
+        args->set(i, *Utils::OpenHandle(*argv[i]));
+    }
+    obj->set_args(*args);
+  }
+  return Utils::ToLocal(obj);
+}
+
+
+Local<TypeSwitch> TypeSwitch::New(Handle<FunctionTemplate> type) {
+  Handle<FunctionTemplate> types[1] = { type };
+  return TypeSwitch::New(1, types);
+}
+
+
+Local<TypeSwitch> TypeSwitch::New(int argc, Handle<FunctionTemplate> types[]) {
+  EnsureInitialized("v8::TypeSwitch::New()");
+  LOG_API("TypeSwitch::New");
+  ENTER_V8;
+  i::Handle<i::FixedArray> vector = i::Factory::NewFixedArray(argc);
+  for (int i = 0; i < argc; i++)
+    vector->set(i, *Utils::OpenHandle(*types[i]));
+  i::Handle<i::Struct> struct_obj =
+      i::Factory::NewStruct(i::TYPE_SWITCH_INFO_TYPE);
+  i::Handle<i::TypeSwitchInfo> obj =
+      i::Handle<i::TypeSwitchInfo>::cast(struct_obj);
+  obj->set_types(*vector);
+  return Utils::ToLocal(obj);
+}
+
+
+int TypeSwitch::match(v8::Handle<Value> value) {
+  LOG_API("TypeSwitch::match");
+  i::Handle<i::Object> obj = Utils::OpenHandle(*value);
+  i::Handle<i::TypeSwitchInfo> info = Utils::OpenHandle(this);
+  i::FixedArray* types = i::FixedArray::cast(info->types());
+  for (int i = 0; i < types->length(); i++) {
+    if (obj->IsInstanceOf(i::FunctionTemplateInfo::cast(types->get(i))))
+      return i + 1;
+  }
+  return 0;
+}
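+
+// Illustrative sketch: match() returns 0 for no match and i + 1 for a match
+// against the i'th registered template, so callers can switch on the result;
+// 'point_tmpl', 'rect_tmpl' and 'value' are hypothetical:
+//
+//   v8::Handle<v8::FunctionTemplate> templates[] = { point_tmpl, rect_tmpl };
+//   v8::Handle<v8::TypeSwitch> type_switch = v8::TypeSwitch::New(2, templates);
+//   switch (type_switch->match(value)) {
+//     case 0: /* no match */ break;
+//     case 1: /* instance of point_tmpl */ break;
+//     case 2: /* instance of rect_tmpl */ break;
+//   }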
+
+
+void FunctionTemplate::SetCallHandler(InvocationCallback callback,
+                                      v8::Handle<Value> data) {
+  if (IsDeadCheck("v8::FunctionTemplate::SetCallHandler()")) return;
+  ENTER_V8;
+  HandleScope scope;
+  i::Handle<i::Struct> struct_obj =
+      i::Factory::NewStruct(i::CALL_HANDLER_INFO_TYPE);
+  i::Handle<i::CallHandlerInfo> obj =
+      i::Handle<i::CallHandlerInfo>::cast(struct_obj);
+  obj->set_callback(*FromCData(callback));
+  if (data.IsEmpty()) data = v8::Undefined();
+  obj->set_data(*Utils::OpenHandle(*data));
+  Utils::OpenHandle(this)->set_call_code(*obj);
+}
+
+
+void FunctionTemplate::AddInstancePropertyAccessor(
+      v8::Handle<String> name,
+      AccessorGetter getter,
+      AccessorSetter setter,
+      v8::Handle<Value> data,
+      v8::AccessControl settings,
+      v8::PropertyAttribute attributes) {
+  if (IsDeadCheck("v8::FunctionTemplate::AddInstancePropertyAccessor()")) {
+    return;
+  }
+  ENTER_V8;
+  HandleScope scope;
+  i::Handle<i::AccessorInfo> obj = i::Factory::NewAccessorInfo();
+  ASSERT(getter != NULL);
+  obj->set_getter(*FromCData(getter));
+  obj->set_setter(*FromCData(setter));
+  if (data.IsEmpty()) data = v8::Undefined();
+  obj->set_data(*Utils::OpenHandle(*data));
+  obj->set_name(*Utils::OpenHandle(*name));
+  if (settings & ALL_CAN_READ) obj->set_all_can_read(true);
+  if (settings & ALL_CAN_WRITE) obj->set_all_can_write(true);
+  if (settings & PROHIBITS_OVERWRITING) obj->set_prohibits_overwriting(true);
+  obj->set_property_attributes(static_cast<PropertyAttributes>(attributes));
+
+  i::Handle<i::Object> list(Utils::OpenHandle(this)->property_accessors());
+  if (list->IsUndefined()) {
+    list = NeanderArray().value();
+    Utils::OpenHandle(this)->set_property_accessors(*list);
+  }
+  NeanderArray array(list);
+  array.add(obj);
+}
+
+
+Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
+  if (IsDeadCheck("v8::FunctionTemplate::InstanceTemplate()")
+      || EmptyCheck("v8::FunctionTemplate::InstanceTemplate()", this))
+    return Local<ObjectTemplate>();
+  ENTER_V8;
+  if (Utils::OpenHandle(this)->instance_template()->IsUndefined()) {
+    Local<ObjectTemplate> templ =
+        ObjectTemplate::New(v8::Handle<FunctionTemplate>(this));
+    Utils::OpenHandle(this)->set_instance_template(*Utils::OpenHandle(*templ));
+  }
+  i::Handle<i::ObjectTemplateInfo> result(i::ObjectTemplateInfo::cast(
+        Utils::OpenHandle(this)->instance_template()));
+  return Utils::ToLocal(result);
+}
+
+
+void FunctionTemplate::SetClassName(Handle<String> name) {
+  if (IsDeadCheck("v8::FunctionTemplate::SetClassName()")) return;
+  ENTER_V8;
+  Utils::OpenHandle(this)->set_class_name(*Utils::OpenHandle(*name));
+}
+
+
+void FunctionTemplate::SetHiddenPrototype(bool value) {
+  if (IsDeadCheck("v8::FunctionTemplate::SetHiddenPrototype()")) return;
+  ENTER_V8;
+  Utils::OpenHandle(this)->set_hidden_prototype(value);
+}
+
+
+void FunctionTemplate::SetNamedInstancePropertyHandler(
+      NamedPropertyGetter getter,
+      NamedPropertySetter setter,
+      NamedPropertyQuery query,
+      NamedPropertyDeleter remover,
+      NamedPropertyEnumerator enumerator,
+      Handle<Value> data) {
+  if (IsDeadCheck("v8::FunctionTemplate::SetNamedInstancePropertyHandler()")) {
+    return;
+  }
+  ENTER_V8;
+  HandleScope scope;
+  i::Handle<i::Struct> struct_obj =
+      i::Factory::NewStruct(i::INTERCEPTOR_INFO_TYPE);
+  i::Handle<i::InterceptorInfo> obj =
+      i::Handle<i::InterceptorInfo>::cast(struct_obj);
+  if (getter != 0) obj->set_getter(*FromCData(getter));
+  if (setter != 0) obj->set_setter(*FromCData(setter));
+  if (query != 0) obj->set_query(*FromCData(query));
+  if (remover != 0) obj->set_deleter(*FromCData(remover));
+  if (enumerator != 0) obj->set_enumerator(*FromCData(enumerator));
+  if (data.IsEmpty()) data = v8::Undefined();
+  obj->set_data(*Utils::OpenHandle(*data));
+  Utils::OpenHandle(this)->set_named_property_handler(*obj);
+}
+
+
+void FunctionTemplate::SetIndexedInstancePropertyHandler(
+      IndexedPropertyGetter getter,
+      IndexedPropertySetter setter,
+      IndexedPropertyQuery query,
+      IndexedPropertyDeleter remover,
+      IndexedPropertyEnumerator enumerator,
+      Handle<Value> data) {
+  if (IsDeadCheck(
+        "v8::FunctionTemplate::SetIndexedInstancePropertyHandler()")) {
+    return;
+  }
+  ENTER_V8;
+  HandleScope scope;
+  i::Handle<i::Struct> struct_obj =
+      i::Factory::NewStruct(i::INTERCEPTOR_INFO_TYPE);
+  i::Handle<i::InterceptorInfo> obj =
+      i::Handle<i::InterceptorInfo>::cast(struct_obj);
+  if (getter != 0) obj->set_getter(*FromCData(getter));
+  if (setter != 0) obj->set_setter(*FromCData(setter));
+  if (query != 0) obj->set_query(*FromCData(query));
+  if (remover != 0) obj->set_deleter(*FromCData(remover));
+  if (enumerator != 0) obj->set_enumerator(*FromCData(enumerator));
+  if (data.IsEmpty()) data = v8::Undefined();
+  obj->set_data(*Utils::OpenHandle(*data));
+  Utils::OpenHandle(this)->set_indexed_property_handler(*obj);
+}
+
+
+void FunctionTemplate::SetInstanceCallAsFunctionHandler(
+      InvocationCallback callback,
+      Handle<Value> data) {
+  if (IsDeadCheck("v8::FunctionTemplate::SetInstanceCallAsFunctionHandler()")) {
+    return;
+  }
+  ENTER_V8;
+  HandleScope scope;
+  i::Handle<i::Struct> struct_obj =
+      i::Factory::NewStruct(i::CALL_HANDLER_INFO_TYPE);
+  i::Handle<i::CallHandlerInfo> obj =
+      i::Handle<i::CallHandlerInfo>::cast(struct_obj);
+  obj->set_callback(*FromCData(callback));
+  if (data.IsEmpty()) data = v8::Undefined();
+  obj->set_data(*Utils::OpenHandle(*data));
+  Utils::OpenHandle(this)->set_instance_call_handler(*obj);
+}
+
+
+// --- O b j e c t T e m p l a t e ---
+
+
+Local<ObjectTemplate> ObjectTemplate::New() {
+  return New(Local<FunctionTemplate>());
+}
+
+
+Local<ObjectTemplate> ObjectTemplate::New(
+      v8::Handle<FunctionTemplate> constructor) {
+  if (IsDeadCheck("v8::ObjectTemplate::New()")) return Local<ObjectTemplate>();
+  EnsureInitialized("v8::ObjectTemplate::New()");
+  LOG_API("ObjectTemplate::New");
+  ENTER_V8;
+  i::Handle<i::Struct> struct_obj =
+      i::Factory::NewStruct(i::OBJECT_TEMPLATE_INFO_TYPE);
+  i::Handle<i::ObjectTemplateInfo> obj =
+      i::Handle<i::ObjectTemplateInfo>::cast(struct_obj);
+  InitializeTemplate(obj, Consts::OBJECT_TEMPLATE);
+  if (!constructor.IsEmpty())
+    obj->set_constructor(*Utils::OpenHandle(*constructor));
+  obj->set_internal_field_count(i::Smi::FromInt(0));
+  return Utils::ToLocal(obj);
+}
+
+
+// Ensure that the object template has a constructor.  If no
+// constructor is available we create one.
+static void EnsureConstructor(ObjectTemplate* object_template) {
+  if (Utils::OpenHandle(object_template)->constructor()->IsUndefined()) {
+    Local<FunctionTemplate> templ = FunctionTemplate::New();
+    i::Handle<i::FunctionTemplateInfo> constructor = Utils::OpenHandle(*templ);
+    constructor->set_instance_template(*Utils::OpenHandle(object_template));
+    Utils::OpenHandle(object_template)->set_constructor(*constructor);
+  }
+}
+
+
+void ObjectTemplate::SetAccessor(v8::Handle<String> name,
+                                 AccessorGetter getter,
+                                 AccessorSetter setter,
+                                 v8::Handle<Value> data,
+                                 AccessControl settings,
+                                 PropertyAttribute attribute) {
+  if (IsDeadCheck("v8::ObjectTemplate::SetAccessor()")) return;
+  ENTER_V8;
+  HandleScope scope;
+  EnsureConstructor(this);
+  i::FunctionTemplateInfo* constructor =
+      i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
+  i::Handle<i::FunctionTemplateInfo> cons(constructor);
+  Utils::ToLocal(cons)->AddInstancePropertyAccessor(name,
+                                                    getter,
+                                                    setter,
+                                                    data,
+                                                    settings,
+                                                    attribute);
+}
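+
+// Illustrative embedder-side sketch (not part of this file) of SetAccessor;
+// the getter/setter follow the AccessorGetter/AccessorSetter typedefs and the
+// names 'GetX', 'SetX' and 'templ' are hypothetical:
+//
+//   static v8::Handle<v8::Value> GetX(v8::Local<v8::String> name,
+//                                     const v8::AccessorInfo& info) {
+//     return v8::Integer::New(42);
+//   }
+//
+//   static void SetX(v8::Local<v8::String> name, v8::Local<v8::Value> value,
+//                    const v8::AccessorInfo& info) {
+//     // Store 'value' in the embedder's own state.
+//   }
+//
+//   templ->SetAccessor(v8::String::New("x"), GetX, SetX);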
+
+
+void ObjectTemplate::SetNamedPropertyHandler(NamedPropertyGetter getter,
+                                             NamedPropertySetter setter,
+                                             NamedPropertyQuery query,
+                                             NamedPropertyDeleter remover,
+                                             NamedPropertyEnumerator enumerator,
+                                             Handle<Value> data) {
+  if (IsDeadCheck("v8::ObjectTemplate::SetNamedPropertyHandler()")) return;
+  ENTER_V8;
+  HandleScope scope;
+  EnsureConstructor(this);
+  i::FunctionTemplateInfo* constructor =
+      i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
+  i::Handle<i::FunctionTemplateInfo> cons(constructor);
+  Utils::ToLocal(cons)->SetNamedInstancePropertyHandler(getter,
+                                                        setter,
+                                                        query,
+                                                        remover,
+                                                        enumerator,
+                                                        data);
+}
+
+
+void ObjectTemplate::MarkAsUndetectable() {
+  if (IsDeadCheck("v8::ObjectTemplate::MarkAsUndetectable()")) return;
+  ENTER_V8;
+  HandleScope scope;
+  EnsureConstructor(this);
+  i::FunctionTemplateInfo* constructor =
+      i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
+  i::Handle<i::FunctionTemplateInfo> cons(constructor);
+  cons->set_undetectable(true);
+}
+
+
+void ObjectTemplate::SetAccessCheckCallbacks(
+      NamedSecurityCallback named_callback,
+      IndexedSecurityCallback indexed_callback,
+      Handle<Value> data,
+      bool turned_on_by_default) {
+  if (IsDeadCheck("v8::ObjectTemplate::SetAccessCheckCallbacks()")) return;
+  ENTER_V8;
+  HandleScope scope;
+  EnsureConstructor(this);
+
+  i::Handle<i::Struct> struct_info =
+      i::Factory::NewStruct(i::ACCESS_CHECK_INFO_TYPE);
+  i::Handle<i::AccessCheckInfo> info =
+      i::Handle<i::AccessCheckInfo>::cast(struct_info);
+  info->set_named_callback(*FromCData(named_callback));
+  info->set_indexed_callback(*FromCData(indexed_callback));
+  if (data.IsEmpty()) data = v8::Undefined();
+  info->set_data(*Utils::OpenHandle(*data));
+
+  i::FunctionTemplateInfo* constructor =
+      i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
+  i::Handle<i::FunctionTemplateInfo> cons(constructor);
+  cons->set_access_check_info(*info);
+  cons->set_needs_access_check(turned_on_by_default);
+}
+
+
+void ObjectTemplate::SetIndexedPropertyHandler(
+      IndexedPropertyGetter getter,
+      IndexedPropertySetter setter,
+      IndexedPropertyQuery query,
+      IndexedPropertyDeleter remover,
+      IndexedPropertyEnumerator enumerator,
+      Handle<Value> data) {
+  if (IsDeadCheck("v8::ObjectTemplate::SetIndexedPropertyHandler()")) return;
+  ENTER_V8;
+  HandleScope scope;
+  EnsureConstructor(this);
+  i::FunctionTemplateInfo* constructor =
+      i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
+  i::Handle<i::FunctionTemplateInfo> cons(constructor);
+  Utils::ToLocal(cons)->SetIndexedInstancePropertyHandler(getter,
+                                                          setter,
+                                                          query,
+                                                          remover,
+                                                          enumerator,
+                                                          data);
+}
+
+
+void ObjectTemplate::SetCallAsFunctionHandler(InvocationCallback callback,
+                                              Handle<Value> data) {
+  if (IsDeadCheck("v8::ObjectTemplate::SetCallAsFunctionHandler()")) return;
+  ENTER_V8;
+  HandleScope scope;
+  EnsureConstructor(this);
+  i::FunctionTemplateInfo* constructor =
+      i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
+  i::Handle<i::FunctionTemplateInfo> cons(constructor);
+  Utils::ToLocal(cons)->SetInstanceCallAsFunctionHandler(callback, data);
+}
+
+
+int ObjectTemplate::InternalFieldCount() {
+  if (IsDeadCheck("v8::ObjectTemplate::InternalFieldCount()")) {
+    return 0;
+  }
+  return i::Smi::cast(Utils::OpenHandle(this)->internal_field_count())->value();
+}
+
+
+void ObjectTemplate::SetInternalFieldCount(int value) {
+  if (IsDeadCheck("v8::ObjectTemplate::SetInternalFieldCount()")) return;
+  if (!ApiCheck(i::Smi::IsValid(value),
+                "v8::ObjectTemplate::SetInternalFieldCount()",
+                "Invalid internal field count")) {
+    return;
+  }
+  ENTER_V8;
+  if (value > 0) {
+    // The internal field count is set by the constructor function's
+    // construct code, so we ensure that there is a constructor
+    // function to do the setting.
+    EnsureConstructor(this);
+  }
+  Utils::OpenHandle(this)->set_internal_field_count(i::Smi::FromInt(value));
+}
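+
+// Illustrative sketch: reserving one internal field on the template and later
+// storing a native pointer on an instance; 'templ' and 'native_ptr' are
+// hypothetical:
+//
+//   templ->SetInternalFieldCount(1);
+//   ...
+//   v8::Handle<v8::Object> instance = templ->NewInstance();
+//   instance->SetInternalField(0, v8::External::New(native_ptr));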
+
+
+// --- S c r i p t D a t a ---
+
+
+ScriptData* ScriptData::PreCompile(const char* input, int length) {
+  unibrow::Utf8InputBuffer<> buf(input, length);
+  return i::PreParse(i::Handle<i::String>(), &buf, NULL);
+}
+
+
+ScriptData* ScriptData::New(unsigned* data, int length) {
+  return new i::ScriptDataImpl(i::Vector<unsigned>(data, length));
+}
+
+
+// --- S c r i p t ---
+
+
+Local<Script> Script::New(v8::Handle<String> source,
+                          v8::ScriptOrigin* origin,
+                          v8::ScriptData* script_data) {
+  ON_BAILOUT("v8::Script::New()", return Local<Script>());
+  LOG_API("Script::New");
+  ENTER_V8;
+  i::Handle<i::String> str = Utils::OpenHandle(*source);
+  i::Handle<i::Object> name_obj;
+  int line_offset = 0;
+  int column_offset = 0;
+  if (origin != NULL) {
+    if (!origin->ResourceName().IsEmpty()) {
+      name_obj = Utils::OpenHandle(*origin->ResourceName());
+    }
+    if (!origin->ResourceLineOffset().IsEmpty()) {
+      line_offset = static_cast<int>(origin->ResourceLineOffset()->Value());
+    }
+    if (!origin->ResourceColumnOffset().IsEmpty()) {
+      column_offset = static_cast<int>(origin->ResourceColumnOffset()->Value());
+    }
+  }
+  EXCEPTION_PREAMBLE();
+  i::ScriptDataImpl* pre_data = static_cast<i::ScriptDataImpl*>(script_data);
+  // We assert that the pre-data is sane, even though in release mode we can
+  // actually handle the case where it turns out not to be.
+  ASSERT(pre_data == NULL || pre_data->SanityCheck());
+  // If the pre-data isn't sane, we simply ignore it.
+  if (pre_data != NULL && !pre_data->SanityCheck()) {
+    pre_data = NULL;
+  }
+  i::Handle<i::JSFunction> boilerplate = i::Compiler::Compile(str,
+                                                              name_obj,
+                                                              line_offset,
+                                                              column_offset,
+                                                              NULL,
+                                                              pre_data);
+  has_pending_exception = boilerplate.is_null();
+  EXCEPTION_BAILOUT_CHECK(Local<Script>());
+  return Local<Script>(ToApi<Script>(boilerplate));
+}
+
+
+Local<Script> Script::New(v8::Handle<String> source,
+                          v8::Handle<Value> file_name) {
+  ScriptOrigin origin(file_name);
+  return New(source, &origin);
+}
+
+
+Local<Script> Script::Compile(v8::Handle<String> source,
+                              v8::ScriptOrigin* origin,
+                              v8::ScriptData* script_data) {
+  ON_BAILOUT("v8::Script::Compile()", return Local<Script>());
+  LOG_API("Script::Compile");
+  ENTER_V8;
+  Local<Script> generic = New(source, origin, script_data);
+  if (generic.IsEmpty())
+    return generic;
+  i::Handle<i::JSFunction> boilerplate = Utils::OpenHandle(*generic);
+  i::Handle<i::JSFunction> result =
+      i::Factory::NewFunctionFromBoilerplate(boilerplate,
+                                             i::Top::global_context());
+  return Local<Script>(ToApi<Script>(result));
+}
+
+
+Local<Script> Script::Compile(v8::Handle<String> source,
+                              v8::Handle<Value> file_name) {
+  ScriptOrigin origin(file_name);
+  return Compile(source, &origin);
+}
+
+
+Local<Value> Script::Run() {
+  ON_BAILOUT("v8::Script::Run()", return Local<Value>());
+  LOG_API("Script::Run");
+  ENTER_V8;
+  i::Object* raw_result = NULL;
+  {
+    HandleScope scope;
+    i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
+    if (fun->IsBoilerplate()) {
+      fun = i::Factory::NewFunctionFromBoilerplate(fun,
+                                                   i::Top::global_context());
+    }
+    EXCEPTION_PREAMBLE();
+    i::Handle<i::Object> receiver(i::Top::context()->global_proxy());
+    i::Handle<i::Object> result =
+        i::Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
+    EXCEPTION_BAILOUT_CHECK(Local<Value>());
+    raw_result = *result;
+  }
+  i::Handle<i::Object> result(raw_result);
+  return Utils::ToLocal(result);
+}
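+
+// Illustrative embedder-side sketch of compiling and running a script; a
+// context is assumed to have been created and entered already:
+//
+//   v8::HandleScope scope;
+//   v8::Handle<v8::String> source = v8::String::New("1 + 2");
+//   v8::Handle<v8::Script> script = v8::Script::Compile(source);
+//   if (!script.IsEmpty()) {
+//     v8::Handle<v8::Value> result = script->Run();
+//     // result->Int32Value() is 3 if no exception was thrown.
+//   }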
+
+
+Local<Value> Script::Id() {
+  ON_BAILOUT("v8::Script::Id()", return Local<Value>());
+  LOG_API("Script::Id");
+  i::Object* raw_id = NULL;
+  {
+    HandleScope scope;
+    i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
+    i::Handle<i::Script> script(i::Script::cast(fun->shared()->script()));
+    i::Handle<i::Object> id(script->id());
+    raw_id = *id;
+  }
+  i::Handle<i::Object> id(raw_id);
+  return Utils::ToLocal(id);
+}
+
+
+void Script::SetData(v8::Handle<Value> data) {
+  ON_BAILOUT("v8::Script::SetData()", return);
+  LOG_API("Script::SetData");
+  {
+    HandleScope scope;
+    i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
+    i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
+    i::Handle<i::Script> script(i::Script::cast(fun->shared()->script()));
+    script->set_data(*raw_data);
+  }
+}
+
+
+// --- E x c e p t i o n s ---
+
+
+v8::TryCatch::TryCatch()
+    : next_(i::Top::try_catch_handler()),
+      exception_(i::Heap::the_hole_value()),
+      message_(i::Smi::FromInt(0)),
+      is_verbose_(false),
+      can_continue_(true),
+      capture_message_(true),
+      js_handler_(NULL) {
+  i::Top::RegisterTryCatchHandler(this);
+}
+
+
+v8::TryCatch::~TryCatch() {
+  i::Top::UnregisterTryCatchHandler(this);
+}
+
+
+bool v8::TryCatch::HasCaught() const {
+  return !reinterpret_cast<i::Object*>(exception_)->IsTheHole();
+}
+
+
+bool v8::TryCatch::CanContinue() const {
+  return can_continue_;
+}
+
+
+v8::Local<Value> v8::TryCatch::Exception() const {
+  if (HasCaught()) {
+    // Check for out of memory exception.
+    i::Object* exception = reinterpret_cast<i::Object*>(exception_);
+    return v8::Utils::ToLocal(i::Handle<i::Object>(exception));
+  } else {
+    return v8::Local<Value>();
+  }
+}
+
+
+v8::Local<Value> v8::TryCatch::StackTrace() const {
+  if (HasCaught()) {
+    i::Object* raw_obj = reinterpret_cast<i::Object*>(exception_);
+    if (!raw_obj->IsJSObject()) return v8::Local<Value>();
+    v8::HandleScope scope;
+    i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj));
+    i::Handle<i::String> name = i::Factory::LookupAsciiSymbol("stack");
+    if (!obj->HasProperty(*name))
+      return v8::Local<Value>();
+    return scope.Close(v8::Utils::ToLocal(i::GetProperty(obj, name)));
+  } else {
+    return v8::Local<Value>();
+  }
+}
+
+
+v8::Local<v8::Message> v8::TryCatch::Message() const {
+  if (HasCaught() && message_ != i::Smi::FromInt(0)) {
+    i::Object* message = reinterpret_cast<i::Object*>(message_);
+    return v8::Utils::MessageToLocal(i::Handle<i::Object>(message));
+  } else {
+    return v8::Local<v8::Message>();
+  }
+}
+
+
+void v8::TryCatch::Reset() {
+  exception_ = i::Heap::the_hole_value();
+  message_ = i::Smi::FromInt(0);
+}
+
+
+void v8::TryCatch::SetVerbose(bool value) {
+  is_verbose_ = value;
+}
+
+
+void v8::TryCatch::SetCaptureMessage(bool value) {
+  capture_message_ = value;
+}
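+
+// Illustrative sketch of the intended TryCatch usage from embedder code,
+// assuming the String::Utf8Value helper from v8.h; 'script' is hypothetical:
+//
+//   v8::TryCatch try_catch;
+//   v8::Handle<v8::Value> result = script->Run();
+//   if (try_catch.HasCaught()) {
+//     v8::String::Utf8Value error(try_catch.Exception());
+//     // Report *error; try_catch.Message() may carry source location info.
+//   }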
+
+
+// --- M e s s a g e ---
+
+
+Local<String> Message::Get() const {
+  ON_BAILOUT("v8::Message::Get()", return Local<String>());
+  ENTER_V8;
+  HandleScope scope;
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  i::Handle<i::String> raw_result = i::MessageHandler::GetMessage(obj);
+  Local<String> result = Utils::ToLocal(raw_result);
+  return scope.Close(result);
+}
+
+
+v8::Handle<Value> Message::GetScriptResourceName() const {
+  if (IsDeadCheck("v8::Message::GetScriptResourceName()")) {
+    return Local<String>();
+  }
+  ENTER_V8;
+  HandleScope scope;
+  i::Handle<i::JSObject> obj =
+      i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
+  // Return this.script.name.
+  i::Handle<i::JSValue> script =
+      i::Handle<i::JSValue>::cast(GetProperty(obj, "script"));
+  i::Handle<i::Object> resource_name(i::Script::cast(script->value())->name());
+  return scope.Close(Utils::ToLocal(resource_name));
+}
+
+
+v8::Handle<Value> Message::GetScriptData() const {
+  if (IsDeadCheck("v8::Message::GetScriptResourceData()")) {
+    return Local<Value>();
+  }
+  ENTER_V8;
+  HandleScope scope;
+  i::Handle<i::JSObject> obj =
+      i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
+  // Return this.script.data.
+  i::Handle<i::JSValue> script =
+      i::Handle<i::JSValue>::cast(GetProperty(obj, "script"));
+  i::Handle<i::Object> data(i::Script::cast(script->value())->data());
+  return scope.Close(Utils::ToLocal(data));
+}
+
+
+static i::Handle<i::Object> CallV8HeapFunction(const char* name,
+                                               i::Handle<i::Object> recv,
+                                               int argc,
+                                               i::Object** argv[],
+                                               bool* has_pending_exception) {
+  i::Handle<i::String> fmt_str = i::Factory::LookupAsciiSymbol(name);
+  i::Object* object_fun = i::Top::builtins()->GetProperty(*fmt_str);
+  i::Handle<i::JSFunction> fun =
+      i::Handle<i::JSFunction>(i::JSFunction::cast(object_fun));
+  i::Handle<i::Object> value =
+      i::Execution::Call(fun, recv, argc, argv, has_pending_exception);
+  return value;
+}
+
+
+static i::Handle<i::Object> CallV8HeapFunction(const char* name,
+                                               i::Handle<i::Object> data,
+                                               bool* has_pending_exception) {
+  i::Object** argv[1] = { data.location() };
+  return CallV8HeapFunction(name,
+                            i::Top::builtins(),
+                            1,
+                            argv,
+                            has_pending_exception);
+}
+
+
+int Message::GetLineNumber() const {
+  ON_BAILOUT("v8::Message::GetLineNumber()", return -1);
+  ENTER_V8;
+  HandleScope scope;
+  EXCEPTION_PREAMBLE();
+  i::Handle<i::Object> result = CallV8HeapFunction("GetLineNumber",
+                                                   Utils::OpenHandle(this),
+                                                   &has_pending_exception);
+  EXCEPTION_BAILOUT_CHECK(0);
+  return static_cast<int>(result->Number());
+}
+
+
+int Message::GetStartPosition() const {
+  if (IsDeadCheck("v8::Message::GetStartPosition()")) return 0;
+  ENTER_V8;
+  HandleScope scope;
+
+  i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
+  return static_cast<int>(GetProperty(data_obj, "startPos")->Number());
+}
+
+
+int Message::GetEndPosition() const {
+  if (IsDeadCheck("v8::Message::GetEndPosition()")) return 0;
+  ENTER_V8;
+  HandleScope scope;
+  i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
+  return static_cast<int>(GetProperty(data_obj, "endPos")->Number());
+}
+
+
+int Message::GetStartColumn() const {
+  if (IsDeadCheck("v8::Message::GetStartColumn()")) return 0;
+  ENTER_V8;
+  HandleScope scope;
+  i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
+  EXCEPTION_PREAMBLE();
+  i::Handle<i::Object> start_col_obj = CallV8HeapFunction(
+      "GetPositionInLine",
+      data_obj,
+      &has_pending_exception);
+  EXCEPTION_BAILOUT_CHECK(0);
+  return static_cast<int>(start_col_obj->Number());
+}
+
+
+int Message::GetEndColumn() const {
+  if (IsDeadCheck("v8::Message::GetEndColumn()")) return 0;
+  ENTER_V8;
+  HandleScope scope;
+  i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
+  EXCEPTION_PREAMBLE();
+  i::Handle<i::Object> start_col_obj = CallV8HeapFunction(
+      "GetPositionInLine",
+      data_obj,
+      &has_pending_exception);
+  EXCEPTION_BAILOUT_CHECK(0);
+  int start = static_cast<int>(GetProperty(data_obj, "startPos")->Number());
+  int end = static_cast<int>(GetProperty(data_obj, "endPos")->Number());
+  return static_cast<int>(start_col_obj->Number()) + (end - start);
+}
+
+
+Local<String> Message::GetSourceLine() const {
+  ON_BAILOUT("v8::Message::GetSourceLine()", return Local<String>());
+  ENTER_V8;
+  HandleScope scope;
+  EXCEPTION_PREAMBLE();
+  i::Handle<i::Object> result = CallV8HeapFunction("GetSourceLine",
+                                                   Utils::OpenHandle(this),
+                                                   &has_pending_exception);
+  EXCEPTION_BAILOUT_CHECK(Local<v8::String>());
+  if (result->IsString()) {
+    return scope.Close(Utils::ToLocal(i::Handle<i::String>::cast(result)));
+  } else {
+    return Local<String>();
+  }
+}
+
+
+void Message::PrintCurrentStackTrace(FILE* out) {
+  if (IsDeadCheck("v8::Message::PrintCurrentStackTrace()")) return;
+  ENTER_V8;
+  i::Top::PrintCurrentStackTrace(out);
+}
+
+
+// --- D a t a ---
+
+bool Value::IsUndefined() const {
+  if (IsDeadCheck("v8::Value::IsUndefined()")) return false;
+  return Utils::OpenHandle(this)->IsUndefined();
+}
+
+
+bool Value::IsNull() const {
+  if (IsDeadCheck("v8::Value::IsNull()")) return false;
+  return Utils::OpenHandle(this)->IsNull();
+}
+
+
+bool Value::IsTrue() const {
+  if (IsDeadCheck("v8::Value::IsTrue()")) return false;
+  return Utils::OpenHandle(this)->IsTrue();
+}
+
+
+bool Value::IsFalse() const {
+  if (IsDeadCheck("v8::Value::IsFalse()")) return false;
+  return Utils::OpenHandle(this)->IsFalse();
+}
+
+
+bool Value::IsFunction() const {
+  if (IsDeadCheck("v8::Value::IsFunction()")) return false;
+  return Utils::OpenHandle(this)->IsJSFunction();
+}
+
+
+bool Value::FullIsString() const {
+  if (IsDeadCheck("v8::Value::IsString()")) return false;
+  bool result = Utils::OpenHandle(this)->IsString();
+  ASSERT_EQ(result, QuickIsString());
+  return result;
+}
+
+
+bool Value::IsArray() const {
+  if (IsDeadCheck("v8::Value::IsArray()")) return false;
+  return Utils::OpenHandle(this)->IsJSArray();
+}
+
+
+bool Value::IsObject() const {
+  if (IsDeadCheck("v8::Value::IsObject()")) return false;
+  return Utils::OpenHandle(this)->IsJSObject();
+}
+
+
+bool Value::IsNumber() const {
+  if (IsDeadCheck("v8::Value::IsNumber()")) return false;
+  return Utils::OpenHandle(this)->IsNumber();
+}
+
+
+bool Value::IsBoolean() const {
+  if (IsDeadCheck("v8::Value::IsBoolean()")) return false;
+  return Utils::OpenHandle(this)->IsBoolean();
+}
+
+
+bool Value::IsExternal() const {
+  if (IsDeadCheck("v8::Value::IsExternal()")) return false;
+  return Utils::OpenHandle(this)->IsProxy();
+}
+
+
+bool Value::IsInt32() const {
+  if (IsDeadCheck("v8::Value::IsInt32()")) return false;
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  if (obj->IsSmi()) return true;
+  if (obj->IsNumber()) {
+    double value = obj->Number();
+    return i::FastI2D(i::FastD2I(value)) == value;
+  }
+  return false;
+}
+
+
+bool Value::IsDate() const {
+  if (IsDeadCheck("v8::Value::IsDate()")) return false;
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  return obj->HasSpecificClassOf(i::Heap::Date_symbol());
+}
+
+
+Local<String> Value::ToString() const {
+  if (IsDeadCheck("v8::Value::ToString()")) return Local<String>();
+  LOG_API("ToString");
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  i::Handle<i::Object> str;
+  if (obj->IsString()) {
+    str = obj;
+  } else {
+    ENTER_V8;
+    EXCEPTION_PREAMBLE();
+    str = i::Execution::ToString(obj, &has_pending_exception);
+    EXCEPTION_BAILOUT_CHECK(Local<String>());
+  }
+  return Local<String>(ToApi<String>(str));
+}
+
+
+Local<String> Value::ToDetailString() const {
+  if (IsDeadCheck("v8::Value::ToDetailString()")) return Local<String>();
+  LOG_API("ToDetailString");
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  i::Handle<i::Object> str;
+  if (obj->IsString()) {
+    str = obj;
+  } else {
+    ENTER_V8;
+    EXCEPTION_PREAMBLE();
+    str = i::Execution::ToDetailString(obj, &has_pending_exception);
+    EXCEPTION_BAILOUT_CHECK(Local<String>());
+  }
+  return Local<String>(ToApi<String>(str));
+}
+
+
+Local<v8::Object> Value::ToObject() const {
+  if (IsDeadCheck("v8::Value::ToObject()")) return Local<v8::Object>();
+  LOG_API("ToObject");
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  i::Handle<i::Object> val;
+  if (obj->IsJSObject()) {
+    val = obj;
+  } else {
+    ENTER_V8;
+    EXCEPTION_PREAMBLE();
+    val = i::Execution::ToObject(obj, &has_pending_exception);
+    EXCEPTION_BAILOUT_CHECK(Local<v8::Object>());
+  }
+  return Local<v8::Object>(ToApi<Object>(val));
+}
+
+
+Local<Boolean> Value::ToBoolean() const {
+  if (IsDeadCheck("v8::Value::ToBoolean()")) return Local<Boolean>();
+  LOG_API("ToBoolean");
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  if (obj->IsBoolean()) {
+    return Local<Boolean>(ToApi<Boolean>(obj));
+  } else {
+    ENTER_V8;
+    i::Handle<i::Object> val = i::Execution::ToBoolean(obj);
+    return Local<Boolean>(ToApi<Boolean>(val));
+  }
+}
+
+
+Local<Number> Value::ToNumber() const {
+  if (IsDeadCheck("v8::Value::ToNumber()")) return Local<Number>();
+  LOG_API("ToNumber");
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  i::Handle<i::Object> num;
+  if (obj->IsNumber()) {
+    num = obj;
+  } else {
+    ENTER_V8;
+    EXCEPTION_PREAMBLE();
+    num = i::Execution::ToNumber(obj, &has_pending_exception);
+    EXCEPTION_BAILOUT_CHECK(Local<Number>());
+  }
+  return Local<Number>(ToApi<Number>(num));
+}
+
+
+Local<Integer> Value::ToInteger() const {
+  if (IsDeadCheck("v8::Value::ToInteger()")) return Local<Integer>();
+  LOG_API("ToInteger");
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  i::Handle<i::Object> num;
+  if (obj->IsSmi()) {
+    num = obj;
+  } else {
+    ENTER_V8;
+    EXCEPTION_PREAMBLE();
+    num = i::Execution::ToInteger(obj, &has_pending_exception);
+    EXCEPTION_BAILOUT_CHECK(Local<Integer>());
+  }
+  return Local<Integer>(ToApi<Integer>(num));
+}
+
+
+void External::CheckCast(v8::Value* that) {
+  if (IsDeadCheck("v8::External::Cast()")) return;
+  i::Handle<i::Object> obj = Utils::OpenHandle(that);
+  ApiCheck(obj->IsProxy(),
+           "v8::External::Cast()",
+           "Could not convert to external");
+}
+
+
+void v8::Object::CheckCast(Value* that) {
+  if (IsDeadCheck("v8::Object::Cast()")) return;
+  i::Handle<i::Object> obj = Utils::OpenHandle(that);
+  ApiCheck(obj->IsJSObject(),
+           "v8::Object::Cast()",
+           "Could not convert to object");
+}
+
+
+void v8::Function::CheckCast(Value* that) {
+  if (IsDeadCheck("v8::Function::Cast()")) return;
+  i::Handle<i::Object> obj = Utils::OpenHandle(that);
+  ApiCheck(obj->IsJSFunction(),
+           "v8::Function::Cast()",
+           "Could not convert to function");
+}
+
+
+void v8::String::CheckCast(v8::Value* that) {
+  if (IsDeadCheck("v8::String::Cast()")) return;
+  i::Handle<i::Object> obj = Utils::OpenHandle(that);
+  ApiCheck(obj->IsString(),
+           "v8::String::Cast()",
+           "Could not convert to string");
+}
+
+
+void v8::Number::CheckCast(v8::Value* that) {
+  if (IsDeadCheck("v8::Number::Cast()")) return;
+  i::Handle<i::Object> obj = Utils::OpenHandle(that);
+  ApiCheck(obj->IsNumber(),
+           "v8::Number::Cast()",
+           "Could not convert to number");
+}
+
+
+void v8::Integer::CheckCast(v8::Value* that) {
+  if (IsDeadCheck("v8::Integer::Cast()")) return;
+  i::Handle<i::Object> obj = Utils::OpenHandle(that);
+  ApiCheck(obj->IsNumber(),
+           "v8::Integer::Cast()",
+           "Could not convert to number");
+}
+
+
+void v8::Array::CheckCast(Value* that) {
+  if (IsDeadCheck("v8::Array::Cast()")) return;
+  i::Handle<i::Object> obj = Utils::OpenHandle(that);
+  ApiCheck(obj->IsJSArray(),
+           "v8::Array::Cast()",
+           "Could not convert to array");
+}
+
+
+void v8::Date::CheckCast(v8::Value* that) {
+  if (IsDeadCheck("v8::Date::Cast()")) return;
+  i::Handle<i::Object> obj = Utils::OpenHandle(that);
+  ApiCheck(obj->HasSpecificClassOf(i::Heap::Date_symbol()),
+           "v8::Date::Cast()",
+           "Could not convert to date");
+}
+
+
+bool Value::BooleanValue() const {
+  if (IsDeadCheck("v8::Value::BooleanValue()")) return false;
+  LOG_API("BooleanValue");
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  if (obj->IsBoolean()) {
+    return obj->IsTrue();
+  } else {
+    ENTER_V8;
+    i::Handle<i::Object> value = i::Execution::ToBoolean(obj);
+    return value->IsTrue();
+  }
+}
+
+
+double Value::NumberValue() const {
+  if (IsDeadCheck("v8::Value::NumberValue()")) return i::OS::nan_value();
+  LOG_API("NumberValue");
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  i::Handle<i::Object> num;
+  if (obj->IsNumber()) {
+    num = obj;
+  } else {
+    ENTER_V8;
+    EXCEPTION_PREAMBLE();
+    num = i::Execution::ToNumber(obj, &has_pending_exception);
+    EXCEPTION_BAILOUT_CHECK(i::OS::nan_value());
+  }
+  return num->Number();
+}
+
+
+int64_t Value::IntegerValue() const {
+  if (IsDeadCheck("v8::Value::IntegerValue()")) return 0;
+  LOG_API("IntegerValue");
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  i::Handle<i::Object> num;
+  if (obj->IsNumber()) {
+    num = obj;
+  } else {
+    ENTER_V8;
+    EXCEPTION_PREAMBLE();
+    num = i::Execution::ToInteger(obj, &has_pending_exception);
+    EXCEPTION_BAILOUT_CHECK(0);
+  }
+  if (num->IsSmi()) {
+    return i::Smi::cast(*num)->value();
+  } else {
+    return static_cast<int64_t>(num->Number());
+  }
+}
+
+
+Local<Int32> Value::ToInt32() const {
+  if (IsDeadCheck("v8::Value::ToInt32()")) return Local<Int32>();
+  LOG_API("ToInt32");
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  i::Handle<i::Object> num;
+  if (obj->IsSmi()) {
+    num = obj;
+  } else {
+    ENTER_V8;
+    EXCEPTION_PREAMBLE();
+    num = i::Execution::ToInt32(obj, &has_pending_exception);
+    EXCEPTION_BAILOUT_CHECK(Local<Int32>());
+  }
+  return Local<Int32>(ToApi<Int32>(num));
+}
+
+
+Local<Uint32> Value::ToUint32() const {
+  if (IsDeadCheck("v8::Value::ToUint32()")) return Local<Uint32>();
+  LOG_API("ToUInt32");
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  i::Handle<i::Object> num;
+  if (obj->IsSmi()) {
+    num = obj;
+  } else {
+    ENTER_V8;
+    EXCEPTION_PREAMBLE();
+    num = i::Execution::ToUint32(obj, &has_pending_exception);
+    EXCEPTION_BAILOUT_CHECK(Local<Uint32>());
+  }
+  return Local<Uint32>(ToApi<Uint32>(num));
+}
+
+
+Local<Uint32> Value::ToArrayIndex() const {
+  if (IsDeadCheck("v8::Value::ToArrayIndex()")) return Local<Uint32>();
+  LOG_API("ToArrayIndex");
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  if (obj->IsSmi()) {
+    if (i::Smi::cast(*obj)->value() >= 0) return Utils::Uint32ToLocal(obj);
+    return Local<Uint32>();
+  }
+  ENTER_V8;
+  EXCEPTION_PREAMBLE();
+  i::Handle<i::Object> string_obj =
+      i::Execution::ToString(obj, &has_pending_exception);
+  EXCEPTION_BAILOUT_CHECK(Local<Uint32>());
+  i::Handle<i::String> str = i::Handle<i::String>::cast(string_obj);
+  uint32_t index;
+  if (str->AsArrayIndex(&index)) {
+    i::Handle<i::Object> value;
+    if (index <= static_cast<uint32_t>(i::Smi::kMaxValue)) {
+      value = i::Handle<i::Object>(i::Smi::FromInt(index));
+    } else {
+      value = i::Factory::NewNumber(index);
+    }
+    return Utils::Uint32ToLocal(value);
+  }
+  return Local<Uint32>();
+}
+
+
+int32_t Value::Int32Value() const {
+  if (IsDeadCheck("v8::Value::Int32Value()")) return 0;
+  LOG_API("Int32Value");
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  if (obj->IsSmi()) {
+    return i::Smi::cast(*obj)->value();
+  } else {
+    LOG_API("Int32Value (slow)");
+    ENTER_V8;
+    EXCEPTION_PREAMBLE();
+    i::Handle<i::Object> num =
+        i::Execution::ToInt32(obj, &has_pending_exception);
+    EXCEPTION_BAILOUT_CHECK(0);
+    if (num->IsSmi()) {
+      return i::Smi::cast(*num)->value();
+    } else {
+      return static_cast<int32_t>(num->Number());
+    }
+  }
+}
+
+
+bool Value::Equals(Handle<Value> that) const {
+  if (IsDeadCheck("v8::Value::Equals()")
+      || EmptyCheck("v8::Value::Equals()", this)
+      || EmptyCheck("v8::Value::Equals()", that)) {
+    return false;
+  }
+  LOG_API("Equals");
+  ENTER_V8;
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  i::Handle<i::Object> other = Utils::OpenHandle(*that);
+  i::Object** args[1] = { other.location() };
+  EXCEPTION_PREAMBLE();
+  i::Handle<i::Object> result =
+      CallV8HeapFunction("EQUALS", obj, 1, args, &has_pending_exception);
+  EXCEPTION_BAILOUT_CHECK(false);
+  return *result == i::Smi::FromInt(i::EQUAL);
+}
+
+
+bool Value::StrictEquals(Handle<Value> that) const {
+  if (IsDeadCheck("v8::Value::StrictEquals()")
+      || EmptyCheck("v8::Value::StrictEquals()", this)
+      || EmptyCheck("v8::Value::StrictEquals()", that)) {
+    return false;
+  }
+  LOG_API("StrictEquals");
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  i::Handle<i::Object> other = Utils::OpenHandle(*that);
+  // Must check HeapNumber first, since NaN !== NaN.
+  if (obj->IsHeapNumber()) {
+    if (!other->IsNumber()) return false;
+    double x = obj->Number();
+    double y = other->Number();
+    // Must check explicitly for NaNs on Windows, but -0 works fine.
+    return x == y && !isnan(x) && !isnan(y);
+  } else if (*obj == *other) {  // Also covers Booleans.
+    return true;
+  } else if (obj->IsSmi()) {
+    return other->IsNumber() && obj->Number() == other->Number();
+  } else if (obj->IsString()) {
+    return other->IsString() &&
+      i::String::cast(*obj)->Equals(i::String::cast(*other));
+  } else if (obj->IsUndefined() || obj->IsUndetectableObject()) {
+    return other->IsUndefined() || other->IsUndetectableObject();
+  } else {
+    return false;
+  }
+}
+
+
+uint32_t Value::Uint32Value() const {
+  if (IsDeadCheck("v8::Value::Uint32Value()")) return 0;
+  LOG_API("Uint32Value");
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  if (obj->IsSmi()) {
+    return i::Smi::cast(*obj)->value();
+  } else {
+    ENTER_V8;
+    EXCEPTION_PREAMBLE();
+    i::Handle<i::Object> num =
+        i::Execution::ToUint32(obj, &has_pending_exception);
+    EXCEPTION_BAILOUT_CHECK(0);
+    if (num->IsSmi()) {
+      return i::Smi::cast(*num)->value();
+    } else {
+      return static_cast<uint32_t>(num->Number());
+    }
+  }
+}
+
+
+bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
+                     v8::PropertyAttribute attribs) {
+  ON_BAILOUT("v8::Object::Set()", return false);
+  ENTER_V8;
+  HandleScope scope;
+  i::Handle<i::Object> self = Utils::OpenHandle(this);
+  i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
+  i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
+  EXCEPTION_PREAMBLE();
+  i::Handle<i::Object> obj = i::SetProperty(
+      self,
+      key_obj,
+      value_obj,
+      static_cast<PropertyAttributes>(attribs));
+  has_pending_exception = obj.is_null();
+  EXCEPTION_BAILOUT_CHECK(false);
+  return true;
+}
+
+
+bool v8::Object::ForceSet(v8::Handle<Value> key,
+                          v8::Handle<Value> value,
+                          v8::PropertyAttribute attribs) {
+  ON_BAILOUT("v8::Object::ForceSet()", return false);
+  ENTER_V8;
+  HandleScope scope;
+  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
+  i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
+  EXCEPTION_PREAMBLE();
+  i::Handle<i::Object> obj = i::ForceSetProperty(
+      self,
+      key_obj,
+      value_obj,
+      static_cast<PropertyAttributes>(attribs));
+  has_pending_exception = obj.is_null();
+  EXCEPTION_BAILOUT_CHECK(false);
+  return true;
+}
+
+
+bool v8::Object::ForceDelete(v8::Handle<Value> key) {
+  ON_BAILOUT("v8::Object::ForceDelete()", return false);
+  ENTER_V8;
+  HandleScope scope;
+  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
+  EXCEPTION_PREAMBLE();
+  i::Handle<i::Object> obj = i::ForceDeleteProperty(self, key_obj);
+  has_pending_exception = obj.is_null();
+  EXCEPTION_BAILOUT_CHECK(false);
+  return obj->IsTrue();
+}
+
+
+Local<Value> v8::Object::Get(v8::Handle<Value> key) {
+  ON_BAILOUT("v8::Object::Get()", return Local<v8::Value>());
+  ENTER_V8;
+  i::Handle<i::Object> self = Utils::OpenHandle(this);
+  i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
+  EXCEPTION_PREAMBLE();
+  i::Handle<i::Object> result = i::GetProperty(self, key_obj);
+  has_pending_exception = result.is_null();
+  EXCEPTION_BAILOUT_CHECK(Local<Value>());
+  return Utils::ToLocal(result);
+}
+
+
+Local<Value> v8::Object::GetPrototype() {
+  ON_BAILOUT("v8::Object::GetPrototype()", return Local<v8::Value>());
+  ENTER_V8;
+  i::Handle<i::Object> self = Utils::OpenHandle(this);
+  i::Handle<i::Object> result = i::GetPrototype(self);
+  return Utils::ToLocal(result);
+}
+
+
+Local<Object> v8::Object::FindInstanceInPrototypeChain(
+    v8::Handle<FunctionTemplate> tmpl) {
+  ON_BAILOUT("v8::Object::FindInstanceInPrototypeChain()",
+             return Local<v8::Object>());
+  ENTER_V8;
+  i::JSObject* object = *Utils::OpenHandle(this);
+  i::FunctionTemplateInfo* tmpl_info = *Utils::OpenHandle(*tmpl);
+  while (!object->IsInstanceOf(tmpl_info)) {
+    i::Object* prototype = object->GetPrototype();
+    if (!prototype->IsJSObject()) return Local<Object>();
+    object = i::JSObject::cast(prototype);
+  }
+  return Utils::ToLocal(i::Handle<i::JSObject>(object));
+}
+
+
+Local<Array> v8::Object::GetPropertyNames() {
+  ON_BAILOUT("v8::Object::GetPropertyNames()", return Local<v8::Array>());
+  ENTER_V8;
+  v8::HandleScope scope;
+  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  i::Handle<i::FixedArray> value =
+      i::GetKeysInFixedArrayFor(self, i::INCLUDE_PROTOS);
+  // Because we use caching to speed up enumeration it is important
+  // to never change the result of the basic enumeration function so
+  // we clone the result.
+  i::Handle<i::FixedArray> elms = i::Factory::CopyFixedArray(value);
+  i::Handle<i::JSArray> result = i::Factory::NewJSArrayWithElements(elms);
+  return scope.Close(Utils::ToLocal(result));
+}
+
+
+Local<String> v8::Object::ObjectProtoToString() {
+  ON_BAILOUT("v8::Object::ObjectProtoToString()", return Local<v8::String>());
+  ENTER_V8;
+  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+
+  i::Handle<i::Object> name(self->class_name());
+
+  // Native implementation of Object.prototype.toString (v8natives.js):
+  //   var c = %ClassOf(this);
+  //   if (c === 'Arguments') c = 'Object';
+  //   return "[object " + c + "]";
+
+  if (!name->IsString()) {
+    return v8::String::New("[object ]");
+
+  } else {
+    i::Handle<i::String> class_name = i::Handle<i::String>::cast(name);
+    if (class_name->IsEqualTo(i::CStrVector("Arguments"))) {
+      return v8::String::New("[object Object]");
+
+    } else {
+      const char* prefix = "[object ";
+      Local<String> str = Utils::ToLocal(class_name);
+      const char* postfix = "]";
+
+      size_t prefix_len = strlen(prefix);
+      size_t str_len = str->Length();
+      size_t postfix_len = strlen(postfix);
+
+      size_t buf_len = prefix_len + str_len + postfix_len;
+      char* buf = i::NewArray<char>(buf_len);
+
+      // Write prefix.
+      char* ptr = buf;
+      memcpy(ptr, prefix, prefix_len * v8::internal::kCharSize);
+      ptr += prefix_len;
+
+      // Write real content.
+      str->WriteAscii(ptr, 0, str_len);
+      ptr += str_len;
+
+      // Write postfix.
+      memcpy(ptr, postfix, postfix_len * v8::internal::kCharSize);
+
+      // Copy the buffer into a heap-allocated string and return it.
+      Local<String> result = v8::String::New(buf, buf_len);
+      i::DeleteArray(buf);
+      return result;
+    }
+  }
+}
+
+
+bool v8::Object::Delete(v8::Handle<String> key) {
+  ON_BAILOUT("v8::Object::Delete()", return false);
+  ENTER_V8;
+  HandleScope scope;
+  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
+  return i::DeleteProperty(self, key_obj)->IsTrue();
+}
+
+
+bool v8::Object::Has(v8::Handle<String> key) {
+  ON_BAILOUT("v8::Object::Has()", return false);
+  ENTER_V8;
+  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
+  return self->HasProperty(*key_obj);
+}
+
+
+bool v8::Object::Delete(uint32_t index) {
+  ON_BAILOUT("v8::Object::DeleteProperty()", return false);
+  ENTER_V8;
+  HandleScope scope;
+  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  return i::DeleteElement(self, index)->IsTrue();
+}
+
+
+bool v8::Object::Has(uint32_t index) {
+  ON_BAILOUT("v8::Object::HasProperty()", return false);
+  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  return self->HasElement(index);
+}
+
+
+bool v8::Object::HasRealNamedProperty(Handle<String> key) {
+  ON_BAILOUT("v8::Object::HasRealNamedProperty()", return false);
+  return Utils::OpenHandle(this)->HasRealNamedProperty(
+      *Utils::OpenHandle(*key));
+}
+
+
+bool v8::Object::HasRealIndexedProperty(uint32_t index) {
+  ON_BAILOUT("v8::Object::HasRealIndexedProperty()", return false);
+  return Utils::OpenHandle(this)->HasRealElementProperty(index);
+}
+
+
+bool v8::Object::HasRealNamedCallbackProperty(Handle<String> key) {
+  ON_BAILOUT("v8::Object::HasRealNamedCallbackProperty()", return false);
+  ENTER_V8;
+  return Utils::OpenHandle(this)->HasRealNamedCallbackProperty(
+      *Utils::OpenHandle(*key));
+}
+
+
+bool v8::Object::HasNamedLookupInterceptor() {
+  ON_BAILOUT("v8::Object::HasNamedLookupInterceptor()", return false);
+  return Utils::OpenHandle(this)->HasNamedInterceptor();
+}
+
+
+bool v8::Object::HasIndexedLookupInterceptor() {
+  ON_BAILOUT("v8::Object::HasIndexedLookupInterceptor()", return false);
+  return Utils::OpenHandle(this)->HasIndexedInterceptor();
+}
+
+
+Local<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
+      Handle<String> key) {
+  ON_BAILOUT("v8::Object::GetRealNamedPropertyInPrototypeChain()",
+             return Local<Value>());
+  ENTER_V8;
+  i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
+  i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
+  i::LookupResult lookup;
+  self_obj->LookupRealNamedPropertyInPrototypes(*key_obj, &lookup);
+  if (lookup.IsValid()) {
+    PropertyAttributes attributes;
+    i::Handle<i::Object> result(self_obj->GetProperty(*self_obj,
+                                                      &lookup,
+                                                      *key_obj,
+                                                      &attributes));
+    return Utils::ToLocal(result);
+  }
+  return Local<Value>();  // No real property was found in prototype chain.
+}
+
+
+Local<Value> v8::Object::GetRealNamedProperty(Handle<String> key) {
+  ON_BAILOUT("v8::Object::GetRealNamedProperty()", return Local<Value>());
+  ENTER_V8;
+  i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
+  i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
+  i::LookupResult lookup;
+  self_obj->LookupRealNamedProperty(*key_obj, &lookup);
+  if (lookup.IsValid()) {
+    PropertyAttributes attributes;
+    i::Handle<i::Object> result(self_obj->GetProperty(*self_obj,
+                                                      &lookup,
+                                                      *key_obj,
+                                                      &attributes));
+    return Utils::ToLocal(result);
+  }
+  return Local<Value>();  // No real property was found on the object itself.
+}
+
+
+// Turns on access checks by copying the map and setting the check flag.
+// Because the object gets a new map, existing inline caches that cache
+// the old map of this object will fail.
+void v8::Object::TurnOnAccessCheck() {
+  ON_BAILOUT("v8::Object::TurnOnAccessCheck()", return);
+  ENTER_V8;
+  HandleScope scope;
+  i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+
+  i::Handle<i::Map> new_map =
+    i::Factory::CopyMapDropTransitions(i::Handle<i::Map>(obj->map()));
+  new_map->set_is_access_check_needed(true);
+  obj->set_map(*new_map);
+}
+
+
+bool v8::Object::IsDirty() {
+  return Utils::OpenHandle(this)->IsDirty();
+}
+
+
+Local<v8::Object> v8::Object::Clone() {
+  ON_BAILOUT("v8::Object::Clone()", return Local<Object>());
+  ENTER_V8;
+  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  EXCEPTION_PREAMBLE();
+  i::Handle<i::JSObject> result = i::Copy(self);
+  has_pending_exception = result.is_null();
+  EXCEPTION_BAILOUT_CHECK(Local<Object>());
+  return Utils::ToLocal(result);
+}
+
+
+int v8::Object::GetIdentityHash() {
+  ON_BAILOUT("v8::Object::GetIdentityHash()", return 0);
+  ENTER_V8;
+  HandleScope scope;
+  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, true));
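+  // The identity hash is cached on the object's hidden property list so
+  // that repeated calls return the same value even though the garbage
+  // collector may move the object.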
+  i::Handle<i::Object> hash_symbol = i::Factory::identity_hash_symbol();
+  i::Handle<i::Object> hash = i::GetProperty(hidden_props, hash_symbol);
+  int hash_value;
+  if (hash->IsSmi()) {
+    hash_value = i::Smi::cast(*hash)->value();
+  } else {
+    int attempts = 0;
+    do {
+      // Generate a random 32-bit hash value but limit range to fit
+      // within a smi.
+      hash_value = i::V8::Random() & i::Smi::kMaxValue;
+      attempts++;
+    } while (hash_value == 0 && attempts < 30);
+    hash_value = hash_value != 0 ? hash_value : 1;  // never return 0
+    i::SetProperty(hidden_props,
+                   hash_symbol,
+                   i::Handle<i::Object>(i::Smi::FromInt(hash_value)),
+                   static_cast<PropertyAttributes>(None));
+  }
+  return hash_value;
+}
+
+
+bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key,
+                                v8::Handle<v8::Value> value) {
+  ON_BAILOUT("v8::Object::SetHiddenValue()", return false);
+  ENTER_V8;
+  HandleScope scope;
+  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, true));
+  i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
+  i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
+  EXCEPTION_PREAMBLE();
+  i::Handle<i::Object> obj = i::SetProperty(
+      hidden_props,
+      key_obj,
+      value_obj,
+      static_cast<PropertyAttributes>(None));
+  has_pending_exception = obj.is_null();
+  EXCEPTION_BAILOUT_CHECK(false);
+  return true;
+}
+
+
+v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
+  ON_BAILOUT("v8::Object::GetHiddenValue()", return Local<v8::Value>());
+  ENTER_V8;
+  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, false));
+  if (hidden_props->IsUndefined()) {
+    return v8::Local<v8::Value>();
+  }
+  i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
+  EXCEPTION_PREAMBLE();
+  i::Handle<i::Object> result = i::GetProperty(hidden_props, key_obj);
+  has_pending_exception = result.is_null();
+  EXCEPTION_BAILOUT_CHECK(v8::Local<v8::Value>());
+  if (result->IsUndefined()) {
+    return v8::Local<v8::Value>();
+  }
+  return Utils::ToLocal(result);
+}
+
+
+bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
+  ON_BAILOUT("v8::DeleteHiddenValue()", return false);
+  ENTER_V8;
+  HandleScope scope;
+  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, false));
+  if (hidden_props->IsUndefined()) {
+    return true;
+  }
+  i::Handle<i::JSObject> js_obj(i::JSObject::cast(*hidden_props));
+  i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
+  return i::DeleteProperty(js_obj, key_obj)->IsTrue();
+}
+
+
+void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
+  ON_BAILOUT("v8::SetElementsToPixelData()", return);
+  ENTER_V8;
+  HandleScope scope;
+  if (!ApiCheck(i::Smi::IsValid(length),
+                "v8::Object::SetIndexedPropertiesToPixelData()",
+                "length exceeds max acceptable value")) {
+    return;
+  }
+  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  if (!ApiCheck(!self->IsJSArray(),
+                "v8::Object::SetIndexedPropertiesToPixelData()",
+                "JSArray is not supported")) {
+    return;
+  }
+  i::Handle<i::PixelArray> pixels = i::Factory::NewPixelArray(length, data);
+  self->set_elements(*pixels);
+}
+
+
+Local<v8::Object> Function::NewInstance() const {
+  return NewInstance(0, NULL);
+}
+
+
+Local<v8::Object> Function::NewInstance(int argc,
+                                        v8::Handle<v8::Value> argv[]) const {
+  ON_BAILOUT("v8::Function::NewInstance()", return Local<v8::Object>());
+  LOG_API("Function::NewInstance");
+  ENTER_V8;
+  HandleScope scope;
+  i::Handle<i::JSFunction> function = Utils::OpenHandle(this);
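+  // A v8::Handle<Value> is just a pointer to an internal Object*, so the
+  // argument array can be reinterpreted in place; the STATIC_ASSERT below
+  // guards that assumption.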
+  STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
+  i::Object*** args = reinterpret_cast<i::Object***>(argv);
+  EXCEPTION_PREAMBLE();
+  i::Handle<i::Object> returned =
+      i::Execution::New(function, argc, args, &has_pending_exception);
+  EXCEPTION_BAILOUT_CHECK(Local<v8::Object>());
+  return scope.Close(Utils::ToLocal(i::Handle<i::JSObject>::cast(returned)));
+}
+
+
+Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc,
+                                v8::Handle<v8::Value> argv[]) {
+  ON_BAILOUT("v8::Function::Call()", return Local<v8::Value>());
+  LOG_API("Function::Call");
+  ENTER_V8;
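+  // Run the call inside a nested HandleScope and carry the result out as a
+  // raw pointer, then re-wrap it in a handle that outlives the inner scope.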
+  i::Object* raw_result = NULL;
+  {
+    HandleScope scope;
+    i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
+    i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
+    STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
+    i::Object*** args = reinterpret_cast<i::Object***>(argv);
+    EXCEPTION_PREAMBLE();
+    i::Handle<i::Object> returned =
+        i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
+    EXCEPTION_BAILOUT_CHECK(Local<Object>());
+    raw_result = *returned;
+  }
+  i::Handle<i::Object> result(raw_result);
+  return Utils::ToLocal(result);
+}
+
+
+void Function::SetName(v8::Handle<v8::String> name) {
+  ENTER_V8;
+  i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+  func->shared()->set_name(*Utils::OpenHandle(*name));
+}
+
+
+Handle<Value> Function::GetName() const {
+  i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+  return Utils::ToLocal(i::Handle<i::Object>(func->shared()->name()));
+}
+
+
+int String::Length() const {
+  if (IsDeadCheck("v8::String::Length()")) return 0;
+  return Utils::OpenHandle(this)->length();
+}
+
+
+int String::Utf8Length() const {
+  if (IsDeadCheck("v8::String::Utf8Length()")) return 0;
+  return Utils::OpenHandle(this)->Utf8Length();
+}
+
+
+int String::WriteUtf8(char* buffer, int capacity) const {
+  if (IsDeadCheck("v8::String::WriteUtf8()")) return 0;
+  LOG_API("String::WriteUtf8");
+  ENTER_V8;
+  i::Handle<i::String> str = Utils::OpenHandle(this);
+  write_input_buffer.Reset(0, *str);
+  int len = str->length();
+  // Encode characters directly into the buffer as long as a maximal
+  // encoding is guaranteed to fit in the remaining space.  If no capacity
+  // is given we copy all of them here.
+  int fast_end = capacity - (unibrow::Utf8::kMaxEncodedSize - 1);
+  int i;
+  int pos = 0;
+  for (i = 0; i < len && (capacity == -1 || pos < fast_end); i++) {
+    i::uc32 c = write_input_buffer.GetNext();
+    int written = unibrow::Utf8::Encode(buffer + pos, c);
+    pos += written;
+  }
+  if (i < len) {
+    // For the last characters we need to check the length for each one
+    // because they may be longer than the remaining space in the
+    // buffer.
+    char intermediate[unibrow::Utf8::kMaxEncodedSize];
+    for (; i < len && pos < capacity; i++) {
+      i::uc32 c = write_input_buffer.GetNext();
+      int written = unibrow::Utf8::Encode(intermediate, c);
+      if (pos + written <= capacity) {
+        for (int j = 0; j < written; j++)
+          buffer[pos + j] = intermediate[j];
+        pos += written;
+      } else {
+        // We've reached the end of the buffer
+        break;
+      }
+    }
+  }
+  if (i == len && (capacity == -1 || pos < capacity))
+    buffer[pos++] = '\0';
+  return pos;
+}
+
+
+int String::WriteAscii(char* buffer, int start, int length) const {
+  if (IsDeadCheck("v8::String::WriteAscii()")) return 0;
+  LOG_API("String::WriteAscii");
+  ENTER_V8;
+  ASSERT(start >= 0 && length >= -1);
+  i::Handle<i::String> str = Utils::OpenHandle(this);
+  // Flatten the string for efficiency.  This applies whether we are
+  // using StringInputBuffer or Get(i) to access the characters.
+  str->TryFlattenIfNotFlat();
+  int end = length;
+  if ((length == -1) || (length > str->length() - start))
+    end = str->length() - start;
+  if (end < 0) return 0;
+  write_input_buffer.Reset(start, *str);
+  int i;
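+  // Embedded NUL characters are replaced with spaces so they do not
+  // truncate the NUL-terminated output.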
+  for (i = 0; i < end; i++) {
+    char c = static_cast<char>(write_input_buffer.GetNext());
+    if (c == '\0') c = ' ';
+    buffer[i] = c;
+  }
+  if (length == -1 || i < length)
+    buffer[i] = '\0';
+  return i;
+}
+
+
+int String::Write(uint16_t* buffer, int start, int length) const {
+  if (IsDeadCheck("v8::String::Write()")) return 0;
+  LOG_API("String::Write");
+  ENTER_V8;
+  ASSERT(start >= 0 && length >= -1);
+  i::Handle<i::String> str = Utils::OpenHandle(this);
+  int end = length;
+  if ((length == -1) || (length > str->length() - start))
+    end = str->length() - start;
+  if (end < 0) return 0;
+  i::String::WriteToFlat(*str, buffer, start, end);
+  if (length == -1 || end < length)
+    buffer[end] = '\0';
+  return end;
+}
+
+
+bool v8::String::IsExternal() const {
+  EnsureInitialized("v8::String::IsExternal()");
+  i::Handle<i::String> str = Utils::OpenHandle(this);
+  return i::StringShape(*str).IsExternalTwoByte();
+}
+
+
+bool v8::String::IsExternalAscii() const {
+  EnsureInitialized("v8::String::IsExternalAscii()");
+  i::Handle<i::String> str = Utils::OpenHandle(this);
+  return i::StringShape(*str).IsExternalAscii();
+}
+
+
+void v8::String::VerifyExternalStringResource(
+    v8::String::ExternalStringResource* value) const {
+  i::Handle<i::String> str = Utils::OpenHandle(this);
+  v8::String::ExternalStringResource* expected;
+  if (i::StringShape(*str).IsExternalTwoByte()) {
+    void* resource = i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
+    expected = reinterpret_cast<ExternalStringResource*>(resource);
+  } else {
+    expected = NULL;
+  }
+  CHECK_EQ(expected, value);
+}
+
+
+v8::String::ExternalAsciiStringResource*
+      v8::String::GetExternalAsciiStringResource() const {
+  EnsureInitialized("v8::String::GetExternalAsciiStringResource()");
+  i::Handle<i::String> str = Utils::OpenHandle(this);
+  if (i::StringShape(*str).IsExternalAscii()) {
+    void* resource = i::Handle<i::ExternalAsciiString>::cast(str)->resource();
+    return reinterpret_cast<ExternalAsciiStringResource*>(resource);
+  } else {
+    return NULL;
+  }
+}
+
+
+double Number::Value() const {
+  if (IsDeadCheck("v8::Number::Value()")) return 0;
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  return obj->Number();
+}
+
+
+bool Boolean::Value() const {
+  if (IsDeadCheck("v8::Boolean::Value()")) return false;
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  return obj->IsTrue();
+}
+
+
+int64_t Integer::Value() const {
+  if (IsDeadCheck("v8::Integer::Value()")) return 0;
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  if (obj->IsSmi()) {
+    return i::Smi::cast(*obj)->value();
+  } else {
+    return static_cast<int64_t>(obj->Number());
+  }
+}
+
+
+int32_t Int32::Value() const {
+  if (IsDeadCheck("v8::Int32::Value()")) return 0;
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  if (obj->IsSmi()) {
+    return i::Smi::cast(*obj)->value();
+  } else {
+    return static_cast<int32_t>(obj->Number());
+  }
+}
+
+
+int v8::Object::InternalFieldCount() {
+  if (IsDeadCheck("v8::Object::InternalFieldCount()")) return 0;
+  i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+  return obj->GetInternalFieldCount();
+}
+
+
+Local<Value> v8::Object::CheckedGetInternalField(int index) {
+  if (IsDeadCheck("v8::Object::GetInternalField()")) return Local<Value>();
+  i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+  if (!ApiCheck(index < obj->GetInternalFieldCount(),
+                "v8::Object::GetInternalField()",
+                "Reading internal field out of bounds")) {
+    return Local<Value>();
+  }
+  i::Handle<i::Object> value(obj->GetInternalField(index));
+  Local<Value> result = Utils::ToLocal(value);
+#ifdef DEBUG
+  Local<Value> unchecked = UncheckedGetInternalField(index);
+  ASSERT(unchecked.IsEmpty() || (unchecked == result));
+#endif
+  return result;
+}
+
+
+void v8::Object::SetInternalField(int index, v8::Handle<Value> value) {
+  if (IsDeadCheck("v8::Object::SetInternalField()")) return;
+  i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+  if (!ApiCheck(index < obj->GetInternalFieldCount(),
+                "v8::Object::SetInternalField()",
+                "Writing internal field out of bounds")) {
+    return;
+  }
+  ENTER_V8;
+  i::Handle<i::Object> val = Utils::OpenHandle(*value);
+  obj->SetInternalField(index, *val);
+}
+
+
+void v8::Object::SetPointerInInternalField(int index, void* value) {
+  SetInternalField(index, External::Wrap(value));
+}
+
+
+// --- E n v i r o n m e n t ---
+
+bool v8::V8::Initialize() {
+  if (i::V8::IsRunning()) return true;
+  ENTER_V8;
+  HandleScope scope;
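+  // Boot from a heap snapshot if one was built in; otherwise fall back to
+  // a full initialization from scratch.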
+  if (i::Snapshot::Initialize()) {
+    return true;
+  } else {
+    return i::V8::Initialize(NULL);
+  }
+}
+
+
+bool v8::V8::Dispose() {
+  i::V8::TearDown();
+  return true;
+}
+
+
+bool v8::V8::IdleNotification(bool is_high_priority) {
+  // Returning true tells the caller that it need not
+  // continue to call IdleNotification.
+  if (!i::V8::IsRunning()) return true;
+  return i::V8::IdleNotification(is_high_priority);
+}
+
+
+void v8::V8::LowMemoryNotification() {
+#if defined(ANDROID)
+  if (!i::V8::IsRunning()) return;
+  i::Heap::CollectAllGarbage(true);
+#endif
+}
+
+
+const char* v8::V8::GetVersion() {
+  static v8::internal::EmbeddedVector<char, 128> buffer;
+  v8::internal::Version::GetString(buffer);
+  return buffer.start();
+}
+
+
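+// Make sure the given ObjectTemplate has a constructor FunctionTemplate,
+// creating and linking one on demand, and return it.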
+static i::Handle<i::FunctionTemplateInfo>
+    EnsureConstructor(i::Handle<i::ObjectTemplateInfo> templ) {
+  if (templ->constructor()->IsUndefined()) {
+    Local<FunctionTemplate> constructor = FunctionTemplate::New();
+    Utils::OpenHandle(*constructor)->set_instance_template(*templ);
+    templ->set_constructor(*Utils::OpenHandle(*constructor));
+  }
+  return i::Handle<i::FunctionTemplateInfo>(
+    i::FunctionTemplateInfo::cast(templ->constructor()));
+}
+
+
+Persistent<Context> v8::Context::New(
+    v8::ExtensionConfiguration* extensions,
+    v8::Handle<ObjectTemplate> global_template,
+    v8::Handle<Value> global_object) {
+  EnsureInitialized("v8::Context::New()");
+  LOG_API("Context::New");
+  ON_BAILOUT("v8::Context::New()", return Persistent<Context>());
+
+  // Enter V8 via an ENTER_V8 scope.
+  i::Handle<i::Context> env;
+  {
+    ENTER_V8;
+#if defined(ANDROID)
+    // On mobile devices, a full GC is expensive; leave it to the system to
+    // decide when to do a full GC.
+#else
+    // Give the heap a chance to cleanup if we've disposed contexts.
+    i::Heap::CollectAllGarbageIfContextDisposed();
+#endif
+    v8::Handle<ObjectTemplate> proxy_template = global_template;
+    i::Handle<i::FunctionTemplateInfo> proxy_constructor;
+    i::Handle<i::FunctionTemplateInfo> global_constructor;
+
+    if (!global_template.IsEmpty()) {
+      // Make sure that the global_template has a constructor.
+      global_constructor =
+          EnsureConstructor(Utils::OpenHandle(*global_template));
+
+      // Create a fresh template for the global proxy object.
+      proxy_template = ObjectTemplate::New();
+      proxy_constructor =
+          EnsureConstructor(Utils::OpenHandle(*proxy_template));
+
+      // Set the global template to be the prototype template of
+      // global proxy template.
+      proxy_constructor->set_prototype_template(
+          *Utils::OpenHandle(*global_template));
+
+      // Migrate security handlers from global_template to
+      // proxy_template and temporarily remove the access check
+      // information from the global template.
+      if (!global_constructor->access_check_info()->IsUndefined()) {
+        proxy_constructor->set_access_check_info(
+            global_constructor->access_check_info());
+        proxy_constructor->set_needs_access_check(
+            global_constructor->needs_access_check());
+        global_constructor->set_needs_access_check(false);
+        global_constructor->set_access_check_info(i::Heap::undefined_value());
+      }
+    }
+
+    // Create the environment.
+    env = i::Bootstrapper::CreateEnvironment(
+        Utils::OpenHandle(*global_object),
+        proxy_template,
+        extensions);
+
+    // Restore the access check info on the global template.
+    if (!global_template.IsEmpty()) {
+      ASSERT(!global_constructor.is_null());
+      ASSERT(!proxy_constructor.is_null());
+      global_constructor->set_access_check_info(
+          proxy_constructor->access_check_info());
+      global_constructor->set_needs_access_check(
+          proxy_constructor->needs_access_check());
+    }
+  }
+  // Leave V8.
+
+  if (env.is_null())
+    return Persistent<Context>();
+  return Persistent<Context>(Utils::ToLocal(env));
+}
+
+
+void v8::Context::SetSecurityToken(Handle<Value> token) {
+  if (IsDeadCheck("v8::Context::SetSecurityToken()")) return;
+  ENTER_V8;
+  i::Handle<i::Context> env = Utils::OpenHandle(this);
+  i::Handle<i::Object> token_handle = Utils::OpenHandle(*token);
+  env->set_security_token(*token_handle);
+}
+
+
+void v8::Context::UseDefaultSecurityToken() {
+  if (IsDeadCheck("v8::Context::UseDefaultSecurityToken()")) return;
+  ENTER_V8;
+  i::Handle<i::Context> env = Utils::OpenHandle(this);
+  env->set_security_token(env->global());
+}
+
+
+Handle<Value> v8::Context::GetSecurityToken() {
+  if (IsDeadCheck("v8::Context::GetSecurityToken()")) return Handle<Value>();
+  i::Handle<i::Context> env = Utils::OpenHandle(this);
+  i::Object* security_token = env->security_token();
+  i::Handle<i::Object> token_handle(security_token);
+  return Utils::ToLocal(token_handle);
+}
+
+
+bool Context::HasOutOfMemoryException() {
+  i::Handle<i::Context> env = Utils::OpenHandle(this);
+  return env->has_out_of_memory();
+}
+
+
+bool Context::InContext() {
+  return i::Top::context() != NULL;
+}
+
+
+v8::Local<v8::Context> Context::GetEntered() {
+  if (IsDeadCheck("v8::Context::GetEntered()")) return Local<Context>();
+  i::Handle<i::Object> last = thread_local.LastEnteredContext();
+  if (last.is_null()) return Local<Context>();
+  i::Handle<i::Context> context = i::Handle<i::Context>::cast(last);
+  return Utils::ToLocal(context);
+}
+
+
+v8::Local<v8::Context> Context::GetCurrent() {
+  if (IsDeadCheck("v8::Context::GetCurrent()")) return Local<Context>();
+  i::Handle<i::Context> context(i::Top::global_context());
+  return Utils::ToLocal(context);
+}
+
+
+v8::Local<v8::Context> Context::GetCalling() {
+  if (IsDeadCheck("v8::Context::GetCalling()")) return Local<Context>();
+  i::Handle<i::Object> calling = i::Top::GetCallingGlobalContext();
+  if (calling.is_null()) return Local<Context>();
+  i::Handle<i::Context> context = i::Handle<i::Context>::cast(calling);
+  return Utils::ToLocal(context);
+}
+
+
+v8::Local<v8::Object> Context::Global() {
+  if (IsDeadCheck("v8::Context::Global()")) return Local<v8::Object>();
+  i::Object** ctx = reinterpret_cast<i::Object**>(this);
+  i::Handle<i::Context> context =
+      i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
+  i::Handle<i::Object> global(context->global_proxy());
+  return Utils::ToLocal(i::Handle<i::JSObject>::cast(global));
+}
+
+
+void Context::DetachGlobal() {
+  if (IsDeadCheck("v8::Context::DetachGlobal()")) return;
+  ENTER_V8;
+  i::Object** ctx = reinterpret_cast<i::Object**>(this);
+  i::Handle<i::Context> context =
+      i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
+  i::Bootstrapper::DetachGlobal(context);
+}
+
+
+Local<v8::Object> ObjectTemplate::NewInstance() {
+  ON_BAILOUT("v8::ObjectTemplate::NewInstance()", return Local<v8::Object>());
+  LOG_API("ObjectTemplate::NewInstance");
+  ENTER_V8;
+  EXCEPTION_PREAMBLE();
+  i::Handle<i::Object> obj =
+      i::Execution::InstantiateObject(Utils::OpenHandle(this),
+                                      &has_pending_exception);
+  EXCEPTION_BAILOUT_CHECK(Local<v8::Object>());
+  return Utils::ToLocal(i::Handle<i::JSObject>::cast(obj));
+}
+
+
+Local<v8::Function> FunctionTemplate::GetFunction() {
+  ON_BAILOUT("v8::FunctionTemplate::GetFunction()",
+             return Local<v8::Function>());
+  LOG_API("FunctionTemplate::GetFunction");
+  ENTER_V8;
+  EXCEPTION_PREAMBLE();
+  i::Handle<i::Object> obj =
+      i::Execution::InstantiateFunction(Utils::OpenHandle(this),
+                                        &has_pending_exception);
+  EXCEPTION_BAILOUT_CHECK(Local<v8::Function>());
+  return Utils::ToLocal(i::Handle<i::JSFunction>::cast(obj));
+}
+
+
+bool FunctionTemplate::HasInstance(v8::Handle<v8::Value> value) {
+  ON_BAILOUT("v8::FunctionTemplate::HasInstanceOf()", return false);
+  i::Object* obj = *Utils::OpenHandle(*value);
+  return obj->IsInstanceOf(*Utils::OpenHandle(this));
+}
+
+
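+// Externals that cannot be encoded as Smis (see External::Wrap below) are
+// boxed in a heap-allocated Proxy object holding the raw address.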
+static Local<External> ExternalNewImpl(void* data) {
+  return Utils::ToLocal(i::Factory::NewProxy(static_cast<i::Address>(data)));
+}
+
+static void* ExternalValueImpl(i::Handle<i::Object> obj) {
+  return reinterpret_cast<void*>(i::Proxy::cast(*obj)->proxy());
+}
+
+
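+// A pointer whose low two bits are clear (i.e. at least 4-byte aligned) can
+// be shifted down and stored directly in a Smi, avoiding a heap allocation;
+// kAlignedPointerMask selects those low bits.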
+static const intptr_t kAlignedPointerMask = 3;
+
+Local<Value> v8::External::Wrap(void* data) {
+  STATIC_ASSERT(sizeof(data) == sizeof(i::Address));
+  LOG_API("External::Wrap");
+  EnsureInitialized("v8::External::Wrap()");
+  ENTER_V8;
+  if ((reinterpret_cast<intptr_t>(data) & kAlignedPointerMask) == 0) {
+    uintptr_t data_ptr = reinterpret_cast<uintptr_t>(data);
+    intptr_t data_value =
+        static_cast<intptr_t>(data_ptr >> i::Internals::kAlignedPointerShift);
+    STATIC_ASSERT(sizeof(data_ptr) == sizeof(data_value));
+    if (i::Smi::IsIntptrValid(data_value)) {
+      i::Handle<i::Object> obj(i::Smi::FromIntptr(data_value));
+      return Utils::ToLocal(obj);
+    }
+  }
+  return ExternalNewImpl(data);
+}
+
+
+void* v8::External::FullUnwrap(v8::Handle<v8::Value> wrapper) {
+  if (IsDeadCheck("v8::External::Unwrap()")) return 0;
+  i::Handle<i::Object> obj = Utils::OpenHandle(*wrapper);
+  void* result;
+  if (obj->IsSmi()) {
+    // The external value was an aligned pointer.
+    uintptr_t value = static_cast<uintptr_t>(
+        i::Smi::cast(*obj)->value()) << i::Internals::kAlignedPointerShift;
+    result = reinterpret_cast<void*>(value);
+  } else if (obj->IsProxy()) {
+    result = ExternalValueImpl(obj);
+  } else {
+    result = NULL;
+  }
+  ASSERT_EQ(result, QuickUnwrap(wrapper));
+  return result;
+}
+
+
+Local<External> v8::External::New(void* data) {
+  STATIC_ASSERT(sizeof(data) == sizeof(i::Address));
+  LOG_API("External::New");
+  EnsureInitialized("v8::External::New()");
+  ENTER_V8;
+  return ExternalNewImpl(data);
+}
+
+
+void* External::Value() const {
+  if (IsDeadCheck("v8::External::Value()")) return 0;
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  return ExternalValueImpl(obj);
+}
+
+
+Local<String> v8::String::Empty() {
+  EnsureInitialized("v8::String::Empty()");
+  LOG_API("String::Empty()");
+  return Utils::ToLocal(i::Factory::empty_symbol());
+}
+
+
+Local<String> v8::String::New(const char* data, int length) {
+  EnsureInitialized("v8::String::New()");
+  LOG_API("String::New(char)");
+  if (length == 0) return Empty();
+  ENTER_V8;
+  if (length == -1) length = strlen(data);
+  i::Handle<i::String> result =
+      i::Factory::NewStringFromUtf8(i::Vector<const char>(data, length));
+  return Utils::ToLocal(result);
+}
+
+
+Local<String> v8::String::NewUndetectable(const char* data, int length) {
+  EnsureInitialized("v8::String::NewUndetectable()");
+  LOG_API("String::NewUndetectable(char)");
+  ENTER_V8;
+  if (length == -1) length = strlen(data);
+  i::Handle<i::String> result =
+      i::Factory::NewStringFromUtf8(i::Vector<const char>(data, length));
+  result->MarkAsUndetectable();
+  return Utils::ToLocal(result);
+}
+
+
+static int TwoByteStringLength(const uint16_t* data) {
+  int length = 0;
+  while (data[length] != '\0') length++;
+  return length;
+}
+
+
+Local<String> v8::String::New(const uint16_t* data, int length) {
+  EnsureInitialized("v8::String::New()");
+  LOG_API("String::New(uint16_)");
+  if (length == 0) return Empty();
+  ENTER_V8;
+  if (length == -1) length = TwoByteStringLength(data);
+  i::Handle<i::String> result =
+      i::Factory::NewStringFromTwoByte(i::Vector<const uint16_t>(data, length));
+  return Utils::ToLocal(result);
+}
+
+
+Local<String> v8::String::NewUndetectable(const uint16_t* data, int length) {
+  EnsureInitialized("v8::String::NewUndetectable()");
+  LOG_API("String::NewUndetectable(uint16_)");
+  ENTER_V8;
+  if (length == -1) length = TwoByteStringLength(data);
+  i::Handle<i::String> result =
+      i::Factory::NewStringFromTwoByte(i::Vector<const uint16_t>(data, length));
+  result->MarkAsUndetectable();
+  return Utils::ToLocal(result);
+}
+
+
+i::Handle<i::String> NewExternalStringHandle(
+      v8::String::ExternalStringResource* resource) {
+  i::Handle<i::String> result =
+      i::Factory::NewExternalStringFromTwoByte(resource);
+  return result;
+}
+
+
+i::Handle<i::String> NewExternalAsciiStringHandle(
+      v8::String::ExternalAsciiStringResource* resource) {
+  i::Handle<i::String> result =
+      i::Factory::NewExternalStringFromAscii(resource);
+  return result;
+}
+
+
+static void DisposeExternalString(v8::Persistent<v8::Value> obj,
+                                  void* parameter) {
+  ENTER_V8;
+  i::ExternalTwoByteString* str =
+      i::ExternalTwoByteString::cast(*Utils::OpenHandle(*obj));
+
+  // External symbols are deleted when they are pruned out of the symbol
+  // table. Generally external symbols are not registered with the weak handle
+  // callbacks unless they are upgraded to a symbol after being externalized.
+  if (!str->IsSymbol()) {
+    v8::String::ExternalStringResource* resource =
+        reinterpret_cast<v8::String::ExternalStringResource*>(parameter);
+    if (resource != NULL) {
+      const size_t total_size = resource->length() * sizeof(*resource->data());
+      i::Counters::total_external_string_memory.Decrement(total_size);
+
+      // The object will continue to live in the JavaScript heap until the
+      // handle is entirely cleaned out by the next GC. For example the
+      // destructor for the resource below could bring it back to life again.
+      // Which is why we make sure to not have a dangling pointer here.
+      str->set_resource(NULL);
+      delete resource;
+    }
+  }
+
+  // In any case we do not need this handle any longer.
+  obj.Dispose();
+}
+
+
+static void DisposeExternalAsciiString(v8::Persistent<v8::Value> obj,
+                                       void* parameter) {
+  ENTER_V8;
+  i::ExternalAsciiString* str =
+      i::ExternalAsciiString::cast(*Utils::OpenHandle(*obj));
+
+  // External symbols are deleted when they are pruned out of the symbol
+  // table. Generally external symbols are not registered with the weak handle
+  // callbacks unless they are upgraded to a symbol after being externalized.
+  if (!str->IsSymbol()) {
+    v8::String::ExternalAsciiStringResource* resource =
+        reinterpret_cast<v8::String::ExternalAsciiStringResource*>(parameter);
+    if (resource != NULL) {
+      const size_t total_size = resource->length() * sizeof(*resource->data());
+      i::Counters::total_external_string_memory.Decrement(total_size);
+
+      // The object will continue to live in the JavaScript heap until the
+      // handle is entirely cleaned out by the next GC. For example the
+      // destructor for the resource below could bring it back to life again.
+      // Which is why we make sure to not have a dangling pointer here.
+      str->set_resource(NULL);
+      delete resource;
+    }
+  }
+
+  // In any case we do not need this handle any longer.
+  obj.Dispose();
+}
+
+
+Local<String> v8::String::NewExternal(
+      v8::String::ExternalStringResource* resource) {
+  EnsureInitialized("v8::String::NewExternal()");
+  LOG_API("String::NewExternal");
+  ENTER_V8;
+  const size_t total_size = resource->length() * sizeof(*resource->data());
+  i::Counters::total_external_string_memory.Increment(total_size);
+  i::Handle<i::String> result = NewExternalStringHandle(resource);
+  i::Handle<i::Object> handle = i::GlobalHandles::Create(*result);
+  i::GlobalHandles::MakeWeak(handle.location(),
+                             resource,
+                             &DisposeExternalString);
+  return Utils::ToLocal(result);
+}
+
+
+bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
+  if (IsDeadCheck("v8::String::MakeExternal()")) return false;
+  if (this->IsExternal()) return false;  // Already an external string.
+  ENTER_V8;
+  i::Handle<i::String> obj = Utils::OpenHandle(this);
+  bool result = obj->MakeExternal(resource);
+  if (result && !obj->IsSymbol()) {
+    // Operation was successful and the string is not a symbol. In this case
+    // we need to make sure that we call the destructor for the external
+    // resource when no strong references to the string remain.
+    i::Handle<i::Object> handle = i::GlobalHandles::Create(*obj);
+    i::GlobalHandles::MakeWeak(handle.location(),
+                               resource,
+                               &DisposeExternalString);
+  }
+  return result;
+}
+
+
+Local<String> v8::String::NewExternal(
+      v8::String::ExternalAsciiStringResource* resource) {
+  EnsureInitialized("v8::String::NewExternal()");
+  LOG_API("String::NewExternal");
+  ENTER_V8;
+  const size_t total_size = resource->length() * sizeof(*resource->data());
+  i::Counters::total_external_string_memory.Increment(total_size);
+  i::Handle<i::String> result = NewExternalAsciiStringHandle(resource);
+  i::Handle<i::Object> handle = i::GlobalHandles::Create(*result);
+  i::GlobalHandles::MakeWeak(handle.location(),
+                             resource,
+                             &DisposeExternalAsciiString);
+  return Utils::ToLocal(result);
+}
+
+
+bool v8::String::MakeExternal(
+    v8::String::ExternalAsciiStringResource* resource) {
+  if (IsDeadCheck("v8::String::MakeExternal()")) return false;
+  if (this->IsExternal()) return false;  // Already an external string.
+  ENTER_V8;
+  i::Handle<i::String> obj = Utils::OpenHandle(this);
+  bool result = obj->MakeExternal(resource);
+  if (result && !obj->IsSymbol()) {
+    // Operation was successful and the string is not a symbol. In this case
+    // we need to make sure that we call the destructor for the external
+    // resource when no strong references to the string remain.
+    i::Handle<i::Object> handle = i::GlobalHandles::Create(*obj);
+    i::GlobalHandles::MakeWeak(handle.location(),
+                               resource,
+                               &DisposeExternalAsciiString);
+  }
+  return result;
+}
+
+
+bool v8::String::CanMakeExternal() {
+  if (IsDeadCheck("v8::String::CanMakeExternal()")) return false;
+  i::Handle<i::String> obj = Utils::OpenHandle(this);
+  int size = obj->Size();  // Byte size of the original string.
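+  // Externalizing rewrites the string object in place, so it must be at
+  // least as large as the external string representation it will become.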
+  if (size < i::ExternalString::kSize)
+    return false;
+  i::StringShape shape(*obj);
+  return !shape.IsExternal();
+}
+
+
+Local<v8::Object> v8::Object::New() {
+  EnsureInitialized("v8::Object::New()");
+  LOG_API("Object::New");
+  ENTER_V8;
+  i::Handle<i::JSObject> obj =
+      i::Factory::NewJSObject(i::Top::object_function());
+  return Utils::ToLocal(obj);
+}
+
+
+Local<v8::Value> v8::Date::New(double time) {
+  EnsureInitialized("v8::Date::New()");
+  LOG_API("Date::New");
+  ENTER_V8;
+  EXCEPTION_PREAMBLE();
+  i::Handle<i::Object> obj =
+      i::Execution::NewDate(time, &has_pending_exception);
+  EXCEPTION_BAILOUT_CHECK(Local<v8::Value>());
+  return Utils::ToLocal(obj);
+}
+
+
+double v8::Date::NumberValue() const {
+  if (IsDeadCheck("v8::Date::NumberValue()")) return 0;
+  LOG_API("Date::NumberValue");
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
+  return jsvalue->value()->Number();
+}
+
+
+Local<v8::Array> v8::Array::New(int length) {
+  EnsureInitialized("v8::Array::New()");
+  LOG_API("Array::New");
+  ENTER_V8;
+  i::Handle<i::JSArray> obj = i::Factory::NewJSArray(length);
+  return Utils::ToLocal(obj);
+}
+
+
+uint32_t v8::Array::Length() const {
+  if (IsDeadCheck("v8::Array::Length()")) return 0;
+  i::Handle<i::JSArray> obj = Utils::OpenHandle(this);
+  i::Object* length = obj->length();
+  if (length->IsSmi()) {
+    return i::Smi::cast(length)->value();
+  } else {
+    return static_cast<uint32_t>(length->Number());
+  }
+}
+
+
+Local<Object> Array::CloneElementAt(uint32_t index) {
+  ON_BAILOUT("v8::Array::CloneElementAt()", return Local<Object>());
+  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  if (!self->HasFastElements()) {
+    return Local<Object>();
+  }
+  i::FixedArray* elms = i::FixedArray::cast(self->elements());
+  i::Object* paragon = elms->get(index);
+  if (!paragon->IsJSObject()) {
+    return Local<Object>();
+  }
+  i::Handle<i::JSObject> paragon_handle(i::JSObject::cast(paragon));
+  EXCEPTION_PREAMBLE();
+  i::Handle<i::JSObject> result = i::Copy(paragon_handle);
+  has_pending_exception = result.is_null();
+  EXCEPTION_BAILOUT_CHECK(Local<Object>());
+  return Utils::ToLocal(result);
+}
+
+
+Local<String> v8::String::NewSymbol(const char* data, int length) {
+  EnsureInitialized("v8::String::NewSymbol()");
+  LOG_API("String::NewSymbol(char)");
+  ENTER_V8;
+  if (length == -1) length = strlen(data);
+  i::Handle<i::String> result =
+      i::Factory::LookupSymbol(i::Vector<const char>(data, length));
+  return Utils::ToLocal(result);
+}
+
+
+Local<Number> v8::Number::New(double value) {
+  EnsureInitialized("v8::Number::New()");
+  ENTER_V8;
+  i::Handle<i::Object> result = i::Factory::NewNumber(value);
+  return Utils::NumberToLocal(result);
+}
+
+
+Local<Integer> v8::Integer::New(int32_t value) {
+  EnsureInitialized("v8::Integer::New()");
+  if (i::Smi::IsValid(value)) {
+    return Utils::IntegerToLocal(i::Handle<i::Object>(i::Smi::FromInt(value)));
+  }
+  ENTER_V8;
+  i::Handle<i::Object> result = i::Factory::NewNumber(value);
+  return Utils::IntegerToLocal(result);
+}
+
+
+void V8::IgnoreOutOfMemoryException() {
+  thread_local.set_ignore_out_of_memory(true);
+}
+
+
+bool V8::AddMessageListener(MessageCallback that, Handle<Value> data) {
+  EnsureInitialized("v8::V8::AddMessageListener()");
+  ON_BAILOUT("v8::V8::AddMessageListener()", return false);
+  ENTER_V8;
+  HandleScope scope;
+  NeanderArray listeners(i::Factory::message_listeners());
+  NeanderObject obj(2);
+  obj.set(0, *i::Factory::NewProxy(FUNCTION_ADDR(that)));
+  obj.set(1, data.IsEmpty() ?
+             i::Heap::undefined_value() :
+             *Utils::OpenHandle(*data));
+  listeners.add(obj.value());
+  return true;
+}
+
+
+void V8::RemoveMessageListeners(MessageCallback that) {
+  EnsureInitialized("v8::V8::RemoveMessageListener()");
+  ON_BAILOUT("v8::V8::RemoveMessageListeners()", return);
+  ENTER_V8;
+  HandleScope scope;
+  NeanderArray listeners(i::Factory::message_listeners());
+  for (int i = 0; i < listeners.length(); i++) {
+    if (listeners.get(i)->IsUndefined()) continue;  // skip deleted ones
+
+    NeanderObject listener(i::JSObject::cast(listeners.get(i)));
+    i::Handle<i::Proxy> callback_obj(i::Proxy::cast(listener.get(0)));
+    if (callback_obj->proxy() == FUNCTION_ADDR(that)) {
+      listeners.set(i, i::Heap::undefined_value());
+    }
+  }
+}
+
+
+void V8::SetCounterFunction(CounterLookupCallback callback) {
+  if (IsDeadCheck("v8::V8::SetCounterFunction()")) return;
+  i::StatsTable::SetCounterFunction(callback);
+}
+
+void V8::SetCreateHistogramFunction(CreateHistogramCallback callback) {
+  if (IsDeadCheck("v8::V8::SetCreateHistogramFunction()")) return;
+  i::StatsTable::SetCreateHistogramFunction(callback);
+}
+
+void V8::SetAddHistogramSampleFunction(AddHistogramSampleCallback callback) {
+  if (IsDeadCheck("v8::V8::SetAddHistogramSampleFunction()")) return;
+  i::StatsTable::SetAddHistogramSampleFunction(callback);
+}
+
+void V8::EnableSlidingStateWindow() {
+  if (IsDeadCheck("v8::V8::EnableSlidingStateWindow()")) return;
+  i::Logger::EnableSlidingStateWindow();
+}
+
+
+void V8::SetFailedAccessCheckCallbackFunction(
+      FailedAccessCheckCallback callback) {
+  if (IsDeadCheck("v8::V8::SetFailedAccessCheckCallbackFunction()")) return;
+  i::Top::SetFailedAccessCheckCallback(callback);
+}
+
+
+void V8::AddObjectGroup(Persistent<Value>* objects, size_t length) {
+  if (IsDeadCheck("v8::V8::AddObjectGroup()")) return;
+  STATIC_ASSERT(sizeof(Persistent<Value>) == sizeof(i::Object**));
+  i::GlobalHandles::AddGroup(reinterpret_cast<i::Object***>(objects), length);
+}
+
+
+int V8::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
+  if (IsDeadCheck("v8::V8::AdjustAmountOfExternalAllocatedMemory()")) return 0;
+  return i::Heap::AdjustAmountOfExternalAllocatedMemory(change_in_bytes);
+}
+
+
+void V8::SetGlobalGCPrologueCallback(GCCallback callback) {
+  if (IsDeadCheck("v8::V8::SetGlobalGCPrologueCallback()")) return;
+  i::Heap::SetGlobalGCPrologueCallback(callback);
+}
+
+
+void V8::SetGlobalGCEpilogueCallback(GCCallback callback) {
+  if (IsDeadCheck("v8::V8::SetGlobalGCEpilogueCallback()")) return;
+  i::Heap::SetGlobalGCEpilogueCallback(callback);
+}
+
+
+void V8::PauseProfiler() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  i::Logger::PauseProfiler(PROFILER_MODULE_CPU);
+#endif
+}
+
+
+void V8::ResumeProfiler() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  i::Logger::ResumeProfiler(PROFILER_MODULE_CPU);
+#endif
+}
+
+
+bool V8::IsProfilerPaused() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  return i::Logger::GetActiveProfilerModules() & PROFILER_MODULE_CPU;
+#else
+  return true;
+#endif
+}
+
+
+void V8::ResumeProfilerEx(int flags) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (flags & PROFILER_MODULE_HEAP_SNAPSHOT) {
+    // Snapshot mode: resume modules, perform GC, then pause only
+    // those modules which haven't been started prior to making a
+    // snapshot.
+
+    // Reset snapshot flag and CPU module flags.
+    flags &= ~(PROFILER_MODULE_HEAP_SNAPSHOT | PROFILER_MODULE_CPU);
+    const int current_flags = i::Logger::GetActiveProfilerModules();
+    i::Logger::ResumeProfiler(flags);
+    i::Heap::CollectAllGarbage(false);
+    i::Logger::PauseProfiler(~current_flags & flags);
+  } else {
+    i::Logger::ResumeProfiler(flags);
+  }
+#endif
+}
+
+
+void V8::PauseProfilerEx(int flags) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  i::Logger::PauseProfiler(flags);
+#endif
+}
+
+
+int V8::GetActiveProfilerModules() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  return i::Logger::GetActiveProfilerModules();
+#else
+  return PROFILER_MODULE_NONE;
+#endif
+}
+
+
+int V8::GetLogLines(int from_pos, char* dest_buf, int max_size) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  return i::Logger::GetLogLines(from_pos, dest_buf, max_size);
+#endif
+  return 0;
+}
+
+
+int V8::GetCurrentThreadId() {
+  API_ENTRY_CHECK("V8::GetCurrentThreadId()");
+  EnsureInitialized("V8::GetCurrentThreadId()");
+  return i::Top::thread_id();
+}
+
+
+void V8::TerminateExecution(int thread_id) {
+  if (!i::V8::IsRunning()) return;
+  API_ENTRY_CHECK("V8::GetCurrentThreadId()");
+  // If the thread_id identifies the current thread just terminate
+  // execution right away.  Otherwise, ask the thread manager to
+  // terminate the thread with the given id if any.
+  if (thread_id == i::Top::thread_id()) {
+    i::StackGuard::TerminateExecution();
+  } else {
+    i::ThreadManager::TerminateExecution(thread_id);
+  }
+}
+
+
+void V8::TerminateExecution() {
+  if (!i::V8::IsRunning()) return;
+  i::StackGuard::TerminateExecution();
+}
+
+
+String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj) {
+  EnsureInitialized("v8::String::Utf8Value::Utf8Value()");
+  if (obj.IsEmpty()) {
+    str_ = NULL;
+    length_ = 0;
+    return;
+  }
+  ENTER_V8;
+  HandleScope scope;
+  TryCatch try_catch;
+  Handle<String> str = obj->ToString();
+  if (str.IsEmpty()) {
+    str_ = NULL;
+    length_ = 0;
+  } else {
+    length_ = str->Utf8Length();
+    str_ = i::NewArray<char>(length_ + 1);
+    str->WriteUtf8(str_);
+  }
+}
+
+
+String::Utf8Value::~Utf8Value() {
+  i::DeleteArray(str_);
+}
+
+
+String::AsciiValue::AsciiValue(v8::Handle<v8::Value> obj) {
+  EnsureInitialized("v8::String::AsciiValue::AsciiValue()");
+  if (obj.IsEmpty()) {
+    str_ = NULL;
+    length_ = 0;
+    return;
+  }
+  ENTER_V8;
+  HandleScope scope;
+  TryCatch try_catch;
+  Handle<String> str = obj->ToString();
+  if (str.IsEmpty()) {
+    str_ = NULL;
+    length_ = 0;
+  } else {
+    length_ = str->Length();
+    str_ = i::NewArray<char>(length_ + 1);
+    str->WriteAscii(str_);
+  }
+}
+
+
+String::AsciiValue::~AsciiValue() {
+  i::DeleteArray(str_);
+}
+
+
+String::Value::Value(v8::Handle<v8::Value> obj) {
+  EnsureInitialized("v8::String::Value::Value()");
+  if (obj.IsEmpty()) {
+    str_ = NULL;
+    length_ = 0;
+    return;
+  }
+  ENTER_V8;
+  HandleScope scope;
+  TryCatch try_catch;
+  Handle<String> str = obj->ToString();
+  if (str.IsEmpty()) {
+    str_ = NULL;
+    length_ = 0;
+  } else {
+    length_ = str->Length();
+    str_ = i::NewArray<uint16_t>(length_ + 1);
+    str->Write(str_);
+  }
+}
+
+
+String::Value::~Value() {
+  i::DeleteArray(str_);
+}
+
+Local<Value> Exception::RangeError(v8::Handle<v8::String> raw_message) {
+  LOG_API("RangeError");
+  ON_BAILOUT("v8::Exception::RangeError()", return Local<Value>());
+  ENTER_V8;
+  i::Object* error;
+  {
+    HandleScope scope;
+    i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
+    i::Handle<i::Object> result = i::Factory::NewRangeError(message);
+    error = *result;
+  }
+  i::Handle<i::Object> result(error);
+  return Utils::ToLocal(result);
+}
+
+Local<Value> Exception::ReferenceError(v8::Handle<v8::String> raw_message) {
+  LOG_API("ReferenceError");
+  ON_BAILOUT("v8::Exception::ReferenceError()", return Local<Value>());
+  ENTER_V8;
+  i::Object* error;
+  {
+    HandleScope scope;
+    i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
+    i::Handle<i::Object> result = i::Factory::NewReferenceError(message);
+    error = *result;
+  }
+  i::Handle<i::Object> result(error);
+  return Utils::ToLocal(result);
+}
+
+Local<Value> Exception::SyntaxError(v8::Handle<v8::String> raw_message) {
+  LOG_API("SyntaxError");
+  ON_BAILOUT("v8::Exception::SyntaxError()", return Local<Value>());
+  ENTER_V8;
+  i::Object* error;
+  {
+    HandleScope scope;
+    i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
+    i::Handle<i::Object> result = i::Factory::NewSyntaxError(message);
+    error = *result;
+  }
+  i::Handle<i::Object> result(error);
+  return Utils::ToLocal(result);
+}
+
+Local<Value> Exception::TypeError(v8::Handle<v8::String> raw_message) {
+  LOG_API("TypeError");
+  ON_BAILOUT("v8::Exception::TypeError()", return Local<Value>());
+  ENTER_V8;
+  i::Object* error;
+  {
+    HandleScope scope;
+    i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
+    i::Handle<i::Object> result = i::Factory::NewTypeError(message);
+    error = *result;
+  }
+  i::Handle<i::Object> result(error);
+  return Utils::ToLocal(result);
+}
+
+Local<Value> Exception::Error(v8::Handle<v8::String> raw_message) {
+  LOG_API("Error");
+  ON_BAILOUT("v8::Exception::Error()", return Local<Value>());
+  ENTER_V8;
+  i::Object* error;
+  {
+    HandleScope scope;
+    i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
+    i::Handle<i::Object> result = i::Factory::NewError(message);
+    error = *result;
+  }
+  i::Handle<i::Object> result(error);
+  return Utils::ToLocal(result);
+}
+
+
+// --- D e b u g   S u p p o r t ---
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+bool Debug::SetDebugEventListener(EventCallback that, Handle<Value> data) {
+  EnsureInitialized("v8::Debug::SetDebugEventListener()");
+  ON_BAILOUT("v8::Debug::SetDebugEventListener()", return false);
+  ENTER_V8;
+  HandleScope scope;
+  i::Handle<i::Object> proxy = i::Factory::undefined_value();
+  if (that != NULL) {
+    proxy = i::Factory::NewProxy(FUNCTION_ADDR(that));
+  }
+  i::Debugger::SetEventListener(proxy, Utils::OpenHandle(*data));
+  return true;
+}
+
+
+bool Debug::SetDebugEventListener(v8::Handle<v8::Object> that,
+                                  Handle<Value> data) {
+  ON_BAILOUT("v8::Debug::SetDebugEventListener()", return false);
+  ENTER_V8;
+  i::Debugger::SetEventListener(Utils::OpenHandle(*that),
+                                Utils::OpenHandle(*data));
+  return true;
+}
+
+
+void Debug::DebugBreak() {
+  if (!i::V8::IsRunning()) return;
+  i::StackGuard::DebugBreak();
+}
+
+
+static v8::Debug::MessageHandler message_handler = NULL;
+
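+// Adapt the old-style MessageHandler callback, which takes a raw JSON
+// buffer, to the Message-based handler interface used by the debugger.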
+static void MessageHandlerWrapper(const v8::Debug::Message& message) {
+  if (message_handler) {
+    v8::String::Value json(message.GetJSON());
+    message_handler(*json, json.length(), message.GetClientData());
+  }
+}
+
+
+void Debug::SetMessageHandler(v8::Debug::MessageHandler handler,
+                              bool message_handler_thread) {
+  EnsureInitialized("v8::Debug::SetMessageHandler");
+  ENTER_V8;
+  // The message handler thread is not supported any more.  The parameter
+  // is temporarily left in the API for client compatibility reasons.
+  CHECK(!message_handler_thread);
+
+  // TODO(sgjesse) support the old message handler API through a simple wrapper.
+  message_handler = handler;
+  if (message_handler != NULL) {
+    i::Debugger::SetMessageHandler(MessageHandlerWrapper);
+  } else {
+    i::Debugger::SetMessageHandler(NULL);
+  }
+}
+
+
+void Debug::SetMessageHandler2(v8::Debug::MessageHandler2 handler) {
+  EnsureInitialized("v8::Debug::SetMessageHandler");
+  ENTER_V8;
+  HandleScope scope;
+  i::Debugger::SetMessageHandler(handler);
+}
+
+
+void Debug::SendCommand(const uint16_t* command, int length,
+                        ClientData* client_data) {
+  if (!i::V8::IsRunning()) return;
+  i::Debugger::ProcessCommand(i::Vector<const uint16_t>(command, length),
+                              client_data);
+}
+
+
+void Debug::SetHostDispatchHandler(HostDispatchHandler handler,
+                                   int period) {
+  EnsureInitialized("v8::Debug::SetHostDispatchHandler");
+  ENTER_V8;
+  i::Debugger::SetHostDispatchHandler(handler, period);
+}
+
+
+Local<Value> Debug::Call(v8::Handle<v8::Function> fun,
+                         v8::Handle<v8::Value> data) {
+  if (!i::V8::IsRunning()) return Local<Value>();
+  ON_BAILOUT("v8::Debug::Call()", return Local<Value>());
+  ENTER_V8;
+  i::Handle<i::Object> result;
+  EXCEPTION_PREAMBLE();
+  if (data.IsEmpty()) {
+    result = i::Debugger::Call(Utils::OpenHandle(*fun),
+                               i::Factory::undefined_value(),
+                               &has_pending_exception);
+  } else {
+    result = i::Debugger::Call(Utils::OpenHandle(*fun),
+                               Utils::OpenHandle(*data),
+                               &has_pending_exception);
+  }
+  EXCEPTION_BAILOUT_CHECK(Local<Value>());
+  return Utils::ToLocal(result);
+}
+
+
+Local<Value> Debug::GetMirror(v8::Handle<v8::Value> obj) {
+  if (!i::V8::IsRunning()) return Local<Value>();
+  ON_BAILOUT("v8::Debug::GetMirror()", return Local<Value>());
+  ENTER_V8;
+  v8::HandleScope scope;
+  i::Debug::Load();
+  i::Handle<i::JSObject> debug(i::Debug::debug_context()->global());
+  i::Handle<i::String> name = i::Factory::LookupAsciiSymbol("MakeMirror");
+  i::Handle<i::Object> fun_obj = i::GetProperty(debug, name);
+  i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(fun_obj);
+  v8::Handle<v8::Function> v8_fun = Utils::ToLocal(fun);
+  const int kArgc = 1;
+  v8::Handle<v8::Value> argv[kArgc] = { obj };
+  EXCEPTION_PREAMBLE();
+  v8::Handle<v8::Value> result = v8_fun->Call(Utils::ToLocal(debug),
+                                              kArgc,
+                                              argv);
+  EXCEPTION_BAILOUT_CHECK(Local<Value>());
+  return scope.Close(result);
+}
+
+
+bool Debug::EnableAgent(const char* name, int port) {
+  return i::Debugger::StartAgent(name, port);
+}
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
+namespace internal {
+
+
+HandleScopeImplementer* HandleScopeImplementer::instance() {
+  return &thread_local;
+}
+
+
+void HandleScopeImplementer::FreeThreadResources() {
+  thread_local.Free();
+}
+
+
+char* HandleScopeImplementer::ArchiveThread(char* storage) {
+  return thread_local.ArchiveThreadHelper(storage);
+}
+
+
+char* HandleScopeImplementer::ArchiveThreadHelper(char* storage) {
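+  // Snapshot the current handle scope data into this implementer, copy the
+  // whole implementer byte-for-byte into the per-thread storage, and then
+  // reset the live state for whichever thread enters V8 next.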
+  v8::ImplementationUtilities::HandleScopeData* current =
+      v8::ImplementationUtilities::CurrentHandleScope();
+  handle_scope_data_ = *current;
+  memcpy(storage, this, sizeof(*this));
+
+  ResetAfterArchive();
+  current->Initialize();
+
+  return storage + ArchiveSpacePerThread();
+}
+
+
+int HandleScopeImplementer::ArchiveSpacePerThread() {
+  return sizeof(thread_local);
+}
+
+
+char* HandleScopeImplementer::RestoreThread(char* storage) {
+  return thread_local.RestoreThreadHelper(storage);
+}
+
+
+char* HandleScopeImplementer::RestoreThreadHelper(char* storage) {
+  memcpy(this, storage, sizeof(*this));
+  *v8::ImplementationUtilities::CurrentHandleScope() = handle_scope_data_;
+  return storage + ArchiveSpacePerThread();
+}
+
+
+void HandleScopeImplementer::IterateThis(ObjectVisitor* v) {
+  // Iterate over all handles in the blocks except for the last.
+  for (int i = blocks()->length() - 2; i >= 0; --i) {
+    Object** block = blocks()->at(i);
+    v->VisitPointers(block, &block[kHandleBlockSize]);
+  }
+
+  // Iterate over live handles in the last block (if any).
+  if (!blocks()->is_empty()) {
+    v->VisitPointers(blocks()->last(), handle_scope_data_.next);
+  }
+
+  if (!saved_contexts_.is_empty()) {
+    Object** start = reinterpret_cast<Object**>(&saved_contexts_.first());
+    v->VisitPointers(start, start + saved_contexts_.length());
+  }
+}
+
+
+void HandleScopeImplementer::Iterate(ObjectVisitor* v) {
+  v8::ImplementationUtilities::HandleScopeData* current =
+      v8::ImplementationUtilities::CurrentHandleScope();
+  thread_local.handle_scope_data_ = *current;
+  thread_local.IterateThis(v);
+}
+
+
+char* HandleScopeImplementer::Iterate(ObjectVisitor* v, char* storage) {
+  HandleScopeImplementer* thread_local =
+      reinterpret_cast<HandleScopeImplementer*>(storage);
+  thread_local->IterateThis(v);
+  return storage + ArchiveSpacePerThread();
+}
+
+} }  // namespace v8::internal
diff --git a/src/api.h b/src/api.h
new file mode 100644
index 0000000..1221f35
--- /dev/null
+++ b/src/api.h
@@ -0,0 +1,470 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_API_H_
+#define V8_API_H_
+
+#include "apiutils.h"
+#include "factory.h"
+
+namespace v8 {
+
+// Constants used in the implementation of the API.  The most natural thing
+// would usually be to place these with the classes that use them, but
+// we want to keep them out of v8.h because it is an externally
+// visible file.
+class Consts {
+ public:
+  enum TemplateType {
+    FUNCTION_TEMPLATE = 0,
+    OBJECT_TEMPLATE = 1
+  };
+};
+
+
+// Utilities for working with neander-objects, primitive
+// env-independent JSObjects used by the api.
+class NeanderObject {
+ public:
+  explicit NeanderObject(int size);
+  inline NeanderObject(v8::internal::Handle<v8::internal::Object> obj);
+  inline NeanderObject(v8::internal::Object* obj);
+  inline v8::internal::Object* get(int index);
+  inline void set(int index, v8::internal::Object* value);
+  inline v8::internal::Handle<v8::internal::JSObject> value() { return value_; }
+  int size();
+ private:
+  v8::internal::Handle<v8::internal::JSObject> value_;
+};
+
+
+// Utilities for working with neander-arrays, a simple extensible
+// array abstraction built on neander-objects.
+class NeanderArray {
+ public:
+  NeanderArray();
+  inline NeanderArray(v8::internal::Handle<v8::internal::Object> obj);
+  inline v8::internal::Handle<v8::internal::JSObject> value() {
+    return obj_.value();
+  }
+
+  void add(v8::internal::Handle<v8::internal::Object> value);
+
+  int length();
+
+  v8::internal::Object* get(int index);
+  // Change the value at an index.  If the index is out of bounds, the
+  // request is ignored.
+  void set(int index, v8::internal::Object* value);
+ private:
+  NeanderObject obj_;
+};
+
+
+NeanderObject::NeanderObject(v8::internal::Handle<v8::internal::Object> obj)
+    : value_(v8::internal::Handle<v8::internal::JSObject>::cast(obj)) { }
+
+
+NeanderObject::NeanderObject(v8::internal::Object* obj)
+    : value_(v8::internal::Handle<v8::internal::JSObject>(
+        v8::internal::JSObject::cast(obj))) { }
+
+
+NeanderArray::NeanderArray(v8::internal::Handle<v8::internal::Object> obj)
+    : obj_(obj) { }
+
+
+v8::internal::Object* NeanderObject::get(int offset) {
+  ASSERT(value()->HasFastElements());
+  return v8::internal::FixedArray::cast(value()->elements())->get(offset);
+}
+
+
+void NeanderObject::set(int offset, v8::internal::Object* value) {
+  ASSERT(value_->HasFastElements());
+  v8::internal::FixedArray::cast(value_->elements())->set(offset, value);
+}
+
+
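+// Convert between C entities (e.g. callback function addresses) and the
+// Proxy heap objects used to store them.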
+template <typename T> static inline T ToCData(v8::internal::Object* obj) {
+  STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
+  return reinterpret_cast<T>(
+      reinterpret_cast<intptr_t>(v8::internal::Proxy::cast(obj)->proxy()));
+}
+
+
+template <typename T>
+static inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) {
+  STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
+  return v8::internal::Factory::NewProxy(
+      reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(obj)));
+}
+
+
+v8::Arguments::Arguments(v8::Local<v8::Value> data,
+                         v8::Local<v8::Object> holder,
+                         v8::Local<v8::Function> callee,
+                         bool is_construct_call,
+                         void** values, int length)
+    : data_(data), holder_(holder), callee_(callee),
+      is_construct_call_(is_construct_call),
+      values_(values), length_(length) { }
+
+
+enum ExtensionTraversalState {
+  UNVISITED, VISITED, INSTALLED
+};
+
+
+class RegisteredExtension {
+ public:
+  explicit RegisteredExtension(Extension* extension);
+  static void Register(RegisteredExtension* that);
+  Extension* extension() { return extension_; }
+  RegisteredExtension* next() { return next_; }
+  RegisteredExtension* next_auto() { return next_auto_; }
+  ExtensionTraversalState state() { return state_; }
+  void set_state(ExtensionTraversalState value) { state_ = value; }
+  static RegisteredExtension* first_extension() { return first_extension_; }
+ private:
+  Extension* extension_;
+  RegisteredExtension* next_;
+  RegisteredExtension* next_auto_;
+  ExtensionTraversalState state_;
+  static RegisteredExtension* first_extension_;
+  static RegisteredExtension* first_auto_extension_;
+};
+
+
+class Utils {
+ public:
+  static bool ReportApiFailure(const char* location, const char* message);
+
+  static Local<FunctionTemplate> ToFunctionTemplate(NeanderObject obj);
+  static Local<ObjectTemplate> ToObjectTemplate(NeanderObject obj);
+
+  static inline Local<Context> ToLocal(
+      v8::internal::Handle<v8::internal::Context> obj);
+  static inline Local<Value> ToLocal(
+      v8::internal::Handle<v8::internal::Object> obj);
+  static inline Local<Function> ToLocal(
+      v8::internal::Handle<v8::internal::JSFunction> obj);
+  static inline Local<String> ToLocal(
+      v8::internal::Handle<v8::internal::String> obj);
+  static inline Local<Object> ToLocal(
+      v8::internal::Handle<v8::internal::JSObject> obj);
+  static inline Local<Array> ToLocal(
+      v8::internal::Handle<v8::internal::JSArray> obj);
+  static inline Local<External> ToLocal(
+      v8::internal::Handle<v8::internal::Proxy> obj);
+  static inline Local<Message> MessageToLocal(
+      v8::internal::Handle<v8::internal::Object> obj);
+  static inline Local<Number> NumberToLocal(
+      v8::internal::Handle<v8::internal::Object> obj);
+  static inline Local<Integer> IntegerToLocal(
+      v8::internal::Handle<v8::internal::Object> obj);
+  static inline Local<Uint32> Uint32ToLocal(
+      v8::internal::Handle<v8::internal::Object> obj);
+  static inline Local<FunctionTemplate> ToLocal(
+      v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
+  static inline Local<ObjectTemplate> ToLocal(
+      v8::internal::Handle<v8::internal::ObjectTemplateInfo> obj);
+  static inline Local<Signature> ToLocal(
+      v8::internal::Handle<v8::internal::SignatureInfo> obj);
+  static inline Local<TypeSwitch> ToLocal(
+      v8::internal::Handle<v8::internal::TypeSwitchInfo> obj);
+
+  static inline v8::internal::Handle<v8::internal::TemplateInfo>
+      OpenHandle(const Template* that);
+  static inline v8::internal::Handle<v8::internal::FunctionTemplateInfo>
+      OpenHandle(const FunctionTemplate* that);
+  static inline v8::internal::Handle<v8::internal::ObjectTemplateInfo>
+      OpenHandle(const ObjectTemplate* that);
+  static inline v8::internal::Handle<v8::internal::Object>
+      OpenHandle(const Data* data);
+  static inline v8::internal::Handle<v8::internal::JSObject>
+      OpenHandle(const v8::Object* data);
+  static inline v8::internal::Handle<v8::internal::JSArray>
+      OpenHandle(const v8::Array* data);
+  static inline v8::internal::Handle<v8::internal::String>
+      OpenHandle(const String* data);
+  static inline v8::internal::Handle<v8::internal::JSFunction>
+      OpenHandle(const Script* data);
+  static inline v8::internal::Handle<v8::internal::JSFunction>
+      OpenHandle(const Function* data);
+  static inline v8::internal::Handle<v8::internal::JSObject>
+      OpenHandle(const Message* message);
+  static inline v8::internal::Handle<v8::internal::Context>
+      OpenHandle(const v8::Context* context);
+  static inline v8::internal::Handle<v8::internal::SignatureInfo>
+      OpenHandle(const v8::Signature* sig);
+  static inline v8::internal::Handle<v8::internal::TypeSwitchInfo>
+      OpenHandle(const v8::TypeSwitch* that);
+  static inline v8::internal::Handle<v8::internal::Proxy>
+      OpenHandle(const v8::External* that);
+};
+
+
+template <class T>
+static inline T* ToApi(v8::internal::Handle<v8::internal::Object> obj) {
+  return reinterpret_cast<T*>(obj.location());
+}
+
+
+template <class T>
+v8::internal::Handle<T> v8::internal::Handle<T>::EscapeFrom(
+    v8::HandleScope* scope) {
+  return Utils::OpenHandle(*scope->Close(Utils::ToLocal(*this)));
+}
+
+
+// Implementations of ToLocal
+
+#define MAKE_TO_LOCAL(Name, From, To)                                       \
+  Local<v8::To> Utils::Name(v8::internal::Handle<v8::internal::From> obj) { \
+    ASSERT(!obj->IsTheHole());                                              \
+    return Local<To>(reinterpret_cast<To*>(obj.location()));                \
+  }
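+
+// For illustration, MAKE_TO_LOCAL(ToLocal, JSFunction, Function) below
+// expands to roughly
+//
+//   Local<v8::Function> Utils::ToLocal(
+//       v8::internal::Handle<v8::internal::JSFunction> obj) {
+//     ASSERT(!obj->IsTheHole());
+//     return Local<Function>(reinterpret_cast<Function*>(obj.location()));
+//   }
+//
+// i.e. the internal handle location is reinterpreted as the API-level
+// local handle without copying the underlying object.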
+
+MAKE_TO_LOCAL(ToLocal, Context, Context)
+MAKE_TO_LOCAL(ToLocal, Object, Value)
+MAKE_TO_LOCAL(ToLocal, JSFunction, Function)
+MAKE_TO_LOCAL(ToLocal, String, String)
+MAKE_TO_LOCAL(ToLocal, JSObject, Object)
+MAKE_TO_LOCAL(ToLocal, JSArray, Array)
+MAKE_TO_LOCAL(ToLocal, Proxy, External)
+MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
+MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
+MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)
+MAKE_TO_LOCAL(ToLocal, TypeSwitchInfo, TypeSwitch)
+MAKE_TO_LOCAL(MessageToLocal, Object, Message)
+MAKE_TO_LOCAL(NumberToLocal, Object, Number)
+MAKE_TO_LOCAL(IntegerToLocal, Object, Integer)
+MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
+
+#undef MAKE_TO_LOCAL
+
+
+// Implementations of OpenHandle
+
+#define MAKE_OPEN_HANDLE(From, To) \
+  v8::internal::Handle<v8::internal::To> Utils::OpenHandle(\
+    const v8::From* that) { \
+    return v8::internal::Handle<v8::internal::To>( \
+        reinterpret_cast<v8::internal::To**>(const_cast<v8::From*>(that))); \
+  }
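+
+// For illustration, MAKE_OPEN_HANDLE(Function, JSFunction) below defines
+// (roughly) Utils::OpenHandle(const v8::Function* that), which reinterprets
+// the API pointer as a location holding a v8::internal::JSFunction* and
+// wraps it in an internal Handle; this is the inverse of the ToLocal
+// conversions above.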
+
+MAKE_OPEN_HANDLE(Template, TemplateInfo)
+MAKE_OPEN_HANDLE(FunctionTemplate, FunctionTemplateInfo)
+MAKE_OPEN_HANDLE(ObjectTemplate, ObjectTemplateInfo)
+MAKE_OPEN_HANDLE(Signature, SignatureInfo)
+MAKE_OPEN_HANDLE(TypeSwitch, TypeSwitchInfo)
+MAKE_OPEN_HANDLE(Data, Object)
+MAKE_OPEN_HANDLE(Object, JSObject)
+MAKE_OPEN_HANDLE(Array, JSArray)
+MAKE_OPEN_HANDLE(String, String)
+MAKE_OPEN_HANDLE(Script, JSFunction)
+MAKE_OPEN_HANDLE(Function, JSFunction)
+MAKE_OPEN_HANDLE(Message, JSObject)
+MAKE_OPEN_HANDLE(Context, Context)
+MAKE_OPEN_HANDLE(External, Proxy)
+
+#undef MAKE_OPEN_HANDLE
+
+
+namespace internal {
+
+// This class is here in order to be able to declare it a friend of
+// HandleScope.  Moving these methods to be members of HandleScope would be
+// neat in some ways, but it would expose external implementation details in
+// our public header file, which is undesirable.
+//
+// There is a singleton instance of this class to hold the per-thread data.
+// For multithreaded V8 programs this data is copied in and out of storage
+// so that the currently executing thread always has its own copy of this
+// data.
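+//
+// Roughly (an illustrative reading of the interface below): a suspended
+// thread's data is copied out with ArchiveThread(to) into a buffer of
+// ArchiveSpacePerThread() bytes and copied back in with RestoreThread(from)
+// before that thread resumes execution.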
+class HandleScopeImplementer {
+ public:
+
+  HandleScopeImplementer()
+      : blocks_(0),
+        entered_contexts_(0),
+        saved_contexts_(0),
+        spare_(NULL),
+        ignore_out_of_memory_(false),
+        call_depth_(0) { }
+
+  static HandleScopeImplementer* instance();
+
+  // Threading support for handle data.
+  static int ArchiveSpacePerThread();
+  static char* RestoreThread(char* from);
+  static char* ArchiveThread(char* to);
+  static void FreeThreadResources();
+
+  // Garbage collection support.
+  static void Iterate(v8::internal::ObjectVisitor* v);
+  static char* Iterate(v8::internal::ObjectVisitor* v, char* data);
+
+
+  inline internal::Object** GetSpareOrNewBlock();
+  inline void DeleteExtensions(int extensions);
+
+  inline void IncrementCallDepth() {call_depth_++;}
+  inline void DecrementCallDepth() {call_depth_--;}
+  inline bool CallDepthIsZero() { return call_depth_ == 0; }
+
+  inline void EnterContext(Handle<Object> context);
+  inline bool LeaveLastContext();
+
+  // Returns the last entered context or an empty handle if no
+  // contexts have been entered.
+  inline Handle<Object> LastEnteredContext();
+
+  inline void SaveContext(Context* context);
+  inline Context* RestoreContext();
+  inline bool HasSavedContexts();
+
+  inline List<internal::Object**>* blocks() { return &blocks_; }
+  inline bool ignore_out_of_memory() { return ignore_out_of_memory_; }
+  inline void set_ignore_out_of_memory(bool value) {
+    ignore_out_of_memory_ = value;
+  }
+
+ private:
+  void ResetAfterArchive() {
+    blocks_.Initialize(0);
+    entered_contexts_.Initialize(0);
+    saved_contexts_.Initialize(0);
+    spare_ = NULL;
+    ignore_out_of_memory_ = false;
+    call_depth_ = 0;
+  }
+
+  void Free() {
+    ASSERT(blocks_.length() == 0);
+    ASSERT(entered_contexts_.length() == 0);
+    ASSERT(saved_contexts_.length() == 0);
+    blocks_.Free();
+    entered_contexts_.Free();
+    saved_contexts_.Free();
+    if (spare_ != NULL) {
+      DeleteArray(spare_);
+      spare_ = NULL;
+    }
+    ASSERT(call_depth_ == 0);
+  }
+
+  List<internal::Object**> blocks_;
+  // Used as a stack to keep track of entered contexts.
+  List<Handle<Object> > entered_contexts_;
+  // Used as a stack to keep track of saved contexts.
+  List<Context*> saved_contexts_;
+  Object** spare_;
+  bool ignore_out_of_memory_;
+  int call_depth_;
+  // This is only used for threading support.
+  v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
+
+  void IterateThis(ObjectVisitor* v);
+  char* RestoreThreadHelper(char* from);
+  char* ArchiveThreadHelper(char* to);
+
+  DISALLOW_COPY_AND_ASSIGN(HandleScopeImplementer);
+};
+
+
+static const int kHandleBlockSize = v8::internal::KB - 2;  // fit in one page
+
+
+void HandleScopeImplementer::SaveContext(Context* context) {
+  saved_contexts_.Add(context);
+}
+
+
+Context* HandleScopeImplementer::RestoreContext() {
+  return saved_contexts_.RemoveLast();
+}
+
+
+bool HandleScopeImplementer::HasSavedContexts() {
+  return !saved_contexts_.is_empty();
+}
+
+
+void HandleScopeImplementer::EnterContext(Handle<Object> context) {
+  entered_contexts_.Add(context);
+}
+
+
+bool HandleScopeImplementer::LeaveLastContext() {
+  if (entered_contexts_.is_empty()) return false;
+  entered_contexts_.RemoveLast();
+  return true;
+}
+
+
+Handle<Object> HandleScopeImplementer::LastEnteredContext() {
+  if (entered_contexts_.is_empty()) return Handle<Object>::null();
+  return entered_contexts_.last();
+}
+
+
+// If there's a spare block, use it for growing the current scope.
+internal::Object** HandleScopeImplementer::GetSpareOrNewBlock() {
+  internal::Object** block = (spare_ != NULL) ?
+      spare_ :
+      NewArray<internal::Object*>(kHandleBlockSize);
+  spare_ = NULL;
+  return block;
+}
+
+
+void HandleScopeImplementer::DeleteExtensions(int extensions) {
+  if (spare_ != NULL) {
+    DeleteArray(spare_);
+    spare_ = NULL;
+  }
+  for (int i = extensions; i > 1; --i) {
+    internal::Object** block = blocks_.RemoveLast();
+#ifdef DEBUG
+    v8::ImplementationUtilities::ZapHandleRange(block,
+                                                &block[kHandleBlockSize]);
+#endif
+    DeleteArray(block);
+  }
+  spare_ = blocks_.RemoveLast();
+#ifdef DEBUG
+  v8::ImplementationUtilities::ZapHandleRange(
+      spare_,
+      &spare_[kHandleBlockSize]);
+#endif
+}
+
+} }  // namespace v8::internal
+
+#endif  // V8_API_H_
diff --git a/src/apinatives.js b/src/apinatives.js
new file mode 100644
index 0000000..6451e62
--- /dev/null
+++ b/src/apinatives.js
@@ -0,0 +1,110 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file contains infrastructure used by the API.  See
+// v8natives.js for an explanation of how these files are processed and
+// loaded.
+
+
+function CreateDate(time) {
+  var date = new ORIGINAL_DATE();
+  date.setTime(time);
+  return date;
+}
+
+
+const kApiFunctionCache = {};
+const functionCache = kApiFunctionCache;
+
+
+function Instantiate(data, name) {
+  if (!%IsTemplate(data)) return data;
+  var tag = %GetTemplateField(data, kApiTagOffset);
+  switch (tag) {
+    case kFunctionTag:
+      return InstantiateFunction(data, name);
+    case kNewObjectTag:
+      var Constructor = %GetTemplateField(data, kApiConstructorOffset);
+      var result = Constructor ? new (Instantiate(Constructor))() : {};
+      ConfigureTemplateInstance(result, data);
+      result = %ToFastProperties(result);
+      return result;
+    default:
+      throw 'Unknown API tag <' + tag + '>';
+  }
+}
+
+
+function InstantiateFunction(data, name) {
+  // We need a reference to kApiFunctionCache in the stack frame
+  // if we need to bail out from a stack overflow.
+  var cache = kApiFunctionCache;
+  var serialNumber = %GetTemplateField(data, kApiSerialNumberOffset);
+  var isFunctionCached =
+   (serialNumber in cache) && (cache[serialNumber] != kUninitialized);
+  if (!isFunctionCached) {
+    try {
+      cache[serialNumber] = null;
+      var fun = %CreateApiFunction(data);
+      if (name) %FunctionSetName(fun, name);
+      cache[serialNumber] = fun;
+      var prototype = %GetTemplateField(data, kApiPrototypeTemplateOffset);
+      fun.prototype = prototype ? Instantiate(prototype) : {};
+      %SetProperty(fun.prototype, "constructor", fun, DONT_ENUM);
+      var parent = %GetTemplateField(data, kApiParentTemplateOffset);
+      if (parent) {
+        var parent_fun = Instantiate(parent);
+        fun.prototype.__proto__ = parent_fun.prototype;
+      }
+      ConfigureTemplateInstance(fun, data);
+    } catch (e) {
+      cache[serialNumber] = kUninitialized;
+      throw e;
+    }
+  }
+  return cache[serialNumber];
+}
+
+
+function ConfigureTemplateInstance(obj, data) {
+  var properties = %GetTemplateField(data, kApiPropertyListOffset);
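+  // The property list appears to be laid out flat as
+  // [number of used slots, name, value, attributes, name, value, ...],
+  // which is why the loop below steps by 3 and reads properties[i + 1..3].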
+  if (properties) {
+    // Disable access checks while instantiating the object.
+    var requires_access_checks = %DisableAccessChecks(obj);
+    try {
+      for (var i = 0; i < properties[0]; i += 3) {
+        var name = properties[i + 1];
+        var prop_data = properties[i + 2];
+        var attributes = properties[i + 3];
+        var value = Instantiate(prop_data, name);
+        %SetProperty(obj, name, value, attributes);
+      }
+    } finally {
+      if (requires_access_checks) %EnableAccessChecks(obj);
+    }
+  }
+}
diff --git a/src/apiutils.h b/src/apiutils.h
new file mode 100644
index 0000000..8c791eb
--- /dev/null
+++ b/src/apiutils.h
@@ -0,0 +1,69 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_APIUTILS_H_
+#define V8_APIUTILS_H_
+
+namespace v8 {
+
+class ImplementationUtilities {
+ public:
+  static v8::Handle<v8::Primitive> Undefined();
+  static v8::Handle<v8::Primitive> Null();
+  static v8::Handle<v8::Boolean> True();
+  static v8::Handle<v8::Boolean> False();
+
+  static int GetNameCount(ExtensionConfiguration* that) {
+    return that->name_count_;
+  }
+
+  static const char** GetNames(ExtensionConfiguration* that) {
+    return that->names_;
+  }
+
+  static v8::Arguments NewArguments(Local<Value> data,
+                                    Local<Object> holder,
+                                    Local<Function> callee,
+                                    bool is_construct_call,
+                                    void** argv, int argc) {
+    return v8::Arguments(data, holder, callee, is_construct_call, argv, argc);
+  }
+
+  // Introduce an alias for the handle scope data to allow non-friends
+  // to access the HandleScope data.
+  typedef v8::HandleScope::Data HandleScopeData;
+
+  static HandleScopeData* CurrentHandleScope();
+
+#ifdef DEBUG
+  static void ZapHandleRange(internal::Object** begin, internal::Object** end);
+#endif
+};
+
+}  // namespace v8
+
+#endif  // V8_APIUTILS_H_
diff --git a/src/arguments.h b/src/arguments.h
new file mode 100644
index 0000000..d2f1bfc
--- /dev/null
+++ b/src/arguments.h
@@ -0,0 +1,97 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARGUMENTS_H_
+#define V8_ARGUMENTS_H_
+
+namespace v8 {
+namespace internal {
+
+// Arguments provides access to runtime call parameters.
+//
+// It uses the fact that the instance fields of Arguments
+// (length_, arguments_) are "overlaid" with the parameters
+// (no. of parameters, and the parameter pointer) passed so
+// that inside the C++ function, the parameters passed can
+// be accessed conveniently:
+//
+//   Object* Runtime_function(Arguments args) {
+//     ... use args[i] here ...
+//   }
+
+class Arguments BASE_EMBEDDED {
+ public:
+  Arguments(int length, Object** arguments)
+      : length_(length), arguments_(arguments) { }
+
+  Object*& operator[] (int index) {
+    ASSERT(0 <= index && index < length_);
+    return arguments_[-index];
+  }
+
+  template <class S> Handle<S> at(int index) {
+    Object** value = &((*this)[index]);
+    // This cast checks that the object we're accessing does indeed have the
+    // expected type.
+    S::cast(*value);
+    return Handle<S>(reinterpret_cast<S**>(value));
+  }
+
+  // Get the total number of arguments including the receiver.
+  int length() const { return length_; }
+
+  Object** arguments() { return arguments_; }
+
+ private:
+  int length_;
+  Object** arguments_;
+};
+
+
+// Custom arguments replicate a small segment of stack that can be
+// accessed through an Arguments object the same way the actual stack
+// can.
+class CustomArguments : public Relocatable {
+ public:
+  inline CustomArguments(Object* data,
+                         JSObject* self,
+                         JSObject* holder) {
+    values_[3] = self;
+    values_[2] = holder;
+    values_[1] = Smi::FromInt(0);
+    values_[0] = data;
+  }
+  void IterateInstance(ObjectVisitor* v);
+  Object** end() { return values_ + 3; }
+ private:
+  Object* values_[4];
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_ARGUMENTS_H_
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
new file mode 100644
index 0000000..cd5a1bb
--- /dev/null
+++ b/src/arm/assembler-arm-inl.h
@@ -0,0 +1,255 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been modified
+// significantly by Google Inc.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+
+#ifndef V8_ARM_ASSEMBLER_ARM_INL_H_
+#define V8_ARM_ASSEMBLER_ARM_INL_H_
+
+#include "arm/assembler-arm.h"
+#include "cpu.h"
+
+
+namespace v8 {
+namespace internal {
+
+Condition NegateCondition(Condition cc) {
+  ASSERT(cc != al);
+  return static_cast<Condition>(cc ^ ne);
+}
+
+
+void RelocInfo::apply(intptr_t delta) {
+  if (RelocInfo::IsInternalReference(rmode_)) {
+    // absolute code pointer inside code object moves with the code object.
+    int32_t* p = reinterpret_cast<int32_t*>(pc_);
+    *p += delta;  // relocate entry
+  }
+  // We do not use pc relative addressing on ARM, so there is
+  // nothing else to do.
+}
+
+
+Address RelocInfo::target_address() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  return Assembler::target_address_at(pc_);
+}
+
+
+Address RelocInfo::target_address_address() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  return reinterpret_cast<Address>(Assembler::target_address_address_at(pc_));
+}
+
+
+void RelocInfo::set_target_address(Address target) {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  Assembler::set_target_address_at(pc_, target);
+}
+
+
+Object* RelocInfo::target_object() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+}
+
+
+Object** RelocInfo::target_object_address() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return reinterpret_cast<Object**>(Assembler::target_address_address_at(pc_));
+}
+
+
+void RelocInfo::set_target_object(Object* target) {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+}
+
+
+Address* RelocInfo::target_reference_address() {
+  ASSERT(rmode_ == EXTERNAL_REFERENCE);
+  return reinterpret_cast<Address*>(Assembler::target_address_address_at(pc_));
+}
+
+
+Address RelocInfo::call_address() {
+  ASSERT(IsCallInstruction());
+  // The two-instruction offset assumes a patched return sequence.
+  ASSERT(IsJSReturn(rmode()));
+  return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+  ASSERT(IsCallInstruction());
+  // The two-instruction offset assumes a patched return sequence.
+  ASSERT(IsJSReturn(rmode()));
+  Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
+}
+
+
+Object* RelocInfo::call_object() {
+  return *call_object_address();
+}
+
+
+Object** RelocInfo::call_object_address() {
+  ASSERT(IsCallInstruction());
+  // The two-instruction offset assumes a patched return sequence.
+  ASSERT(IsJSReturn(rmode()));
+  return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
+}
+
+
+void RelocInfo::set_call_object(Object* target) {
+  *call_object_address() = target;
+}
+
+
+bool RelocInfo::IsCallInstruction() {
+  // On ARM a "call instruction" is actually two instructions.
+  //   mov lr, pc
+  //   ldr pc, [pc, #XXX]
+  return (Assembler::instr_at(pc_) == kMovLrPc)
+          && ((Assembler::instr_at(pc_ + Assembler::kInstrSize) & kLdrPCPattern)
+              == kLdrPCPattern);
+}
+
+
+Operand::Operand(int32_t immediate, RelocInfo::Mode rmode)  {
+  rm_ = no_reg;
+  imm32_ = immediate;
+  rmode_ = rmode;
+}
+
+
+Operand::Operand(const char* s) {
+  rm_ = no_reg;
+  imm32_ = reinterpret_cast<int32_t>(s);
+  rmode_ = RelocInfo::EMBEDDED_STRING;
+}
+
+
+Operand::Operand(const ExternalReference& f)  {
+  rm_ = no_reg;
+  imm32_ = reinterpret_cast<int32_t>(f.address());
+  rmode_ = RelocInfo::EXTERNAL_REFERENCE;
+}
+
+
+Operand::Operand(Object** opp) {
+  rm_ = no_reg;
+  imm32_ = reinterpret_cast<int32_t>(opp);
+  rmode_ = RelocInfo::NONE;
+}
+
+
+Operand::Operand(Context** cpp) {
+  rm_ = no_reg;
+  imm32_ = reinterpret_cast<int32_t>(cpp);
+  rmode_ = RelocInfo::NONE;
+}
+
+
+Operand::Operand(Smi* value) {
+  rm_ = no_reg;
+  imm32_ =  reinterpret_cast<intptr_t>(value);
+  rmode_ = RelocInfo::NONE;
+}
+
+
+Operand::Operand(Register rm) {
+  rm_ = rm;
+  rs_ = no_reg;
+  shift_op_ = LSL;
+  shift_imm_ = 0;
+}
+
+
+bool Operand::is_reg() const {
+  return rm_.is_valid() &&
+         rs_.is(no_reg) &&
+         shift_op_ == LSL &&
+         shift_imm_ == 0;
+}
+
+
+void Assembler::CheckBuffer() {
+  if (buffer_space() <= kGap) {
+    GrowBuffer();
+  }
+  if (pc_offset() >= next_buffer_check_) {
+    CheckConstPool(false, true);
+  }
+}
+
+
+void Assembler::emit(Instr x) {
+  CheckBuffer();
+  *reinterpret_cast<Instr*>(pc_) = x;
+  pc_ += kInstrSize;
+}
+
+
+Address Assembler::target_address_address_at(Address pc) {
+  Instr instr = Memory::int32_at(pc);
+  // Verify that the instruction at pc is a ldr<cond> <Rd>, [pc +/- offset_12].
+  ASSERT((instr & 0x0f7f0000) == 0x051f0000);
+  int offset = instr & 0xfff;  // offset_12 is unsigned
+  if ((instr & (1 << 23)) == 0) offset = -offset;  // U bit defines offset sign
+  // Verify that the constant pool comes after the instruction referencing it.
+  ASSERT(offset >= -4);
+  return pc + offset + 8;
+}
+
+
+Address Assembler::target_address_at(Address pc) {
+  return Memory::Address_at(target_address_address_at(pc));
+}
+
+
+void Assembler::set_target_address_at(Address pc, Address target) {
+  Memory::Address_at(target_address_address_at(pc)) = target;
+  // Intuitively, we would think it is necessary to flush the instruction cache
+  // after patching a target address in the code as follows:
+  //   CPU::FlushICache(pc, sizeof(target));
+  // However, on ARM, no instruction was actually patched by the assignment
+  // above; the target address is not part of an instruction, it is patched in
+  // the constant pool and is read via a data access; the instruction accessing
+  // this address in the constant pool remains unchanged.
+}
+
+} }  // namespace v8::internal
+
+#endif  // V8_ARM_ASSEMBLER_ARM_INL_H_
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
new file mode 100644
index 0000000..bc3b8e6
--- /dev/null
+++ b/src/arm/assembler-arm.cc
@@ -0,0 +1,1545 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been modified
+// significantly by Google Inc.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+
+#include "v8.h"
+
+#include "arm/assembler-arm-inl.h"
+#include "serialize.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Implementation of Register and CRegister
+
+Register no_reg = { -1 };
+
+Register r0  = {  0 };
+Register r1  = {  1 };
+Register r2  = {  2 };
+Register r3  = {  3 };
+Register r4  = {  4 };
+Register r5  = {  5 };
+Register r6  = {  6 };
+Register r7  = {  7 };
+Register r8  = {  8 };
+Register r9  = {  9 };
+Register r10 = { 10 };
+Register fp  = { 11 };
+Register ip  = { 12 };
+Register sp  = { 13 };
+Register lr  = { 14 };
+Register pc  = { 15 };
+
+
+CRegister no_creg = { -1 };
+
+CRegister cr0  = {  0 };
+CRegister cr1  = {  1 };
+CRegister cr2  = {  2 };
+CRegister cr3  = {  3 };
+CRegister cr4  = {  4 };
+CRegister cr5  = {  5 };
+CRegister cr6  = {  6 };
+CRegister cr7  = {  7 };
+CRegister cr8  = {  8 };
+CRegister cr9  = {  9 };
+CRegister cr10 = { 10 };
+CRegister cr11 = { 11 };
+CRegister cr12 = { 12 };
+CRegister cr13 = { 13 };
+CRegister cr14 = { 14 };
+CRegister cr15 = { 15 };
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+const int RelocInfo::kApplyMask = 0;
+
+
+void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
+  // Patch the code at the current address with the supplied instructions.
+  Instr* pc = reinterpret_cast<Instr*>(pc_);
+  Instr* instr = reinterpret_cast<Instr*>(instructions);
+  for (int i = 0; i < instruction_count; i++) {
+    *(pc + i) = *(instr + i);
+  }
+
+  // Indicate that code has changed.
+  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
+}
+
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard instructions can be added if required.
+void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
+  // Patch the code at the current address with a call to the target.
+  UNIMPLEMENTED();
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand and MemOperand
+// See assembler-arm-inl.h for inlined constructors
+
+Operand::Operand(Handle<Object> handle) {
+  rm_ = no_reg;
+  // Verify that all Objects referred to by the code are NOT in new space.
+  Object* obj = *handle;
+  ASSERT(!Heap::InNewSpace(obj));
+  if (obj->IsHeapObject()) {
+    imm32_ = reinterpret_cast<intptr_t>(handle.location());
+    rmode_ = RelocInfo::EMBEDDED_OBJECT;
+  } else {
+    // no relocation needed
+    imm32_ =  reinterpret_cast<intptr_t>(obj);
+    rmode_ = RelocInfo::NONE;
+  }
+}
+
+
+Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
+  ASSERT(is_uint5(shift_imm));
+  ASSERT(shift_op != ROR || shift_imm != 0);  // use RRX if you mean it
+  rm_ = rm;
+  rs_ = no_reg;
+  shift_op_ = shift_op;
+  shift_imm_ = shift_imm & 31;
+  if (shift_op == RRX) {
+    // encoded as ROR with shift_imm == 0
+    ASSERT(shift_imm == 0);
+    shift_op_ = ROR;
+    shift_imm_ = 0;
+  }
+}
+
+
+Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
+  ASSERT(shift_op != RRX);
+  rm_ = rm;
+  shift_op_ = shift_op;
+  rs_ = rs;
+}
+
+
+MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
+  rn_ = rn;
+  rm_ = no_reg;
+  offset_ = offset;
+  am_ = am;
+}
+
+MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
+  rn_ = rn;
+  rm_ = rm;
+  shift_op_ = LSL;
+  shift_imm_ = 0;
+  am_ = am;
+}
+
+
+MemOperand::MemOperand(Register rn, Register rm,
+                       ShiftOp shift_op, int shift_imm, AddrMode am) {
+  ASSERT(is_uint5(shift_imm));
+  rn_ = rn;
+  rm_ = rm;
+  shift_op_ = shift_op;
+  shift_imm_ = shift_imm & 31;
+  am_ = am;
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Assembler
+
+// Instruction encoding bits
+enum {
+  H   = 1 << 5,   // halfword (or byte)
+  S6  = 1 << 6,   // signed (or unsigned)
+  L   = 1 << 20,  // load (or store)
+  S   = 1 << 20,  // set condition code (or leave unchanged)
+  W   = 1 << 21,  // writeback base register (or leave unchanged)
+  A   = 1 << 21,  // accumulate in multiply instruction (or not)
+  B   = 1 << 22,  // unsigned byte (or word)
+  N   = 1 << 22,  // long (or short)
+  U   = 1 << 23,  // positive (or negative) offset/index
+  P   = 1 << 24,  // offset/pre-indexed addressing (or post-indexed addressing)
+  I   = 1 << 25,  // immediate shifter operand (or not)
+
+  B4  = 1 << 4,
+  B5  = 1 << 5,
+  B7  = 1 << 7,
+  B8  = 1 << 8,
+  B12 = 1 << 12,
+  B16 = 1 << 16,
+  B20 = 1 << 20,
+  B21 = 1 << 21,
+  B22 = 1 << 22,
+  B23 = 1 << 23,
+  B24 = 1 << 24,
+  B25 = 1 << 25,
+  B26 = 1 << 26,
+  B27 = 1 << 27,
+
+  // Instruction bit masks
+  RdMask     = 15 << 12,  // in str instruction
+  CondMask   = 15 << 28,
+  CoprocessorMask = 15 << 8,
+  OpCodeMask = 15 << 21,  // in data-processing instructions
+  Imm24Mask  = (1 << 24) - 1,
+  Off12Mask  = (1 << 12) - 1,
+  // Reserved condition
+  nv = 15 << 28
+};
+
+
+// add(sp, sp, 4) instruction (aka Pop())
+static const Instr kPopInstruction =
+    al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
+// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
+// register r is not encoded.
+static const Instr kPushRegPattern =
+    al | B26 | 4 | NegPreIndex | sp.code() * B16;
+// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
+// register r is not encoded.
+static const Instr kPopRegPattern =
+    al | B26 | L | 4 | PostIndex | sp.code() * B16;
+// mov lr, pc
+const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
+// ldr pc, [pc, #XXX]
+const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16;
+
+// spare_buffer_
+static const int kMinimalBufferSize = 4*KB;
+static byte* spare_buffer_ = NULL;
+
+Assembler::Assembler(void* buffer, int buffer_size) {
+  if (buffer == NULL) {
+    // do our own buffer management
+    if (buffer_size <= kMinimalBufferSize) {
+      buffer_size = kMinimalBufferSize;
+
+      if (spare_buffer_ != NULL) {
+        buffer = spare_buffer_;
+        spare_buffer_ = NULL;
+      }
+    }
+    if (buffer == NULL) {
+      buffer_ = NewArray<byte>(buffer_size);
+    } else {
+      buffer_ = static_cast<byte*>(buffer);
+    }
+    buffer_size_ = buffer_size;
+    own_buffer_ = true;
+
+  } else {
+    // use externally provided buffer instead
+    ASSERT(buffer_size > 0);
+    buffer_ = static_cast<byte*>(buffer);
+    buffer_size_ = buffer_size;
+    own_buffer_ = false;
+  }
+
+  // setup buffer pointers
+  ASSERT(buffer_ != NULL);
+  pc_ = buffer_;
+  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+  num_prinfo_ = 0;
+  next_buffer_check_ = 0;
+  no_const_pool_before_ = 0;
+  last_const_pool_end_ = 0;
+  last_bound_pos_ = 0;
+  current_statement_position_ = RelocInfo::kNoPosition;
+  current_position_ = RelocInfo::kNoPosition;
+  written_statement_position_ = current_statement_position_;
+  written_position_ = current_position_;
+}
+
+
+Assembler::~Assembler() {
+  if (own_buffer_) {
+    if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
+      spare_buffer_ = buffer_;
+    } else {
+      DeleteArray(buffer_);
+    }
+  }
+}
+
+
+void Assembler::GetCode(CodeDesc* desc) {
+  // emit constant pool if necessary
+  CheckConstPool(true, false);
+  ASSERT(num_prinfo_ == 0);
+
+  // setup desc
+  desc->buffer = buffer_;
+  desc->buffer_size = buffer_size_;
+  desc->instr_size = pc_offset();
+  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+}
+
+
+void Assembler::Align(int m) {
+  ASSERT(m >= 4 && IsPowerOf2(m));
+  while ((pc_offset() & (m - 1)) != 0) {
+    nop();
+  }
+}
+
+
+// Labels refer to positions in the (to be) generated code.
+// There are bound, linked, and unused labels.
+//
+// Bound labels refer to known positions in the already
+// generated code. pos() is the position the label refers to.
+//
+// Linked labels refer to unknown positions in the code
+// to be generated; pos() is the position of the last
+// instruction using the label.
+
+
+// The link chain is terminated by a negative code position (must be aligned)
+const int kEndOfChain = -4;
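+
+// For example (illustrative only): if an unbound label is used by branches
+// emitted at offsets 8 and 24, L->pos() is 24, target_at(24) yields 8, and
+// target_at(8) yields kEndOfChain; bind_to() later walks this chain via
+// next() and patches each branch with the real target position.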
+
+
+int Assembler::target_at(int pos)  {
+  Instr instr = instr_at(pos);
+  if ((instr & ~Imm24Mask) == 0) {
+    // Emitted label constant, not part of a branch.
+    return instr - (Code::kHeaderSize - kHeapObjectTag);
+  }
+  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
+  int imm26 = ((instr & Imm24Mask) << 8) >> 6;
+  if ((instr & CondMask) == nv && (instr & B24) != 0)
+    // blx uses bit 24 to encode bit 2 of imm26
+    imm26 += 2;
+
+  return pos + kPcLoadDelta + imm26;
+}
+
+
+void Assembler::target_at_put(int pos, int target_pos) {
+  Instr instr = instr_at(pos);
+  if ((instr & ~Imm24Mask) == 0) {
+    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
+    // Emitted label constant, not part of a branch.
+    // Make label relative to Code* of generated Code object.
+    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+    return;
+  }
+  int imm26 = target_pos - (pos + kPcLoadDelta);
+  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
+  if ((instr & CondMask) == nv) {
+    // blx uses bit 24 to encode bit 2 of imm26
+    ASSERT((imm26 & 1) == 0);
+    instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24;
+  } else {
+    ASSERT((imm26 & 3) == 0);
+    instr &= ~Imm24Mask;
+  }
+  int imm24 = imm26 >> 2;
+  ASSERT(is_int24(imm24));
+  instr_at_put(pos, instr | (imm24 & Imm24Mask));
+}
+
+
+void Assembler::print(Label* L) {
+  if (L->is_unused()) {
+    PrintF("unused label\n");
+  } else if (L->is_bound()) {
+    PrintF("bound label to %d\n", L->pos());
+  } else if (L->is_linked()) {
+    Label l = *L;
+    PrintF("unbound label");
+    while (l.is_linked()) {
+      PrintF("@ %d ", l.pos());
+      Instr instr = instr_at(l.pos());
+      if ((instr & ~Imm24Mask) == 0) {
+        PrintF("value\n");
+      } else {
+        ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
+        int cond = instr & CondMask;
+        const char* b;
+        const char* c;
+        if (cond == nv) {
+          b = "blx";
+          c = "";
+        } else {
+          if ((instr & B24) != 0)
+            b = "bl";
+          else
+            b = "b";
+
+          switch (cond) {
+            case eq: c = "eq"; break;
+            case ne: c = "ne"; break;
+            case hs: c = "hs"; break;
+            case lo: c = "lo"; break;
+            case mi: c = "mi"; break;
+            case pl: c = "pl"; break;
+            case vs: c = "vs"; break;
+            case vc: c = "vc"; break;
+            case hi: c = "hi"; break;
+            case ls: c = "ls"; break;
+            case ge: c = "ge"; break;
+            case lt: c = "lt"; break;
+            case gt: c = "gt"; break;
+            case le: c = "le"; break;
+            case al: c = ""; break;
+            default:
+              c = "";
+              UNREACHABLE();
+          }
+        }
+        PrintF("%s%s\n", b, c);
+      }
+      next(&l);
+    }
+  } else {
+    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
+  }
+}
+
+
+void Assembler::bind_to(Label* L, int pos) {
+  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
+  while (L->is_linked()) {
+    int fixup_pos = L->pos();
+    next(L);  // call next before overwriting link with target at fixup_pos
+    target_at_put(fixup_pos, pos);
+  }
+  L->bind_to(pos);
+
+  // Keep track of the last bound label so we don't eliminate any instructions
+  // before a bound label.
+  if (pos > last_bound_pos_)
+    last_bound_pos_ = pos;
+}
+
+
+void Assembler::link_to(Label* L, Label* appendix) {
+  if (appendix->is_linked()) {
+    if (L->is_linked()) {
+      // append appendix to L's list
+      int fixup_pos;
+      int link = L->pos();
+      do {
+        fixup_pos = link;
+        link = target_at(fixup_pos);
+      } while (link > 0);
+      ASSERT(link == kEndOfChain);
+      target_at_put(fixup_pos, appendix->pos());
+    } else {
+      // L is empty, simply use appendix
+      *L = *appendix;
+    }
+  }
+  appendix->Unuse();  // appendix should not be used anymore
+}
+
+
+void Assembler::bind(Label* L) {
+  ASSERT(!L->is_bound());  // label can only be bound once
+  bind_to(L, pc_offset());
+}
+
+
+void Assembler::next(Label* L) {
+  ASSERT(L->is_linked());
+  int link = target_at(L->pos());
+  if (link > 0) {
+    L->link_to(link);
+  } else {
+    ASSERT(link == kEndOfChain);
+    L->Unuse();
+  }
+}
+
+
+// Low-level code emission routines depending on the addressing mode
+static bool fits_shifter(uint32_t imm32,
+                         uint32_t* rotate_imm,
+                         uint32_t* immed_8,
+                         Instr* instr) {
+  // imm32 must be unsigned
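+  // An ARM data-processing immediate is an 8-bit value rotated right by an
+  // even amount (2 * rotate_imm).  For example (illustrative values only):
+  // 0xff000000 is encodable as immed_8 = 0xff with rotate_imm = 4 (rotate
+  // right by 8), whereas 0x101 has no such encoding and falls back to the
+  // caller loading the constant via ip or a pc-relative ldr.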
+  for (int rot = 0; rot < 16; rot++) {
+    uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
+    if ((imm8 <= 0xff)) {
+      *rotate_imm = rot;
+      *immed_8 = imm8;
+      return true;
+    }
+  }
+  // if the opcode is mov or mvn and if ~imm32 fits, change the opcode
+  if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
+    if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
+      *instr ^= 0x2*B21;
+      return true;
+    }
+  }
+  return false;
+}
+
+
+// We have to use the temporary register for things that can be relocated even
+// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
+// space.  There is no guarantee that the relocated location can be similarly
+// encoded.
+static bool MustUseIp(RelocInfo::Mode rmode) {
+  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+    return Serializer::enabled();
+  } else if (rmode == RelocInfo::NONE) {
+    return false;
+  }
+  return true;
+}
+
+
+void Assembler::addrmod1(Instr instr,
+                         Register rn,
+                         Register rd,
+                         const Operand& x) {
+  CheckBuffer();
+  ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
+  if (!x.rm_.is_valid()) {
+    // immediate
+    uint32_t rotate_imm;
+    uint32_t immed_8;
+    if (MustUseIp(x.rmode_) ||
+        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
+      // The immediate operand cannot be encoded as a shifter operand, so load
+      // it first to register ip and change the original instruction to use ip.
+      // However, if the original instruction is a 'mov rd, x' (not setting the
+      // condition code), then replace it with a 'ldr rd, [pc]'
+      RecordRelocInfo(x.rmode_, x.imm32_);
+      CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
+      Condition cond = static_cast<Condition>(instr & CondMask);
+      if ((instr & ~CondMask) == 13*B21) {  // mov, S not set
+        ldr(rd, MemOperand(pc, 0), cond);
+      } else {
+        ldr(ip, MemOperand(pc, 0), cond);
+        addrmod1(instr, rn, rd, Operand(ip));
+      }
+      return;
+    }
+    instr |= I | rotate_imm*B8 | immed_8;
+  } else if (!x.rs_.is_valid()) {
+    // immediate shift
+    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
+  } else {
+    // register shift
+    ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
+    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
+  }
+  emit(instr | rn.code()*B16 | rd.code()*B12);
+  if (rn.is(pc) || x.rm_.is(pc))
+    // block constant pool emission for one instruction after reading pc
+    BlockConstPoolBefore(pc_offset() + kInstrSize);
+}
+
+
+void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
+  ASSERT((instr & ~(CondMask | B | L)) == B26);
+  int am = x.am_;
+  if (!x.rm_.is_valid()) {
+    // immediate offset
+    int offset_12 = x.offset_;
+    if (offset_12 < 0) {
+      offset_12 = -offset_12;
+      am ^= U;
+    }
+    if (!is_uint12(offset_12)) {
+      // immediate offset cannot be encoded, load it first to register ip
+      // rn (and rd in a load) should never be ip, or will be trashed
+      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
+      mov(ip, Operand(x.offset_), LeaveCC,
+          static_cast<Condition>(instr & CondMask));
+      addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
+      return;
+    }
+    ASSERT(offset_12 >= 0);  // no masking needed
+    instr |= offset_12;
+  } else {
+    // register offset (shift_imm_ and shift_op_ are 0) or scaled
+    // register offset; the constructors make sure that both shift_imm_
+    // and shift_op_ are initialized
+    ASSERT(!x.rm_.is(pc));
+    instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
+  }
+  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
+  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
+}
+
+
+void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
+  ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7));
+  ASSERT(x.rn_.is_valid());
+  int am = x.am_;
+  if (!x.rm_.is_valid()) {
+    // immediate offset
+    int offset_8 = x.offset_;
+    if (offset_8 < 0) {
+      offset_8 = -offset_8;
+      am ^= U;
+    }
+    if (!is_uint8(offset_8)) {
+      // immediate offset cannot be encoded, load it first to register ip
+      // rn (and rd in a load) should never be ip, or will be trashed
+      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
+      mov(ip, Operand(x.offset_), LeaveCC,
+          static_cast<Condition>(instr & CondMask));
+      addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
+      return;
+    }
+    ASSERT(offset_8 >= 0);  // no masking needed
+    instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
+  } else if (x.shift_imm_ != 0) {
+    // scaled register offset not supported, load index first
+    // rn (and rd in a load) should never be ip, or will be trashed
+    ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
+    mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
+        static_cast<Condition>(instr & CondMask));
+    addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
+    return;
+  } else {
+    // register offset
+    ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
+    instr |= x.rm_.code();
+  }
+  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
+  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
+}
+
+
+void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
+  ASSERT((instr & ~(CondMask | P | U | W | L)) == B27);
+  ASSERT(rl != 0);
+  ASSERT(!rn.is(pc));
+  emit(instr | rn.code()*B16 | rl);
+}
+
+
+void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
+  // unindexed addressing is not encoded by this function
+  ASSERT_EQ((B27 | B26),
+            (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
+  ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
+  int am = x.am_;
+  int offset_8 = x.offset_;
+  ASSERT((offset_8 & 3) == 0);  // offset must be an aligned word offset
+  offset_8 >>= 2;
+  if (offset_8 < 0) {
+    offset_8 = -offset_8;
+    am ^= U;
+  }
+  ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
+  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
+
+  // post-indexed addressing requires W == 1; different than in addrmod2/3
+  if ((am & P) == 0)
+    am |= W;
+
+  ASSERT(offset_8 >= 0);  // no masking needed
+  emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
+}
+
+
+int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
+  int target_pos;
+  if (L->is_bound()) {
+    target_pos = L->pos();
+  } else {
+    if (L->is_linked()) {
+      target_pos = L->pos();  // L's link
+    } else {
+      target_pos = kEndOfChain;
+    }
+    L->link_to(pc_offset());
+  }
+
+  // Block the emission of the constant pool, since the branch instruction must
+  // be emitted at the pc offset recorded by the label
+  BlockConstPoolBefore(pc_offset() + kInstrSize);
+  return target_pos - (pc_offset() + kPcLoadDelta);
+}
+
+
+void Assembler::label_at_put(Label* L, int at_offset) {
+  int target_pos;
+  if (L->is_bound()) {
+    target_pos = L->pos();
+  } else {
+    if (L->is_linked()) {
+      target_pos = L->pos();  // L's link
+    } else {
+      target_pos = kEndOfChain;
+    }
+    L->link_to(at_offset);
+    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+  }
+}
+
+
+// Branch instructions
+void Assembler::b(int branch_offset, Condition cond) {
+  ASSERT((branch_offset & 3) == 0);
+  int imm24 = branch_offset >> 2;
+  ASSERT(is_int24(imm24));
+  emit(cond | B27 | B25 | (imm24 & Imm24Mask));
+
+  if (cond == al)
+    // dead code is a good location to emit the constant pool
+    CheckConstPool(false, false);
+}
+
+
+void Assembler::bl(int branch_offset, Condition cond) {
+  ASSERT((branch_offset & 3) == 0);
+  int imm24 = branch_offset >> 2;
+  ASSERT(is_int24(imm24));
+  emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask));
+}
+
+
+void Assembler::blx(int branch_offset) {  // v5 and above
+  WriteRecordedPositions();
+  ASSERT((branch_offset & 1) == 0);
+  int h = ((branch_offset & 2) >> 1)*B24;
+  int imm24 = branch_offset >> 2;
+  ASSERT(is_int24(imm24));
+  emit(15 << 28 | B27 | B25 | h | (imm24 & Imm24Mask));
+}
+
+
+void Assembler::blx(Register target, Condition cond) {  // v5 and above
+  WriteRecordedPositions();
+  ASSERT(!target.is(pc));
+  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
+}
+
+
+void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
+  WriteRecordedPositions();
+  ASSERT(!target.is(pc));  // use of pc is actually allowed, but discouraged
+  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
+}
+
+
+// Data-processing instructions
+void Assembler::and_(Register dst, Register src1, const Operand& src2,
+                     SBit s, Condition cond) {
+  addrmod1(cond | 0*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::eor(Register dst, Register src1, const Operand& src2,
+                    SBit s, Condition cond) {
+  addrmod1(cond | 1*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::sub(Register dst, Register src1, const Operand& src2,
+                    SBit s, Condition cond) {
+  addrmod1(cond | 2*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::rsb(Register dst, Register src1, const Operand& src2,
+                    SBit s, Condition cond) {
+  addrmod1(cond | 3*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::add(Register dst, Register src1, const Operand& src2,
+                    SBit s, Condition cond) {
+  addrmod1(cond | 4*B21 | s, src1, dst, src2);
+
+  // Eliminate pattern: push(r), pop()
+  //   str(src, MemOperand(sp, 4, NegPreIndex), al);
+  //   add(sp, sp, Operand(kPointerSize));
+  // Both instructions can be eliminated.
+  int pattern_size = 2 * kInstrSize;
+  if (FLAG_push_pop_elimination &&
+      last_bound_pos_ <= (pc_offset() - pattern_size) &&
+      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+      // pattern
+      instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
+      (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
+    pc_ -= 2 * kInstrSize;
+    if (FLAG_print_push_pop_elimination) {
+      PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
+    }
+  }
+}
+
+
+void Assembler::adc(Register dst, Register src1, const Operand& src2,
+                    SBit s, Condition cond) {
+  addrmod1(cond | 5*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::sbc(Register dst, Register src1, const Operand& src2,
+                    SBit s, Condition cond) {
+  addrmod1(cond | 6*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::rsc(Register dst, Register src1, const Operand& src2,
+                    SBit s, Condition cond) {
+  addrmod1(cond | 7*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
+  addrmod1(cond | 8*B21 | S, src1, r0, src2);
+}
+
+
+void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
+  addrmod1(cond | 9*B21 | S, src1, r0, src2);
+}
+
+
+void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
+  addrmod1(cond | 10*B21 | S, src1, r0, src2);
+}
+
+
+void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
+  addrmod1(cond | 11*B21 | S, src1, r0, src2);
+}
+
+
+void Assembler::orr(Register dst, Register src1, const Operand& src2,
+                    SBit s, Condition cond) {
+  addrmod1(cond | 12*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
+  if (dst.is(pc)) {
+    WriteRecordedPositions();
+  }
+  addrmod1(cond | 13*B21 | s, r0, dst, src);
+}
+
+
+void Assembler::bic(Register dst, Register src1, const Operand& src2,
+                    SBit s, Condition cond) {
+  addrmod1(cond | 14*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
+  addrmod1(cond | 15*B21 | s, r0, dst, src);
+}
+
+
+// Multiply instructions
+void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
+                    SBit s, Condition cond) {
+  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
+  emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
+       src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::mul(Register dst, Register src1, Register src2,
+                    SBit s, Condition cond) {
+  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
+  // dst goes in bits 16-19 for this instruction!
+  emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::smlal(Register dstL,
+                      Register dstH,
+                      Register src1,
+                      Register src2,
+                      SBit s,
+                      Condition cond) {
+  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+  ASSERT(!dstL.is(dstH));
+  emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
+       src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::smull(Register dstL,
+                      Register dstH,
+                      Register src1,
+                      Register src2,
+                      SBit s,
+                      Condition cond) {
+  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+  ASSERT(!dstL.is(dstH));
+  emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
+       src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::umlal(Register dstL,
+                      Register dstH,
+                      Register src1,
+                      Register src2,
+                      SBit s,
+                      Condition cond) {
+  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+  ASSERT(!dstL.is(dstH));
+  emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
+       src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::umull(Register dstL,
+                      Register dstH,
+                      Register src1,
+                      Register src2,
+                      SBit s,
+                      Condition cond) {
+  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+  ASSERT(!dstL.is(dstH));
+  emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
+       src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+// Miscellaneous arithmetic instructions
+void Assembler::clz(Register dst, Register src, Condition cond) {
+  // v5 and above.
+  ASSERT(!dst.is(pc) && !src.is(pc));
+  emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
+       15*B8 | B4 | src.code());
+}
+
+
+// Status register access instructions
+void Assembler::mrs(Register dst, SRegister s, Condition cond) {
+  ASSERT(!dst.is(pc));
+  emit(cond | B24 | s | 15*B16 | dst.code()*B12);
+}
+
+
+void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
+                    Condition cond) {
+  ASSERT(fields >= B16 && fields < B20);  // at least one field set
+  Instr instr;
+  if (!src.rm_.is_valid()) {
+    // immediate
+    uint32_t rotate_imm;
+    uint32_t immed_8;
+    if (MustUseIp(src.rmode_) ||
+        !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
+      // immediate operand cannot be encoded, load it first to register ip
+      RecordRelocInfo(src.rmode_, src.imm32_);
+      ldr(ip, MemOperand(pc, 0), cond);
+      msr(fields, Operand(ip), cond);
+      return;
+    }
+    instr = I | rotate_imm*B8 | immed_8;
+  } else {
+    ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm allowed
+    instr = src.rm_.code();
+  }
+  emit(cond | instr | B24 | B21 | fields | 15*B12);
+}
+
+
+// Load/Store instructions
+void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
+  if (dst.is(pc)) {
+    WriteRecordedPositions();
+  }
+  addrmod2(cond | B26 | L, dst, src);
+
+  // Eliminate pattern: push(r), pop(r)
+  //   str(r, MemOperand(sp, 4, NegPreIndex), al)
+  //   ldr(r, MemOperand(sp, 4, PostIndex), al)
+  // Both instructions can be eliminated.
+  int pattern_size = 2 * kInstrSize;
+  if (FLAG_push_pop_elimination &&
+      last_bound_pos_ <= (pc_offset() - pattern_size) &&
+      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+      // pattern
+      instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
+      instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
+    pc_ -= 2 * kInstrSize;
+    if (FLAG_print_push_pop_elimination) {
+      PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
+    }
+  }
+}
+
+
+void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
+  addrmod2(cond | B26, src, dst);
+
+  // Eliminate pattern: pop(), push(r)
+  //     add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
+  // ->  str r, [sp, 0], al
+  int pattern_size = 2 * kInstrSize;
+  if (FLAG_push_pop_elimination &&
+     last_bound_pos_ <= (pc_offset() - pattern_size) &&
+     reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+     instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
+     instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
+    pc_ -= 2 * kInstrSize;
+    emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
+    if (FLAG_print_push_pop_elimination) {
+      PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
+    }
+  }
+}
+
+
+void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
+  addrmod2(cond | B26 | B | L, dst, src);
+}
+
+
+void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
+  addrmod2(cond | B26 | B, src, dst);
+}
+
+
+void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
+  addrmod3(cond | L | B7 | H | B4, dst, src);
+}
+
+
+void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
+  addrmod3(cond | B7 | H | B4, src, dst);
+}
+
+
+void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
+  addrmod3(cond | L | B7 | S6 | B4, dst, src);
+}
+
+
+void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
+  addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
+}
+
+
+// Load/Store multiple instructions
+void Assembler::ldm(BlockAddrMode am,
+                    Register base,
+                    RegList dst,
+                    Condition cond) {
+  // ABI stack constraint: ldmxx base, {..sp..}  base != sp  is not restartable
+  ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
+
+  addrmod4(cond | B27 | am | L, base, dst);
+
+  // emit the constant pool after a function return implemented by ldm ..{..pc}
+  if (cond == al && (dst & pc.bit()) != 0) {
+    // There is a slight chance that the ldm instruction was actually a call,
+    // in which case it would be wrong to return into the constant pool; we
+    // recognize this case by checking if the emission of the pool was blocked
+    // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
+    // the case, we emit a jump over the pool.
+    CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
+  }
+}
+
+
+void Assembler::stm(BlockAddrMode am,
+                    Register base,
+                    RegList src,
+                    Condition cond) {
+  addrmod4(cond | B27 | am, base, src);
+}
+
+
+// Semaphore instructions
+void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
+  ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
+  ASSERT(!dst.is(base) && !src.is(base));
+  emit(cond | P | base.code()*B16 | dst.code()*B12 |
+       B7 | B4 | src.code());
+}
+
+
+void Assembler::swpb(Register dst,
+                     Register src,
+                     Register base,
+                     Condition cond) {
+  ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
+  ASSERT(!dst.is(base) && !src.is(base));
+  emit(cond | P | B | base.code()*B16 | dst.code()*B12 |
+       B7 | B4 | src.code());
+}
+
+
+// Exception-generating instructions and debugging support
+void Assembler::stop(const char* msg) {
+#if !defined(__arm__)
+  // The simulator handles these special instructions and stops execution.
+  emit(15 << 28 | ((intptr_t) msg));
+#else
+  // Just issue a simple break instruction for now. Alternatively we could use
+  // the swi(0x9f0001) instruction on Linux.
+  bkpt(0);
+#endif
+}
+
+
+void Assembler::bkpt(uint32_t imm16) {  // v5 and above
+  ASSERT(is_uint16(imm16));
+  emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf));
+}
+
+
+void Assembler::swi(uint32_t imm24, Condition cond) {
+  ASSERT(is_uint24(imm24));
+  emit(cond | 15*B24 | imm24);
+}
+
+
+// Coprocessor instructions
+void Assembler::cdp(Coprocessor coproc,
+                    int opcode_1,
+                    CRegister crd,
+                    CRegister crn,
+                    CRegister crm,
+                    int opcode_2,
+                    Condition cond) {
+  ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
+  emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
+       crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
+}
+
+
+void Assembler::cdp2(Coprocessor coproc,
+                     int opcode_1,
+                     CRegister crd,
+                     CRegister crn,
+                     CRegister crm,
+                     int opcode_2) {  // v5 and above
+  cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv));
+}
+
+
+void Assembler::mcr(Coprocessor coproc,
+                    int opcode_1,
+                    Register rd,
+                    CRegister crn,
+                    CRegister crm,
+                    int opcode_2,
+                    Condition cond) {
+  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
+  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
+       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
+}
+
+
+void Assembler::mcr2(Coprocessor coproc,
+                     int opcode_1,
+                     Register rd,
+                     CRegister crn,
+                     CRegister crm,
+                     int opcode_2) {  // v5 and above
+  mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
+}
+
+
+void Assembler::mrc(Coprocessor coproc,
+                    int opcode_1,
+                    Register rd,
+                    CRegister crn,
+                    CRegister crm,
+                    int opcode_2,
+                    Condition cond) {
+  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
+  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
+       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
+}
+
+
+void Assembler::mrc2(Coprocessor coproc,
+                     int opcode_1,
+                     Register rd,
+                     CRegister crn,
+                     CRegister crm,
+                     int opcode_2) {  // v5 and above
+  mrc(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
+}
+
+
+void Assembler::ldc(Coprocessor coproc,
+                    CRegister crd,
+                    const MemOperand& src,
+                    LFlag l,
+                    Condition cond) {
+  addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
+}
+
+
+void Assembler::ldc(Coprocessor coproc,
+                    CRegister crd,
+                    Register rn,
+                    int option,
+                    LFlag l,
+                    Condition cond) {
+  // unindexed addressing
+  ASSERT(is_uint8(option));
+  emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
+       coproc*B8 | (option & 255));
+}
+
+
+void Assembler::ldc2(Coprocessor coproc,
+                     CRegister crd,
+                     const MemOperand& src,
+                     LFlag l) {  // v5 and above
+  ldc(coproc, crd, src, l, static_cast<Condition>(nv));
+}
+
+
+void Assembler::ldc2(Coprocessor coproc,
+                     CRegister crd,
+                     Register rn,
+                     int option,
+                     LFlag l) {  // v5 and above
+  ldc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
+}
+
+
+void Assembler::stc(Coprocessor coproc,
+                    CRegister crd,
+                    const MemOperand& dst,
+                    LFlag l,
+                    Condition cond) {
+  addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
+}
+
+
+void Assembler::stc(Coprocessor coproc,
+                    CRegister crd,
+                    Register rn,
+                    int option,
+                    LFlag l,
+                    Condition cond) {
+  // unindexed addressing
+  ASSERT(is_uint8(option));
+  emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
+       coproc*B8 | (option & 255));
+}
+
+
+void Assembler::stc2(Coprocessor coproc,
+                     CRegister crd,
+                     const MemOperand& dst,
+                     LFlag l) {  // v5 and above
+  stc(coproc, crd, dst, l, static_cast<Condition>(nv));
+}
+
+
+void Assembler::stc2(Coprocessor coproc,
+                     CRegister crd,
+                     Register rn,
+                     int option,
+                     LFlag l) {  // v5 and above
+  stc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
+}
+
+
+// Pseudo instructions
+void Assembler::lea(Register dst,
+                    const MemOperand& x,
+                    SBit s,
+                    Condition cond) {
+  int am = x.am_;
+  if (!x.rm_.is_valid()) {
+    // immediate offset
+    if ((am & P) == 0)  // post indexing
+      mov(dst, Operand(x.rn_), s, cond);
+    else if ((am & U) == 0)  // negative indexing
+      sub(dst, x.rn_, Operand(x.offset_), s, cond);
+    else
+      add(dst, x.rn_, Operand(x.offset_), s, cond);
+  } else {
+    // Register offset (shift_imm_ and shift_op_ are 0) or scaled register
+    // offset; the constructors make sure that both shift_imm_ and shift_op_
+    // are initialized.
+    ASSERT(!x.rm_.is(pc));
+    if ((am & P) == 0)  // post indexing
+      mov(dst, Operand(x.rn_), s, cond);
+    else if ((am & U) == 0)  // negative indexing
+      sub(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
+    else
+      add(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
+  }
+}
+
+
+// Debugging
+void Assembler::RecordJSReturn() {
+  WriteRecordedPositions();
+  CheckBuffer();
+  RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
+void Assembler::RecordComment(const char* msg) {
+  if (FLAG_debug_code) {
+    CheckBuffer();
+    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+  }
+}
+
+
+void Assembler::RecordPosition(int pos) {
+  if (pos == RelocInfo::kNoPosition) return;
+  ASSERT(pos >= 0);
+  current_position_ = pos;
+}
+
+
+void Assembler::RecordStatementPosition(int pos) {
+  if (pos == RelocInfo::kNoPosition) return;
+  ASSERT(pos >= 0);
+  current_statement_position_ = pos;
+}
+
+
+void Assembler::WriteRecordedPositions() {
+  // Write the statement position if it is different from what was written last
+  // time.
+  if (current_statement_position_ != written_statement_position_) {
+    CheckBuffer();
+    RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
+    written_statement_position_ = current_statement_position_;
+  }
+
+  // Write the position if it is different from what was written last time and
+  // also different from the written statement position.
+  if (current_position_ != written_position_ &&
+      current_position_ != written_statement_position_) {
+    CheckBuffer();
+    RecordRelocInfo(RelocInfo::POSITION, current_position_);
+    written_position_ = current_position_;
+  }
+}
+
+
+void Assembler::GrowBuffer() {
+  if (!own_buffer_) FATAL("external code buffer is too small");
+
+  // compute new buffer size
+  CodeDesc desc;  // the new buffer
+  if (buffer_size_ < 4*KB) {
+    desc.buffer_size = 4*KB;
+  } else if (buffer_size_ < 1*MB) {
+    desc.buffer_size = 2*buffer_size_;
+  } else {
+    desc.buffer_size = buffer_size_ + 1*MB;
+  }
+  CHECK_GT(desc.buffer_size, 0);  // no overflow
+
+  // setup new buffer
+  desc.buffer = NewArray<byte>(desc.buffer_size);
+
+  desc.instr_size = pc_offset();
+  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+
+  // copy the data
+  int pc_delta = desc.buffer - buffer_;
+  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
+  memmove(desc.buffer, buffer_, desc.instr_size);
+  memmove(reloc_info_writer.pos() + rc_delta,
+          reloc_info_writer.pos(), desc.reloc_size);
+
+  // switch buffers
+  DeleteArray(buffer_);
+  buffer_ = desc.buffer;
+  buffer_size_ = desc.buffer_size;
+  pc_ += pc_delta;
+  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+                               reloc_info_writer.last_pc() + pc_delta);
+
+  // None of our relocation types are pc-relative references to locations
+  // outside the code buffer, nor absolute references to locations inside the
+  // code buffer, so there is no need to relocate already-emitted relocation
+  // entries.
+
+  // relocate pending relocation entries
+  for (int i = 0; i < num_prinfo_; i++) {
+    RelocInfo& rinfo = prinfo_[i];
+    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+           rinfo.rmode() != RelocInfo::POSITION);
+    if (rinfo.rmode() != RelocInfo::JS_RETURN) {
+      rinfo.set_pc(rinfo.pc() + pc_delta);
+    }
+  }
+}
+
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+  RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
+  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
+    // Adjust code for new modes
+    ASSERT(RelocInfo::IsJSReturn(rmode)
+           || RelocInfo::IsComment(rmode)
+           || RelocInfo::IsPosition(rmode));
+    // these modes do not need an entry in the constant pool
+  } else {
+    ASSERT(num_prinfo_ < kMaxNumPRInfo);
+    prinfo_[num_prinfo_++] = rinfo;
+    // Make sure the constant pool is not emitted in place of the next
+    // instruction for which we just recorded relocation info
+    BlockConstPoolBefore(pc_offset() + kInstrSize);
+  }
+  if (rinfo.rmode() != RelocInfo::NONE) {
+    // Don't record external references unless the heap will be serialized.
+    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
+        !Serializer::enabled() &&
+        !FLAG_debug_code) {
+      return;
+    }
+    ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
+    reloc_info_writer.Write(&rinfo);
+  }
+}
+
+
+void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
+  // Calculate the offset of the next check. It will be overwritten
+  // when a const pool is generated or when const pools are being
+  // blocked for a specific range.
+  next_buffer_check_ = pc_offset() + kCheckConstInterval;
+
+  // There is nothing to do if there are no pending relocation info entries
+  if (num_prinfo_ == 0) return;
+
+  // We emit a constant pool at regular intervals of about kDistBetweenPools
+  // or when requested by parameter force_emit (e.g. after each function).
+  // We prefer not to emit a jump unless the max distance is reached or if we
+  // are running low on slots, which can happen if a lot of constants are being
+  // emitted (e.g. --debug-code and many static references).
+  int dist = pc_offset() - last_const_pool_end_;
+  if (!force_emit && dist < kMaxDistBetweenPools &&
+      (require_jump || dist < kDistBetweenPools) &&
+      // TODO(1236125): Cleanup the "magic" number below. We know that
+      // the code generation will test every kCheckConstIntervalInst.
+      // Thus we are safe as long as we generate less than 7 constant
+      // entries per instruction.
+      (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
+    return;
+  }
+
+  // If we did not return by now, we need to emit the constant pool soon.
+
+  // However, some small sequences of instructions must not be broken up by the
+  // insertion of a constant pool; such sequences are protected by setting
+  // no_const_pool_before_, which is checked here. Also, recursive calls to
+  // CheckConstPool are blocked by no_const_pool_before_.
+  if (pc_offset() < no_const_pool_before_) {
+    // Emission is currently blocked; make sure we try again as soon as possible
+    next_buffer_check_ = no_const_pool_before_;
+
+    // Something is wrong if emission is forced and blocked at the same time
+    ASSERT(!force_emit);
+    return;
+  }
+
+  int jump_instr = require_jump ? kInstrSize : 0;
+
+  // Check that the code buffer is large enough before emitting the constant
+  // pool and relocation information (include the jump over the pool and the
+  // constant pool marker).
+  int max_needed_space =
+      jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
+  while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
+
+  // Block recursive calls to CheckConstPool
+  BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
+                       num_prinfo_*kInstrSize);
+  // Don't bother to check for the emit calls below.
+  next_buffer_check_ = no_const_pool_before_;
+
+  // Emit jump over constant pool if necessary
+  Label after_pool;
+  if (require_jump) b(&after_pool);
+
+  RecordComment("[ Constant Pool");
+
+  // Put down constant pool marker
+  // "Undefined instruction" as specified by A3.1 Instruction set encoding
+  emit(0x03000000 | num_prinfo_);
+
+  // Emit constant pool entries
+  for (int i = 0; i < num_prinfo_; i++) {
+    RelocInfo& rinfo = prinfo_[i];
+    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+           rinfo.rmode() != RelocInfo::POSITION &&
+           rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
+    Instr instr = instr_at(rinfo.pc());
+
+    // Instruction to patch must be a ldr/str [pc, #offset]
+    // P and U set, B and W clear, Rn == pc, offset12 still 0
+    ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
+           (2*B25 | P | U | pc.code()*B16));
+    int delta = pc_ - rinfo.pc() - 8;
+    ASSERT(delta >= -4);  // instr could be ldr pc, [pc, #-4] followed by targ32
+    if (delta < 0) {
+      instr &= ~U;
+      delta = -delta;
+    }
+    ASSERT(is_uint12(delta));
+    instr_at_put(rinfo.pc(), instr + delta);
+    emit(rinfo.data());
+  }
+  num_prinfo_ = 0;
+  last_const_pool_end_ = pc_offset();
+
+  RecordComment("]");
+
+  if (after_pool.is_linked()) {
+    bind(&after_pool);
+  }
+
+  // Since a constant pool was just emitted, move the check offset forward by
+  // the standard interval.
+  next_buffer_check_ = pc_offset() + kCheckConstInterval;
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
new file mode 100644
index 0000000..d1df08c
--- /dev/null
+++ b/src/arm/assembler-arm.h
@@ -0,0 +1,818 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been modified
+// significantly by Google Inc.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+
+// A light-weight ARM Assembler
+// Generates user mode instructions for the ARM architecture up to version 5
+
+#ifndef V8_ARM_ASSEMBLER_ARM_H_
+#define V8_ARM_ASSEMBLER_ARM_H_
+#include <stdio.h>
+#include "assembler.h"
+
+namespace v8 {
+namespace internal {
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+//
+// Core register
+struct Register {
+  bool is_valid() const  { return 0 <= code_ && code_ < 16; }
+  bool is(Register reg) const  { return code_ == reg.code_; }
+  int code() const  {
+    ASSERT(is_valid());
+    return code_;
+  }
+  int bit() const  {
+    ASSERT(is_valid());
+    return 1 << code_;
+  }
+
+  // (unfortunately we can't make this private in a struct)
+  int code_;
+};
+
+
+extern Register no_reg;
+extern Register r0;
+extern Register r1;
+extern Register r2;
+extern Register r3;
+extern Register r4;
+extern Register r5;
+extern Register r6;
+extern Register r7;
+extern Register r8;
+extern Register r9;
+extern Register r10;
+extern Register fp;
+extern Register ip;
+extern Register sp;
+extern Register lr;
+extern Register pc;
+
+
+// Coprocessor register
+struct CRegister {
+  bool is_valid() const  { return 0 <= code_ && code_ < 16; }
+  bool is(CRegister creg) const  { return code_ == creg.code_; }
+  int code() const  {
+    ASSERT(is_valid());
+    return code_;
+  }
+  int bit() const  {
+    ASSERT(is_valid());
+    return 1 << code_;
+  }
+
+  // (unfortunately we can't make this private in a struct)
+  int code_;
+};
+
+
+extern CRegister no_creg;
+extern CRegister cr0;
+extern CRegister cr1;
+extern CRegister cr2;
+extern CRegister cr3;
+extern CRegister cr4;
+extern CRegister cr5;
+extern CRegister cr6;
+extern CRegister cr7;
+extern CRegister cr8;
+extern CRegister cr9;
+extern CRegister cr10;
+extern CRegister cr11;
+extern CRegister cr12;
+extern CRegister cr13;
+extern CRegister cr14;
+extern CRegister cr15;
+
+
+// Coprocessor number
+enum Coprocessor {
+  p0  = 0,
+  p1  = 1,
+  p2  = 2,
+  p3  = 3,
+  p4  = 4,
+  p5  = 5,
+  p6  = 6,
+  p7  = 7,
+  p8  = 8,
+  p9  = 9,
+  p10 = 10,
+  p11 = 11,
+  p12 = 12,
+  p13 = 13,
+  p14 = 14,
+  p15 = 15
+};
+
+
+// Condition field in instructions
+enum Condition {
+  eq =  0 << 28,  // Z set            equal.
+  ne =  1 << 28,  // Z clear          not equal.
+  nz =  1 << 28,  // Z clear          not zero.
+  cs =  2 << 28,  // C set            carry set.
+  hs =  2 << 28,  // C set            unsigned higher or same.
+  cc =  3 << 28,  // C clear          carry clear.
+  lo =  3 << 28,  // C clear          unsigned lower.
+  mi =  4 << 28,  // N set            negative.
+  pl =  5 << 28,  // N clear          positive or zero.
+  vs =  6 << 28,  // V set            overflow.
+  vc =  7 << 28,  // V clear          no overflow.
+  hi =  8 << 28,  // C set, Z clear   unsigned higher.
+  ls =  9 << 28,  // C clear or Z set unsigned lower or same.
+  ge = 10 << 28,  // N == V           greater or equal.
+  lt = 11 << 28,  // N != V           less than.
+  gt = 12 << 28,  // Z clear, N == V  greater than.
+  le = 13 << 28,  // Z set or N != V  less than or equal.
+  al = 14 << 28   //                  always.
+};
+
+
+// Returns the equivalent of !cc.
+INLINE(Condition NegateCondition(Condition cc));
+
+
+// Corresponds to transposing the operands of a comparison.
+inline Condition ReverseCondition(Condition cc) {
+  switch (cc) {
+    case lo:
+      return hi;
+    case hi:
+      return lo;
+    case hs:
+      return ls;
+    case ls:
+      return hs;
+    case lt:
+      return gt;
+    case gt:
+      return lt;
+    case ge:
+      return le;
+    case le:
+      return ge;
+    default:
+      return cc;
+  }
+}
+
+
+// Branch hints are not used on the ARM.  They are defined so that they can
+// appear in shared function signatures, but will be ignored in ARM
+// implementations.
+enum Hint { no_hint };
+
+// Hints are not used on the ARM.  Negating is trivial.
+inline Hint NegateHint(Hint ignored) { return no_hint; }
+
+
+// -----------------------------------------------------------------------------
+// Addressing modes and instruction variants
+
+// Shifter operand shift operation
+enum ShiftOp {
+  LSL = 0 << 5,
+  LSR = 1 << 5,
+  ASR = 2 << 5,
+  ROR = 3 << 5,
+  RRX = -1
+};
+
+
+// Condition code updating mode
+enum SBit {
+  SetCC   = 1 << 20,  // set condition code
+  LeaveCC = 0 << 20   // leave condition code unchanged
+};
+
+
+// Status register selection
+enum SRegister {
+  CPSR = 0 << 22,
+  SPSR = 1 << 22
+};
+
+
+// Status register fields
+enum SRegisterField {
+  CPSR_c = CPSR | 1 << 16,
+  CPSR_x = CPSR | 1 << 17,
+  CPSR_s = CPSR | 1 << 18,
+  CPSR_f = CPSR | 1 << 19,
+  SPSR_c = SPSR | 1 << 16,
+  SPSR_x = SPSR | 1 << 17,
+  SPSR_s = SPSR | 1 << 18,
+  SPSR_f = SPSR | 1 << 19
+};
+
+// Status register field mask (or'ed SRegisterField enum values)
+typedef uint32_t SRegisterFieldMask;
+
+
+// Memory operand addressing mode
+enum AddrMode {
+  // bit encoding P U W
+  Offset       = (8|4|0) << 21,  // offset (without writeback to base)
+  PreIndex     = (8|4|1) << 21,  // pre-indexed addressing with writeback
+  PostIndex    = (0|4|0) << 21,  // post-indexed addressing with writeback
+  NegOffset    = (8|0|0) << 21,  // negative offset (without writeback to base)
+  NegPreIndex  = (8|0|1) << 21,  // negative pre-indexed with writeback
+  NegPostIndex = (0|0|0) << 21   // negative post-indexed with writeback
+};
+
+
+// Load/store multiple addressing mode
+enum BlockAddrMode {
+  // bit encoding P U W
+  da           = (0|0|0) << 21,  // decrement after
+  ia           = (0|4|0) << 21,  // increment after
+  db           = (8|0|0) << 21,  // decrement before
+  ib           = (8|4|0) << 21,  // increment before
+  da_w         = (0|0|1) << 21,  // decrement after with writeback to base
+  ia_w         = (0|4|1) << 21,  // increment after with writeback to base
+  db_w         = (8|0|1) << 21,  // decrement before with writeback to base
+  ib_w         = (8|4|1) << 21   // increment before with writeback to base
+};
+
+
+// Coprocessor load/store operand size
+enum LFlag {
+  Long  = 1 << 22,  // long load/store coprocessor
+  Short = 0 << 22   // short load/store coprocessor
+};
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands
+
+// Class Operand represents a shifter operand in data processing instructions
+class Operand BASE_EMBEDDED {
+ public:
+  // immediate
+  INLINE(explicit Operand(int32_t immediate,
+         RelocInfo::Mode rmode = RelocInfo::NONE));
+  INLINE(explicit Operand(const ExternalReference& f));
+  INLINE(explicit Operand(const char* s));
+  INLINE(explicit Operand(Object** opp));
+  INLINE(explicit Operand(Context** cpp));
+  explicit Operand(Handle<Object> handle);
+  INLINE(explicit Operand(Smi* value));
+
+  // rm
+  INLINE(explicit Operand(Register rm));
+
+  // rm <shift_op> shift_imm
+  explicit Operand(Register rm, ShiftOp shift_op, int shift_imm);
+
+  // rm <shift_op> rs
+  explicit Operand(Register rm, ShiftOp shift_op, Register rs);
+
+  // Return true if this is a register operand.
+  INLINE(bool is_reg() const);
+
+  Register rm() const { return rm_; }
+
+ private:
+  Register rm_;
+  Register rs_;
+  ShiftOp shift_op_;
+  int shift_imm_;  // valid if rm_ != no_reg && rs_ == no_reg
+  int32_t imm32_;  // valid if rm_ == no_reg
+  RelocInfo::Mode rmode_;
+
+  friend class Assembler;
+};
+
+
+// Class MemOperand represents a memory operand in load and store instructions
+class MemOperand BASE_EMBEDDED {
+ public:
+  // [rn +/- offset]      Offset/NegOffset
+  // [rn +/- offset]!     PreIndex/NegPreIndex
+  // [rn], +/- offset     PostIndex/NegPostIndex
+  // offset is any signed 32-bit value; the offset is first loaded into
+  // register ip if it does not fit the addressing mode (a 12-bit unsigned
+  // offset plus a sign bit)
+  explicit MemOperand(Register rn, int32_t offset = 0, AddrMode am = Offset);
+
+  // [rn +/- rm]          Offset/NegOffset
+  // [rn +/- rm]!         PreIndex/NegPreIndex
+  // [rn], +/- rm         PostIndex/NegPostIndex
+  explicit MemOperand(Register rn, Register rm, AddrMode am = Offset);
+
+  // [rn +/- rm <shift_op> shift_imm]      Offset/NegOffset
+  // [rn +/- rm <shift_op> shift_imm]!     PreIndex/NegPreIndex
+  // [rn], +/- rm <shift_op> shift_imm     PostIndex/NegPostIndex
+  explicit MemOperand(Register rn, Register rm,
+                      ShiftOp shift_op, int shift_imm, AddrMode am = Offset);
+
+ private:
+  Register rn_;  // base
+  Register rm_;  // register offset
+  int32_t offset_;  // valid if rm_ == no_reg
+  ShiftOp shift_op_;
+  int shift_imm_;  // valid if rm_ != no_reg
+  AddrMode am_;  // bits P, U, and W
+
+  friend class Assembler;
+};
+
+
+typedef int32_t Instr;
+
+
+extern const Instr kMovLrPc;
+extern const Instr kLdrPCPattern;
+
+
+class Assembler : public Malloced {
+ public:
+  // Create an assembler. Instructions and relocation information are emitted
+  // into a buffer, with the instructions starting from the beginning and the
+  // relocation information starting from the end of the buffer. See CodeDesc
+  // for a detailed comment on the layout (globals.h).
+  //
+  // If the provided buffer is NULL, the assembler allocates and grows its own
+  // buffer, and buffer_size determines the initial buffer size. The buffer is
+  // owned by the assembler and deallocated upon destruction of the assembler.
+  //
+  // If the provided buffer is not NULL, the assembler uses the provided buffer
+  // for code generation and assumes its size to be buffer_size. If the buffer
+  // is too small, a fatal error occurs. No deallocation of the buffer is done
+  // upon destruction of the assembler.
+  Assembler(void* buffer, int buffer_size);
+  ~Assembler();
+
+  // GetCode emits any pending (non-emitted) code and fills the descriptor
+  // desc. GetCode() is idempotent; it returns the same result if no other
+  // Assembler functions are invoked in between GetCode() calls.
+  void GetCode(CodeDesc* desc);
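+
+  // A minimal usage sketch (illustrative only, not taken from the original
+  // sources): let the assembler own and grow its buffer, emit an instruction,
+  // and extract the code descriptor.
+  //
+  //   Assembler masm(NULL, 4*KB);    // assembler-owned, growable buffer
+  //   masm.add(r0, r0, Operand(1));  // r0 += 1
+  //   CodeDesc desc;
+  //   masm.GetCode(&desc);           // desc describes the emitted code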
+
+  // Label operations & relative jumps (PPUM Appendix D)
+  //
+  // Takes a branch opcode (cc) and a label (L) and generates
+  // either a backward branch or a forward branch and links it
+  // to the label fixup chain. Usage:
+  //
+  // Label L;    // unbound label
+  // j(cc, &L);  // forward branch to unbound label
+  // bind(&L);   // bind label to the current pc
+  // j(cc, &L);  // backward branch to bound label
+  // bind(&L);   // illegal: a label may be bound only once
+  //
+  // Note: The same Label can be used for forward and backward branches
+  // but it may be bound only once.
+
+  void bind(Label* L);  // binds an unbound label L to the current code position
+
+  // Returns the branch offset to the given label from the current code position
+  // Links the label to the current position if it is still unbound
+  // Manages the jump elimination optimization if the second parameter is true.
+  int branch_offset(Label* L, bool jump_elimination_allowed);
+
+  // Puts a label's target address at the given position.
+  // The high 8 bits are set to zero.
+  void label_at_put(Label* L, int at_offset);
+
+  // Return the address in the constant pool of the code target address used by
+  // the branch/call instruction at pc.
+  INLINE(static Address target_address_address_at(Address pc));
+
+  // Read/Modify the code target address in the branch/call instruction at pc.
+  INLINE(static Address target_address_at(Address pc));
+  INLINE(static void set_target_address_at(Address pc, Address target));
+
+  // Size of an instruction.
+  static const int kInstrSize = sizeof(Instr);
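+  // (4 bytes on ARM, since Instr is typedef'ed to int32_t above.)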
+
+  // Distance between the instruction referring to the address of the call
+  // target (ldr pc, [target addr in const pool]) and the return address
+  static const int kCallTargetAddressOffset = kInstrSize;
+
+  // Distance between start of patched return sequence and the emitted address
+  // to jump to.
+  static const int kPatchReturnSequenceAddressOffset = kInstrSize;
+
+  // Difference between address of current opcode and value read from pc
+  // register.
+  static const int kPcLoadDelta = 8;
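+  // (In ARM state the processor pipeline makes a read of the pc register
+  // yield the address of the current instruction plus 8, hence the value 8.)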
+
+
+  // ---------------------------------------------------------------------------
+  // Code generation
+
+  // Insert the smallest number of nop instructions
+  // possible to align the pc offset to a multiple
+  // of m. m must be a power of 2 (>= 4).
+  void Align(int m);
+
+  // Branch instructions
+  void b(int branch_offset, Condition cond = al);
+  void bl(int branch_offset, Condition cond = al);
+  void blx(int branch_offset);  // v5 and above
+  void blx(Register target, Condition cond = al);  // v5 and above
+  void bx(Register target, Condition cond = al);  // v5 and above, plus v4t
+
+  // Convenience branch instructions using labels
+  void b(Label* L, Condition cond = al)  {
+    b(branch_offset(L, cond == al), cond);
+  }
+  void b(Condition cond, Label* L)  { b(branch_offset(L, cond == al), cond); }
+  void bl(Label* L, Condition cond = al)  { bl(branch_offset(L, false), cond); }
+  void bl(Condition cond, Label* L)  { bl(branch_offset(L, false), cond); }
+  void blx(Label* L)  { blx(branch_offset(L, false)); }  // v5 and above
+
+  // Data-processing instructions
+  void and_(Register dst, Register src1, const Operand& src2,
+            SBit s = LeaveCC, Condition cond = al);
+
+  void eor(Register dst, Register src1, const Operand& src2,
+           SBit s = LeaveCC, Condition cond = al);
+
+  void sub(Register dst, Register src1, const Operand& src2,
+           SBit s = LeaveCC, Condition cond = al);
+  void sub(Register dst, Register src1, Register src2,
+           SBit s = LeaveCC, Condition cond = al) {
+    sub(dst, src1, Operand(src2), s, cond);
+  }
+
+  void rsb(Register dst, Register src1, const Operand& src2,
+           SBit s = LeaveCC, Condition cond = al);
+
+  void add(Register dst, Register src1, const Operand& src2,
+           SBit s = LeaveCC, Condition cond = al);
+
+  void adc(Register dst, Register src1, const Operand& src2,
+           SBit s = LeaveCC, Condition cond = al);
+
+  void sbc(Register dst, Register src1, const Operand& src2,
+           SBit s = LeaveCC, Condition cond = al);
+
+  void rsc(Register dst, Register src1, const Operand& src2,
+           SBit s = LeaveCC, Condition cond = al);
+
+  void tst(Register src1, const Operand& src2, Condition cond = al);
+  void tst(Register src1, Register src2, Condition cond = al) {
+    tst(src1, Operand(src2), cond);
+  }
+
+  void teq(Register src1, const Operand& src2, Condition cond = al);
+
+  void cmp(Register src1, const Operand& src2, Condition cond = al);
+  void cmp(Register src1, Register src2, Condition cond = al) {
+    cmp(src1, Operand(src2), cond);
+  }
+
+  void cmn(Register src1, const Operand& src2, Condition cond = al);
+
+  void orr(Register dst, Register src1, const Operand& src2,
+           SBit s = LeaveCC, Condition cond = al);
+  void orr(Register dst, Register src1, Register src2,
+           SBit s = LeaveCC, Condition cond = al) {
+    orr(dst, src1, Operand(src2), s, cond);
+  }
+
+  void mov(Register dst, const Operand& src,
+           SBit s = LeaveCC, Condition cond = al);
+  void mov(Register dst, Register src, SBit s = LeaveCC, Condition cond = al) {
+    mov(dst, Operand(src), s, cond);
+  }
+
+  void bic(Register dst, Register src1, const Operand& src2,
+           SBit s = LeaveCC, Condition cond = al);
+
+  void mvn(Register dst, const Operand& src,
+           SBit s = LeaveCC, Condition cond = al);
+
+  // Multiply instructions
+
+  void mla(Register dst, Register src1, Register src2, Register srcA,
+           SBit s = LeaveCC, Condition cond = al);
+
+  void mul(Register dst, Register src1, Register src2,
+           SBit s = LeaveCC, Condition cond = al);
+
+  void smlal(Register dstL, Register dstH, Register src1, Register src2,
+             SBit s = LeaveCC, Condition cond = al);
+
+  void smull(Register dstL, Register dstH, Register src1, Register src2,
+             SBit s = LeaveCC, Condition cond = al);
+
+  void umlal(Register dstL, Register dstH, Register src1, Register src2,
+             SBit s = LeaveCC, Condition cond = al);
+
+  void umull(Register dstL, Register dstH, Register src1, Register src2,
+             SBit s = LeaveCC, Condition cond = al);
+
+  // Miscellaneous arithmetic instructions
+
+  void clz(Register dst, Register src, Condition cond = al);  // v5 and above
+
+  // Status register access instructions
+
+  void mrs(Register dst, SRegister s, Condition cond = al);
+  void msr(SRegisterFieldMask fields, const Operand& src, Condition cond = al);
+
+  // Load/Store instructions
+  void ldr(Register dst, const MemOperand& src, Condition cond = al);
+  void str(Register src, const MemOperand& dst, Condition cond = al);
+  void ldrb(Register dst, const MemOperand& src, Condition cond = al);
+  void strb(Register src, const MemOperand& dst, Condition cond = al);
+  void ldrh(Register dst, const MemOperand& src, Condition cond = al);
+  void strh(Register src, const MemOperand& dst, Condition cond = al);
+  void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
+  void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
+
+  // Load/Store multiple instructions
+  void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
+  void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
+
+  // Semaphore instructions
+  void swp(Register dst, Register src, Register base, Condition cond = al);
+  void swpb(Register dst, Register src, Register base, Condition cond = al);
+
+  // Exception-generating instructions and debugging support
+  void stop(const char* msg);
+
+  void bkpt(uint32_t imm16);  // v5 and above
+  void swi(uint32_t imm24, Condition cond = al);
+
+  // Coprocessor instructions
+
+  void cdp(Coprocessor coproc, int opcode_1,
+           CRegister crd, CRegister crn, CRegister crm,
+           int opcode_2, Condition cond = al);
+
+  void cdp2(Coprocessor coproc, int opcode_1,
+            CRegister crd, CRegister crn, CRegister crm,
+            int opcode_2);  // v5 and above
+
+  void mcr(Coprocessor coproc, int opcode_1,
+           Register rd, CRegister crn, CRegister crm,
+           int opcode_2 = 0, Condition cond = al);
+
+  void mcr2(Coprocessor coproc, int opcode_1,
+            Register rd, CRegister crn, CRegister crm,
+            int opcode_2 = 0);  // v5 and above
+
+  void mrc(Coprocessor coproc, int opcode_1,
+           Register rd, CRegister crn, CRegister crm,
+           int opcode_2 = 0, Condition cond = al);
+
+  void mrc2(Coprocessor coproc, int opcode_1,
+            Register rd, CRegister crn, CRegister crm,
+            int opcode_2 = 0);  // v5 and above
+
+  void ldc(Coprocessor coproc, CRegister crd, const MemOperand& src,
+           LFlag l = Short, Condition cond = al);
+  void ldc(Coprocessor coproc, CRegister crd, Register base, int option,
+           LFlag l = Short, Condition cond = al);
+
+  void ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src,
+            LFlag l = Short);  // v5 and above
+  void ldc2(Coprocessor coproc, CRegister crd, Register base, int option,
+            LFlag l = Short);  // v5 and above
+
+  void stc(Coprocessor coproc, CRegister crd, const MemOperand& dst,
+           LFlag l = Short, Condition cond = al);
+  void stc(Coprocessor coproc, CRegister crd, Register base, int option,
+           LFlag l = Short, Condition cond = al);
+
+  void stc2(Coprocessor coproc, CRegister crd, const MemOperand& dst,
+            LFlag l = Short);  // v5 and above
+  void stc2(Coprocessor coproc, CRegister crd, Register base, int option,
+            LFlag l = Short);  // v5 and above
+
+  // Pseudo instructions
+  void nop()  { mov(r0, Operand(r0)); }
+
+  void push(Register src, Condition cond = al) {
+    str(src, MemOperand(sp, 4, NegPreIndex), cond);
+  }
+
+  void pop(Register dst, Condition cond = al) {
+    ldr(dst, MemOperand(sp, 4, PostIndex), cond);
+  }
+
+  void pop() {
+    add(sp, sp, Operand(kPointerSize));
+  }
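+
+  // Note: push() and pop() above emit exactly the str/ldr/add patterns that
+  // the push/pop peephole elimination in Assembler::add(), ldr() and str()
+  // recognizes (kPushRegPattern, kPopRegPattern, kPopInstruction), so
+  // redundant push/pop pairs can be removed when FLAG_push_pop_elimination
+  // is enabled.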
+
+  // Load effective address of memory operand x into register dst
+  void lea(Register dst, const MemOperand& x,
+           SBit s = LeaveCC, Condition cond = al);
+
+  // Jump unconditionally to given label.
+  void jmp(Label* L) { b(L, al); }
+
+  // Check the code size generated from label to here.
+  int InstructionsGeneratedSince(Label* l) {
+    return (pc_offset() - l->pos()) / kInstrSize;
+  }
+
+  // Debugging
+
+  // Mark address of the ExitJSFrame code.
+  void RecordJSReturn();
+
+  // Record a comment relocation entry that can be used by a disassembler.
+  // Use --debug_code to enable.
+  void RecordComment(const char* msg);
+
+  void RecordPosition(int pos);
+  void RecordStatementPosition(int pos);
+  void WriteRecordedPositions();
+
+  int pc_offset() const { return pc_ - buffer_; }
+  int current_position() const { return current_position_; }
+  int current_statement_position() const { return current_statement_position_; }
+
+ protected:
+  int buffer_space() const { return reloc_info_writer.pos() - pc_; }
+
+  // Read/patch instructions
+  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
+  void instr_at_put(byte* pc, Instr instr) {
+    *reinterpret_cast<Instr*>(pc) = instr;
+  }
+  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+  void instr_at_put(int pos, Instr instr) {
+    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+  }
+
+  // Decode branch instruction at pos and return branch target pos
+  int target_at(int pos);
+
+  // Patch branch instruction at pos to branch to given branch target pos
+  void target_at_put(int pos, int target_pos);
+
+  // Check if is time to emit a constant pool for pending reloc info entries
+  void CheckConstPool(bool force_emit, bool require_jump);
+
+  // Block the emission of the constant pool before pc_offset
+  void BlockConstPoolBefore(int pc_offset) {
+    if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
+  }
+
+ private:
+  // Code buffer:
+  // The buffer into which code and relocation info are generated.
+  byte* buffer_;
+  int buffer_size_;
+  // True if the assembler owns the buffer, false if buffer is external.
+  bool own_buffer_;
+
+  // Buffer size and constant pool distance are checked together at regular
+  // intervals of kBufferCheckInterval emitted bytes
+  static const int kBufferCheckInterval = 1*KB/2;
+  int next_buffer_check_;  // pc offset of next buffer check
+
+  // Code generation
+  // The relocation writer's position is at least kGap bytes below the end of
+  // the generated instructions. This is so that multi-instruction sequences do
+  // not have to check for overflow. The same is true for writes of large
+  // relocation info entries.
+  static const int kGap = 32;
+  byte* pc_;  // the program counter; moves forward
+
+  // Constant pool generation
+  // Pools are emitted in the instruction stream, preferably after unconditional
+  // jumps or after returns from functions (in dead code locations).
+  // If a long code sequence does not contain unconditional jumps, it is
+  // necessary to emit the constant pool before the pool gets too far from the
+  // location it is accessed from. In this case, we emit a jump over the emitted
+  // constant pool.
+  // Constants in the pool may be addresses of functions that get relocated;
+  // if so, a relocation info entry is associated to the constant pool entry.
+
+  // Repeated checking whether the constant pool should be emitted is rather
+  // expensive. By default we only check again once a number of instructions
+  // has been generated. That also means that the sizing of the buffers is not
+  // an exact science, and that we rely on some slop to not overrun buffers.
+  static const int kCheckConstIntervalInst = 32;
+  static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
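+  // With kInstrSize == 4 this means a check at most every 32 instructions,
+  // i.e. every 128 bytes of generated code.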
+
+
+  // Pools are emitted after function return and in dead code at (more or less)
+  // regular intervals of kDistBetweenPools bytes
+  static const int kDistBetweenPools = 1*KB;
+
+  // Constants in pools are accessed via pc relative addressing, which can
+  // reach +/-4KB thereby defining a maximum distance between the instruction
+  // and the accessed constant. We satisfy this constraint by limiting the
+  // distance between pools.
+  static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval;
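+  // (Assuming KB == 1024 and thus kBufferCheckInterval == 512, this limits
+  // the distance between pools to 4096 - 1024 = 3072 bytes.)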
+
+  // Emission of the constant pool may be blocked in some code sequences
+  int no_const_pool_before_;  // block emission before this pc offset
+
+  // Keep track of the last emitted pool to guarantee a maximal distance
+  int last_const_pool_end_;  // pc offset following the last constant pool
+
+  // Relocation info generation
+  // Each relocation is encoded as a variable size value
+  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
+  RelocInfoWriter reloc_info_writer;
+  // Relocation info records are also used during code generation as temporary
+  // containers for constants and code target addresses until they are emitted
+  // to the constant pool. These pending relocation info records are temporarily
+  // stored in a separate buffer until a constant pool is emitted.
+  // If every instruction in a long sequence is accessing the pool, we need one
+  // pending relocation entry per instruction.
+  static const int kMaxNumPRInfo = kMaxDistBetweenPools/kInstrSize;
+  RelocInfo prinfo_[kMaxNumPRInfo];  // the buffer of pending relocation info
+  int num_prinfo_;  // number of pending reloc info entries in the buffer
+
+  // The bound position, before this we cannot do instruction elimination.
+  int last_bound_pos_;
+
+  // source position information
+  int current_position_;
+  int current_statement_position_;
+  int written_position_;
+  int written_statement_position_;
+
+  // Code emission
+  inline void CheckBuffer();
+  void GrowBuffer();
+  inline void emit(Instr x);
+
+  // Instruction generation
+  void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
+  void addrmod2(Instr instr, Register rd, const MemOperand& x);
+  void addrmod3(Instr instr, Register rd, const MemOperand& x);
+  void addrmod4(Instr instr, Register rn, RegList rl);
+  void addrmod5(Instr instr, CRegister crd, const MemOperand& x);
+
+  // Labels
+  void print(Label* L);
+  void bind_to(Label* L, int pos);
+  void link_to(Label* L, Label* appendix);
+  void next(Label* L);
+
+  // Record reloc info for current pc_
+  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+  friend class RegExpMacroAssemblerARM;
+  friend class RelocInfo;
+  friend class CodePatcher;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_ARM_ASSEMBLER_ARM_H_
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
new file mode 100644
index 0000000..d7afb37
--- /dev/null
+++ b/src/arm/builtins-arm.cc
@@ -0,0 +1,1287 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "debug.h"
+#include "runtime.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
+  // TODO(428): Don't pass the function in a static variable.
+  __ mov(ip, Operand(ExternalReference::builtin_passed_function()));
+  __ str(r1, MemOperand(ip, 0));
+
+  // The actual argument count has already been loaded into register
+  // r0, but JumpToRuntime expects r0 to contain the number of
+  // arguments including the receiver.
+  __ add(r0, r0, Operand(1));
+  __ JumpToRuntime(ExternalReference(id));
+}
+
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+  // Load the global context.
+
+  __ ldr(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ ldr(result,
+         FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
+  // Load the Array function from the global context.
+  __ ldr(result,
+         MemOperand(result,
+                    Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+// This constant has the same value as JSArray::kPreallocatedArrayElements. If
+// JSArray::kPreallocatedArrayElements is changed, the handling of the loop
+// unfolding below should be reconsidered.
+static const int kLoopUnfoldLimit = 4;
+
+
+// Allocate an empty JSArray. The allocated array is put into the result
+// register. An elements backing store is allocated with size initial_capacity
+// and filled with the hole values.
+static void AllocateEmptyJSArray(MacroAssembler* masm,
+                                 Register array_function,
+                                 Register result,
+                                 Register scratch1,
+                                 Register scratch2,
+                                 Register scratch3,
+                                 int initial_capacity,
+                                 Label* gc_required) {
+  ASSERT(initial_capacity > 0);
+  // Load the initial map from the array function.
+  __ ldr(scratch1, FieldMemOperand(array_function,
+                                   JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Allocate the JSArray object together with space for a fixed array with the
+  // requested elements.
+  int size = JSArray::kSize + FixedArray::SizeFor(initial_capacity);
+  __ AllocateInNewSpace(size / kPointerSize,
+                        result,
+                        scratch2,
+                        scratch3,
+                        gc_required,
+                        TAG_OBJECT);
+
+  // Allocated the JSArray. Now initialize the fields except for the elements
+  // array.
+  // result: JSObject
+  // scratch1: initial map
+  // scratch2: start of next object
+  __ str(scratch1, FieldMemOperand(result, JSObject::kMapOffset));
+  __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
+  __ str(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
+  // Field JSArray::kElementsOffset is initialized later.
+  __ mov(scratch3, Operand(0));
+  __ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
+
+  // Calculate the location of the elements array and set elements array member
+  // of the JSArray.
+  // result: JSObject
+  // scratch2: start of next object
+  __ lea(scratch1, MemOperand(result, JSArray::kSize));
+  __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
+
+  // Clear the heap tag on the elements array.
+  __ and_(scratch1, scratch1, Operand(~kHeapObjectTagMask));
+
+  // Initialize the FixedArray and fill it with holes. FixedArray length is not
+  // stored as a smi.
+  // result: JSObject
+  // scratch1: elements array (untagged)
+  // scratch2: start of next object
+  __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
+  ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+  __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+  __ mov(scratch3, Operand(initial_capacity));
+  ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+  __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+
+  // Fill the FixedArray with the hole value.
+  ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+  ASSERT(initial_capacity <= kLoopUnfoldLimit);
+  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+  for (int i = 0; i < initial_capacity; i++) {
+    __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+  }
+}
+
+// Allocate a JSArray with the number of elements stored in a register. The
+// register array_function holds the built-in Array function and the register
+// array_size holds the size of the array as a smi. The allocated array is put
+// into the result register and the beginning and end of the FixedArray
+// elements storage are put into registers elements_array_storage and
+// elements_array_end (see below for when that is not the case). If the
+// parameter fill_with_hole is true the allocated elements backing store is
+// filled with the hole values, otherwise it is left uninitialized. When the
+// backing store is filled the register elements_array_storage is scratched.
+static void AllocateJSArray(MacroAssembler* masm,
+                            Register array_function,  // Array function.
+                            Register array_size,  // As a smi.
+                            Register result,
+                            Register elements_array_storage,
+                            Register elements_array_end,
+                            Register scratch1,
+                            Register scratch2,
+                            bool fill_with_hole,
+                            Label* gc_required) {
+  Label not_empty, allocated;
+
+  // Load the initial map from the array function.
+  __ ldr(elements_array_storage,
+         FieldMemOperand(array_function,
+                         JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check whether an empty sized array is requested.
+  __ tst(array_size, array_size);
+  __ b(nz, &not_empty);
+
+  // If an empty array is requested, allocate a small elements array anyway.
+  // This keeps the code below free of special casing for the empty array.
+  int size = JSArray::kSize +
+             FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
+  __ AllocateInNewSpace(size / kPointerSize,
+                        result,
+                        elements_array_end,
+                        scratch1,
+                        gc_required,
+                        TAG_OBJECT);
+  __ jmp(&allocated);
+
+  // Allocate the JSArray object together with space for a FixedArray with the
+  // requested number of elements.
+  __ bind(&not_empty);
+  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  __ mov(elements_array_end,
+         Operand((JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize));
+  __ add(elements_array_end,
+         elements_array_end,
+         Operand(array_size, ASR, kSmiTagSize));
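+  // elements_array_end now holds the allocation size in words: the JSArray
+  // object, the FixedArray header and one word per requested element.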
+  __ AllocateInNewSpace(elements_array_end,
+                        result,
+                        scratch1,
+                        scratch2,
+                        gc_required,
+                        TAG_OBJECT);
+
+  // Allocated the JSArray. Now initialize the fields except for the elements
+  // array.
+  // result: JSObject
+  // elements_array_storage: initial map
+  // array_size: size of array (smi)
+  __ bind(&allocated);
+  __ str(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
+  __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
+  __ str(elements_array_storage,
+         FieldMemOperand(result, JSArray::kPropertiesOffset));
+  // Field JSArray::kElementsOffset is initialized later.
+  __ str(array_size, FieldMemOperand(result, JSArray::kLengthOffset));
+
+  // Calculate the location of the elements array and set elements array member
+  // of the JSArray.
+  // result: JSObject
+  // array_size: size of array (smi)
+  __ add(elements_array_storage, result, Operand(JSArray::kSize));
+  __ str(elements_array_storage,
+         FieldMemOperand(result, JSArray::kElementsOffset));
+
+  // Clear the heap tag on the elements array.
+  __ and_(elements_array_storage,
+          elements_array_storage,
+          Operand(~kHeapObjectTagMask));
+  // Initialize the fixed array and fill it with holes. FixedArray length is not
+  // stored as a smi.
+  // result: JSObject
+  // elements_array_storage: elements array (untagged)
+  // array_size: size of array (smi)
+  ASSERT(kSmiTag == 0);
+  __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+  ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+  __ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
+  // Convert array_size from smi to value.
+  __ mov(array_size,
+         Operand(array_size, ASR, kSmiTagSize));
+  __ tst(array_size, array_size);
+  // The length of the FixedArray is the number of pre-allocated elements if
+  // the actual JSArray has length 0, and the requested length for non-empty
+  // JSArrays. The length of a FixedArray is not stored as a smi.
+  __ mov(array_size, Operand(JSArray::kPreallocatedArrayElements), LeaveCC, eq);
+  ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+  __ str(array_size,
+         MemOperand(elements_array_storage, kPointerSize, PostIndex));
+
+  // Calculate elements array and elements array end.
+  // result: JSObject
+  // elements_array_storage: elements array element storage
+  // array_size: size of elements array
+  __ add(elements_array_end,
+         elements_array_storage,
+         Operand(array_size, LSL, kPointerSizeLog2));
+
+  // Fill the allocated FixedArray with the hole value if requested.
+  // result: JSObject
+  // elements_array_storage: elements array element storage
+  // elements_array_end: start of next object
+  if (fill_with_hole) {
+    Label loop, entry;
+    __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex);
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ str(scratch1,
+           MemOperand(elements_array_storage, kPointerSize, PostIndex));
+    __ bind(&entry);
+    __ cmp(elements_array_storage, elements_array_end);
+    __ b(lt, &loop);
+  }
+}
+
+// Create a new array for the built-in Array function. This function allocates
+// the JSArray object and the FixedArray elements array and initializes these.
+// If the Array cannot be constructed in native code the generic code is
+// called. This function assumes the following state:
+//   r0: argc
+//   r1: constructor (built-in Array function)
+//   lr: return address
+//   sp[0]: last argument
+// This function is used for both construct and normal calls of Array. The
+// only difference between handling a construct call and a normal call is
+// that for a construct call the constructor function in r1 needs to be
+// preserved for entering the generic code. In both cases argc in r0 needs to
+// be preserved. Both registers are preserved by this code so there is no
+// need to differentiate between a construct call and a normal call.
+static void ArrayNativeCode(MacroAssembler* masm,
+                            Label *call_generic_code) {
+  Label argc_one_or_more, argc_two_or_more;
+
+  // Check for array construction with zero or one argument.
+  __ cmp(r0, Operand(0));
+  __ b(ne, &argc_one_or_more);
+
+  // Handle construction of an empty array.
+  AllocateEmptyJSArray(masm,
+                       r1,
+                       r2,
+                       r3,
+                       r4,
+                       r5,
+                       JSArray::kPreallocatedArrayElements,
+                       call_generic_code);
+  __ IncrementCounter(&Counters::array_function_native, 1, r3, r4);
+  // Set up return value, remove receiver from stack and return.
+  __ mov(r0, r2);
+  __ add(sp, sp, Operand(kPointerSize));
+  __ Jump(lr);
+
+  // Check for one argument. Bail out if the argument is not a smi or if it
+  // is negative.
+  __ bind(&argc_one_or_more);
+  __ cmp(r0, Operand(1));
+  __ b(ne, &argc_two_or_more);
+  ASSERT(kSmiTag == 0);
+  __ ldr(r2, MemOperand(sp));  // Get the argument from the stack.
+  __ and_(r3, r2, Operand(kIntptrSignBit | kSmiTagMask), SetCC);
+  __ b(ne, call_generic_code);
+
+  // Handle construction of an empty array of a certain size. Bail out if size
+  // is too large to actually allocate an elements array.
+  ASSERT(kSmiTag == 0);
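+  // r2 still holds the smi-tagged requested length, so the limit below is
+  // shifted by kSmiTagSize before the comparison.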
+  __ cmp(r2, Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
+  __ b(ge, call_generic_code);
+
+  // r0: argc
+  // r1: constructor
+  // r2: array_size (smi)
+  // sp[0]: argument
+  AllocateJSArray(masm,
+                  r1,
+                  r2,
+                  r3,
+                  r4,
+                  r5,
+                  r6,
+                  r7,
+                  true,
+                  call_generic_code);
+  __ IncrementCounter(&Counters::array_function_native, 1, r2, r4);
+  // Set up return value, remove receiver and argument from stack and return.
+  __ mov(r0, r3);
+  __ add(sp, sp, Operand(2 * kPointerSize));
+  __ Jump(lr);
+
+  // Handle construction of an array from a list of arguments.
+  __ bind(&argc_two_or_more);
+  __ mov(r2, Operand(r0, LSL, kSmiTagSize));  // Convert argc to a smi.
+
+  // r0: argc
+  // r1: constructor
+  // r2: array_size (smi)
+  // sp[0]: last argument
+  AllocateJSArray(masm,
+                  r1,
+                  r2,
+                  r3,
+                  r4,
+                  r5,
+                  r6,
+                  r7,
+                  false,
+                  call_generic_code);
+  __ IncrementCounter(&Counters::array_function_native, 1, r2, r6);
+
+  // Fill arguments as array elements. Copy from the top of the stack (last
+  // element) to the array backing store filling it backwards. Note:
+  // elements_array_end points after the backing store therefore PreIndex is
+  // used when filling the backing store.
+  // r0: argc
+  // r3: JSArray
+  // r4: elements_array storage start (untagged)
+  // r5: elements_array_end (untagged)
+  // sp[0]: last argument
+  Label loop, entry;
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ ldr(r2, MemOperand(sp, kPointerSize, PostIndex));
+  __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
+  __ bind(&entry);
+  __ cmp(r4, r5);
+  __ b(lt, &loop);
+
+  // Remove caller arguments and receiver from the stack, set up the return
+  // value and return.
+  // r0: argc
+  // r3: JSArray
+  // sp[0]: receiver
+  __ add(sp, sp, Operand(kPointerSize));
+  __ mov(r0, r3);
+  __ Jump(lr);
+}
+
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0     : number of arguments
+  //  -- lr     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+  // Get the Array function.
+  GenerateLoadArrayFunction(masm, r1);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin Array function should be a map.
+    __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ tst(r2, Operand(kSmiTagMask));
+    __ Assert(ne, "Unexpected initial map for Array function");
+    __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+    __ Assert(eq, "Unexpected initial map for Array function");
+  }
+
+  // Run the native code for the Array function called as a normal function.
+  ArrayNativeCode(masm, &generic_array_code);
+
+  // Jump to the generic array code if the specialized code cannot handle
+  // the construction.
+  __ bind(&generic_array_code);
+  Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
+  Handle<Code> array_code(code);
+  __ Jump(array_code, RelocInfo::CODE_TARGET);
+}
+
+
+void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0     : number of arguments
+  //  -- r1     : constructor function
+  //  -- lr     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_constructor;
+
+  if (FLAG_debug_code) {
+    // The array construct code is only set for the builtin Array function
+    // which always has a map.
+    GenerateLoadArrayFunction(masm, r2);
+    __ cmp(r1, r2);
+    __ Assert(eq, "Unexpected Array function");
+    // Initial map for the builtin Array function should be a map.
+    __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ tst(r2, Operand(kSmiTagMask));
+    __ Assert(ne, "Unexpected initial map for Array function");
+    __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+    __ Assert(eq, "Unexpected initial map for Array function");
+  }
+
+  // Run the native code for the Array function called as a constructor.
+  ArrayNativeCode(masm, &generic_constructor);
+
+  // Jump to the generic construct code in case the specialized code cannot
+  // handle the construction.
+  __ bind(&generic_constructor);
+  Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+  Handle<Code> generic_construct_stub(code);
+  __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+}
+
+
+void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0     : number of arguments
+  //  -- r1     : constructor function
+  //  -- lr     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+
+  Label non_function_call;
+  // Check that the function is not a smi.
+  __ tst(r1, Operand(kSmiTagMask));
+  __ b(eq, &non_function_call);
+  // Check that the function is a JSFunction.
+  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+  __ b(ne, &non_function_call);
+
+  // Jump to the function-specific construct stub.
+  __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kConstructStubOffset));
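+  // Writing to pc jumps to the first instruction of the construct stub
+  // (the code object start plus the header, untagged).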
+  __ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  // r0: number of arguments
+  // r1: called object
+  __ bind(&non_function_call);
+
+  // Set expected number of arguments to zero (not changing r0).
+  __ mov(r2, Operand(0));
+  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+          RelocInfo::CODE_TARGET);
+}
+
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+  // Enter a construct frame.
+  __ EnterConstructFrame();
+
+  // Preserve the two incoming parameters on the stack.
+  __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+  __ push(r0);  // Smi-tagged arguments count.
+  __ push(r1);  // Constructor function.
+
+  // Use r7 for holding undefined which is used in several places below.
+  __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
+
+  // Try to allocate the object without transitioning into C code. If any of the
+  // preconditions is not met, the code bails out to the runtime call.
+  Label rt_call, allocated;
+  if (FLAG_inline_new) {
+    Label undo_allocation;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+    ExternalReference debug_step_in_fp =
+        ExternalReference::debug_step_in_fp_address();
+    __ mov(r2, Operand(debug_step_in_fp));
+    __ ldr(r2, MemOperand(r2));
+    __ tst(r2, r2);
+    __ b(nz, &rt_call);
+#endif
+
+    // Load the initial map and verify that it is in fact a map.
+    // r1: constructor function
+    // r7: undefined
+    __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ tst(r2, Operand(kSmiTagMask));
+    __ b(eq, &rt_call);
+    __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+    __ b(ne, &rt_call);
+
+    // Check that the constructor is not constructing a JSFunction (see
+    // comments in Runtime_NewObject in runtime.cc); in that case the initial
+    // map's instance type would be JS_FUNCTION_TYPE.
+    // r1: constructor function
+    // r2: initial map
+    // r7: undefined
+    __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
+    __ b(eq, &rt_call);
+
+    // Now allocate the JSObject on the heap.
+    // r1: constructor function
+    // r2: initial map
+    // r7: undefined
+    __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
+    __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, NO_ALLOCATION_FLAGS);
+
+    // Allocated the JSObject, now initialize the fields. Map is set to initial
+    // map and properties and elements are set to empty fixed array.
+    // r1: constructor function
+    // r2: initial map
+    // r3: object size
+    // r4: JSObject (not tagged)
+    // r7: undefined
+    __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+    __ mov(r5, r4);
+    ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+    __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
+    ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+    __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+    ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+    __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+
+    // Fill all the in-object properties with undefined.
+    // r1: constructor function
+    // r2: initial map
+    // r3: object size (in words)
+    // r4: JSObject (not tagged)
+    // r5: First in-object property of JSObject (not tagged)
+    // r7: undefined
+    __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2));  // End of object.
+    ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+    { Label loop, entry;
+      __ b(&entry);
+      __ bind(&loop);
+      __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
+      __ bind(&entry);
+      __ cmp(r5, Operand(r6));
+      __ b(lt, &loop);
+    }
+
+    // Add the object tag to make the JSObject real, so that we can continue and
+    // jump into the continuation code at any time from now on. Any failures
+    // need to undo the allocation, so that the heap is in a consistent state
+    // and verifiable.
+    __ add(r4, r4, Operand(kHeapObjectTag));
+
+    // Check if a non-empty properties array is needed. Continue with the
+    // allocated object if not; fall through to the runtime call if it is.
+    // r1: constructor function
+    // r4: JSObject
+    // r5: start of next object (not tagged)
+    // r7: undefined
+    __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
+    // The instance sizes field contains both pre-allocated property fields
+    // and in-object properties.
+    __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
+    __ and_(r6,
+            r0,
+            Operand(0x000000FF << Map::kPreAllocatedPropertyFieldsByte * 8));
+    __ add(r3, r3, Operand(r6, LSR, Map::kPreAllocatedPropertyFieldsByte * 8));
+    __ and_(r6, r0, Operand(0x000000FF << Map::kInObjectPropertiesByte * 8));
+    __ sub(r3, r3, Operand(r6, LSR, Map::kInObjectPropertiesByte * 8), SetCC);
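+    // r3 now holds unused + pre-allocated - in-object property fields, i.e.
+    // the number of slots needed in an out-of-object properties array. The
+    // SetCC above lets the branches below test for zero or negative counts.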
+
+    // Done if no extra properties are to be allocated.
+    __ b(eq, &allocated);
+    __ Assert(pl, "Property allocation count failed.");
+
+    // Scale the number of elements by pointer size and add the header for
+    // FixedArrays to the start of the next object calculation from above.
+    // r1: constructor
+    // r3: number of elements in properties array
+    // r4: JSObject
+    // r5: start of next object
+    // r7: undefined
+    __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
+    __ AllocateInNewSpace(r0,
+                          r5,
+                          r6,
+                          r2,
+                          &undo_allocation,
+                          RESULT_CONTAINS_TOP);
+
+    // Initialize the FixedArray.
+    // r1: constructor
+    // r3: number of elements in properties array
+    // r4: JSObject
+    // r5: FixedArray (not tagged)
+    // r7: undefined
+    __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
+    __ mov(r2, r5);
+    ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+    __ str(r6, MemOperand(r2, kPointerSize, PostIndex));
+    ASSERT_EQ(1 * kPointerSize, Array::kLengthOffset);
+    __ str(r3, MemOperand(r2, kPointerSize, PostIndex));
+
+    // Initialize the fields to undefined.
+    // r1: constructor function
+    // r2: First element of FixedArray (not tagged)
+    // r3: number of elements in properties array
+    // r4: JSObject
+    // r5: FixedArray (not tagged)
+    // r7: undefined
+    __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2));  // End of object.
+    ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+    { Label loop, entry;
+      __ b(&entry);
+      __ bind(&loop);
+      __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
+      __ bind(&entry);
+      __ cmp(r2, Operand(r6));
+      __ b(lt, &loop);
+    }
+
+    // Store the initialized FixedArray into the properties field of
+    // the JSObject
+    // r1: constructor function
+    // r4: JSObject
+    // r5: FixedArray (not tagged)
+    __ add(r5, r5, Operand(kHeapObjectTag));  // Add the heap tag.
+    __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset));
+
+    // Continue with JSObject being successfully allocated
+    // r1: constructor function
+    // r4: JSObject
+    __ jmp(&allocated);
+
+    // Undo the setting of the new top so that the heap is verifiable. For
+    // example, the map's unused properties potentially do not match the
+    // allocated object's unused properties.
+    // r4: JSObject (previous new top)
+    __ bind(&undo_allocation);
+    __ UndoAllocationInNewSpace(r4, r5);
+  }
+
+  // Allocate the new receiver object using the runtime call.
+  // r1: constructor function
+  __ bind(&rt_call);
+  __ push(r1);  // argument for Runtime_NewObject
+  __ CallRuntime(Runtime::kNewObject, 1);
+  __ mov(r4, r0);
+
+  // Receiver for constructor call allocated.
+  // r4: JSObject
+  __ bind(&allocated);
+  __ push(r4);
+
+  // Push the function (reloaded from the stack) and the allocated receiver.
+  // sp[0]: receiver (newly allocated object)
+  // sp[1]: constructor function
+  // sp[2]: number of arguments (smi-tagged)
+  __ ldr(r1, MemOperand(sp, kPointerSize));
+  __ push(r1);  // Constructor function.
+  __ push(r4);  // Receiver.
+
+  // Reload the number of arguments from the stack.
+  // r1: constructor function
+  // sp[0]: receiver
+  // sp[1]: constructor function
+  // sp[2]: receiver
+  // sp[3]: constructor function
+  // sp[4]: number of arguments (smi-tagged)
+  __ ldr(r3, MemOperand(sp, 4 * kPointerSize));
+
+  // Set up a pointer to the last argument.
+  __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+  // Set up the number of arguments for the function call below.
+  __ mov(r0, Operand(r3, LSR, kSmiTagSize));
+
+  // Copy arguments and receiver to the expression stack.
+  // r0: number of arguments
+  // r2: address of last argument (caller sp)
+  // r1: constructor function
+  // r3: number of arguments (smi-tagged)
+  // sp[0]: receiver
+  // sp[1]: constructor function
+  // sp[2]: receiver
+  // sp[3]: constructor function
+  // sp[4]: number of arguments (smi-tagged)
+  Label loop, entry;
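+  // r3 is the smi-tagged argument count: shifting it by kPointerSizeLog2 - 1
+  // turns it into a byte offset, and subtracting 2 per iteration decrements
+  // the untagged count by one.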
+  __ b(&entry);
+  __ bind(&loop);
+  __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1));
+  __ push(ip);
+  __ bind(&entry);
+  __ sub(r3, r3, Operand(2), SetCC);
+  __ b(ge, &loop);
+
+  // Call the function.
+  // r0: number of arguments
+  // r1: constructor function
+  ParameterCount actual(r0);
+  __ InvokeFunction(r1, actual, CALL_FUNCTION);
+
+  // Pop the function from the stack.
+  // sp[0]: constructor function
+  // sp[1]: receiver
+  // sp[2]: constructor function
+  // sp[3]: number of arguments (smi-tagged)
+  __ pop();
+
+  // Restore context from the frame.
+  // r0: result
+  // sp[0]: receiver
+  // sp[1]: constructor function
+  // sp[2]: number of arguments (smi-tagged)
+  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+  // If the result is an object (in the ECMA sense), we should get rid
+  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+  // on page 74.
+  Label use_receiver, exit;
+
+  // If the result is a smi, it is *not* an object in the ECMA sense.
+  // r0: result
+  // sp[0]: receiver (newly allocated object)
+  // sp[1]: constructor function
+  // sp[2]: number of arguments (smi-tagged)
+  __ tst(r0, Operand(kSmiTagMask));
+  __ b(eq, &use_receiver);
+
+  // If the type of the result (stored in its map) is less than
+  // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
+  __ CompareObjectType(r0, r3, r3, FIRST_JS_OBJECT_TYPE);
+  __ b(ge, &exit);
+
+  // Throw away the result of the constructor invocation and use the
+  // on-stack receiver as the result.
+  __ bind(&use_receiver);
+  __ ldr(r0, MemOperand(sp));
+
+  // Remove receiver from the stack, remove caller arguments, and
+  // return.
+  __ bind(&exit);
+  // r0: result
+  // sp[0]: receiver (newly allocated object)
+  // sp[1]: constructor function
+  // sp[2]: number of arguments (smi-tagged)
+  __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
+  __ LeaveConstructFrame();
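+  // r1 holds the smi-tagged argument count: the shift below converts it to a
+  // byte count, and the extra kPointerSize drops the receiver.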
+  __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
+  __ add(sp, sp, Operand(kPointerSize));
+  __ IncrementCounter(&Counters::constructed_objects, 1, r1, r2);
+  __ Jump(lr);
+}
+
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+                                             bool is_construct) {
+  // Called from Generate_JS_Entry
+  // r0: code entry
+  // r1: function
+  // r2: receiver
+  // r3: argc
+  // r4: argv
+  // r5-r7, cp may be clobbered
+
+  // Clear the context before we push it when entering the JS frame.
+  __ mov(cp, Operand(0));
+
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Set up the context from the function argument.
+  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
+  // Set up the roots register.
+  ExternalReference roots_address = ExternalReference::roots_address();
+  __ mov(r10, Operand(roots_address));
+
+  // Push the function and the receiver onto the stack.
+  __ push(r1);
+  __ push(r2);
+
+  // Copy arguments to the stack in a loop.
+  // r1: function
+  // r3: argc
+  // r4: argv, i.e. points to first arg
+  Label loop, entry;
+  __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2));
+  // r2 points past last arg.
+  __ b(&entry);
+  __ bind(&loop);
+  __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex));  // read next parameter
+  __ ldr(r0, MemOperand(r0));  // dereference handle
+  __ push(r0);  // push parameter
+  __ bind(&entry);
+  __ cmp(r4, Operand(r2));
+  __ b(ne, &loop);
+
+  // Initialize all JavaScript callee-saved registers, since they will be seen
+  // by the garbage collector as part of handlers.
+  __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+  __ mov(r5, Operand(r4));
+  __ mov(r6, Operand(r4));
+  __ mov(r7, Operand(r4));
+  if (kR9Available == 1) {
+    __ mov(r9, Operand(r4));
+  }
+
+  // Invoke the code and pass argc as r0.
+  __ mov(r0, Operand(r3));
+  if (is_construct) {
+    __ Call(Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)),
+            RelocInfo::CODE_TARGET);
+  } else {
+    ParameterCount actual(r0);
+    __ InvokeFunction(r1, actual, CALL_FUNCTION);
+  }
+
+  // Exit the JS frame, remove the parameters (except the function), and
+  // return. Respect the ABI stack constraint.
+  __ LeaveInternalFrame();
+  __ Jump(lr);
+
+  // r0: result
+}
+
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+
+void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+  // 1. Make sure we have at least one argument.
+  // r0: actual number of arguments
+  { Label done;
+    __ tst(r0, Operand(r0));
+    __ b(ne, &done);
+    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+    __ push(r2);
+    __ add(r0, r0, Operand(1));
+    __ bind(&done);
+  }
+
+  // 2. Get the function to call from the stack.
+  // r0: actual number of arguments
+  { Label done, non_function, function;
+    __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+    __ tst(r1, Operand(kSmiTagMask));
+    __ b(eq, &non_function);
+    __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+    __ b(eq, &function);
+
+    // Non-function called: Clear the function to force exception.
+    __ bind(&non_function);
+    __ mov(r1, Operand(0));
+    __ b(&done);
+
+    // Change the context eagerly because it will be used below to get the
+    // right global object.
+    __ bind(&function);
+    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
+    __ bind(&done);
+  }
+
+  // 3. Make sure the first argument is an object; convert it if necessary.
+  // r0: actual number of arguments
+  // r1: function
+  { Label call_to_object, use_global_receiver, patch_receiver, done;
+    __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
+    __ ldr(r2, MemOperand(r2, -kPointerSize));
+
+    // r0: actual number of arguments
+    // r1: function
+    // r2: first argument
+    __ tst(r2, Operand(kSmiTagMask));
+    __ b(eq, &call_to_object);
+
+    __ LoadRoot(r3, Heap::kNullValueRootIndex);
+    __ cmp(r2, r3);
+    __ b(eq, &use_global_receiver);
+    __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+    __ cmp(r2, r3);
+    __ b(eq, &use_global_receiver);
+
+    __ CompareObjectType(r2, r3, r3, FIRST_JS_OBJECT_TYPE);
+    __ b(lt, &call_to_object);
+    __ cmp(r3, Operand(LAST_JS_OBJECT_TYPE));
+    __ b(le, &done);
+
+    __ bind(&call_to_object);
+    __ EnterInternalFrame();
+
+    // Store number of arguments and function across the call into the runtime.
+    __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+    __ push(r0);
+    __ push(r1);
+
+    __ push(r2);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
+    __ mov(r2, r0);
+
+    // Restore number of arguments and function.
+    __ pop(r1);
+    __ pop(r0);
+    __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+
+    __ LeaveInternalFrame();
+    __ b(&patch_receiver);
+
+    // Use the global receiver object from the called function as the receiver.
+    __ bind(&use_global_receiver);
+    const int kGlobalIndex =
+        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+    __ ldr(r2, FieldMemOperand(cp, kGlobalIndex));
+    __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
+
+    __ bind(&patch_receiver);
+    __ add(r3, sp, Operand(r0, LSL, kPointerSizeLog2));
+    __ str(r2, MemOperand(r3, -kPointerSize));
+
+    __ bind(&done);
+  }
+
+  // 4. Shift the arguments (including the receiver) one slot down the stack.
+  // r0: actual number of arguments (including call() receiver)
+  // r1: function
+  { Label loop;
+    // Calculate the copy start address (destination). Copy end address is sp.
+    __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
+    __ add(r2, r2, Operand(kPointerSize));  // copy receiver too
+
+    __ bind(&loop);
+    __ ldr(ip, MemOperand(r2, -kPointerSize));
+    __ str(ip, MemOperand(r2));
+    __ sub(r2, r2, Operand(kPointerSize));
+    __ cmp(r2, sp);
+    __ b(ne, &loop);
+  }
+
+  // 5. Adjust the actual number of arguments and remove the top element.
+  // r0: actual number of arguments (including call() receiver)
+  // r1: function
+  __ sub(r0, r0, Operand(1));
+  __ add(sp, sp, Operand(kPointerSize));
+
+  // 6. Get the code for the function or the non-function builtin.
+  //    If the number of expected arguments matches the actual count, call
+  //    directly. Otherwise go through the arguments adaptor stub.
+  // r0: actual number of arguments
+  // r1: function
+  { Label invoke;
+    __ tst(r1, r1);
+    __ b(ne, &invoke);
+    __ mov(r2, Operand(0));  // expected arguments is 0 for CALL_NON_FUNCTION
+    __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
+    __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+                         RelocInfo::CODE_TARGET);
+
+    __ bind(&invoke);
+    __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+    __ ldr(r2,
+           FieldMemOperand(r3,
+                           SharedFunctionInfo::kFormalParameterCountOffset));
+    __ ldr(r3,
+           MemOperand(r3, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
+    __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
+    __ cmp(r2, r0);  // Check formal and actual parameter counts.
+    __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+                         RelocInfo::CODE_TARGET, ne);
+
+    // 7. Jump to the code in r3 without checking arguments.
+    ParameterCount expected(0);
+    __ InvokeCode(r3, expected, expected, JUMP_FUNCTION);
+  }
+}
+
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+  const int kIndexOffset    = -5 * kPointerSize;
+  const int kLimitOffset    = -4 * kPointerSize;
+  const int kArgsOffset     =  2 * kPointerSize;
+  const int kRecvOffset     =  3 * kPointerSize;
+  const int kFunctionOffset =  4 * kPointerSize;
+
+  __ EnterInternalFrame();
+
+  __ ldr(r0, MemOperand(fp, kFunctionOffset));  // get the function
+  __ push(r0);
+  __ ldr(r0, MemOperand(fp, kArgsOffset));  // get the args array
+  __ push(r0);
+  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_JS);
+
+  Label no_preemption, retry_preemption;
+  __ bind(&retry_preemption);
+  ExternalReference stack_guard_limit_address =
+      ExternalReference::address_of_stack_guard_limit();
+  __ mov(r2, Operand(stack_guard_limit_address));
+  __ ldr(r2, MemOperand(r2));
+  __ cmp(sp, r2);
+  __ b(hi, &no_preemption);
+
+  // We have already encountered a preemption or stack overflow before pushing
+  // the array contents. Save r0, which is the Smi-tagged length of the array.
+  __ push(r0);
+
+  // Runtime routines expect at least one argument, so give it a Smi.
+  __ mov(r0, Operand(Smi::FromInt(0)));
+  __ push(r0);
+  __ CallRuntime(Runtime::kStackGuard, 1);
+
+  // Since we returned, it wasn't a stack overflow.  Restore r0 and try again.
+  __ pop(r0);
+  __ b(&retry_preemption);
+
+  __ bind(&no_preemption);
+
+  // Eagerly check for stack-overflow before starting to push the arguments.
+  // r0: number of arguments.
+  // r2: stack limit.
+  Label okay;
+  __ sub(r2, sp, r2);
+
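+  // r0 is the smi-tagged argument count; the shift converts it to the number
+  // of bytes the arguments will occupy on the stack.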
+  __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ b(hi, &okay);
+
+  // Out of stack space.
+  __ ldr(r1, MemOperand(fp, kFunctionOffset));
+  __ push(r1);
+  __ push(r0);
+  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_JS);
+
+  // Push current limit and index.
+  __ bind(&okay);
+  __ push(r0);  // limit
+  __ mov(r1, Operand(0));  // initial index
+  __ push(r1);
+
+  // Change context eagerly to get the right global object if necessary.
+  __ ldr(r0, MemOperand(fp, kFunctionOffset));
+  __ ldr(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
+
+  // Compute the receiver.
+  Label call_to_object, use_global_receiver, push_receiver;
+  __ ldr(r0, MemOperand(fp, kRecvOffset));
+  __ tst(r0, Operand(kSmiTagMask));
+  __ b(eq, &call_to_object);
+  __ LoadRoot(r1, Heap::kNullValueRootIndex);
+  __ cmp(r0, r1);
+  __ b(eq, &use_global_receiver);
+  __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+  __ cmp(r0, r1);
+  __ b(eq, &use_global_receiver);
+
+  // Check if the receiver is already a JavaScript object.
+  // r0: receiver
+  __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
+  __ b(lt, &call_to_object);
+  __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
+  __ b(le, &push_receiver);
+
+  // Convert the receiver to a regular object.
+  // r0: receiver
+  __ bind(&call_to_object);
+  __ push(r0);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
+  __ b(&push_receiver);
+
+  // Use the current global receiver object as the receiver.
+  __ bind(&use_global_receiver);
+  const int kGlobalOffset =
+      Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+  __ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
+  __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
+
+  // Push the receiver.
+  // r0: receiver
+  __ bind(&push_receiver);
+  __ push(r0);
+
+  // Copy all arguments from the array to the stack.
+  Label entry, loop;
+  __ ldr(r0, MemOperand(fp, kIndexOffset));
+  __ b(&entry);
+
+  // Load the current argument from the arguments array and push it to the
+  // stack.
+  // r0: current argument index
+  __ bind(&loop);
+  __ ldr(r1, MemOperand(fp, kArgsOffset));
+  __ push(r1);
+  __ push(r0);
+
+  // Call the runtime to access the property in the arguments array.
+  __ CallRuntime(Runtime::kGetProperty, 2);
+  __ push(r0);
+
+  // Advance the index (kept on the stack as a smi).
+  __ ldr(r0, MemOperand(fp, kIndexOffset));
+  __ add(r0, r0, Operand(1 << kSmiTagSize));
+  __ str(r0, MemOperand(fp, kIndexOffset));
+
+  // Test if the copy loop has finished copying all the elements from the
+  // arguments object.
+  __ bind(&entry);
+  __ ldr(r1, MemOperand(fp, kLimitOffset));
+  __ cmp(r0, r1);
+  __ b(ne, &loop);
+
+  // Invoke the function.
+  ParameterCount actual(r0);
+  __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+  __ ldr(r1, MemOperand(fp, kFunctionOffset));
+  __ InvokeFunction(r1, actual, CALL_FUNCTION);
+
+  // Tear down the internal frame and remove function, receiver and args.
+  __ LeaveInternalFrame();
+  __ add(sp, sp, Operand(3 * kPointerSize));
+  __ Jump(lr);
+}
+
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+  __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+  __ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() | fp.bit() | lr.bit());
+  __ add(fp, sp, Operand(3 * kPointerSize));
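+  // fp now points at the saved caller fp; the frame marker, the function and
+  // the smi-tagged argument count sit at fp - 1, -2 and -3 words respectively
+  // (the count is read back in LeaveArgumentsAdaptorFrame).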
+}
+
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0 : result being passed through
+  // -----------------------------------
+  // Get the number of arguments passed (as a smi), tear down the frame and
+  // then remove the parameters from the stack.
+  __ ldr(r1, MemOperand(fp, -3 * kPointerSize));
+  __ mov(sp, fp);
+  __ ldm(ia_w, sp, fp.bit() | lr.bit());
+  __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ add(sp, sp, Operand(kPointerSize));  // adjust for receiver
+}
+
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0 : actual number of arguments
+  //  -- r1 : function (passed through to callee)
+  //  -- r2 : expected number of arguments
+  //  -- r3 : code entry to call
+  // -----------------------------------
+
+  Label invoke, dont_adapt_arguments;
+
+  Label enough, too_few;
+  __ cmp(r0, Operand(r2));
+  __ b(lt, &too_few);
+  __ cmp(r2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+  __ b(eq, &dont_adapt_arguments);
+
+  {  // Enough parameters: actual >= expected
+    __ bind(&enough);
+    EnterArgumentsAdaptorFrame(masm);
+
+    // Calculate copy start address into r0 and copy end address into r2.
+    // r0: actual number of arguments as a smi
+    // r1: function
+    // r2: expected number of arguments
+    // r3: code entry to call
+    __ add(r0, fp, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+    // adjust for return address and receiver
+    __ add(r0, r0, Operand(2 * kPointerSize));
+    __ sub(r2, r0, Operand(r2, LSL, kPointerSizeLog2));
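+    // r0 now points at the incoming receiver (the copy below includes it);
+    // r2 marks the copy end, so the receiver and the first 'expected'
+    // arguments end up in the new frame.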
+
+    // Copy the arguments (including the receiver) to the new stack frame.
+    // r0: copy start address
+    // r1: function
+    // r2: copy end address
+    // r3: code entry to call
+
+    Label copy;
+    __ bind(&copy);
+    __ ldr(ip, MemOperand(r0, 0));
+    __ push(ip);
+    __ cmp(r0, r2);  // Compare before moving to next argument.
+    __ sub(r0, r0, Operand(kPointerSize));
+    __ b(ne, &copy);
+
+    __ b(&invoke);
+  }
+
+  {  // Too few parameters: Actual < expected
+    __ bind(&too_few);
+    EnterArgumentsAdaptorFrame(masm);
+
+    // Calculate the copy start address into r0; the copy end address is fp.
+    // r0: actual number of arguments as a smi
+    // r1: function
+    // r2: expected number of arguments
+    // r3: code entry to call
+    __ add(r0, fp, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+
+    // Copy the arguments (including the receiver) to the new stack frame.
+    // r0: copy start address
+    // r1: function
+    // r2: expected number of arguments
+    // r3: code entry to call
+    Label copy;
+    __ bind(&copy);
+    // Adjust load for return address and receiver.
+    __ ldr(ip, MemOperand(r0, 2 * kPointerSize));
+    __ push(ip);
+    __ cmp(r0, fp);  // Compare before moving to next argument.
+    __ sub(r0, r0, Operand(kPointerSize));
+    __ b(ne, &copy);
+
+    // Fill the remaining expected arguments with undefined.
+    // r1: function
+    // r2: expected number of arguments
+    // r3: code entry to call
+    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+    __ sub(r2, fp, Operand(r2, LSL, kPointerSizeLog2));
+    __ sub(r2, r2, Operand(4 * kPointerSize));  // Adjust for frame.
+
+    Label fill;
+    __ bind(&fill);
+    __ push(ip);
+    __ cmp(sp, r2);
+    __ b(ne, &fill);
+  }
+
+  // Call the entry point.
+  __ bind(&invoke);
+  __ Call(r3);
+
+  // Exit frame and return.
+  LeaveArgumentsAdaptorFrame(masm);
+  __ Jump(lr);
+
+
+  // -------------------------------------------
+  // Don't adapt arguments.
+  // -------------------------------------------
+  __ bind(&dont_adapt_arguments);
+  __ Jump(r3);
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/arm/codegen-arm-inl.h b/src/arm/codegen-arm-inl.h
new file mode 100644
index 0000000..9ff02cb
--- /dev/null
+++ b/src/arm/codegen-arm-inl.h
@@ -0,0 +1,87 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_ARM_CODEGEN_ARM_INL_H_
+#define V8_ARM_CODEGEN_ARM_INL_H_
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+void CodeGenerator::LoadConditionAndSpill(Expression* expression,
+                                          TypeofState typeof_state,
+                                          JumpTarget* true_target,
+                                          JumpTarget* false_target,
+                                          bool force_control) {
+  LoadCondition(expression, typeof_state, true_target, false_target,
+                force_control);
+}
+
+
+void CodeGenerator::LoadAndSpill(Expression* expression,
+                                 TypeofState typeof_state) {
+  Load(expression, typeof_state);
+}
+
+
+void CodeGenerator::VisitAndSpill(Statement* statement) {
+  Visit(statement);
+}
+
+
+void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
+  VisitStatements(statements);
+}
+
+
+void Reference::GetValueAndSpill(TypeofState typeof_state) {
+  GetValue(typeof_state);
+}
+
+
+// Platform-specific inline functions.
+
+void DeferredCode::Jump() { __ jmp(&entry_label_); }
+void DeferredCode::Branch(Condition cc) { __ b(cc, &entry_label_); }
+
+void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
+  GenerateFastMathOp(SIN, args);
+}
+
+
+void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
+  GenerateFastMathOp(COS, args);
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
+
+#endif  // V8_ARM_CODEGEN_ARM_INL_H_
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
new file mode 100644
index 0000000..cdd32f3
--- /dev/null
+++ b/src/arm/codegen-arm.cc
@@ -0,0 +1,6265 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "debug.h"
+#include "parser.h"
+#include "register-allocator-inl.h"
+#include "runtime.h"
+#include "scopes.h"
+
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+                                          Label* slow,
+                                          Condition cc);
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+                                    Label* rhs_not_nan,
+                                    Label* slow,
+                                    bool strict);
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm);
+static void MultiplyByKnownInt(MacroAssembler* masm,
+                               Register source,
+                               Register destination,
+                               int known_int);
+static bool IsEasyToMultiplyBy(int x);
+
+
+
+// -------------------------------------------------------------------------
+// Platform-specific DeferredCode functions.
+
+void DeferredCode::SaveRegisters() {
+  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+    int action = registers_[i];
+    if (action == kPush) {
+      __ push(RegisterAllocator::ToRegister(i));
+    } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
+      __ str(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
+    }
+  }
+}
+
+
+void DeferredCode::RestoreRegisters() {
+  // Restore registers in reverse order since they are popped off the stack.
+  for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
+    int action = registers_[i];
+    if (action == kPush) {
+      __ pop(RegisterAllocator::ToRegister(i));
+    } else if (action != kIgnore) {
+      action &= ~kSyncedFlag;
+      __ ldr(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
+    }
+  }
+}
+
+
+// -------------------------------------------------------------------------
+// CodeGenState implementation.
+
+CodeGenState::CodeGenState(CodeGenerator* owner)
+    : owner_(owner),
+      typeof_state_(NOT_INSIDE_TYPEOF),
+      true_target_(NULL),
+      false_target_(NULL),
+      previous_(NULL) {
+  owner_->set_state(this);
+}
+
+
+CodeGenState::CodeGenState(CodeGenerator* owner,
+                           TypeofState typeof_state,
+                           JumpTarget* true_target,
+                           JumpTarget* false_target)
+    : owner_(owner),
+      typeof_state_(typeof_state),
+      true_target_(true_target),
+      false_target_(false_target),
+      previous_(owner->state()) {
+  owner_->set_state(this);
+}
+
+
+CodeGenState::~CodeGenState() {
+  ASSERT(owner_->state() == this);
+  owner_->set_state(previous_);
+}
+
+
+// -------------------------------------------------------------------------
+// CodeGenerator implementation
+
+CodeGenerator::CodeGenerator(int buffer_size, Handle<Script> script,
+                             bool is_eval)
+    : is_eval_(is_eval),
+      script_(script),
+      deferred_(8),
+      masm_(new MacroAssembler(NULL, buffer_size)),
+      scope_(NULL),
+      frame_(NULL),
+      allocator_(NULL),
+      cc_reg_(al),
+      state_(NULL),
+      function_return_is_shadowed_(false) {
+}
+
+
+// Calling conventions:
+// fp: caller's frame pointer
+// sp: stack pointer
+// r1: called JS function
+// cp: callee's context
+
+void CodeGenerator::GenCode(FunctionLiteral* fun) {
+  ZoneList<Statement*>* body = fun->body();
+
+  // Initialize state.
+  ASSERT(scope_ == NULL);
+  scope_ = fun->scope();
+  ASSERT(allocator_ == NULL);
+  RegisterAllocator register_allocator(this);
+  allocator_ = &register_allocator;
+  ASSERT(frame_ == NULL);
+  frame_ = new VirtualFrame();
+  cc_reg_ = al;
+  {
+    CodeGenState state(this);
+
+    // Entry:
+    // Stack: receiver, arguments
+    // lr: return address
+    // fp: caller's frame pointer
+    // sp: stack pointer
+    // r1: called JS function
+    // cp: callee's context
+    allocator_->Initialize();
+    frame_->Enter();
+    // tos: code slot
+#ifdef DEBUG
+    if (strlen(FLAG_stop_at) > 0 &&
+        fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+      frame_->SpillAll();
+      __ stop("stop-at");
+    }
+#endif
+
+    // Allocate space for locals and initialize them.  This also checks
+    // for stack overflow.
+    frame_->AllocateStackSlots();
+    // Initialize the function return target after the locals are set
+    // up, because it needs the expected frame height from the frame.
+    function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
+    function_return_is_shadowed_ = false;
+
+    VirtualFrame::SpilledScope spilled_scope;
+    if (scope_->num_heap_slots() > 0) {
+      // Allocate local context.
+      // Get outer context and create a new context based on it.
+      __ ldr(r0, frame_->Function());
+      frame_->EmitPush(r0);
+      frame_->CallRuntime(Runtime::kNewContext, 1);  // r0 holds the result
+
+#ifdef DEBUG
+      JumpTarget verified_true;
+      __ cmp(r0, Operand(cp));
+      verified_true.Branch(eq);
+      __ stop("NewContext: r0 is expected to be the same as cp");
+      verified_true.Bind();
+#endif
+      // Update context local.
+      __ str(cp, frame_->Context());
+    }
+
+    // TODO(1241774): Improve this code:
+    // 1) only needed if we have a context
+    // 2) no need to recompute context ptr every single time
+    // 3) don't copy parameter operand code from SlotOperand!
+    {
+      Comment cmnt2(masm_, "[ copy context parameters into .context");
+
+      // Note that iteration order is relevant here! If we have the same
+      // parameter twice (e.g., function (x, y, x)) and that parameter
+      // needs to be copied into the context, it is the last argument
+      // passed for that parameter that must end up in the context. This
+      // is a rare case so we don't check for it; instead we rely on the
+      // copying order: such a parameter is copied repeatedly into the
+      // same context location and thus the last value is what is seen
+      // inside the function.
+      for (int i = 0; i < scope_->num_parameters(); i++) {
+        Variable* par = scope_->parameter(i);
+        Slot* slot = par->slot();
+        if (slot != NULL && slot->type() == Slot::CONTEXT) {
+          ASSERT(!scope_->is_global_scope());  // no parameters in global scope
+          __ ldr(r1, frame_->ParameterAt(i));
+          // Loads r2 with context; used below in RecordWrite.
+          __ str(r1, SlotOperand(slot, r2));
+          // Load the offset into r3.
+          int slot_offset =
+              FixedArray::kHeaderSize + slot->index() * kPointerSize;
+          __ mov(r3, Operand(slot_offset));
+          __ RecordWrite(r2, r3, r1);
+        }
+      }
+    }
+
+    // Store the arguments object.  This must happen after context
+    // initialization because the arguments object may be stored in the
+    // context.
+    if (scope_->arguments() != NULL) {
+      ASSERT(scope_->arguments_shadow() != NULL);
+      Comment cmnt(masm_, "[ allocate arguments object");
+      { Reference shadow_ref(this, scope_->arguments_shadow());
+        { Reference arguments_ref(this, scope_->arguments());
+          ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+          __ ldr(r2, frame_->Function());
+          // The receiver is below the arguments, the return address,
+          // and the frame pointer on the stack.
+          const int kReceiverDisplacement = 2 + scope_->num_parameters();
+          __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
+          __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
+          frame_->Adjust(3);
+          __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
+          frame_->CallStub(&stub, 3);
+          frame_->EmitPush(r0);
+          arguments_ref.SetValue(NOT_CONST_INIT);
+        }
+        shadow_ref.SetValue(NOT_CONST_INIT);
+      }
+      frame_->Drop();  // Value is no longer needed.
+    }
+
+    // Generate code to 'execute' declarations and initialize functions
+    // (source elements). In case of an illegal redeclaration we need to
+    // handle that instead of processing the declarations.
+    if (scope_->HasIllegalRedeclaration()) {
+      Comment cmnt(masm_, "[ illegal redeclarations");
+      scope_->VisitIllegalRedeclaration(this);
+    } else {
+      Comment cmnt(masm_, "[ declarations");
+      ProcessDeclarations(scope_->declarations());
+      // Bail out if a stack-overflow exception occurred when processing
+      // declarations.
+      if (HasStackOverflow()) return;
+    }
+
+    if (FLAG_trace) {
+      frame_->CallRuntime(Runtime::kTraceEnter, 0);
+      // Ignore the return value.
+    }
+
+    // Compile the body of the function in a vanilla state. Don't
+    // bother compiling all the code if the scope has an illegal
+    // redeclaration.
+    if (!scope_->HasIllegalRedeclaration()) {
+      Comment cmnt(masm_, "[ function body");
+#ifdef DEBUG
+      bool is_builtin = Bootstrapper::IsActive();
+      bool should_trace =
+          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
+      if (should_trace) {
+        frame_->CallRuntime(Runtime::kDebugTrace, 0);
+        // Ignore the return value.
+      }
+#endif
+      VisitStatementsAndSpill(body);
+    }
+  }
+
+  // Generate the return sequence if necessary.
+  if (has_valid_frame() || function_return_.is_linked()) {
+    if (!function_return_.is_linked()) {
+      CodeForReturnPosition(fun);
+    }
+    // exit
+    // r0: result
+    // sp: stack pointer
+    // fp: frame pointer
+    // cp: callee's context
+    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+
+    function_return_.Bind();
+    if (FLAG_trace) {
+      // Push the return value on the stack as the parameter.
+      // Runtime::TraceExit returns the parameter as it is.
+      frame_->EmitPush(r0);
+      frame_->CallRuntime(Runtime::kTraceExit, 1);
+    }
+
+    // Add a label for checking the size of the code used for returning.
+    Label check_exit_codesize;
+    masm_->bind(&check_exit_codesize);
+
+    // Tear down the frame which will restore the caller's frame pointer and
+    // the link register.
+    frame_->Exit();
+
+    // Here we use masm_-> instead of the __ macro to keep the code coverage
+    // tool from instrumenting this sequence, as we rely on the code size here.
+    masm_->add(sp, sp, Operand((scope_->num_parameters() + 1) * kPointerSize));
+    masm_->Jump(lr);
+
+    // Check that the size of the code used for returning matches what is
+    // expected by the debugger.
+    ASSERT_EQ(kJSReturnSequenceLength,
+              masm_->InstructionsGeneratedSince(&check_exit_codesize));
+  }
+
+  // Code generation state must be reset.
+  ASSERT(!has_cc());
+  ASSERT(state_ == NULL);
+  ASSERT(!function_return_is_shadowed_);
+  function_return_.Unuse();
+  DeleteFrame();
+
+  // Process any deferred code using the register allocator.
+  if (!HasStackOverflow()) {
+    ProcessDeferred();
+  }
+
+  allocator_ = NULL;
+  scope_ = NULL;
+}
+
+
+MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
+  // Currently, this assertion will fail if we try to assign to
+  // a constant variable that is constant because it is read-only
+  // (such as the variable referring to a named function expression).
+  // We need to implement assignments to read-only variables.
+  // Ideally, we should do this during AST generation (by converting
+  // such assignments into expression statements); however, in general
+  // we may not be able to make the decision until past AST generation,
+  // that is when the entire program is known.
+  ASSERT(slot != NULL);
+  int index = slot->index();
+  switch (slot->type()) {
+    case Slot::PARAMETER:
+      return frame_->ParameterAt(index);
+
+    case Slot::LOCAL:
+      return frame_->LocalAt(index);
+
+    case Slot::CONTEXT: {
+      // Follow the context chain if necessary.
+      ASSERT(!tmp.is(cp));  // do not overwrite context register
+      Register context = cp;
+      int chain_length = scope()->ContextChainLength(slot->var()->scope());
+      for (int i = 0; i < chain_length; i++) {
+        // Load the closure.
+        // (All contexts, even 'with' contexts, have a closure,
+        // and it is the same for all contexts inside a function.
+        // There is no need to go to the function context first.)
+        __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
+        // Load the function context (which is the incoming, outer context).
+        __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
+        context = tmp;
+      }
+      // We may have a 'with' context now. Get the function context.
+      // (In fact this mov may never be needed, since the scope analysis
+      // may not permit a direct context access in this case and thus we
+      // are always at a function context. However it is safe to
+      // dereference, because the function context of a function context
+      // is itself. Before deleting this mov we should try to create a
+      // counter-example first, though...)
+      __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
+      return ContextOperand(tmp, index);
+    }
+
+    default:
+      UNREACHABLE();
+      return MemOperand(r0, 0);
+  }
+}
+
+
+MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
+    Slot* slot,
+    Register tmp,
+    Register tmp2,
+    JumpTarget* slow) {
+  ASSERT(slot->type() == Slot::CONTEXT);
+  Register context = cp;
+
+  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
+    if (s->num_heap_slots() > 0) {
+      if (s->calls_eval()) {
+        // Check that extension is NULL.
+        __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
+        __ tst(tmp2, tmp2);
+        slow->Branch(ne);
+      }
+      __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
+      __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
+      context = tmp;
+    }
+  }
+  // Check that last extension is NULL.
+  __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
+  __ tst(tmp2, tmp2);
+  slow->Branch(ne);
+  __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
+  return ContextOperand(tmp, slot->index());
+}
+
+
+// Loads a value on TOS. If it is a boolean value, the result may have been
+// (partially) translated into branches, or it may have set the condition
+// code register. If force_cc is set, the value is forced to set the
+// condition code register and no value is pushed. If the condition code
+// register was set, has_cc() is true and cc_reg_ contains the condition to
+// test for 'true'.
+void CodeGenerator::LoadCondition(Expression* x,
+                                  TypeofState typeof_state,
+                                  JumpTarget* true_target,
+                                  JumpTarget* false_target,
+                                  bool force_cc) {
+  ASSERT(!has_cc());
+  int original_height = frame_->height();
+
+  { CodeGenState new_state(this, typeof_state, true_target, false_target);
+    Visit(x);
+
+    // If we hit a stack overflow, we may not have actually visited
+    // the expression.  In that case, we ensure that we have a
+    // valid-looking frame state because we will continue to generate
+    // code as we unwind the C++ stack.
+    //
+    // It's possible to have both a stack overflow and a valid frame
+    // state (eg, a subexpression overflowed, visiting it returned
+    // with a dummied frame state, and visiting this expression
+    // returned with a normal-looking state).
+    if (HasStackOverflow() &&
+        has_valid_frame() &&
+        !has_cc() &&
+        frame_->height() == original_height) {
+      true_target->Jump();
+    }
+  }
+  if (force_cc && frame_ != NULL && !has_cc()) {
+    // Convert the TOS value to a boolean in the condition code register.
+    ToBoolean(true_target, false_target);
+  }
+  ASSERT(!force_cc || !has_valid_frame() || has_cc());
+  ASSERT(!has_valid_frame() ||
+         (has_cc() && frame_->height() == original_height) ||
+         (!has_cc() && frame_->height() == original_height + 1));
+}
+
+
+void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  JumpTarget true_target;
+  JumpTarget false_target;
+  LoadCondition(x, typeof_state, &true_target, &false_target, false);
+
+  if (has_cc()) {
+    // Convert cc_reg_ into a boolean value.
+    JumpTarget loaded;
+    JumpTarget materialize_true;
+    materialize_true.Branch(cc_reg_);
+    __ LoadRoot(r0, Heap::kFalseValueRootIndex);
+    frame_->EmitPush(r0);
+    loaded.Jump();
+    materialize_true.Bind();
+    __ LoadRoot(r0, Heap::kTrueValueRootIndex);
+    frame_->EmitPush(r0);
+    loaded.Bind();
+    cc_reg_ = al;
+  }
+
+  if (true_target.is_linked() || false_target.is_linked()) {
+    // We have at least one condition value that has been "translated"
+    // into a branch, thus it needs to be loaded explicitly.
+    JumpTarget loaded;
+    if (frame_ != NULL) {
+      loaded.Jump();  // Don't lose the current TOS.
+    }
+    bool both = true_target.is_linked() && false_target.is_linked();
+    // Load "true" if necessary.
+    if (true_target.is_linked()) {
+      true_target.Bind();
+      __ LoadRoot(r0, Heap::kTrueValueRootIndex);
+      frame_->EmitPush(r0);
+    }
+    // If both "true" and "false" need to be loaded jump across the code for
+    // "false".
+    if (both) {
+      loaded.Jump();
+    }
+    // Load "false" if necessary.
+    if (false_target.is_linked()) {
+      false_target.Bind();
+      __ LoadRoot(r0, Heap::kFalseValueRootIndex);
+      frame_->EmitPush(r0);
+    }
+    // A value is loaded on all paths reaching this point.
+    loaded.Bind();
+  }
+  ASSERT(has_valid_frame());
+  ASSERT(!has_cc());
+  ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::LoadGlobal() {
+  VirtualFrame::SpilledScope spilled_scope;
+  __ ldr(r0, GlobalObject());
+  frame_->EmitPush(r0);
+}
+
+
+void CodeGenerator::LoadGlobalReceiver(Register scratch) {
+  VirtualFrame::SpilledScope spilled_scope;
+  __ ldr(scratch, ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ ldr(scratch,
+         FieldMemOperand(scratch, GlobalObject::kGlobalReceiverOffset));
+  frame_->EmitPush(scratch);
+}
+
+
+// TODO(1241834): Get rid of this function in favor of just using Load, now
+// that we have the INSIDE_TYPEOF typeof state. => Need to handle global
+// variables w/o reference errors elsewhere.
+void CodeGenerator::LoadTypeofExpression(Expression* x) {
+  VirtualFrame::SpilledScope spilled_scope;
+  Variable* variable = x->AsVariableProxy()->AsVariable();
+  if (variable != NULL && !variable->is_this() && variable->is_global()) {
+    // NOTE: This is somewhat nasty. We force the compiler to load
+    // the variable as if through '<global>.<variable>' to make sure we
+    // do not get reference errors.
+    Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
+    Literal key(variable->name());
+    // TODO(1241834): Fetch the position from the variable instead of using
+    // no position.
+    Property property(&global, &key, RelocInfo::kNoPosition);
+    LoadAndSpill(&property);
+  } else {
+    LoadAndSpill(x, INSIDE_TYPEOF);
+  }
+}
+
+
+Reference::Reference(CodeGenerator* cgen, Expression* expression)
+    : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
+  cgen->LoadReference(this);
+}
+
+
+Reference::~Reference() {
+  cgen_->UnloadReference(this);
+}
+
+
+void CodeGenerator::LoadReference(Reference* ref) {
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ LoadReference");
+  Expression* e = ref->expression();
+  Property* property = e->AsProperty();
+  Variable* var = e->AsVariableProxy()->AsVariable();
+
+  if (property != NULL) {
+    // The expression is either a property or a variable proxy that rewrites
+    // to a property.
+    LoadAndSpill(property->obj());
+    // We use a named reference if the key is a literal symbol, unless it is
+    // a string that can be legally parsed as an integer.  This is because
+    // otherwise we will not get into the slow case code that handles [] on
+    // String objects.
+    Literal* literal = property->key()->AsLiteral();
+    uint32_t dummy;
+    if (literal != NULL &&
+        literal->handle()->IsSymbol() &&
+        !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
+      ref->set_type(Reference::NAMED);
+    } else {
+      LoadAndSpill(property->key());
+      ref->set_type(Reference::KEYED);
+    }
+  } else if (var != NULL) {
+    // The expression is a variable proxy that does not rewrite to a
+    // property.  Global variables are treated as named property references.
+    if (var->is_global()) {
+      LoadGlobal();
+      ref->set_type(Reference::NAMED);
+    } else {
+      ASSERT(var->slot() != NULL);
+      ref->set_type(Reference::SLOT);
+    }
+  } else {
+    // Anything else is a runtime error.
+    LoadAndSpill(e);
+    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
+  }
+}
+
+
+void CodeGenerator::UnloadReference(Reference* ref) {
+  VirtualFrame::SpilledScope spilled_scope;
+  // Pop a reference from the stack while preserving TOS.
+  Comment cmnt(masm_, "[ UnloadReference");
+  int size = ref->size();
+  if (size > 0) {
+    frame_->EmitPop(r0);
+    frame_->Drop(size);
+    frame_->EmitPush(r0);
+  }
+}
+
+
+// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
+// register to a boolean in the condition code register. The code
+// may jump to 'false_target' in case the register converts to 'false'.
+void CodeGenerator::ToBoolean(JumpTarget* true_target,
+                              JumpTarget* false_target) {
+  VirtualFrame::SpilledScope spilled_scope;
+  // Note: The generated code snippet does not change stack variables.
+  //       Only the condition code should be set.
+  frame_->EmitPop(r0);
+
+  // Fast case checks
+
+  // Check if the value is 'false'.
+  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+  __ cmp(r0, ip);
+  false_target->Branch(eq);
+
+  // Check if the value is 'true'.
+  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+  __ cmp(r0, ip);
+  true_target->Branch(eq);
+
+  // Check if the value is 'undefined'.
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(r0, ip);
+  false_target->Branch(eq);
+
+  // Check if the value is a smi.
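+  // The smi zero converts to false; any other smi converts to true.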
+  __ cmp(r0, Operand(Smi::FromInt(0)));
+  false_target->Branch(eq);
+  __ tst(r0, Operand(kSmiTagMask));
+  true_target->Branch(eq);
+
+  // Slow case: call the runtime.
+  frame_->EmitPush(r0);
+  frame_->CallRuntime(Runtime::kToBool, 1);
+  // Convert the result (r0) to a condition code.
+  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+  __ cmp(r0, ip);
+
+  cc_reg_ = ne;
+}
+
+
+void CodeGenerator::GenericBinaryOperation(Token::Value op,
+                                           OverwriteMode overwrite_mode,
+                                           int constant_rhs) {
+  VirtualFrame::SpilledScope spilled_scope;
+  // sp[0] : y
+  // sp[1] : x
+  // result : r0
+
+  // Stub is entered with a call: 'return address' is in lr.
+  switch (op) {
+    case Token::ADD:  // fall through.
+    case Token::SUB:  // fall through.
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD:
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SHL:
+    case Token::SHR:
+    case Token::SAR: {
+      frame_->EmitPop(r0);  // r0 : y
+      frame_->EmitPop(r1);  // r1 : x
+      GenericBinaryOpStub stub(op, overwrite_mode, constant_rhs);
+      frame_->CallStub(&stub, 0);
+      break;
+    }
+
+    case Token::COMMA:
+      frame_->EmitPop(r0);
+      // simply discard left value
+      frame_->Drop();
+      break;
+
+    default:
+      // Other cases should have been handled before this point.
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+class DeferredInlineSmiOperation: public DeferredCode {
+ public:
+  DeferredInlineSmiOperation(Token::Value op,
+                             int value,
+                             bool reversed,
+                             OverwriteMode overwrite_mode)
+      : op_(op),
+        value_(value),
+        reversed_(reversed),
+        overwrite_mode_(overwrite_mode) {
+    set_comment("[ DeferredInlinedSmiOperation");
+  }
+
+  virtual void Generate();
+
+ private:
+  Token::Value op_;
+  int value_;
+  bool reversed_;
+  OverwriteMode overwrite_mode_;
+};
+
+
+void DeferredInlineSmiOperation::Generate() {
+  switch (op_) {
+    case Token::ADD: {
+      // Revert optimistic add.
+      if (reversed_) {
+        __ sub(r0, r0, Operand(Smi::FromInt(value_)));
+        __ mov(r1, Operand(Smi::FromInt(value_)));
+      } else {
+        __ sub(r1, r0, Operand(Smi::FromInt(value_)));
+        __ mov(r0, Operand(Smi::FromInt(value_)));
+      }
+      break;
+    }
+
+    case Token::SUB: {
+      // Revert optimistic sub.
+      if (reversed_) {
+        __ rsb(r0, r0, Operand(Smi::FromInt(value_)));
+        __ mov(r1, Operand(Smi::FromInt(value_)));
+      } else {
+        __ add(r1, r0, Operand(Smi::FromInt(value_)));
+        __ mov(r0, Operand(Smi::FromInt(value_)));
+      }
+      break;
+    }
+
+    // For these operations there is no optimistic operation that needs to be
+    // reverted.
+    case Token::MUL:
+    case Token::MOD:
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND: {
+      if (reversed_) {
+        __ mov(r1, Operand(Smi::FromInt(value_)));
+      } else {
+        __ mov(r1, Operand(r0));
+        __ mov(r0, Operand(Smi::FromInt(value_)));
+      }
+      break;
+    }
+
+    case Token::SHL:
+    case Token::SHR:
+    case Token::SAR: {
+      if (!reversed_) {
+        __ mov(r1, Operand(r0));
+        __ mov(r0, Operand(Smi::FromInt(value_)));
+      } else {
+        UNREACHABLE();  // Should have been handled in SmiOperation.
+      }
+      break;
+    }
+
+    default:
+      // Other cases should have been handled before this point.
+      UNREACHABLE();
+      break;
+  }
+
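+  // The generic stub expects the left operand in r1 and the right operand
+  // in r0; the cases above have restored the registers accordingly.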
+  GenericBinaryOpStub stub(op_, overwrite_mode_, value_);
+  __ CallStub(&stub);
+}
+
+
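+// Returns true if at most two bits are set in x.  Clearing the lowest set
+// bit and then checking that at most one bit remains covers exactly that
+// case (e.g. 0x5 passes, 0x7 does not).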
+static bool PopCountLessThanEqual2(unsigned int x) {
+  x &= x - 1;
+  return (x & (x - 1)) == 0;
+}
+
+
+// Returns the index of the lowest bit set.
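+// The argument must be non-zero, otherwise the loops below do not terminate.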
+static int BitPosition(unsigned x) {
+  int bit_posn = 0;
+  while ((x & 0xf) == 0) {
+    bit_posn += 4;
+    x >>= 4;
+  }
+  while ((x & 1) == 0) {
+    bit_posn++;
+    x >>= 1;
+  }
+  return bit_posn;
+}
+
+
+void CodeGenerator::SmiOperation(Token::Value op,
+                                 Handle<Object> value,
+                                 bool reversed,
+                                 OverwriteMode mode) {
+  VirtualFrame::SpilledScope spilled_scope;
+  // NOTE: This is an attempt to inline (a bit) more of the code for
+  // some possible smi operations (like + and -) when (at least) one
+  // of the operands is a literal smi. With this optimization, the
+  // performance of the system is increased by ~15%, and the generated
+  // code size is increased by ~1% (measured on a combination of
+  // different benchmarks).
+
+  // sp[0] : operand
+
+  int int_value = Smi::cast(*value)->value();
+
+  JumpTarget exit;
+  frame_->EmitPop(r0);
+
+  bool something_to_inline = true;
+  switch (op) {
+    case Token::ADD: {
+      DeferredCode* deferred =
+          new DeferredInlineSmiOperation(op, int_value, reversed, mode);
+
+      __ add(r0, r0, Operand(value), SetCC);
+      deferred->Branch(vs);
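+      // Adding a smi leaves the tag bit unchanged, so checking the tag of
+      // the result also verifies that the original operand was a smi.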
+      __ tst(r0, Operand(kSmiTagMask));
+      deferred->Branch(ne);
+      deferred->BindExit();
+      break;
+    }
+
+    case Token::SUB: {
+      DeferredCode* deferred =
+          new DeferredInlineSmiOperation(op, int_value, reversed, mode);
+
+      if (reversed) {
+        __ rsb(r0, r0, Operand(value), SetCC);
+      } else {
+        __ sub(r0, r0, Operand(value), SetCC);
+      }
+      deferred->Branch(vs);
+      __ tst(r0, Operand(kSmiTagMask));
+      deferred->Branch(ne);
+      deferred->BindExit();
+      break;
+    }
+
+
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND: {
+      DeferredCode* deferred =
+        new DeferredInlineSmiOperation(op, int_value, reversed, mode);
+      __ tst(r0, Operand(kSmiTagMask));
+      deferred->Branch(ne);
+      switch (op) {
+        case Token::BIT_OR:  __ orr(r0, r0, Operand(value)); break;
+        case Token::BIT_XOR: __ eor(r0, r0, Operand(value)); break;
+        case Token::BIT_AND: __ and_(r0, r0, Operand(value)); break;
+        default: UNREACHABLE();
+      }
+      deferred->BindExit();
+      break;
+    }
+
+    case Token::SHL:
+    case Token::SHR:
+    case Token::SAR: {
+      if (reversed) {
+        something_to_inline = false;
+        break;
+      }
+      int shift_value = int_value & 0x1f;  // least significant 5 bits
+      DeferredCode* deferred =
+        new DeferredInlineSmiOperation(op, shift_value, false, mode);
+      __ tst(r0, Operand(kSmiTagMask));
+      deferred->Branch(ne);
+      __ mov(r2, Operand(r0, ASR, kSmiTagSize));  // remove tags
+      switch (op) {
+        case Token::SHL: {
+          if (shift_value != 0) {
+            __ mov(r2, Operand(r2, LSL, shift_value));
+          }
+          // check that the *unsigned* result fits in a smi
+          __ add(r3, r2, Operand(0x40000000), SetCC);
+          deferred->Branch(mi);
+          break;
+        }
+        case Token::SHR: {
+          // LSR by immediate 0 means shifting 32 bits.
+          if (shift_value != 0) {
+            __ mov(r2, Operand(r2, LSR, shift_value));
+          }
+          // check that the *unsigned* result fits in a smi
+          // neither of the two high-order bits can be set:
+          // - 0x80000000: high bit would be lost when smi tagging
+          // - 0x40000000: this number would convert to negative when
+          //   smi tagging
+          // these two cases can only happen with shifts by 0 or 1 when
+          // handed a valid smi
+          __ and_(r3, r2, Operand(0xc0000000), SetCC);
+          deferred->Branch(ne);
+          break;
+        }
+        case Token::SAR: {
+          if (shift_value != 0) {
+            // ASR by immediate 0 means shifting 32 bits.
+            __ mov(r2, Operand(r2, ASR, shift_value));
+          }
+          break;
+        }
+        default: UNREACHABLE();
+      }
+      __ mov(r0, Operand(r2, LSL, kSmiTagSize));
+      deferred->BindExit();
+      break;
+    }
+
+    case Token::MOD: {
+      if (reversed || int_value < 2 || !IsPowerOf2(int_value)) {
+        something_to_inline = false;
+        break;
+      }
+      DeferredCode* deferred =
+        new DeferredInlineSmiOperation(op, int_value, reversed, mode);
+      unsigned mask = (0x80000000u | kSmiTagMask);
+      __ tst(r0, Operand(mask));
+      deferred->Branch(ne);  // Go to deferred code on non-Smis and negative.
+      mask = (int_value << kSmiTagSize) - 1;
+      __ and_(r0, r0, Operand(mask));
+      deferred->BindExit();
+      break;
+    }
+
+    case Token::MUL: {
+      if (!IsEasyToMultiplyBy(int_value)) {
+        something_to_inline = false;
+        break;
+      }
+      DeferredCode* deferred =
+        new DeferredInlineSmiOperation(op, int_value, reversed, mode);
+      unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
+      max_smi_that_wont_overflow <<= kSmiTagSize;
+      unsigned mask = 0x80000000u;
+      while ((mask & max_smi_that_wont_overflow) == 0) {
+        mask |= mask >> 1;
+      }
+      mask |= kSmiTagMask;
+      // This does a single mask that checks for a too high value in a
+      // conservative way and for a non-Smi.  It also filters out negative
+      // numbers, unfortunately, but since this code is inline we prefer
+      // brevity to comprehensiveness.
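+      // For example, assuming 31-bit smis (kSmiTagSize == 1 and
+      // Smi::kMaxValue == 0x3fffffff), int_value == 3 gives
+      // max_smi_that_wont_overflow == 0x2aaaaaaa and mask == 0xe0000001.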
+      __ tst(r0, Operand(mask));
+      deferred->Branch(ne);
+      MultiplyByKnownInt(masm_, r0, r0, int_value);
+      deferred->BindExit();
+      break;
+    }
+
+    default:
+      something_to_inline = false;
+      break;
+  }
+
+  if (!something_to_inline) {
+    if (!reversed) {
+      frame_->EmitPush(r0);
+      __ mov(r0, Operand(value));
+      frame_->EmitPush(r0);
+      GenericBinaryOperation(op, mode, int_value);
+    } else {
+      __ mov(ip, Operand(value));
+      frame_->EmitPush(ip);
+      frame_->EmitPush(r0);
+      GenericBinaryOperation(op, mode, kUnknownIntValue);
+    }
+  }
+
+  exit.Bind();
+}
+
+
+void CodeGenerator::Comparison(Condition cc,
+                               Expression* left,
+                               Expression* right,
+                               bool strict) {
+  if (left != NULL) LoadAndSpill(left);
+  if (right != NULL) LoadAndSpill(right);
+
+  VirtualFrame::SpilledScope spilled_scope;
+  // sp[0] : y
+  // sp[1] : x
+  // result : cc register
+
+  // Strict only makes sense for equality comparisons.
+  ASSERT(!strict || cc == eq);
+
+  JumpTarget exit;
+  JumpTarget smi;
+  // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
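+  // For example, x > y is computed as y < x: the condition is reversed
+  // and the operands are popped in the opposite order below.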
+  if (cc == gt || cc == le) {
+    cc = ReverseCondition(cc);
+    frame_->EmitPop(r1);
+    frame_->EmitPop(r0);
+  } else {
+    frame_->EmitPop(r0);
+    frame_->EmitPop(r1);
+  }
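+  // Both operands are smis exactly when the OR of their tag bits is zero.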
+  __ orr(r2, r0, Operand(r1));
+  __ tst(r2, Operand(kSmiTagMask));
+  smi.Branch(eq);
+
+  // Perform non-smi comparison by stub.
+  // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
+  // We call with 0 args because there are 0 on the stack.
+  CompareStub stub(cc, strict);
+  frame_->CallStub(&stub, 0);
+  __ cmp(r0, Operand(0));
+  exit.Jump();
+
+  // Do smi comparisons by pointer comparison.
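+  // Smi tagging (a left shift by one) preserves the signed order of the
+  // values, so comparing the tagged words compares the untagged integers.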
+  smi.Bind();
+  __ cmp(r1, Operand(r0));
+
+  exit.Bind();
+  cc_reg_ = cc;
+}
+
+
+class CallFunctionStub: public CodeStub {
+ public:
+  CallFunctionStub(int argc, InLoopFlag in_loop)
+      : argc_(argc), in_loop_(in_loop) {}
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  int argc_;
+  InLoopFlag in_loop_;
+
+#if defined(DEBUG)
+  void Print() { PrintF("CallFunctionStub (argc %d)\n", argc_); }
+#endif  // defined(DEBUG)
+
+  Major MajorKey() { return CallFunction; }
+  int MinorKey() { return argc_; }
+  InLoopFlag InLoop() { return in_loop_; }
+};
+
+
+// Call the function on the stack with the given arguments.
+void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
+                                      int position) {
+  VirtualFrame::SpilledScope spilled_scope;
+  // Push the arguments ("left-to-right") on the stack.
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    LoadAndSpill(args->at(i));
+  }
+
+  // Record the position for debugging purposes.
+  CodeForSourcePosition(position);
+
+  // Use the shared code stub to call the function.
+  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+  CallFunctionStub call_function(arg_count, in_loop);
+  frame_->CallStub(&call_function, arg_count + 1);
+
+  // Restore context and pop function from the stack.
+  __ ldr(cp, frame_->Context());
+  frame_->Drop();  // discard the TOS
+}
+
+
+void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
+  VirtualFrame::SpilledScope spilled_scope;
+  ASSERT(has_cc());
+  Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
+  target->Branch(cc);
+  cc_reg_ = al;
+}
+
+
+void CodeGenerator::CheckStack() {
+  VirtualFrame::SpilledScope spilled_scope;
+  if (FLAG_check_stack) {
+    Comment cmnt(masm_, "[ check stack");
+    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+    // Put the lr setup instruction in the delay slot.  kInstrSize is added to
+    // the implicit 8 byte offset that always applies to operations with pc and
+    // gives a return address 12 bytes down.
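+    // With lr set up this way, the conditional mov to pc below behaves like
+    // a conditional call to the stack check stub.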
+    masm_->add(lr, pc, Operand(Assembler::kInstrSize));
+    masm_->cmp(sp, Operand(ip));
+    StackCheckStub stub;
+    // Call the stub if lower.
+    masm_->mov(pc,
+               Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
+                       RelocInfo::CODE_TARGET),
+               LeaveCC,
+               lo);
+  }
+}
+
+
+void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
+    VisitAndSpill(statements->at(i));
+  }
+  ASSERT(!has_valid_frame() || frame_->height() == original_height);
+}
+
+
+void CodeGenerator::VisitBlock(Block* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ Block");
+  CodeForStatementPosition(node);
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  VisitStatementsAndSpill(node->statements());
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
+  node->break_target()->Unuse();
+  ASSERT(!has_valid_frame() || frame_->height() == original_height);
+}
+
+
+void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+  VirtualFrame::SpilledScope spilled_scope;
+  __ mov(r0, Operand(pairs));
+  frame_->EmitPush(r0);
+  frame_->EmitPush(cp);
+  __ mov(r0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
+  frame_->EmitPush(r0);
+  frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
+  // The result is discarded.
+}
+
+
+void CodeGenerator::VisitDeclaration(Declaration* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ Declaration");
+  Variable* var = node->proxy()->var();
+  ASSERT(var != NULL);  // must have been resolved
+  Slot* slot = var->slot();
+
+  // If it was not possible to allocate the variable at compile time,
+  // we need to "declare" it at runtime to make sure it actually
+  // exists in the local context.
+  if (slot != NULL && slot->type() == Slot::LOOKUP) {
+    // Variables with a "LOOKUP" slot were introduced as non-locals
+    // during variable resolution and must have mode DYNAMIC.
+    ASSERT(var->is_dynamic());
+    // For now, just do a runtime call.
+    frame_->EmitPush(cp);
+    __ mov(r0, Operand(var->name()));
+    frame_->EmitPush(r0);
+    // Declaration nodes are always introduced in one of two modes.
+    ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
+    PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
+    __ mov(r0, Operand(Smi::FromInt(attr)));
+    frame_->EmitPush(r0);
+    // Push initial value, if any.
+    // Note: For variables we must not push an initial value (such as
+    // 'undefined') because we may have a (legal) redeclaration and we
+    // must not destroy the current value.
+    if (node->mode() == Variable::CONST) {
+      __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+      frame_->EmitPush(r0);
+    } else if (node->fun() != NULL) {
+      LoadAndSpill(node->fun());
+    } else {
+      __ mov(r0, Operand(0));  // no initial value!
+      frame_->EmitPush(r0);
+    }
+    frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
+    // Ignore the return value (declarations are statements).
+    ASSERT(frame_->height() == original_height);
+    return;
+  }
+
+  ASSERT(!var->is_global());
+
+  // If we have a function or a constant, we need to initialize the variable.
+  Expression* val = NULL;
+  if (node->mode() == Variable::CONST) {
+    val = new Literal(Factory::the_hole_value());
+  } else {
+    val = node->fun();  // NULL if we don't have a function
+  }
+
+  if (val != NULL) {
+    {
+      // Set initial value.
+      Reference target(this, node->proxy());
+      LoadAndSpill(val);
+      target.SetValue(NOT_CONST_INIT);
+      // The reference is removed from the stack (preserving TOS) when
+      // it goes out of scope.
+    }
+    // Get rid of the assigned value (declarations are statements).
+    frame_->Drop();
+  }
+  ASSERT(frame_->height() == original_height);
+}
+
+
+void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ ExpressionStatement");
+  CodeForStatementPosition(node);
+  Expression* expression = node->expression();
+  expression->MarkAsStatement();
+  LoadAndSpill(expression);
+  frame_->Drop();
+  ASSERT(frame_->height() == original_height);
+}
+
+
+void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "// EmptyStatement");
+  CodeForStatementPosition(node);
+  // nothing to do
+  ASSERT(frame_->height() == original_height);
+}
+
+
+void CodeGenerator::VisitIfStatement(IfStatement* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ IfStatement");
+  // Generate different code depending on which parts of the if statement
+  // are present or not.
+  bool has_then_stm = node->HasThenStatement();
+  bool has_else_stm = node->HasElseStatement();
+
+  CodeForStatementPosition(node);
+
+  JumpTarget exit;
+  if (has_then_stm && has_else_stm) {
+    Comment cmnt(masm_, "[ IfThenElse");
+    JumpTarget then;
+    JumpTarget else_;
+    // if (cond)
+    LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
+                          &then, &else_, true);
+    if (frame_ != NULL) {
+      Branch(false, &else_);
+    }
+    // then
+    if (frame_ != NULL || then.is_linked()) {
+      then.Bind();
+      VisitAndSpill(node->then_statement());
+    }
+    if (frame_ != NULL) {
+      exit.Jump();
+    }
+    // else
+    if (else_.is_linked()) {
+      else_.Bind();
+      VisitAndSpill(node->else_statement());
+    }
+
+  } else if (has_then_stm) {
+    Comment cmnt(masm_, "[ IfThen");
+    ASSERT(!has_else_stm);
+    JumpTarget then;
+    // if (cond)
+    LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
+                          &then, &exit, true);
+    if (frame_ != NULL) {
+      Branch(false, &exit);
+    }
+    // then
+    if (frame_ != NULL || then.is_linked()) {
+      then.Bind();
+      VisitAndSpill(node->then_statement());
+    }
+
+  } else if (has_else_stm) {
+    Comment cmnt(masm_, "[ IfElse");
+    ASSERT(!has_then_stm);
+    JumpTarget else_;
+    // if (!cond)
+    LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
+                          &exit, &else_, true);
+    if (frame_ != NULL) {
+      Branch(true, &exit);
+    }
+    // else
+    if (frame_ != NULL || else_.is_linked()) {
+      else_.Bind();
+      VisitAndSpill(node->else_statement());
+    }
+
+  } else {
+    Comment cmnt(masm_, "[ If");
+    ASSERT(!has_then_stm && !has_else_stm);
+    // if (cond)
+    LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
+                          &exit, &exit, false);
+    if (frame_ != NULL) {
+      if (has_cc()) {
+        cc_reg_ = al;
+      } else {
+        frame_->Drop();
+      }
+    }
+  }
+
+  // end
+  if (exit.is_linked()) {
+    exit.Bind();
+  }
+  ASSERT(!has_valid_frame() || frame_->height() == original_height);
+}
+
+
+void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ ContinueStatement");
+  CodeForStatementPosition(node);
+  node->target()->continue_target()->Jump();
+}
+
+
+void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ BreakStatement");
+  CodeForStatementPosition(node);
+  node->target()->break_target()->Jump();
+}
+
+
+void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ ReturnStatement");
+
+  CodeForStatementPosition(node);
+  LoadAndSpill(node->expression());
+  if (function_return_is_shadowed_) {
+    frame_->EmitPop(r0);
+    function_return_.Jump();
+  } else {
+    // Pop the result from the frame and prepare the frame for
+    // returning thus making it easier to merge.
+    frame_->EmitPop(r0);
+    frame_->PrepareForReturn();
+
+    function_return_.Jump();
+  }
+}
+
+
+void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ WithEnterStatement");
+  CodeForStatementPosition(node);
+  LoadAndSpill(node->expression());
+  if (node->is_catch_block()) {
+    frame_->CallRuntime(Runtime::kPushCatchContext, 1);
+  } else {
+    frame_->CallRuntime(Runtime::kPushContext, 1);
+  }
+#ifdef DEBUG
+  JumpTarget verified_true;
+  __ cmp(r0, Operand(cp));
+  verified_true.Branch(eq);
+  __ stop("PushContext: r0 is expected to be the same as cp");
+  verified_true.Bind();
+#endif
+  // Update context local.
+  __ str(cp, frame_->Context());
+  ASSERT(frame_->height() == original_height);
+}
+
+
+void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ WithExitStatement");
+  CodeForStatementPosition(node);
+  // Pop context.
+  __ ldr(cp, ContextOperand(cp, Context::PREVIOUS_INDEX));
+  // Update context local.
+  __ str(cp, frame_->Context());
+  ASSERT(frame_->height() == original_height);
+}
+
+
+void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ SwitchStatement");
+  CodeForStatementPosition(node);
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+  LoadAndSpill(node->tag());
+
+  JumpTarget next_test;
+  JumpTarget fall_through;
+  JumpTarget default_entry;
+  JumpTarget default_exit(JumpTarget::BIDIRECTIONAL);
+  ZoneList<CaseClause*>* cases = node->cases();
+  int length = cases->length();
+  CaseClause* default_clause = NULL;
+
+  for (int i = 0; i < length; i++) {
+    CaseClause* clause = cases->at(i);
+    if (clause->is_default()) {
+      // Remember the default clause and compile it at the end.
+      default_clause = clause;
+      continue;
+    }
+
+    Comment cmnt(masm_, "[ Case clause");
+    // Compile the test.
+    next_test.Bind();
+    next_test.Unuse();
+    // Duplicate TOS.
+    __ ldr(r0, frame_->Top());
+    frame_->EmitPush(r0);
+    Comparison(eq, NULL, clause->label(), true);
+    Branch(false, &next_test);
+
+    // Before entering the body from the test, remove the switch value from
+    // the stack.
+    frame_->Drop();
+
+    // Label the body so that fall through is enabled.
+    if (i > 0 && cases->at(i - 1)->is_default()) {
+      default_exit.Bind();
+    } else {
+      fall_through.Bind();
+      fall_through.Unuse();
+    }
+    VisitStatementsAndSpill(clause->statements());
+
+    // If control flow can fall through from the body, jump to the next body
+    // or the end of the statement.
+    if (frame_ != NULL) {
+      if (i < length - 1 && cases->at(i + 1)->is_default()) {
+        default_entry.Jump();
+      } else {
+        fall_through.Jump();
+      }
+    }
+  }
+
+  // The final "test" removes the switch value.
+  next_test.Bind();
+  frame_->Drop();
+
+  // If there is a default clause, compile it.
+  if (default_clause != NULL) {
+    Comment cmnt(masm_, "[ Default clause");
+    default_entry.Bind();
+    VisitStatementsAndSpill(default_clause->statements());
+    // If control flow can fall out of the default and there is a case after
+    // it, jump to that case's body.
+    if (frame_ != NULL && default_exit.is_bound()) {
+      default_exit.Jump();
+    }
+  }
+
+  if (fall_through.is_linked()) {
+    fall_through.Bind();
+  }
+
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
+  node->break_target()->Unuse();
+  ASSERT(!has_valid_frame() || frame_->height() == original_height);
+}
+
+
+void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ LoopStatement");
+  CodeForStatementPosition(node);
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+  // Simple condition analysis.  ALWAYS_TRUE and ALWAYS_FALSE represent a
+  // known result for the test expression, with no side effects.
+  enum { ALWAYS_TRUE, ALWAYS_FALSE, DONT_KNOW } info = DONT_KNOW;
+  if (node->cond() == NULL) {
+    ASSERT(node->type() == LoopStatement::FOR_LOOP);
+    info = ALWAYS_TRUE;
+  } else {
+    Literal* lit = node->cond()->AsLiteral();
+    if (lit != NULL) {
+      if (lit->IsTrue()) {
+        info = ALWAYS_TRUE;
+      } else if (lit->IsFalse()) {
+        info = ALWAYS_FALSE;
+      }
+    }
+  }
+
+  switch (node->type()) {
+    case LoopStatement::DO_LOOP: {
+      JumpTarget body(JumpTarget::BIDIRECTIONAL);
+
+      // Label the top of the loop for the backward CFG edge.  If the test
+      // is always true we can use the continue target, and if the test is
+      // always false there is no need.
+      if (info == ALWAYS_TRUE) {
+        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+        node->continue_target()->Bind();
+      } else if (info == ALWAYS_FALSE) {
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+      } else {
+        ASSERT(info == DONT_KNOW);
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+        body.Bind();
+      }
+
+      CheckStack();  // TODO(1222600): ignore if body contains calls.
+      VisitAndSpill(node->body());
+
+      // Compile the test.
+      if (info == ALWAYS_TRUE) {
+        if (has_valid_frame()) {
+          // If control can fall off the end of the body, jump back to the
+          // top.
+          node->continue_target()->Jump();
+        }
+      } else if (info == ALWAYS_FALSE) {
+        // If we have a continue in the body, we only have to bind its jump
+        // target.
+        if (node->continue_target()->is_linked()) {
+          node->continue_target()->Bind();
+        }
+      } else {
+        ASSERT(info == DONT_KNOW);
+        // We have to compile the test expression if it can be reached by
+        // control flow falling out of the body or via continue.
+        if (node->continue_target()->is_linked()) {
+          node->continue_target()->Bind();
+        }
+        if (has_valid_frame()) {
+          LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
+                                &body, node->break_target(), true);
+          if (has_valid_frame()) {
+            // An invalid frame here indicates that control did not
+            // fall out of the test expression.
+            Branch(true, &body);
+          }
+        }
+      }
+      break;
+    }
+
+    case LoopStatement::WHILE_LOOP: {
+      // If the test is never true and has no side effects there is no need
+      // to compile the test or body.
+      if (info == ALWAYS_FALSE) break;
+
+      // Label the top of the loop with the continue target for the backward
+      // CFG edge.
+      node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+      node->continue_target()->Bind();
+
+      if (info == DONT_KNOW) {
+        JumpTarget body;
+        LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
+                              &body, node->break_target(), true);
+        if (has_valid_frame()) {
+          // A NULL frame indicates that control did not fall out of the
+          // test expression.
+          Branch(false, node->break_target());
+        }
+        if (has_valid_frame() || body.is_linked()) {
+          body.Bind();
+        }
+      }
+
+      if (has_valid_frame()) {
+        CheckStack();  // TODO(1222600): ignore if body contains calls.
+        VisitAndSpill(node->body());
+
+        // If control flow can fall out of the body, jump back to the top.
+        if (has_valid_frame()) {
+          node->continue_target()->Jump();
+        }
+      }
+      break;
+    }
+
+    case LoopStatement::FOR_LOOP: {
+      JumpTarget loop(JumpTarget::BIDIRECTIONAL);
+
+      if (node->init() != NULL) {
+        VisitAndSpill(node->init());
+      }
+
+      // There is no need to compile the test or body.
+      if (info == ALWAYS_FALSE) break;
+
+      // If there is no update statement, label the top of the loop with the
+      // continue target, otherwise with the loop target.
+      if (node->next() == NULL) {
+        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+        node->continue_target()->Bind();
+      } else {
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+        loop.Bind();
+      }
+
+      // If the test is always true, there is no need to compile it.
+      if (info == DONT_KNOW) {
+        JumpTarget body;
+        LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
+                              &body, node->break_target(), true);
+        if (has_valid_frame()) {
+          Branch(false, node->break_target());
+        }
+        if (has_valid_frame() || body.is_linked()) {
+          body.Bind();
+        }
+      }
+
+      if (has_valid_frame()) {
+        CheckStack();  // TODO(1222600): ignore if body contains calls.
+        VisitAndSpill(node->body());
+
+        if (node->next() == NULL) {
+          // If there is no update statement and control flow can fall out
+          // of the loop, jump directly to the continue label.
+          if (has_valid_frame()) {
+            node->continue_target()->Jump();
+          }
+        } else {
+          // If there is an update statement and control flow can reach it
+          // via falling out of the body of the loop or continuing, we
+          // compile the update statement.
+          if (node->continue_target()->is_linked()) {
+            node->continue_target()->Bind();
+          }
+          if (has_valid_frame()) {
+            // Record the source position of the statement: this code comes
+            // after the code for the body but actually belongs to the loop
+            // statement and not the body.
+            CodeForStatementPosition(node);
+            VisitAndSpill(node->next());
+            loop.Jump();
+          }
+        }
+      }
+      break;
+    }
+  }
+
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
+  node->continue_target()->Unuse();
+  node->break_target()->Unuse();
+  ASSERT(!has_valid_frame() || frame_->height() == original_height);
+}
+
+
+void CodeGenerator::VisitForInStatement(ForInStatement* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ ForInStatement");
+  CodeForStatementPosition(node);
+
+  JumpTarget primitive;
+  JumpTarget jsobject;
+  JumpTarget fixed_array;
+  JumpTarget entry(JumpTarget::BIDIRECTIONAL);
+  JumpTarget end_del_check;
+  JumpTarget exit;
+
+  // Get the object to enumerate over (converted to JSObject).
+  LoadAndSpill(node->enumerable());
+
+  // Both SpiderMonkey and kjs ignore null and undefined in contrast
+  // to the specification.  12.6.4 mandates a call to ToObject.
+  frame_->EmitPop(r0);
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(r0, ip);
+  exit.Branch(eq);
+  __ LoadRoot(ip, Heap::kNullValueRootIndex);
+  __ cmp(r0, ip);
+  exit.Branch(eq);
+
+  // Stack layout in body:
+  // [iteration counter (Smi)]
+  // [length of array]
+  // [FixedArray]
+  // [Map or 0]
+  // [Object]
+
+  // Check if enumerable is already a JSObject
+  __ tst(r0, Operand(kSmiTagMask));
+  primitive.Branch(eq);
+  __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
+  jsobject.Branch(hs);
+
+  primitive.Bind();
+  frame_->EmitPush(r0);
+  Result arg_count(r0);
+  __ mov(r0, Operand(0));
+  frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, &arg_count, 1);
+
+  jsobject.Bind();
+  // Get the set of properties (as a FixedArray or Map).
+  frame_->EmitPush(r0);  // duplicate the object being enumerated
+  frame_->EmitPush(r0);
+  frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+  // If we got a Map, we can do a fast modification check.
+  // Otherwise, we got a FixedArray, and we have to do a slow check.
+  __ mov(r2, Operand(r0));
+  __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
+  __ cmp(r1, ip);
+  fixed_array.Branch(ne);
+
+  // Get enum cache
+  __ mov(r1, Operand(r0));
+  __ ldr(r1, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
+  __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
+  __ ldr(r2,
+         FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+  frame_->EmitPush(r0);  // map
+  frame_->EmitPush(r2);  // enum cache bridge cache
+  __ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset));
+  __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+  frame_->EmitPush(r0);
+  __ mov(r0, Operand(Smi::FromInt(0)));
+  frame_->EmitPush(r0);
+  entry.Jump();
+
+  fixed_array.Bind();
+  __ mov(r1, Operand(Smi::FromInt(0)));
+  frame_->EmitPush(r1);  // insert 0 in place of Map
+  frame_->EmitPush(r0);
+
+  // Push the length of the array and the initial index onto the stack.
+  __ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset));
+  __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+  frame_->EmitPush(r0);
+  __ mov(r0, Operand(Smi::FromInt(0)));  // init index
+  frame_->EmitPush(r0);
+
+  // Condition.
+  entry.Bind();
+  // sp[0] : index
+  // sp[1] : array/enum cache length
+  // sp[2] : array or enum cache
+  // sp[3] : 0 or map
+  // sp[4] : enumerable
+  // Grab the current frame's height for the break and continue
+  // targets only after all the state is pushed on the frame.
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+  __ ldr(r0, frame_->ElementAt(0));  // load the current count
+  __ ldr(r1, frame_->ElementAt(1));  // load the length
+  __ cmp(r0, Operand(r1));  // compare to the array length
+  node->break_target()->Branch(hs);
+
+  __ ldr(r0, frame_->ElementAt(0));
+
+  // Get the i'th entry of the array.
+  __ ldr(r2, frame_->ElementAt(2));
+  __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+
+  // Get Map or 0.
+  __ ldr(r2, frame_->ElementAt(3));
+  // Check if this (still) matches the map of the enumerable.
+  // If not, we have to filter the key.
+  __ ldr(r1, frame_->ElementAt(4));
+  __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
+  __ cmp(r1, Operand(r2));
+  end_del_check.Branch(eq);
+
+  // Convert the entry to a string (or null if it isn't a property anymore).
+  __ ldr(r0, frame_->ElementAt(4));  // push enumerable
+  frame_->EmitPush(r0);
+  frame_->EmitPush(r3);  // push entry
+  Result arg_count_reg(r0);
+  __ mov(r0, Operand(1));
+  frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, &arg_count_reg, 2);
+  __ mov(r3, Operand(r0));
+
+  // If the property has been removed while iterating, we just skip it.
+  __ LoadRoot(ip, Heap::kNullValueRootIndex);
+  __ cmp(r3, ip);
+  node->continue_target()->Branch(eq);
+
+  end_del_check.Bind();
+  // Store the entry in the 'each' expression and take another spin in the
+  // loop.  r3: i'th entry of the enum cache (or string thereof)
+  frame_->EmitPush(r3);  // push entry
+  { Reference each(this, node->each());
+    if (!each.is_illegal()) {
+      if (each.size() > 0) {
+        __ ldr(r0, frame_->ElementAt(each.size()));
+        frame_->EmitPush(r0);
+      }
+      // If the reference was to a slot we rely on the convenient property
+      // that it doesn't matter whether a value (eg, r3 pushed above) is
+      // right on top of or right underneath a zero-sized reference.
+      each.SetValue(NOT_CONST_INIT);
+      if (each.size() > 0) {
+        // It's safe to pop the value lying on top of the reference before
+        // unloading the reference itself (which preserves the top of stack,
+        // ie, now the topmost value of the non-zero sized reference), since
+        // we will discard the top of stack after unloading the reference
+        // anyway.
+        frame_->EmitPop(r0);
+      }
+    }
+  }
+  // Discard the i'th entry pushed above or else the remainder of the
+  // reference, whichever is currently on top of the stack.
+  frame_->Drop();
+
+  // Body.
+  CheckStack();  // TODO(1222600): ignore if body contains calls.
+  VisitAndSpill(node->body());
+
+  // Next.  Reestablish a spilled frame in case we are coming here via
+  // a continue in the body.
+  node->continue_target()->Bind();
+  frame_->SpillAll();
+  frame_->EmitPop(r0);
+  __ add(r0, r0, Operand(Smi::FromInt(1)));
+  frame_->EmitPush(r0);
+  entry.Jump();
+
+  // Cleanup.  No need to spill because VirtualFrame::Drop is safe for
+  // any frame.
+  node->break_target()->Bind();
+  frame_->Drop(5);
+
+  // Exit.
+  exit.Bind();
+  node->continue_target()->Unuse();
+  node->break_target()->Unuse();
+  ASSERT(frame_->height() == original_height);
+}
+
+
+void CodeGenerator::VisitTryCatch(TryCatch* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ TryCatch");
+  CodeForStatementPosition(node);
+
+  JumpTarget try_block;
+  JumpTarget exit;
+
+  try_block.Call();
+  // --- Catch block ---
+  frame_->EmitPush(r0);
+
+  // Store the caught exception in the catch variable.
+  { Reference ref(this, node->catch_var());
+    ASSERT(ref.is_slot());
+    // Here we make use of the convenient property that it doesn't matter
+    // whether a value is immediately on top of or underneath a zero-sized
+    // reference.
+    ref.SetValue(NOT_CONST_INIT);
+  }
+
+  // Remove the exception from the stack.
+  frame_->Drop();
+
+  VisitStatementsAndSpill(node->catch_block()->statements());
+  if (frame_ != NULL) {
+    exit.Jump();
+  }
+
+
+  // --- Try block ---
+  try_block.Bind();
+
+  frame_->PushTryHandler(TRY_CATCH_HANDLER);
+  int handler_height = frame_->height();
+
+  // Shadow the labels for all escapes from the try block, including
+  // returns. During shadowing, the original label is hidden as the
+  // LabelShadow and operations on the original actually affect the
+  // shadowing label.
+  //
+  // We should probably try to unify the escaping labels and the return
+  // label.
+  int nof_escapes = node->escaping_targets()->length();
+  List<ShadowTarget*> shadows(1 + nof_escapes);
+
+  // Add the shadow target for the function return.
+  static const int kReturnShadowIndex = 0;
+  shadows.Add(new ShadowTarget(&function_return_));
+  bool function_return_was_shadowed = function_return_is_shadowed_;
+  function_return_is_shadowed_ = true;
+  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
+
+  // Add the remaining shadow targets.
+  for (int i = 0; i < nof_escapes; i++) {
+    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
+  }
+
+  // Generate code for the statements in the try block.
+  VisitStatementsAndSpill(node->try_block()->statements());
+
+  // Stop the introduced shadowing and count the number of required unlinks.
+  // After shadowing stops, the original labels are unshadowed and the
+  // LabelShadows represent the formerly shadowing labels.
+  bool has_unlinks = false;
+  for (int i = 0; i < shadows.length(); i++) {
+    shadows[i]->StopShadowing();
+    has_unlinks = has_unlinks || shadows[i]->is_linked();
+  }
+  function_return_is_shadowed_ = function_return_was_shadowed;
+
+  // Get an external reference to the handler address.
+  ExternalReference handler_address(Top::k_handler_address);
+
+  // If we can fall off the end of the try block, unlink from try chain.
+  if (has_valid_frame()) {
+    // The next handler address is on top of the frame.  Unlink from
+    // the handler list and drop the rest of this handler from the
+    // frame.
+    ASSERT(StackHandlerConstants::kNextOffset == 0);
+    frame_->EmitPop(r1);
+    __ mov(r3, Operand(handler_address));
+    __ str(r1, MemOperand(r3));
+    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+    if (has_unlinks) {
+      exit.Jump();
+    }
+  }
+
+  // Generate unlink code for the (formerly) shadowing labels that have been
+  // jumped to.  Deallocate each shadow target.
+  for (int i = 0; i < shadows.length(); i++) {
+    if (shadows[i]->is_linked()) {
+      // Unlink from the try chain.
+      shadows[i]->Bind();
+      // Because we can be jumping here (to spilled code) from unspilled
+      // code, we need to reestablish a spilled frame at this block.
+      frame_->SpillAll();
+
+      // Reload sp from the top handler, because some statements that we
+      // break from (e.g., for...in) may have left stuff on the stack.
+      __ mov(r3, Operand(handler_address));
+      __ ldr(sp, MemOperand(r3));
+      frame_->Forget(frame_->height() - handler_height);
+
+      ASSERT(StackHandlerConstants::kNextOffset == 0);
+      frame_->EmitPop(r1);
+      __ str(r1, MemOperand(r3));
+      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+
+      if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
+        frame_->PrepareForReturn();
+      }
+      shadows[i]->other_target()->Jump();
+    }
+  }
+
+  exit.Bind();
+  ASSERT(!has_valid_frame() || frame_->height() == original_height);
+}
+
+
+void CodeGenerator::VisitTryFinally(TryFinally* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ TryFinally");
+  CodeForStatementPosition(node);
+
+  // State: Used to keep track of reason for entering the finally
+  // block. Should probably be extended to hold information for
+  // break/continue from within the try block.
+  enum { FALLING, THROWING, JUMPING };
+
+  JumpTarget try_block;
+  JumpTarget finally_block;
+
+  try_block.Call();
+
+  frame_->EmitPush(r0);  // save exception object on the stack
+  // In case of thrown exceptions, this is where we continue.
+  __ mov(r2, Operand(Smi::FromInt(THROWING)));
+  finally_block.Jump();
+
+  // --- Try block ---
+  try_block.Bind();
+
+  frame_->PushTryHandler(TRY_FINALLY_HANDLER);
+  int handler_height = frame_->height();
+
+  // Shadow the labels for all escapes from the try block, including
+  // returns.  Shadowing hides the original label as the LabelShadow and
+  // operations on the original actually affect the shadowing label.
+  //
+  // We should probably try to unify the escaping labels and the return
+  // label.
+  int nof_escapes = node->escaping_targets()->length();
+  List<ShadowTarget*> shadows(1 + nof_escapes);
+
+  // Add the shadow target for the function return.
+  static const int kReturnShadowIndex = 0;
+  shadows.Add(new ShadowTarget(&function_return_));
+  bool function_return_was_shadowed = function_return_is_shadowed_;
+  function_return_is_shadowed_ = true;
+  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
+
+  // Add the remaining shadow targets.
+  for (int i = 0; i < nof_escapes; i++) {
+    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
+  }
+
+  // Generate code for the statements in the try block.
+  VisitStatementsAndSpill(node->try_block()->statements());
+
+  // Stop the introduced shadowing and count the number of required unlinks.
+  // After shadowing stops, the original labels are unshadowed and the
+  // LabelShadows represent the formerly shadowing labels.
+  int nof_unlinks = 0;
+  for (int i = 0; i < shadows.length(); i++) {
+    shadows[i]->StopShadowing();
+    if (shadows[i]->is_linked()) nof_unlinks++;
+  }
+  function_return_is_shadowed_ = function_return_was_shadowed;
+
+  // Get an external reference to the handler address.
+  ExternalReference handler_address(Top::k_handler_address);
+
+  // If we can fall off the end of the try block, unlink from the try
+  // chain and set the state on the frame to FALLING.
+  if (has_valid_frame()) {
+    // The next handler address is on top of the frame.
+    ASSERT(StackHandlerConstants::kNextOffset == 0);
+    frame_->EmitPop(r1);
+    __ mov(r3, Operand(handler_address));
+    __ str(r1, MemOperand(r3));
+    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+
+    // Fake a top of stack value (unneeded when FALLING) and set the
+    // state in r2, then jump around the unlink blocks if any.
+    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+    frame_->EmitPush(r0);
+    __ mov(r2, Operand(Smi::FromInt(FALLING)));
+    if (nof_unlinks > 0) {
+      finally_block.Jump();
+    }
+  }
+
+  // Generate code to unlink and set the state for the (formerly)
+  // shadowing targets that have been jumped to.
+  for (int i = 0; i < shadows.length(); i++) {
+    if (shadows[i]->is_linked()) {
+      // If we have come from the shadowed return, the return value is
+      // in (a non-refcounted reference to) r0.  We must preserve it
+      // until it is pushed.
+      //
+      // Because we can be jumping here (to spilled code) from
+      // unspilled code, we need to reestablish a spilled frame at
+      // this block.
+      shadows[i]->Bind();
+      frame_->SpillAll();
+
+      // Reload sp from the top handler, because some statements that
+      // we break from (e.g., for...in) may have left stuff on the
+      // stack.
+      __ mov(r3, Operand(handler_address));
+      __ ldr(sp, MemOperand(r3));
+      frame_->Forget(frame_->height() - handler_height);
+
+      // Unlink this handler and drop it from the frame.  The next
+      // handler address is currently on top of the frame.
+      ASSERT(StackHandlerConstants::kNextOffset == 0);
+      frame_->EmitPop(r1);
+      __ str(r1, MemOperand(r3));
+      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+
+      if (i == kReturnShadowIndex) {
+        // If this label shadowed the function return, materialize the
+        // return value on the stack.
+        frame_->EmitPush(r0);
+      } else {
+        // Fake TOS for targets that shadowed breaks and continues.
+        __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+        frame_->EmitPush(r0);
+      }
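+      // Record which shadow target was taken: the state JUMPING + i
+      // identifies shadow i, so the code after the finally block can
+      // dispatch to the right destination.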
+      __ mov(r2, Operand(Smi::FromInt(JUMPING + i)));
+      if (--nof_unlinks > 0) {
+        // If this is not the last unlink block, jump around the next.
+        finally_block.Jump();
+      }
+    }
+  }
+
+  // --- Finally block ---
+  finally_block.Bind();
+
+  // Push the state on the stack.
+  frame_->EmitPush(r2);
+
+  // We keep two elements on the stack - the (possibly faked) result
+  // and the state - while evaluating the finally block.
+  //
+  // Generate code for the statements in the finally block.
+  VisitStatementsAndSpill(node->finally_block()->statements());
+
+  if (has_valid_frame()) {
+    // Restore state and return value or faked TOS.
+    frame_->EmitPop(r2);
+    frame_->EmitPop(r0);
+  }
+
+  // Generate code to jump to the right destination for all used
+  // formerly shadowing targets.  Deallocate each shadow target.
+  for (int i = 0; i < shadows.length(); i++) {
+    if (has_valid_frame() && shadows[i]->is_bound()) {
+      JumpTarget* original = shadows[i]->other_target();
+      __ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
+      if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
+        JumpTarget skip;
+        skip.Branch(ne);
+        frame_->PrepareForReturn();
+        original->Jump();
+        skip.Bind();
+      } else {
+        original->Branch(eq);
+      }
+    }
+  }
+
+  if (has_valid_frame()) {
+    // Check if we need to rethrow the exception.
+    JumpTarget exit;
+    __ cmp(r2, Operand(Smi::FromInt(THROWING)));
+    exit.Branch(ne);
+
+    // Rethrow exception.
+    frame_->EmitPush(r0);
+    frame_->CallRuntime(Runtime::kReThrow, 1);
+
+    // Done.
+    exit.Bind();
+  }
+  ASSERT(!has_valid_frame() || frame_->height() == original_height);
+}
+
+
+void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ DebuggerStatament");
+  CodeForStatementPosition(node);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  frame_->CallRuntime(Runtime::kDebugBreak, 0);
+#endif
+  // Ignore the return value.
+  ASSERT(frame_->height() == original_height);
+}
+
+
+void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
+  VirtualFrame::SpilledScope spilled_scope;
+  ASSERT(boilerplate->IsBoilerplate());
+
+  // Push the boilerplate on the stack.
+  __ mov(r0, Operand(boilerplate));
+  frame_->EmitPush(r0);
+
+  // Create a new closure.
+  frame_->EmitPush(cp);
+  frame_->CallRuntime(Runtime::kNewClosure, 2);
+  frame_->EmitPush(r0);
+}
+
+
+void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ FunctionLiteral");
+
+  // Build the function boilerplate and instantiate it.
+  Handle<JSFunction> boilerplate = BuildBoilerplate(node);
+  // Check for stack-overflow exception.
+  if (HasStackOverflow()) {
+    ASSERT(frame_->height() == original_height);
+    return;
+  }
+  InstantiateBoilerplate(boilerplate);
+  ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitFunctionBoilerplateLiteral(
+    FunctionBoilerplateLiteral* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
+  InstantiateBoilerplate(node->boilerplate());
+  ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitConditional(Conditional* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ Conditional");
+  JumpTarget then;
+  JumpTarget else_;
+  LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
+                        &then, &else_, true);
+  if (has_valid_frame()) {
+    Branch(false, &else_);
+  }
+  if (has_valid_frame() || then.is_linked()) {
+    then.Bind();
+    LoadAndSpill(node->then_expression(), typeof_state());
+  }
+  if (else_.is_linked()) {
+    JumpTarget exit;
+    if (has_valid_frame()) exit.Jump();
+    else_.Bind();
+    LoadAndSpill(node->else_expression(), typeof_state());
+    if (exit.is_linked()) exit.Bind();
+  }
+  ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
+  VirtualFrame::SpilledScope spilled_scope;
+  if (slot->type() == Slot::LOOKUP) {
+    ASSERT(slot->var()->is_dynamic());
+
+    JumpTarget slow;
+    JumpTarget done;
+
+    // Generate fast-case code for variables that might be shadowed by
+    // eval-introduced variables.  Eval is used a lot without
+    // introducing variables.  In those cases, we do not want to
+    // perform a runtime call for all variables in the scope
+    // containing the eval.
+    if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
+      LoadFromGlobalSlotCheckExtensions(slot, typeof_state, r1, r2, &slow);
+      // If there was no control flow to slow, we can exit early.
+      if (!slow.is_linked()) {
+        frame_->EmitPush(r0);
+        return;
+      }
+
+      done.Jump();
+
+    } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
+      Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+      // Only generate the fast case for locals that rewrite to slots.
+      // This rules out argument loads.
+      if (potential_slot != NULL) {
+        __ ldr(r0,
+               ContextSlotOperandCheckExtensions(potential_slot,
+                                                 r1,
+                                                 r2,
+                                                 &slow));
+        if (potential_slot->var()->mode() == Variable::CONST) {
+          __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+          __ cmp(r0, ip);
+          __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+        }
+        // There is always control flow to slow from
+        // ContextSlotOperandCheckExtensions so we have to jump around
+        // it.
+        done.Jump();
+      }
+    }
+
+    slow.Bind();
+    frame_->EmitPush(cp);
+    __ mov(r0, Operand(slot->var()->name()));
+    frame_->EmitPush(r0);
+
+    if (typeof_state == INSIDE_TYPEOF) {
+      frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+    } else {
+      frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
+    }
+
+    done.Bind();
+    frame_->EmitPush(r0);
+
+  } else {
+    // Note: We would like to keep the assert below, but it fires because of
+    // some nasty code in LoadTypeofExpression() which should be removed...
+    // ASSERT(!slot->var()->is_dynamic());
+
+    // Special handling for locals allocated in registers.
+    __ ldr(r0, SlotOperand(slot, r2));
+    frame_->EmitPush(r0);
+    if (slot->var()->mode() == Variable::CONST) {
+      // Const slots may contain 'the hole' value (the constant hasn't been
+      // initialized yet) which needs to be converted into the 'undefined'
+      // value.
+      Comment cmnt(masm_, "[ Unhole const");
+      frame_->EmitPop(r0);
+      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+      __ cmp(r0, ip);
+      __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+      frame_->EmitPush(r0);
+    }
+  }
+}
+
+
+void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
+                                                      TypeofState typeof_state,
+                                                      Register tmp,
+                                                      Register tmp2,
+                                                      JumpTarget* slow) {
+  // Check that no extension objects have been created by calls to
+  // eval from the current scope to the global scope.
+  Register context = cp;
+  Scope* s = scope();
+  while (s != NULL) {
+    if (s->num_heap_slots() > 0) {
+      if (s->calls_eval()) {
+        // Check that extension is NULL.
+        __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
+        __ tst(tmp2, tmp2);
+        slow->Branch(ne);
+      }
+      // Load next context in chain.
+      __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
+      __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
+      context = tmp;
+    }
+    // If no outer scope calls eval, we do not need to check more
+    // context extensions.
+    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+    s = s->outer_scope();
+  }
+
+  if (s->is_eval_scope()) {
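+    // Inside an eval scope the remaining contexts in the chain are not
+    // statically known, so walk the chain at run time and check each
+    // extension object.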
+    Label next, fast;
+    if (!context.is(tmp)) {
+      __ mov(tmp, Operand(context));
+    }
+    __ bind(&next);
+    // Terminate at global context.
+    __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
+    __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
+    __ cmp(tmp2, ip);
+    __ b(eq, &fast);
+    // Check that extension is NULL.
+    __ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX));
+    __ tst(tmp2, tmp2);
+    slow->Branch(ne);
+    // Load next context in chain.
+    __ ldr(tmp, ContextOperand(tmp, Context::CLOSURE_INDEX));
+    __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
+    __ b(&next);
+    __ bind(&fast);
+  }
+
+  // All extension objects were empty and it is safe to use a global
+  // load IC call.
+  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+  // Load the global object.
+  LoadGlobal();
+  // Set up the name register.
+  Result name(r2);
+  __ mov(r2, Operand(slot->var()->name()));
+  // Call IC stub.
+  if (typeof_state == INSIDE_TYPEOF) {
+    frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, &name, 0);
+  } else {
+    frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET_CONTEXT, &name, 0);
+  }
+
+  // Drop the global object. The result is in r0.
+  frame_->Drop();
+}
+
+
+void CodeGenerator::VisitSlot(Slot* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ Slot");
+  LoadFromSlot(node, typeof_state());
+  ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ VariableProxy");
+
+  Variable* var = node->var();
+  Expression* expr = var->rewrite();
+  if (expr != NULL) {
+    Visit(expr);
+  } else {
+    ASSERT(var->is_global());
+    Reference ref(this, node);
+    ref.GetValueAndSpill(typeof_state());
+  }
+  ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitLiteral(Literal* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ Literal");
+  __ mov(r0, Operand(node->handle()));
+  frame_->EmitPush(r0);
+  ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ RexExp Literal");
+
+  // Retrieve the literal array and check the allocated entry.
+
+  // Load the function of this activation.
+  __ ldr(r1, frame_->Function());
+
+  // Load the literals array of the function.
+  __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
+
+  // Load the literal at the ast saved index.
+  int literal_offset =
+      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
+  __ ldr(r2, FieldMemOperand(r1, literal_offset));
+
+  JumpTarget done;
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(r2, ip);
+  done.Branch(ne);
+
+  // If the entry is undefined we call the runtime system to compute
+  // the literal.
+  frame_->EmitPush(r1);  // literal array  (0)
+  __ mov(r0, Operand(Smi::FromInt(node->literal_index())));
+  frame_->EmitPush(r0);  // literal index  (1)
+  __ mov(r0, Operand(node->pattern()));  // RegExp pattern (2)
+  frame_->EmitPush(r0);
+  __ mov(r0, Operand(node->flags()));  // RegExp flags   (3)
+  frame_->EmitPush(r0);
+  frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+  __ mov(r2, Operand(r0));
+
+  done.Bind();
+  // Push the literal.
+  frame_->EmitPush(r2);
+  ASSERT(frame_->height() == original_height + 1);
+}
+
+
+// This deferred code stub will be used for creating the boilerplate
+// by calling Runtime_CreateObjectLiteralBoilerplate.
+// Each created boilerplate is stored in the JSFunction and is therefore
+// context dependent.
+class DeferredObjectLiteral: public DeferredCode {
+ public:
+  explicit DeferredObjectLiteral(ObjectLiteral* node) : node_(node) {
+    set_comment("[ DeferredObjectLiteral");
+  }
+
+  virtual void Generate();
+
+ private:
+  ObjectLiteral* node_;
+};
+
+
+void DeferredObjectLiteral::Generate() {
+  // Argument is passed in r1.
+
+  // If the entry is undefined we call the runtime system to compute
+  // the literal.
+  // Literal array (0).
+  __ push(r1);
+  // Literal index (1).
+  __ mov(r0, Operand(Smi::FromInt(node_->literal_index())));
+  __ push(r0);
+  // Constant properties (2).
+  __ mov(r0, Operand(node_->constant_properties()));
+  __ push(r0);
+  __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+  __ mov(r2, Operand(r0));
+  // Result is returned in r2.
+}
+
+
+void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ ObjectLiteral");
+
+  DeferredObjectLiteral* deferred = new DeferredObjectLiteral(node);
+
+  // Retrieve the literal array and check the allocated entry.
+
+  // Load the function of this activation.
+  __ ldr(r1, frame_->Function());
+
+  // Load the literals array of the function.
+  __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
+
+  // Load the literal at the ast saved index.
+  int literal_offset =
+      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
+  __ ldr(r2, FieldMemOperand(r1, literal_offset));
+
+  // Check whether we need to materialize the object literal boilerplate.
+  // If so, jump to the deferred code.
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(r2, Operand(ip));
+  deferred->Branch(eq);
+  deferred->BindExit();
+
+  // Push the object literal boilerplate.
+  frame_->EmitPush(r2);
+
+  // Clone the boilerplate object.
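+  // A literal of depth 1 contains no nested object or array literals, so
+  // a shallow clone of the boilerplate is sufficient.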
+  Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
+  if (node->depth() == 1) {
+    clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
+  }
+  frame_->CallRuntime(clone_function_id, 1);
+  frame_->EmitPush(r0);  // save the result
+  // r0: cloned object literal
+
+  for (int i = 0; i < node->properties()->length(); i++) {
+    ObjectLiteral::Property* property = node->properties()->at(i);
+    Literal* key = property->key();
+    Expression* value = property->value();
+    switch (property->kind()) {
+      case ObjectLiteral::Property::CONSTANT:
+        break;
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+        if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
+        // else fall through
+      case ObjectLiteral::Property::COMPUTED:  // fall through
+      case ObjectLiteral::Property::PROTOTYPE: {
+        frame_->EmitPush(r0);  // dup the result
+        LoadAndSpill(key);
+        LoadAndSpill(value);
+        frame_->CallRuntime(Runtime::kSetProperty, 3);
+        // restore r0
+        __ ldr(r0, frame_->Top());
+        break;
+      }
+      case ObjectLiteral::Property::SETTER: {
+        frame_->EmitPush(r0);
+        LoadAndSpill(key);
+        __ mov(r0, Operand(Smi::FromInt(1)));
+        frame_->EmitPush(r0);
+        LoadAndSpill(value);
+        frame_->CallRuntime(Runtime::kDefineAccessor, 4);
+        __ ldr(r0, frame_->Top());
+        break;
+      }
+      case ObjectLiteral::Property::GETTER: {
+        frame_->EmitPush(r0);
+        LoadAndSpill(key);
+        __ mov(r0, Operand(Smi::FromInt(0)));
+        frame_->EmitPush(r0);
+        LoadAndSpill(value);
+        frame_->CallRuntime(Runtime::kDefineAccessor, 4);
+        __ ldr(r0, frame_->Top());
+        break;
+      }
+    }
+  }
+  ASSERT(frame_->height() == original_height + 1);
+}
+
+
+// This deferred code stub will be used for creating the boilerplate
+// by calling Runtime_CreateArrayLiteralBoilerplate.
+// Each created boilerplate is stored in the JSFunction and is therefore
+// context dependent.
+class DeferredArrayLiteral: public DeferredCode {
+ public:
+  explicit DeferredArrayLiteral(ArrayLiteral* node) : node_(node) {
+    set_comment("[ DeferredArrayLiteral");
+  }
+
+  virtual void Generate();
+
+ private:
+  ArrayLiteral* node_;
+};
+
+
+void DeferredArrayLiteral::Generate() {
+  // Argument is passed in r1.
+
+  // If the entry is undefined we call the runtime system to compute
+  // the literal.
+  // Literal array (0).
+  __ push(r1);
+  // Literal index (1).
+  __ mov(r0, Operand(Smi::FromInt(node_->literal_index())));
+  __ push(r0);
+  // Constant elements (2).
+  __ mov(r0, Operand(node_->literals()));
+  __ push(r0);
+  __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
+  __ mov(r2, Operand(r0));
+  // Result is returned in r2.
+}
+
+
+void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ ArrayLiteral");
+
+  DeferredArrayLiteral* deferred = new DeferredArrayLiteral(node);
+
+  // Retrieve the literal array and check the allocated entry.
+
+  // Load the function of this activation.
+  __ ldr(r1, frame_->Function());
+
+  // Load the literals array of the function.
+  __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
+
+  // Load the literal at the ast saved index.
+  int literal_offset =
+      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
+  __ ldr(r2, FieldMemOperand(r1, literal_offset));
+
+  // Check whether we need to materialize the array literal boilerplate.
+  // If so, jump to the deferred code.
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(r2, Operand(ip));
+  deferred->Branch(eq);
+  deferred->BindExit();
+
+  // Push the array literal boilerplate.
+  frame_->EmitPush(r2);
+
+  // Clone the boilerplate object.
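+  // A literal of depth 1 contains no nested literals, so a shallow clone
+  // of the boilerplate is sufficient.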
+  Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
+  if (node->depth() == 1) {
+    clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
+  }
+  frame_->CallRuntime(clone_function_id, 1);
+  frame_->EmitPush(r0);  // save the result
+  // r0: cloned array literal
+
+  // Generate code to set the elements in the array that are not
+  // literals.
+  for (int i = 0; i < node->values()->length(); i++) {
+    Expression* value = node->values()->at(i);
+
+    // If value is a literal the property value is already set in the
+    // boilerplate object.
+    if (value->AsLiteral() != NULL) continue;
+    // If value is a materialized literal the property value is already set
+    // in the boilerplate object if it is simple.
+    if (CompileTimeValue::IsCompileTimeValue(value)) continue;
+
+    // The property must be set by generated code.
+    LoadAndSpill(value);
+    frame_->EmitPop(r0);
+
+    // Fetch the object literal.
+    __ ldr(r1, frame_->Top());
+    // Get the elements array.
+    __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
+
+    // Write to the indexed properties array.
+    int offset = i * kPointerSize + FixedArray::kHeaderSize;
+    __ str(r0, FieldMemOperand(r1, offset));
+
+    // Update the write barrier for the array address.
+    __ mov(r3, Operand(offset));
+    __ RecordWrite(r1, r3, r2);
+  }
+  ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  // Call runtime routine to allocate the catch extension object and
+  // assign the exception value to the catch variable.
+  Comment cmnt(masm_, "[ CatchExtensionObject");
+  LoadAndSpill(node->key());
+  LoadAndSpill(node->value());
+  frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
+  frame_->EmitPush(r0);
+  ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitAssignment(Assignment* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ Assignment");
+
+  { Reference target(this, node->target());
+    if (target.is_illegal()) {
+      // Fool the virtual frame into thinking that we left the assignment's
+      // value on the frame.
+      __ mov(r0, Operand(Smi::FromInt(0)));
+      frame_->EmitPush(r0);
+      ASSERT(frame_->height() == original_height + 1);
+      return;
+    }
+
+    if (node->op() == Token::ASSIGN ||
+        node->op() == Token::INIT_VAR ||
+        node->op() == Token::INIT_CONST) {
+      LoadAndSpill(node->value());
+
+    } else {
+      // +=, *= and similar binary assignments.
+      // Get the old value of the lhs.
+      target.GetValueAndSpill(NOT_INSIDE_TYPEOF);
+      Literal* literal = node->value()->AsLiteral();
+      bool overwrite =
+          (node->value()->AsBinaryOperation() != NULL &&
+           node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+      if (literal != NULL && literal->handle()->IsSmi()) {
+        SmiOperation(node->binary_op(),
+                     literal->handle(),
+                     false,
+                     overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
+        frame_->EmitPush(r0);
+
+      } else {
+        LoadAndSpill(node->value());
+        GenericBinaryOperation(node->binary_op(),
+                               overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
+        frame_->EmitPush(r0);
+      }
+    }
+
+    Variable* var = node->target()->AsVariableProxy()->AsVariable();
+    if (var != NULL &&
+        (var->mode() == Variable::CONST) &&
+        node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
+      // Assignment ignored - leave the value on the stack.
+
+    } else {
+      CodeForSourcePosition(node->position());
+      if (node->op() == Token::INIT_CONST) {
+        // Dynamic constant initializations must use the function context
+        // and initialize the actual constant declared. Dynamic variable
+        // initializations are simply assignments and use SetValue.
+        target.SetValue(CONST_INIT);
+      } else {
+        target.SetValue(NOT_CONST_INIT);
+      }
+    }
+  }
+  ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitThrow(Throw* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ Throw");
+
+  LoadAndSpill(node->exception());
+  CodeForSourcePosition(node->position());
+  frame_->CallRuntime(Runtime::kThrow, 1);
+  frame_->EmitPush(r0);
+  ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitProperty(Property* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ Property");
+
+  { Reference property(this, node);
+    property.GetValueAndSpill(typeof_state());
+  }
+  ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitCall(Call* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ Call");
+
+  Expression* function = node->expression();
+  ZoneList<Expression*>* args = node->arguments();
+
+  // Standard function call.
+  // Check if the function is a variable or a property.
+  Variable* var = function->AsVariableProxy()->AsVariable();
+  Property* property = function->AsProperty();
+
+  // ------------------------------------------------------------------------
+  // Fast-case: Use inline caching.
+  // ---
+  // According to ECMA-262, section 11.2.3, page 44, the function to call
+  // must be resolved after the arguments have been evaluated. The IC code
+  // automatically handles this by loading the arguments before the function
+  // is resolved in cache misses (this also holds for megamorphic calls).
+  // ------------------------------------------------------------------------
+
+  if (var != NULL && var->is_possibly_eval()) {
+    // ----------------------------------
+    // JavaScript example: 'eval(arg)'  // eval is not known to be shadowed
+    // ----------------------------------
+
+    // In a call to eval, we first call %ResolvePossiblyDirectEval to
+    // resolve the function we need to call and the receiver of the
+    // call.  Then we call the resolved function using the given
+    // arguments.
+    // Prepare stack for call to resolved function.
+    LoadAndSpill(function);
+    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+    frame_->EmitPush(r2);  // Slot for receiver
+    int arg_count = args->length();
+    for (int i = 0; i < arg_count; i++) {
+      LoadAndSpill(args->at(i));
+    }
+
+    // Prepare stack for call to ResolvePossiblyDirectEval.
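+    // Push a copy of the function (found below the receiver and the
+    // arguments) and the first argument, or undefined if there are no
+    // arguments, as the two arguments to the resolver.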
+    __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
+    frame_->EmitPush(r1);
+    if (arg_count > 0) {
+      __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
+      frame_->EmitPush(r1);
+    } else {
+      frame_->EmitPush(r2);
+    }
+
+    // Resolve the call.
+    frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
+
+    // Touch up stack with the right values for the function and the receiver.
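+    // The resolver returns a fixed array: element 0 holds the function
+    // to call and element 1 holds the receiver.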
+    __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize));
+    __ str(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+    __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize + kPointerSize));
+    __ str(r1, MemOperand(sp, arg_count * kPointerSize));
+
+    // Call the function.
+    CodeForSourcePosition(node->position());
+
+    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+    CallFunctionStub call_function(arg_count, in_loop);
+    frame_->CallStub(&call_function, arg_count + 1);
+
+    __ ldr(cp, frame_->Context());
+    // Remove the function from the stack.
+    frame_->Drop();
+    frame_->EmitPush(r0);
+
+  } else if (var != NULL && !var->is_this() && var->is_global()) {
+    // ----------------------------------
+    // JavaScript example: 'foo(1, 2, 3)'  // foo is global
+    // ----------------------------------
+
+    // Push the name of the function and the receiver onto the stack.
+    __ mov(r0, Operand(var->name()));
+    frame_->EmitPush(r0);
+
+    // Pass the global object as the receiver and let the IC stub
+    // patch the stack to use the global proxy as 'this' in the
+    // invoked function.
+    LoadGlobal();
+
+    // Load the arguments.
+    int arg_count = args->length();
+    for (int i = 0; i < arg_count; i++) {
+      LoadAndSpill(args->at(i));
+    }
+
+    // Set up the receiver register and call the IC initialization code.
+    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+    Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
+    CodeForSourcePosition(node->position());
+    frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
+                           arg_count + 1);
+    __ ldr(cp, frame_->Context());
+    // Remove the function from the stack.
+    frame_->Drop();
+    frame_->EmitPush(r0);
+
+  } else if (var != NULL && var->slot() != NULL &&
+             var->slot()->type() == Slot::LOOKUP) {
+    // ----------------------------------
+    // JavaScript example: 'with (obj) foo(1, 2, 3)'  // foo is in obj
+    // ----------------------------------
+
+    // Load the function
+    frame_->EmitPush(cp);
+    __ mov(r0, Operand(var->name()));
+    frame_->EmitPush(r0);
+    frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
+    // r0: slot value; r1: receiver
+
+    // Load the receiver.
+    frame_->EmitPush(r0);  // function
+    frame_->EmitPush(r1);  // receiver
+
+    // Call the function.
+    CallWithArguments(args, node->position());
+    frame_->EmitPush(r0);
+
+  } else if (property != NULL) {
+    // Check if the key is a literal string.
+    Literal* literal = property->key()->AsLiteral();
+
+    if (literal != NULL && literal->handle()->IsSymbol()) {
+      // ------------------------------------------------------------------
+      // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
+      // ------------------------------------------------------------------
+
+      // Push the name of the function and the receiver onto the stack.
+      __ mov(r0, Operand(literal->handle()));
+      frame_->EmitPush(r0);
+      LoadAndSpill(property->obj());
+
+      // Load the arguments.
+      int arg_count = args->length();
+      for (int i = 0; i < arg_count; i++) {
+        LoadAndSpill(args->at(i));
+      }
+
+      // Set the receiver register and call the IC initialization code.
+      InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+      Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
+      CodeForSourcePosition(node->position());
+      frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
+      __ ldr(cp, frame_->Context());
+
+      // Remove the function from the stack.
+      frame_->Drop();
+
+      frame_->EmitPush(r0);  // Push the result after removing the function.
+
+    } else {
+      // -------------------------------------------
+      // JavaScript example: 'array[index](1, 2, 3)'
+      // -------------------------------------------
+
+      // Load the function to call from the property through a reference.
+      Reference ref(this, property);
+      ref.GetValueAndSpill(NOT_INSIDE_TYPEOF);  // receiver
+
+      // Pass receiver to called function.
+      if (property->is_synthetic()) {
+        LoadGlobalReceiver(r0);
+      } else {
+        __ ldr(r0, frame_->ElementAt(ref.size()));
+        frame_->EmitPush(r0);
+      }
+
+      // Call the function.
+      CallWithArguments(args, node->position());
+      frame_->EmitPush(r0);
+    }
+
+  } else {
+    // ----------------------------------
+    // JavaScript example: 'foo(1, 2, 3)'  // foo is not global
+    // ----------------------------------
+
+    // Load the function.
+    LoadAndSpill(function);
+
+    // Pass the global proxy as the receiver.
+    LoadGlobalReceiver(r0);
+
+    // Call the function.
+    CallWithArguments(args, node->position());
+    frame_->EmitPush(r0);
+  }
+  ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitCallNew(CallNew* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ CallNew");
+
+  // According to ECMA-262, section 11.2.2, page 44, the function
+  // expression in new calls must be evaluated before the
+  // arguments. This is different from ordinary calls, where the
+  // actual function to call is resolved after the arguments have been
+  // evaluated.
+
+  // Compute function to call and use the global object as the
+  // receiver. There is no need to use the global proxy here because
+  // it will always be replaced with a newly allocated object.
+  LoadAndSpill(node->expression());
+  LoadGlobal();
+
+  // Push the arguments ("left-to-right") on the stack.
+  ZoneList<Expression*>* args = node->arguments();
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    LoadAndSpill(args->at(i));
+  }
+
+  // r0: the number of arguments.
+  Result num_args(r0);
+  __ mov(r0, Operand(arg_count));
+
+  // Load the function into r1 as per calling convention.
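+  // The function sits below the receiver and the pushed arguments on
+  // the frame, at depth arg_count + 1.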
+  Result function(r1);
+  __ ldr(r1, frame_->ElementAt(arg_count + 1));
+
+  // Call the construct call builtin that handles allocation and
+  // constructor invocation.
+  CodeForSourcePosition(node->position());
+  Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
+  frame_->CallCodeObject(ic,
+                         RelocInfo::CONSTRUCT_CALL,
+                         &num_args,
+                         &function,
+                         arg_count + 1);
+
+  // Discard old TOS value and push r0 on the stack (same as Pop(), push(r0)).
+  __ str(r0, frame_->Top());
+  ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope;
+  ASSERT(args->length() == 1);
+  JumpTarget leave, null, function, non_function_constructor;
+
+  // Load the object into r0.
+  LoadAndSpill(args->at(0));
+  frame_->EmitPop(r0);
+
+  // If the object is a smi, we return null.
+  __ tst(r0, Operand(kSmiTagMask));
+  null.Branch(eq);
+
+  // Check that the object is a JS object but take special care of JS
+  // functions to make sure they have 'Function' as their class.
+  __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE);
+  null.Branch(lt);
+
+  // As long as JS_FUNCTION_TYPE is the last instance type and it is
+  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+  // LAST_JS_OBJECT_TYPE.
+  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+  __ cmp(r1, Operand(JS_FUNCTION_TYPE));
+  function.Branch(eq);
+
+  // Check if the constructor in the map is a function.
+  __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
+  __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
+  non_function_constructor.Branch(ne);
+
+  // The r0 register now contains the constructor function. Grab the
+  // instance class name from there.
+  __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset));
+  frame_->EmitPush(r0);
+  leave.Jump();
+
+  // Functions have class 'Function'.
+  function.Bind();
+  __ mov(r0, Operand(Factory::function_class_symbol()));
+  frame_->EmitPush(r0);
+  leave.Jump();
+
+  // Objects with a non-function constructor have class 'Object'.
+  non_function_constructor.Bind();
+  __ mov(r0, Operand(Factory::Object_symbol()));
+  frame_->EmitPush(r0);
+  leave.Jump();
+
+  // Non-JS objects have class null.
+  null.Bind();
+  __ LoadRoot(r0, Heap::kNullValueRootIndex);
+  frame_->EmitPush(r0);
+
+  // All done.
+  leave.Bind();
+}
+
+
+void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope;
+  ASSERT(args->length() == 1);
+  JumpTarget leave;
+  LoadAndSpill(args->at(0));
+  frame_->EmitPop(r0);  // r0 contains object.
+  // if (object->IsSmi()) return the object.
+  __ tst(r0, Operand(kSmiTagMask));
+  leave.Branch(eq);
+  // It is a heap object - get map. If (!object->IsJSValue()) return the object.
+  __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
+  leave.Branch(ne);
+  // Load the value.
+  __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
+  leave.Bind();
+  frame_->EmitPush(r0);
+}
+
+
+void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope;
+  ASSERT(args->length() == 2);
+  JumpTarget leave;
+  LoadAndSpill(args->at(0));  // Load the object.
+  LoadAndSpill(args->at(1));  // Load the value.
+  frame_->EmitPop(r0);  // r0 contains value
+  frame_->EmitPop(r1);  // r1 contains object
+  // if (object->IsSmi()) return object.
+  __ tst(r1, Operand(kSmiTagMask));
+  leave.Branch(eq);
+  // It is a heap object - get map. If (!object->IsJSValue()) return the object.
+  __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
+  leave.Branch(ne);
+  // Store the value.
+  __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
+  // Update the write barrier.
+  __ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag));
+  __ RecordWrite(r1, r2, r3);
+  // Leave.
+  leave.Bind();
+  frame_->EmitPush(r0);
+}
+
+
+void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope;
+  ASSERT(args->length() == 1);
+  LoadAndSpill(args->at(0));
+  frame_->EmitPop(r0);
+  __ tst(r0, Operand(kSmiTagMask));
+  cc_reg_ = eq;
+}
+
+
+void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope;
+  // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
+  ASSERT_EQ(args->length(), 3);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (ShouldGenerateLog(args->at(0))) {
+    LoadAndSpill(args->at(1));
+    LoadAndSpill(args->at(2));
+    __ CallRuntime(Runtime::kLog, 2);
+  }
+#endif
+  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+  frame_->EmitPush(r0);
+}
+
+
+void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope;
+  ASSERT(args->length() == 1);
+  LoadAndSpill(args->at(0));
+  frame_->EmitPop(r0);
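+  // A non-negative smi has both the smi tag bit and the sign bit clear,
+  // so a single test covers both conditions.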
+  __ tst(r0, Operand(kSmiTagMask | 0x80000000u));
+  cc_reg_ = eq;
+}
+
+
+// This should generate code that performs a charCodeAt() call or returns
+// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
+// It is not yet implemented on ARM, so it always goes to the slow case.
+void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope;
+  ASSERT(args->length() == 2);
+  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+  frame_->EmitPush(r0);
+}
+
+
+void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope;
+  ASSERT(args->length() == 1);
+  LoadAndSpill(args->at(0));
+  JumpTarget answer;
+  // We need the CC bits to come out as not_equal in the case where the
+  // object is a smi.  This can't be done with the usual test opcode so
+  // we use XOR to get the right CC bits.
+  frame_->EmitPop(r0);
+  __ and_(r1, r0, Operand(kSmiTagMask));
+  __ eor(r1, r1, Operand(kSmiTagMask), SetCC);
+  answer.Branch(ne);
+  // It is a heap object - get the map. Check if the object is a JS array.
+  __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
+  answer.Bind();
+  cc_reg_ = eq;
+}
+
+
+void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope;
+  ASSERT(args->length() == 0);
+
+  // Get the frame pointer for the calling frame.
+  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+  // Skip the arguments adaptor frame if it exists.
+  Label check_frame_marker;
+  __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
+  __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ b(ne, &check_frame_marker);
+  __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
+
+  // Check the marker in the calling frame.
+  __ bind(&check_frame_marker);
+  __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
+  __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+  cc_reg_ = eq;
+}
+
+
+void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope;
+  ASSERT(args->length() == 0);
+
+  // Seed the result with the formal parameters count, which will be used
+  // in case no arguments adaptor frame is found below the current frame.
+  __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
+
+  // Call the shared stub to get to the arguments.length.
+  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
+  frame_->CallStub(&stub, 0);
+  frame_->EmitPush(r0);
+}
+
+
+void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope;
+  ASSERT(args->length() == 1);
+
+  // Satisfy contract with ArgumentsAccessStub:
+  // Load the key into r1 and the formal parameters count into r0.
+  LoadAndSpill(args->at(0));
+  frame_->EmitPop(r1);
+  __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
+
+  // Call the shared stub to get to arguments[key].
+  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+  frame_->CallStub(&stub, 0);
+  frame_->EmitPush(r0);
+}
+
+
+void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope;
+  ASSERT(args->length() == 0);
+  __ Call(ExternalReference::random_positive_smi_function().address(),
+          RelocInfo::RUNTIME_ENTRY);
+  frame_->EmitPush(r0);
+}
+
+
+void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope;
+  LoadAndSpill(args->at(0));
+  switch (op) {
+    case SIN:
+      frame_->CallRuntime(Runtime::kMath_sin, 1);
+      break;
+    case COS:
+      frame_->CallRuntime(Runtime::kMath_cos, 1);
+      break;
+  }
+  frame_->EmitPush(r0);
+}
+
+
+void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope;
+  ASSERT(args->length() == 2);
+
+  // Load the two objects into registers and perform the comparison.
+  LoadAndSpill(args->at(0));
+  LoadAndSpill(args->at(1));
+  frame_->EmitPop(r0);
+  frame_->EmitPop(r1);
+  __ cmp(r0, Operand(r1));
+  cc_reg_ = eq;
+}
+
+
+void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  if (CheckForInlineRuntimeCall(node)) {
+    ASSERT((has_cc() && frame_->height() == original_height) ||
+           (!has_cc() && frame_->height() == original_height + 1));
+    return;
+  }
+
+  ZoneList<Expression*>* args = node->arguments();
+  Comment cmnt(masm_, "[ CallRuntime");
+  Runtime::Function* function = node->function();
+
+  if (function == NULL) {
+    // Prepare stack for calling JS runtime function.
+    __ mov(r0, Operand(node->name()));
+    frame_->EmitPush(r0);
+    // Push the builtins object found in the current global object.
+    __ ldr(r1, GlobalObject());
+    __ ldr(r0, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
+    frame_->EmitPush(r0);
+  }
+
+  // Push the arguments ("left-to-right").
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    LoadAndSpill(args->at(i));
+  }
+
+  if (function == NULL) {
+    // Call the JS runtime function.
+    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+    Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
+    frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
+    __ ldr(cp, frame_->Context());
+    frame_->Drop();
+    frame_->EmitPush(r0);
+  } else {
+    // Call the C runtime function.
+    frame_->CallRuntime(function, arg_count);
+    frame_->EmitPush(r0);
+  }
+  ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ UnaryOperation");
+
+  Token::Value op = node->op();
+
+  if (op == Token::NOT) {
+    LoadConditionAndSpill(node->expression(),
+                          NOT_INSIDE_TYPEOF,
+                          false_target(),
+                          true_target(),
+                          true);
+    // LoadCondition may (and usually does) leave a test and branch to
+    // be emitted by the caller.  In that case, negate the condition.
+    if (has_cc()) cc_reg_ = NegateCondition(cc_reg_);
+
+  } else if (op == Token::DELETE) {
+    Property* property = node->expression()->AsProperty();
+    Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
+    if (property != NULL) {
+      LoadAndSpill(property->obj());
+      LoadAndSpill(property->key());
+      Result arg_count(r0);
+      __ mov(r0, Operand(1));  // not counting receiver
+      frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
+
+    } else if (variable != NULL) {
+      Slot* slot = variable->slot();
+      if (variable->is_global()) {
+        LoadGlobal();
+        __ mov(r0, Operand(variable->name()));
+        frame_->EmitPush(r0);
+        Result arg_count(r0);
+        __ mov(r0, Operand(1));  // not counting receiver
+        frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
+
+      } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+        // Look up the context holding the named variable.
+        frame_->EmitPush(cp);
+        __ mov(r0, Operand(variable->name()));
+        frame_->EmitPush(r0);
+        frame_->CallRuntime(Runtime::kLookupContext, 2);
+        // r0: context
+        frame_->EmitPush(r0);
+        __ mov(r0, Operand(variable->name()));
+        frame_->EmitPush(r0);
+        Result arg_count(r0);
+        __ mov(r0, Operand(1));  // not counting receiver
+        frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
+
+      } else {
+        // Default: Deleting a non-global variable that was not
+        // dynamically introduced yields false.
+        __ LoadRoot(r0, Heap::kFalseValueRootIndex);
+      }
+
+    } else {
+      // Default: Result of deleting expressions is true.
+      LoadAndSpill(node->expression());  // may have side-effects
+      frame_->Drop();
+      __ LoadRoot(r0, Heap::kTrueValueRootIndex);
+    }
+    frame_->EmitPush(r0);
+
+  } else if (op == Token::TYPEOF) {
+    // Special case for loading the typeof expression; see comment on
+    // LoadTypeofExpression().
+    LoadTypeofExpression(node->expression());
+    frame_->CallRuntime(Runtime::kTypeof, 1);
+    frame_->EmitPush(r0);  // r0 has result
+
+  } else {
+    LoadAndSpill(node->expression());
+    frame_->EmitPop(r0);
+    switch (op) {
+      case Token::NOT:
+      case Token::DELETE:
+      case Token::TYPEOF:
+        UNREACHABLE();  // handled above
+        break;
+
+      case Token::SUB: {
+        bool overwrite =
+            (node->expression()->AsBinaryOperation() != NULL &&
+             node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+        UnarySubStub stub(overwrite);
+        frame_->CallStub(&stub, 0);
+        break;
+      }
+
+      case Token::BIT_NOT: {
+        // smi check
+        JumpTarget smi_label;
+        JumpTarget continue_label;
+        __ tst(r0, Operand(kSmiTagMask));
+        smi_label.Branch(eq);
+
+        frame_->EmitPush(r0);
+        Result arg_count(r0);
+        __ mov(r0, Operand(0));  // not counting receiver
+        frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, &arg_count, 1);
+
+        continue_label.Jump();
+        smi_label.Bind();
+        __ mvn(r0, Operand(r0));
+        __ bic(r0, r0, Operand(kSmiTagMask));  // bit-clear inverted smi-tag
+        continue_label.Bind();
+        break;
+      }
+
+      case Token::VOID:
+        // Since the stack top is cached in r0, popping and then
+        // pushing a value can be done by just writing to r0.
+        __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+        break;
+
+      case Token::ADD: {
+        // Smi check.
+        JumpTarget continue_label;
+        __ tst(r0, Operand(kSmiTagMask));
+        continue_label.Branch(eq);
+        frame_->EmitPush(r0);
+        Result arg_count(r0);
+        __ mov(r0, Operand(0));  // not counting receiver
+        frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
+        continue_label.Bind();
+        break;
+      }
+      default:
+        UNREACHABLE();
+    }
+    frame_->EmitPush(r0);  // r0 has result
+  }
+  ASSERT(!has_valid_frame() ||
+         (has_cc() && frame_->height() == original_height) ||
+         (!has_cc() && frame_->height() == original_height + 1));
+}
+
+
+void CodeGenerator::VisitCountOperation(CountOperation* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ CountOperation");
+
+  bool is_postfix = node->is_postfix();
+  bool is_increment = node->op() == Token::INC;
+
+  Variable* var = node->expression()->AsVariableProxy()->AsVariable();
+  bool is_const = (var != NULL && var->mode() == Variable::CONST);
+
+  // Postfix: Make room for the result.
+  if (is_postfix) {
+     __ mov(r0, Operand(0));
+     frame_->EmitPush(r0);
+  }
+
+  { Reference target(this, node->expression());
+    if (target.is_illegal()) {
+      // Spoof the virtual frame to have the expected height (one higher
+      // than on entry).
+      if (!is_postfix) {
+        __ mov(r0, Operand(Smi::FromInt(0)));
+        frame_->EmitPush(r0);
+      }
+      ASSERT(frame_->height() == original_height + 1);
+      return;
+    }
+    target.GetValueAndSpill(NOT_INSIDE_TYPEOF);
+    frame_->EmitPop(r0);
+
+    JumpTarget slow;
+    JumpTarget exit;
+
+    // Load the value (1) into register r1.
+    __ mov(r1, Operand(Smi::FromInt(1)));
+
+    // Check for smi operand.
+    __ tst(r0, Operand(kSmiTagMask));
+    slow.Branch(ne);
+
+    // Postfix: Store the old value as the result.
+    if (is_postfix) {
+      __ str(r0, frame_->ElementAt(target.size()));
+    }
+
+    // Perform optimistic increment/decrement.
+    if (is_increment) {
+      __ add(r0, r0, Operand(r1), SetCC);
+    } else {
+      __ sub(r0, r0, Operand(r1), SetCC);
+    }
+
+    // If the increment/decrement didn't overflow, we're done.
+    exit.Branch(vc);
+
+    // Revert optimistic increment/decrement.
+    if (is_increment) {
+      __ sub(r0, r0, Operand(r1));
+    } else {
+      __ add(r0, r0, Operand(r1));
+    }
+
+    // Slow case: Convert to number.
+    slow.Bind();
+    {
+      // Convert the operand to a number.
+      frame_->EmitPush(r0);
+      Result arg_count(r0);
+      __ mov(r0, Operand(0));
+      frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
+    }
+    if (is_postfix) {
+      // Postfix: store to result (on the stack).
+      __ str(r0, frame_->ElementAt(target.size()));
+    }
+
+    // Compute the new value.
+    __ mov(r1, Operand(Smi::FromInt(1)));
+    frame_->EmitPush(r0);
+    frame_->EmitPush(r1);
+    if (is_increment) {
+      frame_->CallRuntime(Runtime::kNumberAdd, 2);
+    } else {
+      frame_->CallRuntime(Runtime::kNumberSub, 2);
+    }
+
+    // Store the new value in the target if not const.
+    exit.Bind();
+    frame_->EmitPush(r0);
+    if (!is_const) target.SetValue(NOT_CONST_INIT);
+  }
+
+  // Postfix: Discard the new value and use the old.
+  if (is_postfix) frame_->EmitPop(r0);
+  ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ BinaryOperation");
+  Token::Value op = node->op();
+
+  // According to ECMA-262 section 11.11, page 58, the binary logical
+  // operators must yield the result of one of the two expressions
+  // before any ToBoolean() conversions. This means that the value
+  // produced by a && or || operator is not necessarily a boolean.
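+  // For example, (0 || "fallback") evaluates to the string "fallback" and
+  // ("" && f()) evaluates to the empty string, not to a boolean.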
+
+  // NOTE: If the left hand side produces a materialized value (not in
+  // the CC register), we force the right hand side to do the
+  // same. This is necessary because we may have to branch to the exit
+  // after evaluating the left hand side (due to the shortcut
+  // semantics), but the compiler must (statically) know if the result
+  // of compiling the binary operation is materialized or not.
+
+  if (op == Token::AND) {
+    JumpTarget is_true;
+    LoadConditionAndSpill(node->left(),
+                          NOT_INSIDE_TYPEOF,
+                          &is_true,
+                          false_target(),
+                          false);
+    if (has_valid_frame() && !has_cc()) {
+      // The left-hand side result is on top of the virtual frame.
+      JumpTarget pop_and_continue;
+      JumpTarget exit;
+
+      __ ldr(r0, frame_->Top());  // Duplicate the stack top.
+      frame_->EmitPush(r0);
+      // Avoid popping the result if it converts to 'false' using the
+      // standard ToBoolean() conversion as described in ECMA-262,
+      // section 9.2, page 30.
+      ToBoolean(&pop_and_continue, &exit);
+      Branch(false, &exit);
+
+      // Pop the result of evaluating the first part.
+      pop_and_continue.Bind();
+      frame_->EmitPop(r0);
+
+      // Evaluate right side expression.
+      is_true.Bind();
+      LoadAndSpill(node->right());
+
+      // Exit (always with a materialized value).
+      exit.Bind();
+    } else if (has_cc() || is_true.is_linked()) {
+      // The left-hand side is either (a) partially compiled to
+      // control flow with a final branch left to emit or (b) fully
+      // compiled to control flow and possibly true.
+      if (has_cc()) {
+        Branch(false, false_target());
+      }
+      is_true.Bind();
+      LoadConditionAndSpill(node->right(),
+                            NOT_INSIDE_TYPEOF,
+                            true_target(),
+                            false_target(),
+                            false);
+    } else {
+      // Nothing to do.
+      ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked());
+    }
+
+  } else if (op == Token::OR) {
+    JumpTarget is_false;
+    LoadConditionAndSpill(node->left(),
+                          NOT_INSIDE_TYPEOF,
+                          true_target(),
+                          &is_false,
+                          false);
+    if (has_valid_frame() && !has_cc()) {
+      // The left-hand side result is on top of the virtual frame.
+      JumpTarget pop_and_continue;
+      JumpTarget exit;
+
+      __ ldr(r0, frame_->Top());
+      frame_->EmitPush(r0);
+      // Avoid popping the result if it converts to 'true' using the
+      // standard ToBoolean() conversion as described in ECMA-262,
+      // section 9.2, page 30.
+      ToBoolean(&exit, &pop_and_continue);
+      Branch(true, &exit);
+
+      // Pop the result of evaluating the first part.
+      pop_and_continue.Bind();
+      frame_->EmitPop(r0);
+
+      // Evaluate right side expression.
+      is_false.Bind();
+      LoadAndSpill(node->right());
+
+      // Exit (always with a materialized value).
+      exit.Bind();
+    } else if (has_cc() || is_false.is_linked()) {
+      // The left-hand side is either (a) partially compiled to
+      // control flow with a final branch left to emit or (b) fully
+      // compiled to control flow and possibly false.
+      if (has_cc()) {
+        Branch(true, true_target());
+      }
+      is_false.Bind();
+      LoadConditionAndSpill(node->right(),
+                            NOT_INSIDE_TYPEOF,
+                            true_target(),
+                            false_target(),
+                            false);
+    } else {
+      // Nothing to do.
+      ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked());
+    }
+
+  } else {
+    // Optimize for the case where (at least) one of the expressions
+    // is a literal small integer.
+    Literal* lliteral = node->left()->AsLiteral();
+    Literal* rliteral = node->right()->AsLiteral();
+    // NOTE: The code below assumes that the slow cases (calls to runtime)
+    // never return a constant/immutable object.
+    bool overwrite_left =
+        (node->left()->AsBinaryOperation() != NULL &&
+         node->left()->AsBinaryOperation()->ResultOverwriteAllowed());
+    bool overwrite_right =
+        (node->right()->AsBinaryOperation() != NULL &&
+         node->right()->AsBinaryOperation()->ResultOverwriteAllowed());
+
+    if (rliteral != NULL && rliteral->handle()->IsSmi()) {
+      LoadAndSpill(node->left());
+      SmiOperation(node->op(),
+                   rliteral->handle(),
+                   false,
+                   overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
+
+    } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
+      LoadAndSpill(node->right());
+      SmiOperation(node->op(),
+                   lliteral->handle(),
+                   true,
+                   overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
+
+    } else {
+      OverwriteMode overwrite_mode = NO_OVERWRITE;
+      if (overwrite_left) {
+        overwrite_mode = OVERWRITE_LEFT;
+      } else if (overwrite_right) {
+        overwrite_mode = OVERWRITE_RIGHT;
+      }
+      LoadAndSpill(node->left());
+      LoadAndSpill(node->right());
+      GenericBinaryOperation(node->op(), overwrite_mode);
+    }
+    frame_->EmitPush(r0);
+  }
+  ASSERT(!has_valid_frame() ||
+         (has_cc() && frame_->height() == original_height) ||
+         (!has_cc() && frame_->height() == original_height + 1));
+}
+
+
+void CodeGenerator::VisitThisFunction(ThisFunction* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  __ ldr(r0, frame_->Function());
+  frame_->EmitPush(r0);
+  ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ CompareOperation");
+
+  // Get the expressions from the node.
+  Expression* left = node->left();
+  Expression* right = node->right();
+  Token::Value op = node->op();
+
+  // To make null checks efficient, we check if either left or right is the
+  // literal 'null'. If so, we optimize the code by inlining a null check
+  // instead of calling the (very) general runtime routine for checking
+  // equality.
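+  // For example, 'x == null' is true only when x is null, undefined, or an
+  // undetectable object, while 'x === null' is true only when x is null.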
+  if (op == Token::EQ || op == Token::EQ_STRICT) {
+    bool left_is_null =
+        left->AsLiteral() != NULL && left->AsLiteral()->IsNull();
+    bool right_is_null =
+        right->AsLiteral() != NULL && right->AsLiteral()->IsNull();
+    // The 'null' value can only be equal to 'null' or 'undefined'.
+    if (left_is_null || right_is_null) {
+      LoadAndSpill(left_is_null ? right : left);
+      frame_->EmitPop(r0);
+      __ LoadRoot(ip, Heap::kNullValueRootIndex);
+      __ cmp(r0, ip);
+
+      // The 'null' value is only equal to 'undefined' if using non-strict
+      // comparisons.
+      if (op != Token::EQ_STRICT) {
+        true_target()->Branch(eq);
+
+        __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+        __ cmp(r0, Operand(ip));
+        true_target()->Branch(eq);
+
+        __ tst(r0, Operand(kSmiTagMask));
+        false_target()->Branch(eq);
+
+        // It can be an undetectable object.
+        __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+        __ ldrb(r0, FieldMemOperand(r0, Map::kBitFieldOffset));
+        __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
+        __ cmp(r0, Operand(1 << Map::kIsUndetectable));
+      }
+
+      cc_reg_ = eq;
+      ASSERT(has_cc() && frame_->height() == original_height);
+      return;
+    }
+  }
+
+  // To make typeof testing for natives implemented in JavaScript really
+  // efficient, we generate special code for expressions of the form:
+  // 'typeof <expression> == <string>'.
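+  // For example, 'typeof x == "number"' compiles down to a smi check plus a
+  // comparison against the heap number map instead of a generic equality.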
+  UnaryOperation* operation = left->AsUnaryOperation();
+  if ((op == Token::EQ || op == Token::EQ_STRICT) &&
+      (operation != NULL && operation->op() == Token::TYPEOF) &&
+      (right->AsLiteral() != NULL &&
+       right->AsLiteral()->handle()->IsString())) {
+    Handle<String> check(String::cast(*right->AsLiteral()->handle()));
+
+    // Load the operand, move it to register r1.
+    LoadTypeofExpression(operation->expression());
+    frame_->EmitPop(r1);
+
+    if (check->Equals(Heap::number_symbol())) {
+      __ tst(r1, Operand(kSmiTagMask));
+      true_target()->Branch(eq);
+      __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
+      __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+      __ cmp(r1, ip);
+      cc_reg_ = eq;
+
+    } else if (check->Equals(Heap::string_symbol())) {
+      __ tst(r1, Operand(kSmiTagMask));
+      false_target()->Branch(eq);
+
+      __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
+
+      // It can be an undetectable string object.
+      __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset));
+      __ and_(r2, r2, Operand(1 << Map::kIsUndetectable));
+      __ cmp(r2, Operand(1 << Map::kIsUndetectable));
+      false_target()->Branch(eq);
+
+      __ ldrb(r2, FieldMemOperand(r1, Map::kInstanceTypeOffset));
+      __ cmp(r2, Operand(FIRST_NONSTRING_TYPE));
+      cc_reg_ = lt;
+
+    } else if (check->Equals(Heap::boolean_symbol())) {
+      __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+      __ cmp(r1, ip);
+      true_target()->Branch(eq);
+      __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+      __ cmp(r1, ip);
+      cc_reg_ = eq;
+
+    } else if (check->Equals(Heap::undefined_symbol())) {
+      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+      __ cmp(r1, ip);
+      true_target()->Branch(eq);
+
+      __ tst(r1, Operand(kSmiTagMask));
+      false_target()->Branch(eq);
+
+      // It can be an undetectable object.
+      __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
+      __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset));
+      __ and_(r2, r2, Operand(1 << Map::kIsUndetectable));
+      __ cmp(r2, Operand(1 << Map::kIsUndetectable));
+
+      cc_reg_ = eq;
+
+    } else if (check->Equals(Heap::function_symbol())) {
+      __ tst(r1, Operand(kSmiTagMask));
+      false_target()->Branch(eq);
+      __ CompareObjectType(r1, r1, r1, JS_FUNCTION_TYPE);
+      cc_reg_ = eq;
+
+    } else if (check->Equals(Heap::object_symbol())) {
+      __ tst(r1, Operand(kSmiTagMask));
+      false_target()->Branch(eq);
+
+      __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+      __ LoadRoot(ip, Heap::kNullValueRootIndex);
+      __ cmp(r1, ip);
+      true_target()->Branch(eq);
+
+      // It can be an undetectable object.
+      __ ldrb(r1, FieldMemOperand(r2, Map::kBitFieldOffset));
+      __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
+      __ cmp(r1, Operand(1 << Map::kIsUndetectable));
+      false_target()->Branch(eq);
+
+      __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+      __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
+      false_target()->Branch(lt);
+      __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
+      cc_reg_ = le;
+
+    } else {
+      // Uncommon case: typeof testing against a string literal that is
+      // never returned from the typeof operator.
+      false_target()->Jump();
+    }
+    ASSERT(!has_valid_frame() ||
+           (has_cc() && frame_->height() == original_height));
+    return;
+  }
+
+  switch (op) {
+    case Token::EQ:
+      Comparison(eq, left, right, false);
+      break;
+
+    case Token::LT:
+      Comparison(lt, left, right);
+      break;
+
+    case Token::GT:
+      Comparison(gt, left, right);
+      break;
+
+    case Token::LTE:
+      Comparison(le, left, right);
+      break;
+
+    case Token::GTE:
+      Comparison(ge, left, right);
+      break;
+
+    case Token::EQ_STRICT:
+      Comparison(eq, left, right, true);
+      break;
+
+    case Token::IN: {
+      LoadAndSpill(left);
+      LoadAndSpill(right);
+      Result arg_count(r0);
+      __ mov(r0, Operand(1));  // not counting receiver
+      frame_->InvokeBuiltin(Builtins::IN, CALL_JS, &arg_count, 2);
+      frame_->EmitPush(r0);
+      break;
+    }
+
+    case Token::INSTANCEOF: {
+      LoadAndSpill(left);
+      LoadAndSpill(right);
+      InstanceofStub stub;
+      frame_->CallStub(&stub, 2);
+      // At this point if instanceof succeeded then r0 == 0.
+      __ tst(r0, Operand(r0));
+      cc_reg_ = eq;
+      break;
+    }
+
+    default:
+      UNREACHABLE();
+  }
+  ASSERT((has_cc() && frame_->height() == original_height) ||
+         (!has_cc() && frame_->height() == original_height + 1));
+}
+
+
+#ifdef DEBUG
+bool CodeGenerator::HasValidEntryRegisters() { return true; }
+#endif
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+Handle<String> Reference::GetName() {
+  ASSERT(type_ == NAMED);
+  Property* property = expression_->AsProperty();
+  if (property == NULL) {
+    // Global variable reference treated as a named property reference.
+    VariableProxy* proxy = expression_->AsVariableProxy();
+    ASSERT(proxy->AsVariable() != NULL);
+    ASSERT(proxy->AsVariable()->is_global());
+    return proxy->name();
+  } else {
+    Literal* raw_name = property->key()->AsLiteral();
+    ASSERT(raw_name != NULL);
+    return Handle<String>(String::cast(*raw_name->handle()));
+  }
+}
+
+
+void Reference::GetValue(TypeofState typeof_state) {
+  ASSERT(cgen_->HasValidEntryRegisters());
+  ASSERT(!is_illegal());
+  ASSERT(!cgen_->has_cc());
+  MacroAssembler* masm = cgen_->masm();
+  Property* property = expression_->AsProperty();
+  if (property != NULL) {
+    cgen_->CodeForSourcePosition(property->position());
+  }
+
+  switch (type_) {
+    case SLOT: {
+      Comment cmnt(masm, "[ Load from Slot");
+      Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+      ASSERT(slot != NULL);
+      cgen_->LoadFromSlot(slot, typeof_state);
+      break;
+    }
+
+    case NAMED: {
+      // TODO(1241834): Make sure that it is safe to ignore the
+      // distinction between expressions in a typeof and not in a typeof. If
+      // there is a chance that reference errors can be thrown below, we
+      // must distinguish between the two kinds of loads (typeof expression
+      // loads must not throw a reference error).
+      VirtualFrame* frame = cgen_->frame();
+      Comment cmnt(masm, "[ Load from named Property");
+      Handle<String> name(GetName());
+      Variable* var = expression_->AsVariableProxy()->AsVariable();
+      Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+      // Set up the name register.
+      Result name_reg(r2);
+      __ mov(r2, Operand(name));
+      ASSERT(var == NULL || var->is_global());
+      RelocInfo::Mode rmode = (var == NULL)
+                            ? RelocInfo::CODE_TARGET
+                            : RelocInfo::CODE_TARGET_CONTEXT;
+      frame->CallCodeObject(ic, rmode, &name_reg, 0);
+      frame->EmitPush(r0);
+      break;
+    }
+
+    case KEYED: {
+      // TODO(1241834): Make sure that it is safe to ignore the
+      // distinction between expressions in a typeof and not in a typeof.
+
+      // TODO(181): Implement inlined version of array indexing once
+      // loop nesting is properly tracked on ARM.
+      VirtualFrame* frame = cgen_->frame();
+      Comment cmnt(masm, "[ Load from keyed Property");
+      ASSERT(property != NULL);
+      Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+      Variable* var = expression_->AsVariableProxy()->AsVariable();
+      ASSERT(var == NULL || var->is_global());
+      RelocInfo::Mode rmode = (var == NULL)
+                            ? RelocInfo::CODE_TARGET
+                            : RelocInfo::CODE_TARGET_CONTEXT;
+      frame->CallCodeObject(ic, rmode, 0);
+      frame->EmitPush(r0);
+      break;
+    }
+
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void Reference::SetValue(InitState init_state) {
+  ASSERT(!is_illegal());
+  ASSERT(!cgen_->has_cc());
+  MacroAssembler* masm = cgen_->masm();
+  VirtualFrame* frame = cgen_->frame();
+  Property* property = expression_->AsProperty();
+  if (property != NULL) {
+    cgen_->CodeForSourcePosition(property->position());
+  }
+
+  switch (type_) {
+    case SLOT: {
+      Comment cmnt(masm, "[ Store to Slot");
+      Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+      ASSERT(slot != NULL);
+      if (slot->type() == Slot::LOOKUP) {
+        ASSERT(slot->var()->is_dynamic());
+
+        // For now, just do a runtime call.
+        frame->EmitPush(cp);
+        __ mov(r0, Operand(slot->var()->name()));
+        frame->EmitPush(r0);
+
+        if (init_state == CONST_INIT) {
+          // Same as the case for a normal store, but ignores attribute
+          // (e.g. READ_ONLY) of context slot so that we can initialize
+          // const properties (introduced via eval("const foo = (some
+          // expr);")). Also, uses the current function context instead of
+          // the top context.
+          //
+          // Note that we must declare the foo upon entry of eval(), via a
+          // context slot declaration, but we cannot initialize it at the
+          // same time, because the const declaration may be at the end of
+          // the eval code (sigh...) and the const variable may have been
+          // used before (where its value is 'undefined'). Thus, we can only
+          // do the initialization when we actually encounter the expression
+          // and when the expression operands are defined and valid, and
+          // thus we need the split into 2 operations: declaration of the
+          // context slot followed by initialization.
+          frame->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+        } else {
+          frame->CallRuntime(Runtime::kStoreContextSlot, 3);
+        }
+        // Storing a variable must keep the (new) value on the expression
+        // stack. This is necessary for compiling assignment expressions.
+        frame->EmitPush(r0);
+
+      } else {
+        ASSERT(!slot->var()->is_dynamic());
+
+        JumpTarget exit;
+        if (init_state == CONST_INIT) {
+          ASSERT(slot->var()->mode() == Variable::CONST);
+          // Only the first const initialization must be executed (the slot
+          // still contains 'the hole' value). When the assignment is
+          // executed, the code is identical to a normal store (see below).
+          Comment cmnt(masm, "[ Init const");
+          __ ldr(r2, cgen_->SlotOperand(slot, r2));
+          __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+          __ cmp(r2, ip);
+          exit.Branch(ne);
+        }
+
+        // We must execute the store.  Storing a variable must keep the
+        // (new) value on the stack. This is necessary for compiling
+        // assignment expressions.
+        //
+        // Note: We will reach here even with slot->var()->mode() ==
+        // Variable::CONST because of const declarations which will
+        // initialize consts to 'the hole' value and by doing so, end up
+        // calling this code.  r2 may be loaded with context; used below in
+        // RecordWrite.
+        frame->EmitPop(r0);
+        __ str(r0, cgen_->SlotOperand(slot, r2));
+        frame->EmitPush(r0);
+        if (slot->type() == Slot::CONTEXT) {
+          // Skip write barrier if the written value is a smi.
+          __ tst(r0, Operand(kSmiTagMask));
+          exit.Branch(eq);
+          // r2 is loaded with context when calling SlotOperand above.
+          int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+          __ mov(r3, Operand(offset));
+          __ RecordWrite(r2, r3, r1);
+        }
+        // If we definitely did not jump over the assignment, we do not need
+        // to bind the exit label.  Doing so can defeat peephole
+        // optimization.
+        if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
+          exit.Bind();
+        }
+      }
+      break;
+    }
+
+    case NAMED: {
+      Comment cmnt(masm, "[ Store to named Property");
+      // Call the appropriate IC code.
+      Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+      Handle<String> name(GetName());
+
+      Result value(r0);
+      frame->EmitPop(r0);
+
+      // Set up the name register.
+      Result property_name(r2);
+      __ mov(r2, Operand(name));
+      frame->CallCodeObject(ic,
+                            RelocInfo::CODE_TARGET,
+                            &value,
+                            &property_name,
+                            0);
+      frame->EmitPush(r0);
+      break;
+    }
+
+    case KEYED: {
+      Comment cmnt(masm, "[ Store to keyed Property");
+      Property* property = expression_->AsProperty();
+      ASSERT(property != NULL);
+      cgen_->CodeForSourcePosition(property->position());
+
+      // Call IC code.
+      Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+      // TODO(1222589): Make the IC grab the values from the stack.
+      Result value(r0);
+      frame->EmitPop(r0);  // value
+      frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, &value, 0);
+      frame->EmitPush(r0);
+      break;
+    }
+
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+// Count leading zeros in a 32 bit word.  On ARMv5 and later it uses the clz
+// instruction.  On pre-ARMv5 hardware this routine gives the wrong answer
+// for 0 (31 instead of 32).
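+// For example, an input of 1 yields 31 and an input of 0x80000000 yields 0.
+// The fallback below narrows down the bit position 16, 8, 4, 2 and 1 bits at
+// a time.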
+static void CountLeadingZeros(
+    MacroAssembler* masm,
+    Register source,
+    Register scratch,
+    Register zeros) {
+#ifdef CAN_USE_ARMV5_INSTRUCTIONS
+  __ clz(zeros, source);  // Only supported on ARMv5 and later.
+#else
+  __ mov(zeros, Operand(0));
+  __ mov(scratch, source);
+  // Top 16.
+  __ tst(scratch, Operand(0xffff0000));
+  __ add(zeros, zeros, Operand(16), LeaveCC, eq);
+  __ mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
+  // Top 8.
+  __ tst(scratch, Operand(0xff000000));
+  __ add(zeros, zeros, Operand(8), LeaveCC, eq);
+  __ mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
+  // Top 4.
+  __ tst(scratch, Operand(0xf0000000));
+  __ add(zeros, zeros, Operand(4), LeaveCC, eq);
+  __ mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
+  // Top 2.
+  __ tst(scratch, Operand(0xc0000000));
+  __ add(zeros, zeros, Operand(2), LeaveCC, eq);
+  __ mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
+  // Top bit.
+  __ tst(scratch, Operand(0x80000000u));
+  __ add(zeros, zeros, Operand(1), LeaveCC, eq);
+#endif
+}
+
+
+// Takes a Smi and converts to an IEEE 64 bit floating point value in two
+// registers.  The format is 1 sign bit, 11 exponent bits (biased 1023) and
+// 52 fraction bits (20 in the first word, 32 in the second).  Zeros is a
+// scratch register.  Destroys the source register.  No GC occurs during this
+// stub so you don't have to set up the frame.
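+// For example, the Smi 5 is 1.01b * 2^2, so the exponent word produced is
+// 0x40140000 (sign 0, biased exponent 1025, top fraction bits 01) and the
+// mantissa word is 0x00000000.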
+class ConvertToDoubleStub : public CodeStub {
+ public:
+  ConvertToDoubleStub(Register result_reg_1,
+                      Register result_reg_2,
+                      Register source_reg,
+                      Register scratch_reg)
+      : result1_(result_reg_1),
+        result2_(result_reg_2),
+        source_(source_reg),
+        zeros_(scratch_reg) { }
+
+ private:
+  Register result1_;
+  Register result2_;
+  Register source_;
+  Register zeros_;
+
+  // Minor key encoding in 16 bits.
+  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+  class OpBits: public BitField<Token::Value, 2, 14> {};
+
+  Major MajorKey() { return ConvertToDouble; }
+  int MinorKey() {
+    // Encode the parameters in a unique 16 bit value.
+    return  result1_.code() +
+           (result2_.code() << 4) +
+           (source_.code() << 8) +
+           (zeros_.code() << 12);
+  }
+
+  void Generate(MacroAssembler* masm);
+
+  const char* GetName() { return "ConvertToDoubleStub"; }
+
+#ifdef DEBUG
+  void Print() { PrintF("ConvertToDoubleStub\n"); }
+#endif
+};
+
+
+void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
+#ifndef BIG_ENDIAN_FLOATING_POINT
+  Register exponent = result1_;
+  Register mantissa = result2_;
+#else
+  Register exponent = result2_;
+  Register mantissa = result1_;
+#endif
+  Label not_special;
+  // Convert from Smi to integer.
+  __ mov(source_, Operand(source_, ASR, kSmiTagSize));
+  // Move sign bit from source to destination.  This works because the sign bit
+  // in the exponent word of the double has the same position and polarity as
+  // the 2's complement sign bit in a Smi.
+  ASSERT(HeapNumber::kSignMask == 0x80000000u);
+  __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
+  // Subtract from 0 if source was negative.
+  __ rsb(source_, source_, Operand(0), LeaveCC, ne);
+  __ cmp(source_, Operand(1));
+  __ b(gt, &not_special);
+
+  // We have -1, 0 or 1, which we treat specially.
+  __ cmp(source_, Operand(0));
+  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
+  static const uint32_t exponent_word_for_1 =
+      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
+  __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, ne);
+  // 1, 0 and -1 all have 0 for the second word.
+  __ mov(mantissa, Operand(0));
+  __ Ret();
+
+  __ bind(&not_special);
+  // Count leading zeros.  Uses result2 for a scratch register on pre-ARMv5.
+  // Gets the wrong answer for 0, but we already checked for that case above.
+  CountLeadingZeros(masm, source_, mantissa, zeros_);
+  // Compute exponent and or it into the exponent register.
+  // We use result2 as a scratch register here.
+  __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias));
+  __ orr(exponent,
+         exponent,
+         Operand(mantissa, LSL, HeapNumber::kExponentShift));
+  // Shift up the source chopping the top bit off.
+  __ add(zeros_, zeros_, Operand(1));
+  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
+  __ mov(source_, Operand(source_, LSL, zeros_));
+  // Compute lower part of fraction (last 12 bits).
+  __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
+  // And the top (top 20 bits).
+  __ orr(exponent,
+         exponent,
+         Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
+  __ Ret();
+}
+
+
+// This stub can convert a signed int32 to a heap number (double).  It does
+// not work for int32s that are in Smi range!  No GC occurs during this stub
+// so you don't have to set up the frame.
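+// For example, 0x40000000 (2^30, just outside Smi range) is written as the
+// double with exponent word 0x41d00000 and mantissa word 0x00000000.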
+class WriteInt32ToHeapNumberStub : public CodeStub {
+ public:
+  WriteInt32ToHeapNumberStub(Register the_int,
+                             Register the_heap_number,
+                             Register scratch)
+      : the_int_(the_int),
+        the_heap_number_(the_heap_number),
+        scratch_(scratch) { }
+
+ private:
+  Register the_int_;
+  Register the_heap_number_;
+  Register scratch_;
+
+  // Minor key encoding in 16 bits.
+  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+  class OpBits: public BitField<Token::Value, 2, 14> {};
+
+  Major MajorKey() { return WriteInt32ToHeapNumber; }
+  int MinorKey() {
+    // Encode the parameters in a unique 16 bit value.
+    return  the_int_.code() +
+           (the_heap_number_.code() << 4) +
+           (scratch_.code() << 8);
+  }
+
+  void Generate(MacroAssembler* masm);
+
+  const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
+
+#ifdef DEBUG
+  void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
+#endif
+};
+
+
+// See comment for class.
+void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
+  Label max_negative_int;
+  // the_int_ has the answer which is a signed int32 but not a Smi.
+  // We test for the special value that has a different exponent.  This test
+  // has the neat side effect of setting the flags according to the sign.
+  ASSERT(HeapNumber::kSignMask == 0x80000000u);
+  __ cmp(the_int_, Operand(0x80000000u));
+  __ b(eq, &max_negative_int);
+  // Set up the correct exponent in scratch_.  All non-Smi int32s have the same.
+  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
+  uint32_t non_smi_exponent =
+      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+  __ mov(scratch_, Operand(non_smi_exponent));
+  // Set the sign bit in scratch_ if the value was negative.
+  __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
+  // Subtract from 0 if the value was negative.
+  __ rsb(the_int_, the_int_, Operand(0), LeaveCC, cs);
+  // We should be masking the implicit first digit of the mantissa away here,
+  // but it just ends up combining harmlessly with the last digit of the
+  // exponent that happens to be 1.  The sign bit is 0 so we shift 10 to get
+  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
+  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
+  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+  __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
+  __ str(scratch_, FieldMemOperand(the_heap_number_,
+                                   HeapNumber::kExponentOffset));
+  __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
+  __ str(scratch_, FieldMemOperand(the_heap_number_,
+                                   HeapNumber::kMantissaOffset));
+  __ Ret();
+
+  __ bind(&max_negative_int);
+  // The max negative int32 is stored as a positive number in the mantissa of
+  // a double because it uses a sign bit instead of using two's complement.
+  // The actual mantissa bits stored are all 0 because the implicit most
+  // significant 1 bit is not stored.
+  non_smi_exponent += 1 << HeapNumber::kExponentShift;
+  __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
+  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
+  __ mov(ip, Operand(0));
+  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
+  __ Ret();
+}
+
+
+// Handle the case where the lhs and rhs are the same object.
+// Equality is almost reflexive (everything but NaN), so this is a test
+// for "identity and not NaN".
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+                                          Label* slow,
+                                          Condition cc) {
+  Label not_identical;
+  __ cmp(r0, Operand(r1));
+  __ b(ne, &not_identical);
+
+  Register exp_mask_reg = r5;
+  __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
+
+  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+  // so we do the second best thing - test it ourselves.
+  Label heap_number, return_equal;
+  // They are both equal and they are not both Smis so both of them are not
+  // Smis.  If it's not a heap number, then return equal.
+  if (cc == lt || cc == gt) {
+    __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
+    __ b(ge, slow);
+  } else {
+    __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+    __ b(eq, &heap_number);
+    // Comparing JS objects with <=, >= is complicated.
+    if (cc != eq) {
+      __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
+      __ b(ge, slow);
+    }
+  }
+  __ bind(&return_equal);
+  if (cc == lt) {
+    __ mov(r0, Operand(GREATER));  // Things aren't less than themselves.
+  } else if (cc == gt) {
+    __ mov(r0, Operand(LESS));     // Things aren't greater than themselves.
+  } else {
+    __ mov(r0, Operand(0));        // Things are <=, >=, ==, === themselves.
+  }
+  __ mov(pc, Operand(lr));  // Return.
+
+  // For less and greater we don't have to check for NaN since the result of
+  // x < x is false regardless.  For the others here is some code to check
+  // for NaN.
+  if (cc != lt && cc != gt) {
+    __ bind(&heap_number);
+    // It is a heap number, so return non-equal if it's NaN and equal if it's
+    // not NaN.
+    // The representation of NaN values has all exponent bits (52..62) set,
+    // and not all mantissa bits (0..51) clear.
+    // Read top bits of double representation (second word of value).
+    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+    // Test that exponent bits are all set.
+    __ and_(r3, r2, Operand(exp_mask_reg));
+    __ cmp(r3, Operand(exp_mask_reg));
+    __ b(ne, &return_equal);
+
+    // Shift out flag and all exponent bits, retaining only mantissa.
+    __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
+    // Or with all low-bits of mantissa.
+    __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+    __ orr(r0, r3, Operand(r2), SetCC);
+    // For equal we already have the right value in r0:  Return zero (equal)
+    // if all bits in mantissa are zero (it's an Infinity) and non-zero if not
+    // (it's a NaN).  For <= and >= we need to load r0 with the failing value
+    // if it's a NaN.
+    if (cc != eq) {
+      // All-zero means Infinity means equal.
+      __ mov(pc, Operand(lr), LeaveCC, eq);  // Return equal
+      if (cc == le) {
+        __ mov(r0, Operand(GREATER));  // NaN <= NaN should fail.
+      } else {
+        __ mov(r0, Operand(LESS));     // NaN >= NaN should fail.
+      }
+    }
+    __ mov(pc, Operand(lr));  // Return.
+  }
+  // No fall through here.
+
+  __ bind(&not_identical);
+}
+
+
+// See comment at call site.
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+                                    Label* rhs_not_nan,
+                                    Label* slow,
+                                    bool strict) {
+  Label lhs_is_smi;
+  __ tst(r0, Operand(kSmiTagMask));
+  __ b(eq, &lhs_is_smi);
+
+  // Rhs is a Smi.  Check whether the non-smi is a heap number.
+  __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+  if (strict) {
+    // If lhs was not a number and rhs was a Smi then strict equality cannot
+    // succeed.  Return non-equal (r0 is already not zero).
+    __ mov(pc, Operand(lr), LeaveCC, ne);  // Return.
+  } else {
+    // Smi compared non-strictly with a non-Smi non-heap-number.  Call
+    // the runtime.
+    __ b(ne, slow);
+  }
+
+  // Rhs is a smi, lhs is a number.
+  __ push(lr);
+  __ mov(r7, Operand(r1));
+  ConvertToDoubleStub stub1(r3, r2, r7, r6);
+  __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+  // r3 and r2 are rhs as double.
+  __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
+  __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+  // We now have both loaded as doubles but we can skip the lhs nan check
+  // since it's a Smi.
+  __ pop(lr);
+  __ jmp(rhs_not_nan);
+
+  __ bind(&lhs_is_smi);
+  // Lhs is a Smi.  Check whether the non-smi is a heap number.
+  __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
+  if (strict) {
+    // If lhs was not a number and rhs was a Smi then strict equality cannot
+    // succeed.  Return non-equal.
+    __ mov(r0, Operand(1), LeaveCC, ne);  // Non-zero indicates not equal.
+    __ mov(pc, Operand(lr), LeaveCC, ne);  // Return.
+  } else {
+    // Smi compared non-strictly with a non-Smi non-heap-number.  Call
+    // the runtime.
+    __ b(ne, slow);
+  }
+
+  // Lhs is a smi, rhs is a number.
+  // r0 is Smi and r1 is heap number.
+  __ push(lr);
+  __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
+  __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
+  __ mov(r7, Operand(r0));
+  ConvertToDoubleStub stub2(r1, r0, r7, r6);
+  __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+  __ pop(lr);
+  // Fall through to both_loaded_as_doubles.
+}
+
+
+void EmitNanCheck(MacroAssembler* masm, Label* rhs_not_nan, Condition cc) {
+  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
+  Register lhs_exponent = exp_first ? r0 : r1;
+  Register rhs_exponent = exp_first ? r2 : r3;
+  Register lhs_mantissa = exp_first ? r1 : r0;
+  Register rhs_mantissa = exp_first ? r3 : r2;
+  Label one_is_nan, neither_is_nan;
+
+  Register exp_mask_reg = r5;
+
+  __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
+  __ and_(r4, rhs_exponent, Operand(exp_mask_reg));
+  __ cmp(r4, Operand(exp_mask_reg));
+  __ b(ne, rhs_not_nan);
+  __ mov(r4,
+         Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
+         SetCC);
+  __ b(ne, &one_is_nan);
+  __ cmp(rhs_mantissa, Operand(0));
+  __ b(ne, &one_is_nan);
+
+  __ bind(rhs_not_nan);
+  __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
+  __ and_(r4, lhs_exponent, Operand(exp_mask_reg));
+  __ cmp(r4, Operand(exp_mask_reg));
+  __ b(ne, &neither_is_nan);
+  __ mov(r4,
+         Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
+         SetCC);
+  __ b(ne, &one_is_nan);
+  __ cmp(lhs_mantissa, Operand(0));
+  __ b(eq, &neither_is_nan);
+
+  __ bind(&one_is_nan);
+  // NaN comparisons always fail.
+  // Load whatever we need in r0 to make the comparison fail.
+  if (cc == lt || cc == le) {
+    __ mov(r0, Operand(GREATER));
+  } else {
+    __ mov(r0, Operand(LESS));
+  }
+  __ mov(pc, Operand(lr));  // Return.
+
+  __ bind(&neither_is_nan);
+}
+
+
+// See comment at call site.
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
+  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
+  Register lhs_exponent = exp_first ? r0 : r1;
+  Register rhs_exponent = exp_first ? r2 : r3;
+  Register lhs_mantissa = exp_first ? r1 : r0;
+  Register rhs_mantissa = exp_first ? r3 : r2;
+
+  // r0, r1, r2, r3 have the two doubles.  Neither is a NaN.
+  if (cc == eq) {
+    // Doubles are not equal unless they have the same bit pattern.
+    // Exception: 0 and -0.
+    __ cmp(lhs_mantissa, Operand(rhs_mantissa));
+    __ orr(r0, lhs_mantissa, Operand(rhs_mantissa), LeaveCC, ne);
+    // Return non-zero if the numbers are unequal.
+    __ mov(pc, Operand(lr), LeaveCC, ne);
+
+    __ sub(r0, lhs_exponent, Operand(rhs_exponent), SetCC);
+    // If exponents are equal then return 0.
+    __ mov(pc, Operand(lr), LeaveCC, eq);
+
+    // Exponents are unequal.  The only way we can return that the numbers
+    // are equal is if one is -0 and the other is 0.  We already dealt
+    // with the case where both are -0 or both are 0.
+    // We start by seeing if the mantissas (that are equal) or the bottom
+    // 31 bits of the rhs exponent are non-zero.  If so we return not
+    // equal.
+    __ orr(r4, rhs_mantissa, Operand(rhs_exponent, LSL, kSmiTagSize), SetCC);
+    __ mov(r0, Operand(r4), LeaveCC, ne);
+    __ mov(pc, Operand(lr), LeaveCC, ne);  // Return conditionally.
+    // Now they are equal if and only if the lhs exponent is zero in its
+    // low 31 bits.
+    __ mov(r0, Operand(lhs_exponent, LSL, kSmiTagSize));
+    __ mov(pc, Operand(lr));
+  } else {
+    // Call a native function to do a comparison between two non-NaNs.
+    // Call C routine that may not cause GC or other trouble.
+    __ mov(r5, Operand(ExternalReference::compare_doubles()));
+    __ Jump(r5);  // Tail call.
+  }
+}
+
+
+// See comment at call site.
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm) {
+    // If either operand is a JSObject or an oddball value, then they are
+    // not equal since their pointers are different.
+    // There is no test for undetectability in strict equality.
+    ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+    Label first_non_object;
+    // Get the type of the first operand into r2 and compare it with
+    // FIRST_JS_OBJECT_TYPE.
+    __ CompareObjectType(r0, r2, r2, FIRST_JS_OBJECT_TYPE);
+    __ b(lt, &first_non_object);
+
+    // Return non-zero (r0 is not zero)
+    Label return_not_equal;
+    __ bind(&return_not_equal);
+    __ mov(pc, Operand(lr));  // Return.
+
+    __ bind(&first_non_object);
+    // Check for oddballs: true, false, null, undefined.
+    __ cmp(r2, Operand(ODDBALL_TYPE));
+    __ b(eq, &return_not_equal);
+
+    __ CompareObjectType(r1, r3, r3, FIRST_JS_OBJECT_TYPE);
+    __ b(ge, &return_not_equal);
+
+    // Check for oddballs: true, false, null, undefined.
+    __ cmp(r3, Operand(ODDBALL_TYPE));
+    __ b(eq, &return_not_equal);
+}
+
+
+// See comment at call site.
+static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
+                                       Label* both_loaded_as_doubles,
+                                       Label* not_heap_numbers,
+                                       Label* slow) {
+  __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
+  __ b(ne, not_heap_numbers);
+  __ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE);
+  __ b(ne, slow);  // First was a heap number, second wasn't.  Go slow case.
+
+  // Both are heap numbers.  Load them up then jump to the code we have
+  // for that.
+  __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
+  __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
+  __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
+  __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+  __ jmp(both_loaded_as_doubles);
+}
+
+
+// Fast negative check for symbol-to-symbol equality.
+static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) {
+  // r2 is object type of r0.
+  __ tst(r2, Operand(kIsNotStringMask));
+  __ b(ne, slow);
+  __ tst(r2, Operand(kIsSymbolMask));
+  __ b(eq, slow);
+  __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
+  __ b(ge, slow);
+  __ tst(r3, Operand(kIsSymbolMask));
+  __ b(eq, slow);
+
+  // Both are symbols.  We already checked they weren't the same pointer
+  // so they are not equal.
+  __ mov(r0, Operand(1));   // Non-zero indicates not equal.
+  __ mov(pc, Operand(lr));  // Return.
+}
+
+
+// On entry r0 and r1 are the things to be compared.  On exit r0 is 0,
+// positive or negative to indicate the result of the comparison.
+void CompareStub::Generate(MacroAssembler* masm) {
+  Label slow;  // Call builtin.
+  Label not_smis, both_loaded_as_doubles, rhs_not_nan;
+
+  // NOTICE! This code is only reached after a smi-fast-case check, so
+  // it is certain that at least one operand isn't a smi.
+
+  // Handle the case where the objects are identical.  Either returns the answer
+  // or goes to slow.  Only falls through if the objects were not identical.
+  EmitIdenticalObjectComparison(masm, &slow, cc_);
+
+  // If either is a Smi (we know that not both are), then they can only
+  // be strictly equal if the other is a HeapNumber.
+  ASSERT_EQ(0, kSmiTag);
+  ASSERT_EQ(0, Smi::FromInt(0));
+  __ and_(r2, r0, Operand(r1));
+  __ tst(r2, Operand(kSmiTagMask));
+  __ b(ne, &not_smis);
+  // One operand is a smi.  EmitSmiNonsmiComparison generates code that can:
+  // 1) Return the answer.
+  // 2) Go to slow.
+  // 3) Fall through to both_loaded_as_doubles.
+  // 4) Jump to rhs_not_nan.
+  // In cases 3 and 4 we have found out we were dealing with a number-number
+  // comparison and the numbers have been loaded into r0, r1, r2, r3 as doubles.
+  EmitSmiNonsmiComparison(masm, &rhs_not_nan, &slow, strict_);
+
+  __ bind(&both_loaded_as_doubles);
+  // r0, r1, r2, r3 are the double representations of the left hand side
+  // and the right hand side.
+
+  // Checks for NaN in the doubles we have loaded.  Can return the answer or
+  // fall through if neither is a NaN.  Also binds rhs_not_nan.
+  EmitNanCheck(masm, &rhs_not_nan, cc_);
+
+  // Compares two doubles in r0, r1, r2, r3 that are not NaNs.  Returns the
+  // answer.  Never falls through.
+  EmitTwoNonNanDoubleComparison(masm, cc_);
+
+  __ bind(&not_smis);
+  // At this point we know we are dealing with two different objects,
+  // and neither of them is a Smi.  The objects are in r0 and r1.
+  if (strict_) {
+    // This returns non-equal for some object types, or falls through if it
+    // was not lucky.
+    EmitStrictTwoHeapObjectCompare(masm);
+  }
+
+  Label check_for_symbols;
+  // Check for heap-number-heap-number comparison.  Can jump to slow case,
+  // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
+  // that case.  If the inputs are not doubles then jumps to check_for_symbols.
+  // In this case r2 will contain the type of r0.
+  EmitCheckForTwoHeapNumbers(masm,
+                             &both_loaded_as_doubles,
+                             &check_for_symbols,
+                             &slow);
+
+  __ bind(&check_for_symbols);
+  if (cc_ == eq) {
+    // Either jumps to slow or returns the answer.  Assumes that r2 is the type
+    // of r0 on entry.
+    EmitCheckForSymbols(masm, &slow);
+  }
+
+  __ bind(&slow);
+  __ push(lr);
+  __ push(r1);
+  __ push(r0);
+  // Figure out which native to call and set up the arguments.
+  Builtins::JavaScript native;
+  int arg_count = 1;  // Not counting receiver.
+  if (cc_ == eq) {
+    native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+  } else {
+    native = Builtins::COMPARE;
+    int ncr;  // NaN compare result
+    if (cc_ == lt || cc_ == le) {
+      ncr = GREATER;
+    } else {
+      ASSERT(cc_ == gt || cc_ == ge);  // remaining cases
+      ncr = LESS;
+    }
+    arg_count++;
+    __ mov(r0, Operand(Smi::FromInt(ncr)));
+    __ push(r0);
+  }
+
+  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+  // tagged as a small integer.
+  __ mov(r0, Operand(arg_count));
+  __ InvokeBuiltin(native, CALL_JS);
+  __ cmp(r0, Operand(0));
+  __ pop(pc);
+}
+
+
+// Allocates a heap number or jumps to the label if the young space is full and
+// a scavenge is needed.
+static void AllocateHeapNumber(
+    MacroAssembler* masm,
+    Label* need_gc,       // Jump here if young space is full.
+    Register result,      // The tagged address of the new heap number.
+    Register scratch1,    // A scratch register.
+    Register scratch2) {  // Another scratch register.
+  // Allocate an object in the heap for the heap number and tag it as a heap
+  // object.
+  __ AllocateInNewSpace(HeapNumber::kSize / kPointerSize,
+                        result,
+                        scratch1,
+                        scratch2,
+                        need_gc,
+                        TAG_OBJECT);
+
+  // Get heap number map and store it in the allocated object.
+  __ LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
+  __ str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+}
+
+
+// We fall into this code if the operands were Smis, but the result was
+// not (e.g. overflow).  We branch into this code (to the not_smi label) if
+// the operands were not both Smis.  The operands are in r0 and r1.  In order
+// to call the C-implemented binary fp operation routines we need to end up
+// with the double precision floating point operands in r0 and r1 (for the
+// value in r1) and r2 and r3 (for the value in r0).
+static void HandleBinaryOpSlowCases(MacroAssembler* masm,
+                                    Label* not_smi,
+                                    const Builtins::JavaScript& builtin,
+                                    Token::Value operation,
+                                    OverwriteMode mode) {
+  Label slow, slow_pop_2_first, do_the_call;
+  Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
+  // Smi-smi case (overflow).
+  // Since both are Smis there is no heap number to overwrite, so allocate.
+  // The new heap number is in r5.  r6 and r7 are scratch.
+  AllocateHeapNumber(masm, &slow, r5, r6, r7);
+  // Write Smi from r0 to r3 and r2 in double format.  r6 is scratch.
+  __ mov(r7, Operand(r0));
+  ConvertToDoubleStub stub1(r3, r2, r7, r6);
+  __ push(lr);
+  __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+  // Write Smi from r1 to r1 and r0 in double format.  r6 is scratch.
+  __ mov(r7, Operand(r1));
+  ConvertToDoubleStub stub2(r1, r0, r7, r6);
+  __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+  __ pop(lr);
+  __ jmp(&do_the_call);  // Tail call.  No return.
+
+  // We jump to here if something goes wrong (one param is not a number of any
+  // sort or new-space allocation fails).
+  __ bind(&slow);
+  __ push(r1);
+  __ push(r0);
+  __ mov(r0, Operand(1));  // Set number of arguments.
+  __ InvokeBuiltin(builtin, JUMP_JS);  // Tail call.  No return.
+
+  // We branch here if at least one of r0 and r1 is not a Smi.
+  __ bind(not_smi);
+  if (mode == NO_OVERWRITE) {
+    // In the case where there is no chance of an overwritable float we may as
+    // well do the allocation immediately while r0 and r1 are untouched.
+    AllocateHeapNumber(masm, &slow, r5, r6, r7);
+  }
+
+  // Move r0 to a double in r2-r3.
+  __ tst(r0, Operand(kSmiTagMask));
+  __ b(eq, &r0_is_smi);  // It's a Smi so don't check it's a heap number.
+  __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+  __ b(ne, &slow);
+  if (mode == OVERWRITE_RIGHT) {
+    __ mov(r5, Operand(r0));  // Overwrite this heap number.
+  }
+  // Calling convention says that second double is in r2 and r3.
+  __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
+  __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
+  __ jmp(&finished_loading_r0);
+  __ bind(&r0_is_smi);
+  if (mode == OVERWRITE_RIGHT) {
+    // We can't overwrite a Smi so get address of new heap number into r5.
+    AllocateHeapNumber(masm, &slow, r5, r6, r7);
+  }
+  // Write Smi from r0 to r3 and r2 in double format.
+  __ mov(r7, Operand(r0));
+  ConvertToDoubleStub stub3(r3, r2, r7, r6);
+  __ push(lr);
+  __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
+  __ pop(lr);
+  __ bind(&finished_loading_r0);
+
+  // Move r1 to a double in r0-r1.
+  __ tst(r1, Operand(kSmiTagMask));
+  __ b(eq, &r1_is_smi);  // It's a Smi so don't check it's a heap number.
+  __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
+  __ b(ne, &slow);
+  if (mode == OVERWRITE_LEFT) {
+    __ mov(r5, Operand(r1));  // Overwrite this heap number.
+  }
+  // Calling convention says that first double is in r0 and r1.
+  __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
+  __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
+  __ jmp(&finished_loading_r1);
+  __ bind(&r1_is_smi);
+  if (mode == OVERWRITE_LEFT) {
+    // We can't overwrite a Smi so get address of new heap number into r5.
+    AllocateHeapNumber(masm, &slow, r5, r6, r7);
+  }
+  // Write Smi from r1 to r1 and r0 in double format.
+  __ mov(r7, Operand(r1));
+  ConvertToDoubleStub stub4(r1, r0, r7, r6);
+  __ push(lr);
+  __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
+  __ pop(lr);
+  __ bind(&finished_loading_r1);
+
+  __ bind(&do_the_call);
+  // r0: Left value (least significant part of mantissa).
+  // r1: Left value (sign, exponent, top of mantissa).
+  // r2: Right value (least significant part of mantissa).
+  // r3: Right value (sign, exponent, top of mantissa).
+  // r5: Address of heap number for result.
+  __ push(lr);   // For later.
+  __ push(r5);   // Address of heap number that is answer.
+  __ AlignStack(0);
+  // Call C routine that may not cause GC or other trouble.
+  __ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
+  __ Call(r5);
+  __ pop(r4);  // Address of heap number.
+  __ cmp(r4, Operand(Smi::FromInt(0)));
+  __ pop(r4, eq);  // Conditional pop instruction to get rid of alignment push.
+  // Store answer in the overwritable heap number.
+#if !defined(USE_ARM_EABI)
+  // Double returned in fp coprocessor register 0 and 1, encoded as register
+  // cr8.  Offsets must be divisible by 4 for coprocessor so we need to
+  // subtract the tag from r4.
+  __ sub(r5, r4, Operand(kHeapObjectTag));
+  __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset));
+#else
+  // Double returned in registers 0 and 1.
+  __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset));
+  __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + 4));
+#endif
+  __ mov(r0, Operand(r4));
+  // And we are done.
+  __ pop(pc);
+}
+
+
+// Tries to get a signed int32 out of a double precision floating point heap
+// number.  Rounds towards 0.  Fastest for doubles that are in the ranges
+// -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff.  This corresponds
+// almost to the range of signed int32 values that are not Smis.  Jumps to the
+// label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0
+// (excluding the endpoints).
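+// For example, the double 2^30 has biased exponent 1023 + 30 = 1053, so its
+// exponent word is 0x41d00000; this matches non_smi_exponent below and takes
+// the fast path straight to 'right_exponent'.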
+static void GetInt32(MacroAssembler* masm,
+                     Register source,
+                     Register dest,
+                     Register scratch,
+                     Register scratch2,
+                     Label* slow) {
+  Label right_exponent, done;
+  // Get exponent word.
+  __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
+  // Get exponent alone in scratch2.
+  __ and_(scratch2, scratch, Operand(HeapNumber::kExponentMask));
+  // Load dest with zero.  We use this either for the final shift or
+  // for the answer.
+  __ mov(dest, Operand(0));
+  // Check whether the exponent matches a 32 bit signed int that is not a Smi.
+  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).  This is
+  // the exponent that we are fastest at and also the highest exponent we can
+  // handle here.
+  const uint32_t non_smi_exponent =
+      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+  __ cmp(scratch2, Operand(non_smi_exponent));
+  // If we have a match of the int32-but-not-Smi exponent then skip some logic.
+  __ b(eq, &right_exponent);
+  // If the exponent is higher than that then go to slow case.  This catches
+  // numbers that don't fit in a signed int32, infinities and NaNs.
+  __ b(gt, slow);
+
+  // We know the exponent is smaller than 30 (biased).  If it is less than
+  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
+  // it rounds to zero.
+  const uint32_t zero_exponent =
+      (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
+  __ sub(scratch2, scratch2, Operand(zero_exponent), SetCC);
+  // Dest already has a Smi zero.
+  __ b(lt, &done);
+  // We have a shifted exponent between 0 and 30 in scratch2.
+  __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift));
+  // We now have the exponent in dest.  Subtract from 30 to get
+  // how much to shift down.
+  __ rsb(dest, dest, Operand(30));
+
+  __ bind(&right_exponent);
+  // Get the top bits of the mantissa.
+  __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
+  // Put back the implicit 1.
+  __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
+  // Shift up the mantissa bits to take up the space the exponent used to take.
+  // We just orred in the implicit bit so that took care of one and we want to
+  // leave the sign bit 0 so we subtract 2 bits from the shift distance.
+  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+  __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
+  // Put sign in zero flag.
+  __ tst(scratch, Operand(HeapNumber::kSignMask));
+  // Get the second half of the double.  For some exponents we don't actually
+  // need this because the bits get shifted out again, but it's probably slower
+  // to test than just to do it.
+  __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
+  // Shift down 22 bits to get the last 10 bits.
+  __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
+  // Move down according to the exponent.
+  __ mov(dest, Operand(scratch, LSR, dest));
+  // Fix sign if sign bit was set.
+  __ rsb(dest, dest, Operand(0), LeaveCC, ne);
+  __ bind(&done);
+}
+
+
+// For bitwise ops where the inputs are not both Smis we here try to determine
+// whether both inputs are either Smis or at least heap numbers that can be
+// represented by a 32 bit signed value.  We truncate towards zero as required
+// by the ES spec.  If this is the case we do the bitwise op and see if the
+// result is a Smi.  If so, great, otherwise we try to find a heap number to
+// write the answer into (either by allocating or by overwriting).
+// On entry the operands are in r0 and r1.  On exit the answer is in r0.
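+// For example, in (1073741824 | 0) the left operand (2^30) is outside Smi
+// range and arrives as a heap number; the result is also 2^30, which does not
+// fit in a Smi, so it has to be written back into a heap number.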
+void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
+  Label slow, result_not_a_smi;
+  Label r0_is_smi, r1_is_smi;
+  Label done_checking_r0, done_checking_r1;
+
+  __ tst(r1, Operand(kSmiTagMask));
+  __ b(eq, &r1_is_smi);  // It's a Smi so don't check it's a heap number.
+  __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
+  __ b(ne, &slow);
+  GetInt32(masm, r1, r3, r4, r5, &slow);
+  __ jmp(&done_checking_r1);
+  __ bind(&r1_is_smi);
+  __ mov(r3, Operand(r1, ASR, 1));
+  __ bind(&done_checking_r1);
+
+  __ tst(r0, Operand(kSmiTagMask));
+  __ b(eq, &r0_is_smi);  // It's a Smi so don't check it's a heap number.
+  __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+  __ b(ne, &slow);
+  GetInt32(masm, r0, r2, r4, r5, &slow);
+  __ jmp(&done_checking_r0);
+  __ bind(&r0_is_smi);
+  __ mov(r2, Operand(r0, ASR, 1));
+  __ bind(&done_checking_r0);
+
+  // r0 and r1: Original operands (Smi or heap numbers).
+  // r2 and r3: Signed int32 operands.
+  switch (op_) {
+    case Token::BIT_OR:  __ orr(r2, r2, Operand(r3)); break;
+    case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break;
+    case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break;
+    case Token::SAR:
+      // Use only the 5 least significant bits of the shift count.
+      __ and_(r2, r2, Operand(0x1f));
+      __ mov(r2, Operand(r3, ASR, r2));
+      break;
+    case Token::SHR:
+      // Use only the 5 least significant bits of the shift count.
+      __ and_(r2, r2, Operand(0x1f));
+      __ mov(r2, Operand(r3, LSR, r2), SetCC);
+      // SHR is special because it is required to produce a positive answer.
+      // The code below for writing into heap numbers isn't capable of writing
+      // the register as an unsigned int, so we go to the slow case if the
+      // result has the sign bit set.
+      __ b(mi, &slow);
+      break;
+    case Token::SHL:
+      // Use only the 5 least significant bits of the shift count.
+      __ and_(r2, r2, Operand(0x1f));
+      __ mov(r2, Operand(r3, LSL, r2));
+      break;
+    default: UNREACHABLE();
+  }
+  // Check that the *signed* result fits in a Smi: adding 0x40000000 sets the
+  // N flag exactly when the value lies outside the Smi range [-2^30, 2^30).
+  __ add(r3, r2, Operand(0x40000000), SetCC);
+  __ b(mi, &result_not_a_smi);
+  __ mov(r0, Operand(r2, LSL, kSmiTagSize));
+  __ Ret();
+
+  Label have_to_allocate, got_a_heap_number;
+  __ bind(&result_not_a_smi);
+  switch (mode_) {
+    case OVERWRITE_RIGHT: {
+      __ tst(r0, Operand(kSmiTagMask));
+      __ b(eq, &have_to_allocate);
+      __ mov(r5, Operand(r0));
+      break;
+    }
+    case OVERWRITE_LEFT: {
+      __ tst(r1, Operand(kSmiTagMask));
+      __ b(eq, &have_to_allocate);
+      __ mov(r5, Operand(r1));
+      break;
+    }
+    case NO_OVERWRITE: {
+      // Get a new heap number in r5.  r6 and r7 are scratch.
+      AllocateHeapNumber(masm, &slow, r5, r6, r7);
+    }
+    default: break;
+  }
+  __ bind(&got_a_heap_number);
+  // r2: Answer as signed int32.
+  // r5: Heap number to write answer into.
+
+  // Nothing can go wrong now, so move the heap number to r0, which is the
+  // result.
+  __ mov(r0, Operand(r5));
+
+  // Tail call that writes the int32 in r2 to the heap number in r0, using
+  // r3 as scratch.  r0 is preserved and returned.
+  WriteInt32ToHeapNumberStub stub(r2, r0, r3);
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+
+  if (mode_ != NO_OVERWRITE) {
+    __ bind(&have_to_allocate);
+    // Get a new heap number in r5.  r6 and r7 are scratch.
+    AllocateHeapNumber(masm, &slow, r5, r6, r7);
+    __ jmp(&got_a_heap_number);
+  }
+
+  // If all else failed then we go to the runtime system.
+  __ bind(&slow);
+  __ push(r1);  // Restore the operands to the stack for the builtin call.
+  __ push(r0);
+  __ mov(r0, Operand(1));  // 1 argument (not counting receiver).
+  switch (op_) {
+    case Token::BIT_OR:
+      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
+      break;
+    case Token::BIT_AND:
+      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
+      break;
+    case Token::BIT_XOR:
+      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
+      break;
+    case Token::SAR:
+      __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
+      break;
+    case Token::SHR:
+      __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
+      break;
+    case Token::SHL:
+      __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
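+
+// (Illustration: an input pair such as (1 << 30) | 0, where the left operand
+// arrives as a heap number, produces 2^30.  That is one past the largest Smi,
+// so the add-and-test above branches to result_not_a_smi and the answer is
+// written into a heap number instead.)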
+
+
+// Can we multiply by x with at most two shifts and an add?
+// This answers yes for all integers from 2 to 10.
+static bool IsEasyToMultiplyBy(int x) {
+  if (x < 2) return false;                          // Avoid special cases.
+  if (x > (Smi::kMaxValue + 1) >> 2) return false;  // Almost always overflows.
+  if (IsPowerOf2(x)) return true;                   // Simple shift.
+  if (PopCountLessThanEqual2(x)) return true;       // Shift and add and shift.
+  if (IsPowerOf2(x + 1)) return true;               // Patterns like 11111.
+  return false;
+}
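+
+// Illustrative cases: 10 (0b1010) has two bits set, so a shift, an add and a
+// shift suffice; 7 is 8 - 1, so a single rsb with a shifted operand does it;
+// 11 (0b1011) fails all of the tests above and is not considered easy.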
+
+
+// Can multiply by anything that IsEasyToMultiplyBy returns true for.
+// Source and destination may be the same register.  This routine does
+// not set carry and overflow the way a mul instruction would.
+static void MultiplyByKnownInt(MacroAssembler* masm,
+                               Register source,
+                               Register destination,
+                               int known_int) {
+  if (IsPowerOf2(known_int)) {
+    __ mov(destination, Operand(source, LSL, BitPosition(known_int)));
+  } else if (PopCountLessThanEqual2(known_int)) {
+    int first_bit = BitPosition(known_int);
+    int second_bit = BitPosition(known_int ^ (1 << first_bit));
+    __ add(destination, source, Operand(source, LSL, second_bit - first_bit));
+    if (first_bit != 0) {
+      __ mov(destination, Operand(destination, LSL, first_bit));
+    }
+  } else {
+    ASSERT(IsPowerOf2(known_int + 1));  // Patterns like 1111.
+    int the_bit = BitPosition(known_int + 1);
+    __ rsb(destination, source, Operand(source, LSL, the_bit));
+  }
+}
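+
+// Examples (assuming BitPosition returns the index of the lowest set bit):
+// for known_int == 10 (0b1010), first_bit == 1 and second_bit == 3, so the
+// add computes source * 5 and the final shift by first_bit gives source * 10;
+// for known_int == 7 the rsb computes (source << 3) - source == source * 7.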
+
+
+// This function (as opposed to MultiplyByKnownInt) takes the known int in a
+// register for the cases where it doesn't know a good trick, and may deliver
+// a result that needs shifting.
+static void MultiplyByKnownInt2(
+    MacroAssembler* masm,
+    Register result,
+    Register source,
+    Register known_int_register,   // Smi tagged.
+    int known_int,
+    int* required_shift) {  // Including the Smi tag shift.
+  switch (known_int) {
+    case 3:
+      __ add(result, source, Operand(source, LSL, 1));
+      *required_shift = 1;
+      break;
+    case 5:
+      __ add(result, source, Operand(source, LSL, 2));
+      *required_shift = 1;
+      break;
+    case 6:
+      __ add(result, source, Operand(source, LSL, 1));
+      *required_shift = 2;
+      break;
+    case 7:
+      __ rsb(result, source, Operand(source, LSL, 3));
+      *required_shift = 1;
+      break;
+    case 9:
+      __ add(result, source, Operand(source, LSL, 3));
+      *required_shift = 1;
+      break;
+    case 10:
+      __ add(result, source, Operand(source, LSL, 2));
+      *required_shift = 2;
+      break;
+    default:
+      ASSERT(!IsPowerOf2(known_int));  // That would be very inefficient.
+      __ mul(result, source, known_int_register);
+      *required_shift = 0;
+  }
+}
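+
+// Example: for known_int == 6 the add above computes source * 3 and sets
+// *required_shift to 2, so the caller's shift by two yields source * 12,
+// which is the Smi-tagged value of source * 6.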
+
+
+void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
+  // r1 : x
+  // r0 : y
+  // result : r0
+
+  // All ops need to know whether we are dealing with two Smis.  Set up r2 to
+  // tell us that.
+  __ orr(r2, r1, Operand(r0));  // r2 = x | y;
+
+  switch (op_) {
+    case Token::ADD: {
+      Label not_smi;
+      // Fast path.
+      ASSERT(kSmiTag == 0);  // Adjust code below.
+      __ tst(r2, Operand(kSmiTagMask));
+      __ b(ne, &not_smi);
+      __ add(r0, r1, Operand(r0), SetCC);  // Add y optimistically.
+      // Return if no overflow.
+      __ Ret(vc);
+      __ sub(r0, r0, Operand(r1));  // Revert optimistic add.
+
+      HandleBinaryOpSlowCases(masm,
+                              &not_smi,
+                              Builtins::ADD,
+                              Token::ADD,
+                              mode_);
+      break;
+    }
+
+    case Token::SUB: {
+      Label not_smi;
+      // Fast path.
+      ASSERT(kSmiTag == 0);  // Adjust code below.
+      __ tst(r2, Operand(kSmiTagMask));
+      __ b(ne, &not_smi);
+      __ sub(r0, r1, Operand(r0), SetCC);  // Subtract y optimistically.
+      // Return if no overflow.
+      __ Ret(vc);
+      __ sub(r0, r1, Operand(r0));  // Revert optimistic subtract.
+
+      HandleBinaryOpSlowCases(masm,
+                              &not_smi,
+                              Builtins::SUB,
+                              Token::SUB,
+                              mode_);
+      break;
+    }
+
+    case Token::MUL: {
+      Label not_smi, slow;
+      ASSERT(kSmiTag == 0);  // adjust code below
+      __ tst(r2, Operand(kSmiTagMask));
+      __ b(ne, &not_smi);
+      // Remove tag from one operand (but keep sign), so that result is Smi.
+      __ mov(ip, Operand(r0, ASR, kSmiTagSize));
+      // Do multiplication
+      __ smull(r3, r2, r1, ip);  // r3 = lower 32 bits of ip*r1.
+      // Go slow on overflows (overflow bit is not set).
+      __ mov(ip, Operand(r3, ASR, 31));
+      __ cmp(ip, Operand(r2));  // no overflow if higher 33 bits are identical
+      __ b(ne, &slow);
+      // Go slow on zero result to handle -0.
+      __ tst(r3, Operand(r3));
+      __ mov(r0, Operand(r3), LeaveCC, ne);
+      __ Ret(ne);
+      // We need -0 if we were multiplying a negative number by 0 to get 0.
+      // We know one of the operands was zero.
+      __ add(r2, r0, Operand(r1), SetCC);
+      __ mov(r0, Operand(Smi::FromInt(0)), LeaveCC, pl);
+      __ Ret(pl);  // Return Smi 0 if the non-zero one was positive.
+      // Slow case.  We fall through here if we multiplied a negative number
+      // by 0, because that would mean we should produce -0.
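+      // (Illustration: for 5 * 0 the Smi sum r0 + r1 is non-negative, so
+      // Smi 0 is returned; for -5 * 0 the sum is negative and we fall through
+      // to the slow case, which can produce the heap number -0.)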
+      __ bind(&slow);
+
+      HandleBinaryOpSlowCases(masm,
+                              &not_smi,
+                              Builtins::MUL,
+                              Token::MUL,
+                              mode_);
+      break;
+    }
+
+    case Token::DIV:
+    case Token::MOD: {
+      Label not_smi;
+      if (specialized_on_rhs_) {
+        Label smi_is_unsuitable;
+        __ BranchOnNotSmi(r1, &not_smi);
+        if (IsPowerOf2(constant_rhs_)) {
+          if (op_ == Token::MOD) {
+            __ and_(r0,
+                    r1,
+                    Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
+                    SetCC);
+            // We now have the answer, but if the input was negative we also
+            // have the sign bit.  Our work is done if the result is
+            // positive or zero:
+            __ Ret(pl);
+            // A mod of a negative left hand side must return a negative number.
+            // Unfortunately if the answer is 0 then we must return -0.  And we
+            // already optimistically trashed r0 so we may need to restore it.
+            __ eor(r0, r0, Operand(0x80000000u), SetCC);
+            // Next two instructions are conditional on the answer being -0.
+            __ mov(r0, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
+            __ b(eq, &smi_is_unsuitable);
+            // We need to subtract the dividend.  Eg. -3 % 4 == -3.
+            __ sub(r0, r0, Operand(Smi::FromInt(constant_rhs_)));
+          } else {
+            ASSERT(op_ == Token::DIV);
+            __ tst(r1,
+                   Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)));
+            __ b(ne, &smi_is_unsuitable);  // Go slow on negative or remainder.
+            int shift = 0;
+            int d = constant_rhs_;
+            while ((d & 1) == 0) {
+              d >>= 1;
+              shift++;
+            }
+            __ mov(r0, Operand(r1, LSR, shift));
+            __ bic(r0, r0, Operand(kSmiTagMask));
+          }
+        } else {
+          // Not a power of 2.
+          __ tst(r1, Operand(0x80000000u));
+          __ b(ne, &smi_is_unsuitable);
+          // Find a fixed point reciprocal of the divisor so we can divide by
+          // multiplying.
+          double divisor = 1.0 / constant_rhs_;
+          int shift = 32;
+          double scale = 4294967296.0;  // 1 << 32.
+          uint32_t mul;
+          // Maximise the precision of the fixed point reciprocal.
+          while (true) {
+            mul = static_cast<uint32_t>(scale * divisor);
+            if (mul >= 0x7fffffff) break;
+            scale *= 2.0;
+            shift++;
+          }
+          mul++;
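+          // Illustrative values: for constant_rhs_ == 3 the loop above ends
+          // with shift == 33 and mul == 0xAAAAAAAB (2^33 / 3, rounded up),
+          // the classic fixed point reciprocal for division by three.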
+          __ mov(r2, Operand(mul));
+          __ umull(r3, r2, r2, r1);
+          __ mov(r2, Operand(r2, LSR, shift - 31));
+          // r2 is r1 / rhs.  r2 is not Smi tagged.
+          // r0 is still the known rhs.  r0 is Smi tagged.
+          // r1 is still the unknown lhs.  r1 is Smi tagged.
+          int required_r4_shift = 0;  // Including the Smi tag shift of 1.
+          // r4 = r2 * r0.
+          MultiplyByKnownInt2(masm,
+                              r4,
+                              r2,
+                              r0,
+                              constant_rhs_,
+                              &required_r4_shift);
+          // r4 << required_r4_shift is now the Smi tagged rhs * (r1 / rhs).
+          if (op_ == Token::DIV) {
+            __ sub(r3, r1, Operand(r4, LSL, required_r4_shift), SetCC);
+            __ b(ne, &smi_is_unsuitable);  // There was a remainder.
+            __ mov(r0, Operand(r2, LSL, kSmiTagSize));
+          } else {
+            ASSERT(op_ == Token::MOD);
+            __ sub(r0, r1, Operand(r4, LSL, required_r4_shift));
+          }
+        }
+        __ Ret();
+        __ bind(&smi_is_unsuitable);
+      } else {
+        __ jmp(&not_smi);
+      }
+      HandleBinaryOpSlowCases(masm,
+                              &not_smi,
+                              op_ == Token::MOD ? Builtins::MOD : Builtins::DIV,
+                              op_,
+                              mode_);
+      break;
+    }
+
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SAR:
+    case Token::SHR:
+    case Token::SHL: {
+      Label slow;
+      ASSERT(kSmiTag == 0);  // adjust code below
+      __ tst(r2, Operand(kSmiTagMask));
+      __ b(ne, &slow);
+      switch (op_) {
+        case Token::BIT_OR:  __ orr(r0, r0, Operand(r1)); break;
+        case Token::BIT_AND: __ and_(r0, r0, Operand(r1)); break;
+        case Token::BIT_XOR: __ eor(r0, r0, Operand(r1)); break;
+        case Token::SAR:
+          // Remove tags from right operand.
+          __ mov(r2, Operand(r0, ASR, kSmiTagSize));  // y
+          // Use only the 5 least significant bits of the shift count.
+          __ and_(r2, r2, Operand(0x1f));
+          __ mov(r0, Operand(r1, ASR, r2));
+          // Smi tag result.
+          __ bic(r0, r0, Operand(kSmiTagMask));
+          break;
+        case Token::SHR:
+          // Remove tags from operands.  We can't do this on a 31 bit number
+          // because then the 0s get shifted into bit 30 instead of bit 31.
+          __ mov(r3, Operand(r1, ASR, kSmiTagSize));  // x
+          __ mov(r2, Operand(r0, ASR, kSmiTagSize));  // y
+          // Use only the 5 least significant bits of the shift count.
+          __ and_(r2, r2, Operand(0x1f));
+          __ mov(r3, Operand(r3, LSR, r2));
+          // Unsigned shift is not allowed to produce a negative number, so
+          // check the sign bit and the sign bit after Smi tagging.
+          __ tst(r3, Operand(0xc0000000));
+          __ b(ne, &slow);
+          // Smi tag result.
+          __ mov(r0, Operand(r3, LSL, kSmiTagSize));
+          break;
+        case Token::SHL:
+          // Remove tags from operands.
+          __ mov(r3, Operand(r1, ASR, kSmiTagSize));  // x
+          __ mov(r2, Operand(r0, ASR, kSmiTagSize));  // y
+          // Use only the 5 least significant bits of the shift count.
+          __ and_(r2, r2, Operand(0x1f));
+          __ mov(r3, Operand(r3, LSL, r2));
+          // Check that the signed result fits in a Smi.
+          __ add(r2, r3, Operand(0x40000000), SetCC);
+          __ b(mi, &slow);
+          __ mov(r0, Operand(r3, LSL, kSmiTagSize));
+          break;
+        default: UNREACHABLE();
+      }
+      __ Ret();
+      __ bind(&slow);
+      HandleNonSmiBitwiseOp(masm);
+      break;
+    }
+
+    default: UNREACHABLE();
+  }
+  // This code should be unreachable.
+  __ stop("Unreachable");
+}
+
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+  // Do tail-call to runtime routine.  Runtime routines expect at least one
+  // argument, so give it a Smi.
+  __ mov(r0, Operand(Smi::FromInt(0)));
+  __ push(r0);
+  __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1, 1);
+
+  __ StubReturn(1);
+}
+
+
+void UnarySubStub::Generate(MacroAssembler* masm) {
+  Label undo;
+  Label slow;
+  Label not_smi;
+
+  // Enter runtime system if the value is not a smi.
+  __ tst(r0, Operand(kSmiTagMask));
+  __ b(ne, &not_smi);
+
+  // Enter runtime system if the value of the expression is zero
+  // to make sure that we switch between 0 and -0.
+  __ cmp(r0, Operand(0));
+  __ b(eq, &slow);
+
+  // The value of the expression is a smi that is not zero.  Try
+  // optimistic subtraction '0 - value'.
+  __ rsb(r1, r0, Operand(0), SetCC);
+  __ b(vs, &slow);
+
+  __ mov(r0, Operand(r1));  // Set r0 to result.
+  __ StubReturn(1);
+
+  // Enter runtime system.
+  __ bind(&slow);
+  __ push(r0);
+  __ mov(r0, Operand(0));  // Set number of arguments.
+  __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
+
+  __ bind(&not_smi);
+  __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
+  __ b(ne, &slow);
+  // r0 is a heap number.  Get a new heap number in r1.
+  if (overwrite_) {
+    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+    __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
+    __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+  } else {
+    AllocateHeapNumber(masm, &slow, r1, r2, r3);
+    __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+    __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
+    __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
+    __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
+    __ mov(r0, Operand(r1));
+  }
+  __ StubReturn(1);
+}
+
+
+int CEntryStub::MinorKey() {
+  ASSERT(result_size_ <= 2);
+  // Result returned in r0 or r0+r1 by default.
+  return 0;
+}
+
+
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+  // r0 holds the exception.
+
+  // Adjust this code if not the case.
+  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
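+  // (From the offsets asserted here and below, a stack handler record appears
+  // to be laid out as four words: next handler, state, fp and pc.)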
+
+  // Drop the sp to the top of the handler.
+  __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+  __ ldr(sp, MemOperand(r3));
+
+  // Restore the next handler and frame pointer, discard handler state.
+  ASSERT(StackHandlerConstants::kNextOffset == 0);
+  __ pop(r2);
+  __ str(r2, MemOperand(r3));
+  ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+  __ ldm(ia_w, sp, r3.bit() | fp.bit());  // r3: discarded state.
+
+  // Before returning we restore the context from the frame pointer if
+  // not NULL.  The frame pointer is NULL in the exception handler of a
+  // JS entry frame.
+  __ cmp(fp, Operand(0));
+  // Set cp to NULL if fp is NULL.
+  __ mov(cp, Operand(0), LeaveCC, eq);
+  // Restore cp otherwise.
+  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
+#ifdef DEBUG
+  if (FLAG_debug_code) {
+    __ mov(lr, Operand(pc));
+  }
+#endif
+  ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+  __ pop(pc);
+}
+
+
+void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
+                                          UncatchableExceptionType type) {
+  // Adjust this code if not the case.
+  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+  // Drop sp to the top stack handler.
+  __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+  __ ldr(sp, MemOperand(r3));
+
+  // Unwind the handlers until the ENTRY handler is found.
+  Label loop, done;
+  __ bind(&loop);
+  // Load the type of the current stack handler.
+  const int kStateOffset = StackHandlerConstants::kStateOffset;
+  __ ldr(r2, MemOperand(sp, kStateOffset));
+  __ cmp(r2, Operand(StackHandler::ENTRY));
+  __ b(eq, &done);
+  // Fetch the next handler in the list.
+  const int kNextOffset = StackHandlerConstants::kNextOffset;
+  __ ldr(sp, MemOperand(sp, kNextOffset));
+  __ jmp(&loop);
+  __ bind(&done);
+
+  // Set the top handler address to the next handler past the current ENTRY
+  // handler.
+  ASSERT(StackHandlerConstants::kNextOffset == 0);
+  __ pop(r2);
+  __ str(r2, MemOperand(r3));
+
+  if (type == OUT_OF_MEMORY) {
+    // Set external caught exception to false.
+    ExternalReference external_caught(Top::k_external_caught_exception_address);
+    __ mov(r0, Operand(false));
+    __ mov(r2, Operand(external_caught));
+    __ str(r0, MemOperand(r2));
+
+    // Set pending exception and r0 to out of memory exception.
+    Failure* out_of_memory = Failure::OutOfMemoryException();
+    __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+    __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
+    __ str(r0, MemOperand(r2));
+  }
+
+  // Stack layout at this point. See also StackHandlerConstants.
+  // sp ->   state (ENTRY)
+  //         fp
+  //         lr
+
+  // Discard handler state (r2 is not used) and restore frame pointer.
+  ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+  __ ldm(ia_w, sp, r2.bit() | fp.bit());  // r2: discarded state.
+  // Before returning we restore the context from the frame pointer if
+  // not NULL.  The frame pointer is NULL in the exception handler of a
+  // JS entry frame.
+  __ cmp(fp, Operand(0));
+  // Set cp to NULL if fp is NULL.
+  __ mov(cp, Operand(0), LeaveCC, eq);
+  // Restore cp otherwise.
+  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
+#ifdef DEBUG
+  if (FLAG_debug_code) {
+    __ mov(lr, Operand(pc));
+  }
+#endif
+  ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+  __ pop(pc);
+}
+
+
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+                              Label* throw_normal_exception,
+                              Label* throw_termination_exception,
+                              Label* throw_out_of_memory_exception,
+                              StackFrame::Type frame_type,
+                              bool do_gc,
+                              bool always_allocate) {
+  // r0: result parameter for PerformGC, if any
+  // r4: number of arguments including receiver  (C callee-saved)
+  // r5: pointer to builtin function  (C callee-saved)
+  // r6: pointer to the first argument (C callee-saved)
+
+  if (do_gc) {
+    // Passing r0.
+    ExternalReference gc_reference = ExternalReference::perform_gc_function();
+    __ Call(gc_reference.address(), RelocInfo::RUNTIME_ENTRY);
+  }
+
+  ExternalReference scope_depth =
+      ExternalReference::heap_always_allocate_scope_depth();
+  if (always_allocate) {
+    __ mov(r0, Operand(scope_depth));
+    __ ldr(r1, MemOperand(r0));
+    __ add(r1, r1, Operand(1));
+    __ str(r1, MemOperand(r0));
+  }
+
+  // Call C built-in.
+  // r0 = argc, r1 = argv
+  __ mov(r0, Operand(r4));
+  __ mov(r1, Operand(r6));
+
+  // TODO(1242173): To let the GC traverse the return address of the exit
+  // frames, we need to know where the return address is. Right now,
+  // we push it on the stack to be able to find it again, but we never
+  // restore from it in case of changes, which makes it impossible to
+  // support moving the C entry code stub. This should be fixed, but currently
+  // this is OK because the CEntryStub gets generated so early in the V8 boot
+  // sequence that it never moves.
+  masm->add(lr, pc, Operand(4));  // compute return address: (pc + 8) + 4
+  masm->push(lr);
+  masm->Jump(r5);
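+  // (On ARM, reading pc yields the address of the current instruction plus 8,
+  // so the lr computed above points just past the Jump, which is where the
+  // called builtin returns to.)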
+
+  if (always_allocate) {
+    // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
+    // though (contain the result).
+    __ mov(r2, Operand(scope_depth));
+    __ ldr(r3, MemOperand(r2));
+    __ sub(r3, r3, Operand(1));
+    __ str(r3, MemOperand(r2));
+  }
+
+  // Check for failure result.
+  Label failure_returned;
+  ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+  // Lower 2 bits of r2 are 0 iff r0 has failure tag.
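+  // (The assert above means the failure tag fills the tag mask, so adding 1
+  // clears the masked bits exactly when r0 carries the failure tag; e.g. with
+  // a two-bit tag of 0b11, a failure pointer ends in ...11 and r0 + 1 ends in
+  // ...00.)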
+  __ add(r2, r0, Operand(1));
+  __ tst(r2, Operand(kFailureTagMask));
+  __ b(eq, &failure_returned);
+
+  // Exit C frame and return.
+  // r0:r1: result
+  // sp: stack pointer
+  // fp: frame pointer
+  __ LeaveExitFrame(frame_type);
+
+  // Check if we should retry or throw an exception.
+  Label retry;
+  __ bind(&failure_returned);
+  ASSERT(Failure::RETRY_AFTER_GC == 0);
+  __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
+  __ b(eq, &retry);
+
+  // Special handling of out of memory exceptions.
+  Failure* out_of_memory = Failure::OutOfMemoryException();
+  __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+  __ b(eq, throw_out_of_memory_exception);
+
+  // Retrieve the pending exception and clear the variable.
+  __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
+  __ ldr(r3, MemOperand(ip));
+  __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
+  __ ldr(r0, MemOperand(ip));
+  __ str(r3, MemOperand(ip));
+
+  // Special handling of termination exceptions which are uncatchable
+  // by javascript code.
+  __ cmp(r0, Operand(Factory::termination_exception()));
+  __ b(eq, throw_termination_exception);
+
+  // Handle normal exception.
+  __ jmp(throw_normal_exception);
+
+  __ bind(&retry);  // pass last failure (r0) as parameter (r0) when retrying
+}
+
+
+void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
+  // Called from JavaScript; parameters are on stack as if calling JS function
+  // r0: number of arguments including receiver
+  // r1: pointer to builtin function
+  // fp: frame pointer  (restored after C call)
+  // sp: stack pointer  (restored as callee's sp after C call)
+  // cp: current context  (C callee-saved)
+
+  // NOTE: Invocations of builtins may return failure objects
+  // instead of a proper result. The builtin entry handles
+  // this by performing a garbage collection and retrying the
+  // builtin once.
+
+  StackFrame::Type frame_type = is_debug_break
+      ? StackFrame::EXIT_DEBUG
+      : StackFrame::EXIT;
+
+  // Enter the exit frame that transitions from JavaScript to C++.
+  __ EnterExitFrame(frame_type);
+
+  // r4: number of arguments (C callee-saved)
+  // r5: pointer to builtin function (C callee-saved)
+  // r6: pointer to first argument (C callee-saved)
+
+  Label throw_normal_exception;
+  Label throw_termination_exception;
+  Label throw_out_of_memory_exception;
+
+  // Call into the runtime system.
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_termination_exception,
+               &throw_out_of_memory_exception,
+               frame_type,
+               false,
+               false);
+
+  // Do space-specific GC and retry runtime call.
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_termination_exception,
+               &throw_out_of_memory_exception,
+               frame_type,
+               true,
+               false);
+
+  // Do full GC and retry runtime call one final time.
+  Failure* failure = Failure::InternalError();
+  __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_termination_exception,
+               &throw_out_of_memory_exception,
+               frame_type,
+               true,
+               true);
+
+  __ bind(&throw_out_of_memory_exception);
+  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+
+  __ bind(&throw_termination_exception);
+  GenerateThrowUncatchable(masm, TERMINATION);
+
+  __ bind(&throw_normal_exception);
+  GenerateThrowTOS(masm);
+}
+
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+  // r0: code entry
+  // r1: function
+  // r2: receiver
+  // r3: argc
+  // [sp+0]: argv
+
+  Label invoke, exit;
+
+  // Called from C, so do not pop argc and args on exit (preserve sp)
+  // No need to save register-passed args
+  // Save callee-saved registers (incl. cp and fp), sp, and lr
+  __ stm(db_w, sp, kCalleeSaved | lr.bit());
+
+  // Get address of argv, see stm above.
+  // r0: code entry
+  // r1: function
+  // r2: receiver
+  // r3: argc
+  __ add(r4, sp, Operand((kNumCalleeSaved + 1)*kPointerSize));
+  __ ldr(r4, MemOperand(r4));  // argv
+
+  // Push a frame with special values setup to mark it as an entry frame.
+  // r0: code entry
+  // r1: function
+  // r2: receiver
+  // r3: argc
+  // r4: argv
+  __ mov(r8, Operand(-1));  // Push a bad frame pointer to fail if it is used.
+  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+  __ mov(r7, Operand(Smi::FromInt(marker)));
+  __ mov(r6, Operand(Smi::FromInt(marker)));
+  __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address)));
+  __ ldr(r5, MemOperand(r5));
+  __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | r8.bit());
+
+  // Setup frame pointer for the frame to be pushed.
+  __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
+
+  // Call a faked try-block that does the invoke.
+  __ bl(&invoke);
+
+  // Caught exception: Store result (exception) in the pending
+  // exception field in the JSEnv and return a failure sentinel.
+  // Coming in here the fp will be invalid because the PushTryHandler below
+  // sets it to 0 to signal the existence of the JSEntry frame.
+  __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
+  __ str(r0, MemOperand(ip));
+  __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
+  __ b(&exit);
+
+  // Invoke: Link this frame into the handler chain.
+  __ bind(&invoke);
+  // Must preserve r0-r4, r5-r7 are available.
+  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+  // If an exception not caught by another handler occurs, this handler
+  // returns control to the code after the bl(&invoke) above, which
+  // restores all kCalleeSaved registers (including cp and fp) to their
+  // saved values before returning a failure to C.
+
+  // Clear any pending exceptions.
+  __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
+  __ ldr(r5, MemOperand(ip));
+  __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
+  __ str(r5, MemOperand(ip));
+
+  // Invoke the function by calling through JS entry trampoline builtin.
+  // Notice that we cannot store a reference to the trampoline code directly in
+  // this stub, because runtime stubs are not traversed when doing GC.
+
+  // Expected registers by Builtins::JSEntryTrampoline
+  // r0: code entry
+  // r1: function
+  // r2: receiver
+  // r3: argc
+  // r4: argv
+  if (is_construct) {
+    ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
+    __ mov(ip, Operand(construct_entry));
+  } else {
+    ExternalReference entry(Builtins::JSEntryTrampoline);
+    __ mov(ip, Operand(entry));
+  }
+  __ ldr(ip, MemOperand(ip));  // deref address
+
+  // Branch and link to JSEntryTrampoline.  We don't use the double underscore
+  // macro for the add instruction because we don't want the coverage tool
+  // inserting instructions here after we read the pc.
+  __ mov(lr, Operand(pc));
+  masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  // Unlink this frame from the handler chain. When reading the
+  // address of the next handler, there is no need to use the address
+  // displacement since the current stack pointer (sp) points directly
+  // to the stack handler.
+  __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset));
+  __ mov(ip, Operand(ExternalReference(Top::k_handler_address)));
+  __ str(r3, MemOperand(ip));
+  // No need to restore registers
+  __ add(sp, sp, Operand(StackHandlerConstants::kSize));
+
+  __ bind(&exit);  // r0 holds result
+  // Restore the top frame descriptors from the stack.
+  __ pop(r3);
+  __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
+  __ str(r3, MemOperand(ip));
+
+  // Reset the stack to the callee saved registers.
+  __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
+
+  // Restore callee-saved registers and return.
+#ifdef DEBUG
+  if (FLAG_debug_code) {
+    __ mov(lr, Operand(pc));
+  }
+#endif
+  __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
+}
+
+
+// This stub performs an instanceof, calling the builtin function if
+// necessary.  Uses r0 for the object and r1 for the function that it may
+// be an instance of (these are fetched from the stack).
+void InstanceofStub::Generate(MacroAssembler* masm) {
+  // Get the object - slow case for smis (we may need to throw an exception
+  // depending on the rhs).
+  Label slow, loop, is_instance, is_not_instance;
+  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
+  __ BranchOnSmi(r0, &slow);
+
+  // Check that the left hand is a JS object and put map in r3.
+  __ CompareObjectType(r0, r3, r2, FIRST_JS_OBJECT_TYPE);
+  __ b(lt, &slow);
+  __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
+  __ b(gt, &slow);
+
+  // Get the prototype of the function (r4 is result, r2 is scratch).
+  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
+  __ TryGetFunctionPrototype(r1, r4, r2, &slow);
+
+  // Check that the function prototype is a JS object.
+  __ BranchOnSmi(r4, &slow);
+  __ CompareObjectType(r4, r5, r5, FIRST_JS_OBJECT_TYPE);
+  __ b(lt, &slow);
+  __ cmp(r5, Operand(LAST_JS_OBJECT_TYPE));
+  __ b(gt, &slow);
+
+  // Register mapping: r3 is object map and r4 is function prototype.
+  // Get prototype of object into r2.
+  __ ldr(r2, FieldMemOperand(r3, Map::kPrototypeOffset));
+
+  // Loop through the prototype chain looking for the function prototype.
+  __ bind(&loop);
+  __ cmp(r2, Operand(r4));
+  __ b(eq, &is_instance);
+  __ LoadRoot(ip, Heap::kNullValueRootIndex);
+  __ cmp(r2, ip);
+  __ b(eq, &is_not_instance);
+  __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
+  __ jmp(&loop);
+
+  __ bind(&is_instance);
+  __ mov(r0, Operand(Smi::FromInt(0)));
+  __ pop();
+  __ pop();
+  __ mov(pc, Operand(lr));  // Return.
+
+  __ bind(&is_not_instance);
+  __ mov(r0, Operand(Smi::FromInt(1)));
+  __ pop();
+  __ pop();
+  __ mov(pc, Operand(lr));  // Return.
+
+  // Slow-case.  Tail call builtin.
+  __ bind(&slow);
+  __ mov(r0, Operand(1));  // Arg count without receiver.
+  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
+}
+
+
+void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
+  // Check if the calling frame is an arguments adaptor frame.
+  Label adaptor;
+  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
+  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ b(eq, &adaptor);
+
+  // Nothing to do: The formal number of parameters has already been
+  // passed in register r0 by the calling function. Just return it.
+  __ Jump(lr);
+
+  // Arguments adaptor case: Read the arguments length from the
+  // adaptor frame and return it.
+  __ bind(&adaptor);
+  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ Jump(lr);
+}
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+  // The displacement is the offset of the last parameter (if any)
+  // relative to the frame pointer.
+  static const int kDisplacement =
+      StandardFrameConstants::kCallerSPOffset - kPointerSize;
+
+  // Check that the key is a smi.
+  Label slow;
+  __ BranchOnNotSmi(r1, &slow);
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label adaptor;
+  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
+  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ b(eq, &adaptor);
+
+  // Check index against formal parameters count limit passed in
+  // through register r0. Use unsigned comparison to get the negative
+  // check for free.
+  __ cmp(r1, r0);
+  __ b(cs, &slow);
+
+  // Read the argument from the stack and return it.
+  __ sub(r3, r0, r1);
+  __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ ldr(r0, MemOperand(r3, kDisplacement));
+  __ Jump(lr);
+
+  // Arguments adaptor case: Check index against actual arguments
+  // limit found in the arguments adaptor frame. Use unsigned
+  // comparison to get negative check for free.
+  __ bind(&adaptor);
+  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ cmp(r1, r0);
+  __ b(cs, &slow);
+
+  // Read the argument from the adaptor frame and return it.
+  __ sub(r3, r0, r1);
+  __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ ldr(r0, MemOperand(r3, kDisplacement));
+  __ Jump(lr);
+
+  // Slow-case: Handle non-smi or out-of-bounds access to arguments
+  // by calling the runtime system.
+  __ bind(&slow);
+  __ push(r1);
+  __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1, 1);
+}
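+
+// Illustrative arithmetic for the fast paths above (assuming 4-byte pointers
+// and a one-bit Smi tag): r3 holds the Smi-tagged difference argc - key, the
+// shift by kPointerSizeLog2 - kSmiTagSize rescales that Smi to bytes, and
+// adding kDisplacement makes the load address
+// fp + kCallerSPOffset + (argc - 1 - key) * kPointerSize (or the same offset
+// from the adaptor frame pointer in the adaptor case).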
+
+
+void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+  // Check if the calling frame is an arguments adaptor frame.
+  Label runtime;
+  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
+  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ b(ne, &runtime);
+
+  // Patch the arguments.length and the parameters pointer.
+  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ str(r0, MemOperand(sp, 0 * kPointerSize));
+  __ add(r3, r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
+  __ str(r3, MemOperand(sp, 1 * kPointerSize));
+
+  // Do the runtime call to allocate the arguments object.
+  __ bind(&runtime);
+  __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+  Label slow;
+  // Get the function to call from the stack.
+  // function, receiver [, arguments]
+  __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
+
+  // Check that the function is really a JavaScript function.
+  // r1: pushed function (to be verified)
+  __ BranchOnSmi(r1, &slow);
+  // Get the map of the function object.
+  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+  __ b(ne, &slow);
+
+  // Fast-case: Invoke the function now.
+  // r1: pushed function
+  ParameterCount actual(argc_);
+  __ InvokeFunction(r1, actual, JUMP_FUNCTION);
+
+  // Slow-case: Non-function called.
+  __ bind(&slow);
+  __ mov(r0, Operand(argc_));  // Setup the number of arguments.
+  __ mov(r2, Operand(0));
+  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
+  __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)),
+          RelocInfo::CODE_TARGET);
+}
+
+
+int CompareStub::MinorKey() {
+  // Encode the two parameters in a unique 16 bit value.
+  ASSERT(static_cast<unsigned>(cc_) >> 28 < (1 << 15));
+  return (static_cast<unsigned>(cc_) >> 27) | (strict_ ? 1 : 0);
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
new file mode 100644
index 0000000..1eb0932
--- /dev/null
+++ b/src/arm/codegen-arm.h
@@ -0,0 +1,506 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_CODEGEN_ARM_H_
+#define V8_ARM_CODEGEN_ARM_H_
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations
+class DeferredCode;
+class RegisterAllocator;
+class RegisterFile;
+
+enum InitState { CONST_INIT, NOT_CONST_INIT };
+enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+
+
+// -------------------------------------------------------------------------
+// Reference support
+
+// A reference is a C++ stack-allocated object that keeps an ECMA
+// reference on the execution stack while in scope. For variables
+// the reference is empty, indicating that it isn't necessary to
+// store state on the stack for keeping track of references to those.
+// For properties, we keep either one (named) or two (indexed) values
+// on the execution stack to represent the reference.
+
+class Reference BASE_EMBEDDED {
+ public:
+  // The values of the types are important; see size().
+  enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
+  Reference(CodeGenerator* cgen, Expression* expression);
+  ~Reference();
+
+  Expression* expression() const { return expression_; }
+  Type type() const { return type_; }
+  void set_type(Type value) {
+    ASSERT(type_ == ILLEGAL);
+    type_ = value;
+  }
+
+  // The size the reference takes up on the stack.
+  int size() const { return (type_ == ILLEGAL) ? 0 : type_; }
+
+  bool is_illegal() const { return type_ == ILLEGAL; }
+  bool is_slot() const { return type_ == SLOT; }
+  bool is_property() const { return type_ == NAMED || type_ == KEYED; }
+
+  // Return the name.  Only valid for named property references.
+  Handle<String> GetName();
+
+  // Generate code to push the value of the reference on top of the
+  // expression stack.  The reference is expected to be already on top of
+  // the expression stack, and it is left in place with its value above it.
+  void GetValue(TypeofState typeof_state);
+
+  // Generate code to push the value of a reference on top of the expression
+  // stack and then spill the stack frame.  This function is used temporarily
+  // while the code generator is being transformed.
+  inline void GetValueAndSpill(TypeofState typeof_state);
+
+  // Generate code to store the value on top of the expression stack in the
+  // reference.  The reference is expected to be immediately below the value
+  // on the expression stack.  The stored value is left in place (with the
+  // reference intact below it) to support chained assignments.
+  void SetValue(InitState init_state);
+
+ private:
+  CodeGenerator* cgen_;
+  Expression* expression_;
+  Type type_;
+};
+
+
+// -------------------------------------------------------------------------
+// Code generation state
+
+// The state is passed down the AST by the code generator (and back up, in
+// the form of the state of the label pair).  It is threaded through the
+// call stack.  Constructing a state implicitly pushes it on the owning code
+// generator's stack of states, and destroying one implicitly pops it.
+
+class CodeGenState BASE_EMBEDDED {
+ public:
+  // Create an initial code generator state.  Destroying the initial state
+  // leaves the code generator with a NULL state.
+  explicit CodeGenState(CodeGenerator* owner);
+
+  // Create a code generator state based on a code generator's current
+  // state.  The new state has its own typeof state and pair of branch
+  // labels.
+  CodeGenState(CodeGenerator* owner,
+               TypeofState typeof_state,
+               JumpTarget* true_target,
+               JumpTarget* false_target);
+
+  // Destroy a code generator state and restore the owning code generator's
+  // previous state.
+  ~CodeGenState();
+
+  TypeofState typeof_state() const { return typeof_state_; }
+  JumpTarget* true_target() const { return true_target_; }
+  JumpTarget* false_target() const { return false_target_; }
+
+ private:
+  CodeGenerator* owner_;
+  TypeofState typeof_state_;
+  JumpTarget* true_target_;
+  JumpTarget* false_target_;
+  CodeGenState* previous_;
+};
+
+
+// -------------------------------------------------------------------------
+// CodeGenerator
+
+class CodeGenerator: public AstVisitor {
+ public:
+  // Takes a function literal, generates code for it. This function should only
+  // be called by compiler.cc.
+  static Handle<Code> MakeCode(FunctionLiteral* fun,
+                               Handle<Script> script,
+                               bool is_eval);
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  static bool ShouldGenerateLog(Expression* type);
+#endif
+
+  static void SetFunctionInfo(Handle<JSFunction> fun,
+                              FunctionLiteral* lit,
+                              bool is_toplevel,
+                              Handle<Script> script);
+
+  // Accessors
+  MacroAssembler* masm() { return masm_; }
+
+  VirtualFrame* frame() const { return frame_; }
+
+  bool has_valid_frame() const { return frame_ != NULL; }
+
+  // Set the virtual frame to be new_frame, with non-frame register
+  // reference counts given by non_frame_registers.  The non-frame
+  // register reference counts of the old frame are returned in
+  // non_frame_registers.
+  void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
+
+  void DeleteFrame();
+
+  RegisterAllocator* allocator() const { return allocator_; }
+
+  CodeGenState* state() { return state_; }
+  void set_state(CodeGenState* state) { state_ = state; }
+
+  void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
+
+  static const int kUnknownIntValue = -1;
+
+  // Number of instructions used for the JS return sequence. The constant is
+  // used by the debugger to patch the JS return sequence.
+  static const int kJSReturnSequenceLength = 4;
+
+ private:
+  // Construction/Destruction
+  CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
+  virtual ~CodeGenerator() { delete masm_; }
+
+  // Accessors
+  Scope* scope() const { return scope_; }
+
+  // Generating deferred code.
+  void ProcessDeferred();
+
+  bool is_eval() { return is_eval_; }
+
+  // State
+  bool has_cc() const  { return cc_reg_ != al; }
+  TypeofState typeof_state() const { return state_->typeof_state(); }
+  JumpTarget* true_target() const  { return state_->true_target(); }
+  JumpTarget* false_target() const  { return state_->false_target(); }
+
+  // We don't track loop nesting level on ARM yet.
+  int loop_nesting() const { return 0; }
+
+  // Node visitors.
+  void VisitStatements(ZoneList<Statement*>* statements);
+
+#define DEF_VISIT(type) \
+  void Visit##type(type* node);
+  AST_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+  // Visit a statement and then spill the virtual frame if control flow can
+  // reach the end of the statement (ie, it does not exit via break,
+  // continue, return, or throw).  This function is used temporarily while
+  // the code generator is being transformed.
+  inline void VisitAndSpill(Statement* statement);
+
+  // Visit a list of statements and then spill the virtual frame if control
+  // flow can reach the end of the list.
+  inline void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
+
+  // Main code generation function
+  void GenCode(FunctionLiteral* fun);
+
+  // The following are used by class Reference.
+  void LoadReference(Reference* ref);
+  void UnloadReference(Reference* ref);
+
+  MemOperand ContextOperand(Register context, int index) const {
+    return MemOperand(context, Context::SlotOffset(index));
+  }
+
+  MemOperand SlotOperand(Slot* slot, Register tmp);
+
+  MemOperand ContextSlotOperandCheckExtensions(Slot* slot,
+                                               Register tmp,
+                                               Register tmp2,
+                                               JumpTarget* slow);
+
+  // Expressions
+  MemOperand GlobalObject() const  {
+    return ContextOperand(cp, Context::GLOBAL_INDEX);
+  }
+
+  void LoadCondition(Expression* x,
+                     TypeofState typeof_state,
+                     JumpTarget* true_target,
+                     JumpTarget* false_target,
+                     bool force_cc);
+  void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+  void LoadGlobal();
+  void LoadGlobalReceiver(Register scratch);
+
+  // Generate code to push the value of an expression on top of the frame
+  // and then spill the frame fully to memory.  This function is used
+  // temporarily while the code generator is being transformed.
+  inline void LoadAndSpill(Expression* expression,
+                           TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+
+  // Call LoadCondition and then spill the virtual frame unless control flow
+  // cannot reach the end of the expression (ie, by emitting only
+  // unconditional jumps to the control targets).
+  inline void LoadConditionAndSpill(Expression* expression,
+                                    TypeofState typeof_state,
+                                    JumpTarget* true_target,
+                                    JumpTarget* false_target,
+                                    bool force_control);
+
+  // Read a value from a slot and leave it on top of the expression stack.
+  void LoadFromSlot(Slot* slot, TypeofState typeof_state);
+  void LoadFromGlobalSlotCheckExtensions(Slot* slot,
+                                         TypeofState typeof_state,
+                                         Register tmp,
+                                         Register tmp2,
+                                         JumpTarget* slow);
+
+  // Special code for typeof expressions: Unfortunately, we must
+  // be careful when loading the expression in 'typeof'
+  // expressions. We are not allowed to throw reference errors for
+  // non-existing properties of the global object, so we must make it
+  // look like an explicit property access, instead of an access
+  // through the context chain.
+  void LoadTypeofExpression(Expression* x);
+
+  void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
+
+  void GenericBinaryOperation(Token::Value op,
+                              OverwriteMode overwrite_mode,
+                              int known_rhs = kUnknownIntValue);
+  void Comparison(Condition cc,
+                  Expression* left,
+                  Expression* right,
+                  bool strict = false);
+
+  void SmiOperation(Token::Value op,
+                    Handle<Object> value,
+                    bool reversed,
+                    OverwriteMode mode);
+
+  void CallWithArguments(ZoneList<Expression*>* arguments, int position);
+
+  // Control flow
+  void Branch(bool if_true, JumpTarget* target);
+  void CheckStack();
+
+  struct InlineRuntimeLUT {
+    void (CodeGenerator::*method)(ZoneList<Expression*>*);
+    const char* name;
+  };
+
+  static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
+  bool CheckForInlineRuntimeCall(CallRuntime* node);
+  static bool PatchInlineRuntimeEntry(Handle<String> name,
+                                      const InlineRuntimeLUT& new_entry,
+                                      InlineRuntimeLUT* old_entry);
+
+  Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
+  void ProcessDeclarations(ZoneList<Declaration*>* declarations);
+
+  Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
+
+  // Declare global variables and functions in the given array of
+  // name/value pairs.
+  void DeclareGlobals(Handle<FixedArray> pairs);
+
+  // Instantiate the function boilerplate.
+  void InstantiateBoilerplate(Handle<JSFunction> boilerplate);
+
+  // Support for type checks.
+  void GenerateIsSmi(ZoneList<Expression*>* args);
+  void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
+  void GenerateIsArray(ZoneList<Expression*>* args);
+
+  // Support for construct call checks.
+  void GenerateIsConstructCall(ZoneList<Expression*>* args);
+
+  // Support for arguments.length and arguments[?].
+  void GenerateArgumentsLength(ZoneList<Expression*>* args);
+  void GenerateArgumentsAccess(ZoneList<Expression*>* args);
+
+  // Support for accessing the class and value fields of an object.
+  void GenerateClassOf(ZoneList<Expression*>* args);
+  void GenerateValueOf(ZoneList<Expression*>* args);
+  void GenerateSetValueOf(ZoneList<Expression*>* args);
+
+  // Fast support for charCodeAt(n).
+  void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
+
+  // Fast support for object equality testing.
+  void GenerateObjectEquals(ZoneList<Expression*>* args);
+
+  void GenerateLog(ZoneList<Expression*>* args);
+
+  // Fast support for Math.random().
+  void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
+
+  // Fast support for Math.sin and Math.cos.
+  enum MathOp { SIN, COS };
+  void GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args);
+  inline void GenerateMathSin(ZoneList<Expression*>* args);
+  inline void GenerateMathCos(ZoneList<Expression*>* args);
+
+  // Methods used to indicate the source positions that code is generated
+  // for.  Source positions are collected by the assembler and emitted with
+  // the relocation information.
+  void CodeForFunctionPosition(FunctionLiteral* fun);
+  void CodeForReturnPosition(FunctionLiteral* fun);
+  void CodeForStatementPosition(Statement* node);
+  void CodeForSourcePosition(int pos);
+
+#ifdef DEBUG
+  // True if the registers are valid for entry to a block.
+  bool HasValidEntryRegisters();
+#endif
+
+  bool is_eval_;  // Tells whether code is generated for eval.
+
+  Handle<Script> script_;
+  List<DeferredCode*> deferred_;
+
+  // Assembler
+  MacroAssembler* masm_;  // to generate code
+
+  // Code generation state
+  Scope* scope_;
+  VirtualFrame* frame_;
+  RegisterAllocator* allocator_;
+  Condition cc_reg_;
+  CodeGenState* state_;
+
+  // Jump targets
+  BreakTarget function_return_;
+
+  // True if the function return is shadowed (ie, jumping to the target
+  // function_return_ does not jump to the true function return, but rather
+  // to some unlinking code).
+  bool function_return_is_shadowed_;
+
+  static InlineRuntimeLUT kInlineRuntimeLUT[];
+
+  friend class VirtualFrame;
+  friend class JumpTarget;
+  friend class Reference;
+
+  DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
+};
+
+
+class GenericBinaryOpStub : public CodeStub {
+ public:
+  GenericBinaryOpStub(Token::Value op,
+                      OverwriteMode mode,
+                      int constant_rhs = CodeGenerator::kUnknownIntValue)
+      : op_(op),
+        mode_(mode),
+        constant_rhs_(constant_rhs),
+        specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)) { }
+
+ private:
+  Token::Value op_;
+  OverwriteMode mode_;
+  int constant_rhs_;
+  bool specialized_on_rhs_;
+
+  static const int kMaxKnownRhs = 0x40000000;
+
+  // Minor key encoding in 16 bits.
+  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+  class OpBits: public BitField<Token::Value, 2, 6> {};
+  class KnownIntBits: public BitField<int, 8, 8> {};
+
+  Major MajorKey() { return GenericBinaryOp; }
+  int MinorKey() {
+    // Encode the parameters in a unique 16 bit value.
+    return OpBits::encode(op_)
+           | ModeBits::encode(mode_)
+           | KnownIntBits::encode(MinorKeyForKnownInt());
+  }
+
+  void Generate(MacroAssembler* masm);
+  void HandleNonSmiBitwiseOp(MacroAssembler* masm);
+
+  static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
+    if (constant_rhs == CodeGenerator::kUnknownIntValue) return false;
+    if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
+    if (op == Token::MOD) {
+      if (constant_rhs <= 1) return false;
+      if (constant_rhs <= 10) return true;
+      if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
+      return false;
+    }
+    return false;
+  }
+
+  int MinorKeyForKnownInt() {
+    if (!specialized_on_rhs_) return 0;
+    if (constant_rhs_ <= 10) return constant_rhs_ + 1;
+    ASSERT(IsPowerOf2(constant_rhs_));
+    int key = 12;
+    int d = constant_rhs_;
+    while ((d & 1) == 0) {
+      key++;
+      d >>= 1;
+    }
+    return key;
+  }
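+
+  // For illustration (derived from the mapping above): a stub specialized on
+  // constant_rhs_ == 2 gets key 3, constant_rhs_ == 10 gets key 11, and a
+  // power of two above 10 gets 12 + log2(constant_rhs_), e.g. 16 -> 16 and
+  // 1024 -> 22.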
+
+  const char* GetName() {
+    switch (op_) {
+      case Token::ADD: return "GenericBinaryOpStub_ADD";
+      case Token::SUB: return "GenericBinaryOpStub_SUB";
+      case Token::MUL: return "GenericBinaryOpStub_MUL";
+      case Token::DIV: return "GenericBinaryOpStub_DIV";
+      case Token::MOD: return "GenericBinaryOpStub_MOD";
+      case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
+      case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
+      case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
+      case Token::SAR: return "GenericBinaryOpStub_SAR";
+      case Token::SHL: return "GenericBinaryOpStub_SHL";
+      case Token::SHR: return "GenericBinaryOpStub_SHR";
+      default:         return "GenericBinaryOpStub";
+    }
+  }
+
+#ifdef DEBUG
+  void Print() {
+    if (!specialized_on_rhs_) {
+      PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
+    } else {
+      PrintF("GenericBinaryOpStub (%s by %d)\n",
+             Token::String(op_),
+             constant_rhs_);
+    }
+  }
+#endif
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_ARM_CODEGEN_ARM_H_
diff --git a/src/arm/constants-arm.cc b/src/arm/constants-arm.cc
new file mode 100644
index 0000000..964bfe1
--- /dev/null
+++ b/src/arm/constants-arm.cc
@@ -0,0 +1,92 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "constants-arm.h"
+
+
+namespace assembler {
+namespace arm {
+
+namespace v8i = v8::internal;
+
+
+// These register names are defined in a way to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+const char* Registers::names_[kNumRegisters] = {
+  "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+  "r8", "r9", "r10", "fp", "ip", "sp", "lr", "pc",
+};
+
+
+// List of alias names which can be used when referring to ARM registers.
+const Registers::RegisterAlias Registers::aliases_[] = {
+  {10, "sl"},
+  {11, "r11"},
+  {12, "r12"},
+  {13, "r13"},
+  {14, "r14"},
+  {15, "r15"},
+  {kNoRegister, NULL}
+};
+
+
+const char* Registers::Name(int reg) {
+  const char* result;
+  if ((0 <= reg) && (reg < kNumRegisters)) {
+    result = names_[reg];
+  } else {
+    result = "noreg";
+  }
+  return result;
+}
+
+
+int Registers::Number(const char* name) {
+  // Look through the canonical names.
+  for (int i = 0; i < kNumRegisters; i++) {
+    if (strcmp(names_[i], name) == 0) {
+      return i;
+    }
+  }
+
+  // Look through the alias names.
+  int i = 0;
+  while (aliases_[i].reg != kNoRegister) {
+    if (strcmp(aliases_[i].name, name) == 0) {
+      return aliases_[i].reg;
+    }
+    i++;
+  }
+
+  // No register with the requested name found.
+  return kNoRegister;
+}
+
+
+} }  // namespace assembler::arm
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
new file mode 100644
index 0000000..6bd0d00
--- /dev/null
+++ b/src/arm/constants-arm.h
@@ -0,0 +1,322 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_CONSTANTS_ARM_H_
+#define V8_ARM_CONSTANTS_ARM_H_
+
+// The simulator emulates the EABI so we define the USE_ARM_EABI macro if we
+// are not running on real ARM hardware.  One reason for this is that the
+// old ABI uses fp registers in the calling convention and the simulator does
+// not simulate fp registers or coprocessor instructions.
+#if defined(__ARM_EABI__) || !defined(__arm__)
+# define USE_ARM_EABI 1
+#endif
+
+// This means that interwork-compatible jump instructions are generated.  We
+// want to generate them on the simulator too so that the snapshots it
+// produces can be used on real hardware.
+#if defined(__THUMB_INTERWORK__) || !defined(__arm__)
+# define USE_THUMB_INTERWORK 1
+#endif
+
+#if defined(__ARM_ARCH_5T__) || \
+    defined(__ARM_ARCH_5TE__) || \
+    defined(__ARM_ARCH_6__) || \
+    defined(__ARM_ARCH_7A__) || \
+    defined(__ARM_ARCH_7__)
+# define CAN_USE_ARMV5_INSTRUCTIONS 1
+# define CAN_USE_THUMB_INSTRUCTIONS 1
+#endif
+
+#if defined(__ARM_ARCH_6__) || \
+    defined(__ARM_ARCH_7A__) || \
+    defined(__ARM_ARCH_7__)
+# define CAN_USE_ARMV6_INSTRUCTIONS 1
+#endif
+
+#if defined(__ARM_ARCH_7A__) || \
+    defined(__ARM_ARCH_7__)
+# define CAN_USE_ARMV7_INSTRUCTIONS 1
+#endif
+
+// Simulator should support ARM5 instructions.
+#if !defined(__arm__)
+# define CAN_USE_ARMV5_INSTRUCTIONS 1
+# define CAN_USE_THUMB_INSTRUCTIONS 1
+#endif
+
+namespace assembler {
+namespace arm {
+
+// Number of registers in normal ARM mode.
+static const int kNumRegisters = 16;
+
+// PC is register 15.
+static const int kPCRegister = 15;
+static const int kNoRegister = -1;
+
+// Defines constants and accessor classes to assemble, disassemble and
+// simulate ARM instructions.
+//
+// Section references in the code refer to the "ARM Architecture Reference
+// Manual" from July 2005 (available at http://www.arm.com/miscPDFs/14128.pdf)
+//
+// Constants for specific fields are defined in their respective named enums.
+// General constants are in an anonymous enum in class Instr.
+
+typedef unsigned char byte;
+
+// Values for the condition field as defined in section A3.2
+enum Condition {
+  no_condition = -1,
+  EQ =  0,  // equal
+  NE =  1,  // not equal
+  CS =  2,  // carry set/unsigned higher or same
+  CC =  3,  // carry clear/unsigned lower
+  MI =  4,  // minus/negative
+  PL =  5,  // plus/positive or zero
+  VS =  6,  // overflow
+  VC =  7,  // no overflow
+  HI =  8,  // unsigned higher
+  LS =  9,  // unsigned lower or same
+  GE = 10,  // signed greater than or equal
+  LT = 11,  // signed less than
+  GT = 12,  // signed greater than
+  LE = 13,  // signed less than or equal
+  AL = 14,  // always (unconditional)
+  special_condition = 15,  // special condition (refer to section A3.2.1)
+  max_condition = 16
+};
+
+
+// Opcodes for Data-processing instructions (instructions with a type 0 and 1)
+// as defined in section A3.4
+enum Opcode {
+  no_operand = -1,
+  AND =  0,  // Logical AND
+  EOR =  1,  // Logical Exclusive OR
+  SUB =  2,  // Subtract
+  RSB =  3,  // Reverse Subtract
+  ADD =  4,  // Add
+  ADC =  5,  // Add with Carry
+  SBC =  6,  // Subtract with Carry
+  RSC =  7,  // Reverse Subtract with Carry
+  TST =  8,  // Test
+  TEQ =  9,  // Test Equivalence
+  CMP = 10,  // Compare
+  CMN = 11,  // Compare Negated
+  ORR = 12,  // Logical (inclusive) OR
+  MOV = 13,  // Move
+  BIC = 14,  // Bit Clear
+  MVN = 15,  // Move Not
+  max_operand = 16
+};
+
+
+// Some special instructions encoded as a TEQ with S=0 (bit 20).
+enum Opcode9Bits {
+  BX   =  1,
+  BXJ  =  2,
+  BLX  =  3,
+  BKPT =  7
+};
+
+
+// Some special instructions encoded as a CMN with S=0 (bit 20).
+enum Opcode11Bits {
+  CLZ  =  1
+};
+
+
+// Shifter types for Data-processing operands as defined in section A5.1.2.
+enum Shift {
+  no_shift = -1,
+  LSL = 0,  // Logical shift left
+  LSR = 1,  // Logical shift right
+  ASR = 2,  // Arithmetic shift right
+  ROR = 3,  // Rotate right
+  max_shift = 4
+};
+
+
+// Special Software Interrupt codes when used in the presence of the ARM
+// simulator.
+enum SoftwareInterruptCodes {
+  // transition to C code
+  call_rt_redirected = 0x10,
+  // break point
+  break_point = 0x20
+};
+
+
+typedef int32_t instr_t;
+
+
+// The class Instr enables access to individual fields defined in the ARM
+// architecture instruction set encoding as described in figure A3-1.
+//
+// Example: Test whether the instruction at ptr does set the condition code
+// bits.
+//
+// bool InstructionSetsConditionCodes(byte* ptr) {
+//   Instr* instr = Instr::At(ptr);
+//   int type = instr->TypeField();
+//   return ((type == 0) || (type == 1)) && instr->HasS();
+// }
+//
+class Instr {
+ public:
+  enum {
+    kInstrSize = 4,
+    kInstrSizeLog2 = 2,
+    kPCReadOffset = 8
+  };
+
+  // Get the raw instruction bits.
+  inline instr_t InstructionBits() const {
+    return *reinterpret_cast<const instr_t*>(this);
+  }
+
+  // Set the raw instruction bits to value.
+  inline void SetInstructionBits(instr_t value) {
+    *reinterpret_cast<instr_t*>(this) = value;
+  }
+
+  // Read one particular bit out of the instruction bits.
+  inline int Bit(int nr) const {
+    return (InstructionBits() >> nr) & 1;
+  }
+
+  // Read a bit field out of the instruction bits.
+  inline int Bits(int hi, int lo) const {
+    return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+  }
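+  // For example, Bits(11, 8) extracts a four-bit field: the mask is
+  // (2 << (11 - 8)) - 1 == 0xF, so for instruction bits 0x12345678 the
+  // result is (0x12345678 >> 8) & 0xF == 0x6.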
+
+
+  // Accessors for the different named fields used in the ARM encoding.
+  // The naming of these accessors corresponds to figure A3-1.
+  // Generally applicable fields
+  inline Condition ConditionField() const {
+    return static_cast<Condition>(Bits(31, 28));
+  }
+  inline int TypeField() const { return Bits(27, 25); }
+
+  inline int RnField() const { return Bits(19, 16); }
+  inline int RdField() const { return Bits(15, 12); }
+
+  // Fields used in Data processing instructions
+  inline Opcode OpcodeField() const {
+    return static_cast<Opcode>(Bits(24, 21));
+  }
+  inline int SField() const { return Bit(20); }
+    // with register
+  inline int RmField() const { return Bits(3, 0); }
+  inline Shift ShiftField() const { return static_cast<Shift>(Bits(6, 5)); }
+  inline int RegShiftField() const { return Bit(4); }
+  inline int RsField() const { return Bits(11, 8); }
+  inline int ShiftAmountField() const { return Bits(11, 7); }
+    // with immediate
+  inline int RotateField() const { return Bits(11, 8); }
+  inline int Immed8Field() const { return Bits(7, 0); }
+
+  // Fields used in Load/Store instructions
+  inline int PUField() const { return Bits(24, 23); }
+  inline int  BField() const { return Bit(22); }
+  inline int  WField() const { return Bit(21); }
+  inline int  LField() const { return Bit(20); }
+    // with register uses same fields as Data processing instructions above
+    // with immediate
+  inline int Offset12Field() const { return Bits(11, 0); }
+    // multiple
+  inline int RlistField() const { return Bits(15, 0); }
+    // extra loads and stores
+  inline int SignField() const { return Bit(6); }
+  inline int HField() const { return Bit(5); }
+  inline int ImmedHField() const { return Bits(11, 8); }
+  inline int ImmedLField() const { return Bits(3, 0); }
+
+  // Fields used in Branch instructions
+  inline int LinkField() const { return Bit(24); }
+  inline int SImmed24Field() const { return ((InstructionBits() << 8) >> 8); }
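+  // The shift pair in SImmed24Field sign-extends the 24-bit branch offset,
+  // since instr_t is a signed 32-bit type: a raw offset field of 0xFFFFFE
+  // becomes 0xFFFFFE00 after the left shift and -2 after the arithmetic
+  // right shift (assuming the usual two's-complement behaviour).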
+
+  // Fields used in Software interrupt instructions
+  inline SoftwareInterruptCodes SwiField() const {
+    return static_cast<SoftwareInterruptCodes>(Bits(23, 0));
+  }
+
+  // Test for special encodings of type 0 instructions (extra loads and stores,
+  // as well as multiplications).
+  inline bool IsSpecialType0() const { return (Bit(7) == 1) && (Bit(4) == 1); }
+
+  // Special accessors that test for existence of a value.
+  inline bool HasS()    const { return SField() == 1; }
+  inline bool HasB()    const { return BField() == 1; }
+  inline bool HasW()    const { return WField() == 1; }
+  inline bool HasL()    const { return LField() == 1; }
+  inline bool HasSign() const { return SignField() == 1; }
+  inline bool HasH()    const { return HField() == 1; }
+  inline bool HasLink() const { return LinkField() == 1; }
+
+  // Instructions are read out of a code stream. The only way to get a
+  // reference to an instruction is to convert a pointer. There is no way
+  // to allocate or create instances of class Instr.
+  // Use the At(pc) function to create references to Instr.
+  static Instr* At(byte* pc) { return reinterpret_cast<Instr*>(pc); }
+
+ private:
+  // We need to prevent the creation of instances of class Instr.
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Instr);
+};
+
+
+// Helper functions for converting between register numbers and names.
+class Registers {
+ public:
+  // Return the name of the register.
+  static const char* Name(int reg);
+
+  // Lookup the register number for the name provided.
+  static int Number(const char* name);
+
+  struct RegisterAlias {
+    int reg;
+    const char *name;
+  };
+
+ private:
+  static const char* names_[kNumRegisters];
+  static const RegisterAlias aliases_[];
+};
+
+
+
+} }  // namespace assembler::arm
+
+#endif  // V8_ARM_CONSTANTS_ARM_H_
diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc
new file mode 100644
index 0000000..cafefce
--- /dev/null
+++ b/src/arm/cpu-arm.cc
@@ -0,0 +1,127 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// CPU specific code for arm independent of OS goes here.
+#if defined(__arm__)
+#include <sys/syscall.h>  // for cache flushing.
+#endif
+
+#include "v8.h"
+
+#include "cpu.h"
+
+namespace v8 {
+namespace internal {
+
+void CPU::Setup() {
+  // Nothing to do.
+}
+
+
+void CPU::FlushICache(void* start, size_t size) {
+#if !defined (__arm__)
+  // Not generating ARM instructions for C-code. This means that we are
+  // building an ARM emulator based target. No I$ flushes are necessary.
+  // None of this code ends up in the snapshot so there are no issues
+  // around whether or not to generate the code when building snapshots.
+#else
+  // Ideally, we would call
+  //   syscall(__ARM_NR_cacheflush, start,
+  //           reinterpret_cast<intptr_t>(start) + size, 0);
+  // however, syscall(int, ...) is not supported on all platforms, especially
+  // not when using EABI, so we call the __ARM_NR_cacheflush syscall directly.
+
+  register uint32_t beg asm("a1") = reinterpret_cast<uint32_t>(start);
+  register uint32_t end asm("a2") =
+      reinterpret_cast<uint32_t>(start) + size;
+  register uint32_t flg asm("a3") = 0;
+  #ifdef __ARM_EABI__
+    register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
+    #if defined (__arm__) && !defined(__thumb__)
+      // __arm__ may be defined in thumb mode.
+      asm volatile(
+          "swi 0x0"
+          : "=r" (beg)
+          : "0" (beg), "r" (end), "r" (flg), "r" (scno));
+    #else
+      asm volatile(
+      "@   Enter ARM Mode  \n\t"
+          "adr r3, 1f      \n\t"
+          "bx  r3          \n\t"
+          ".ALIGN 4        \n\t"
+          ".ARM            \n"
+      "1:  swi 0x0         \n\t"
+      "@   Enter THUMB Mode\n\t"
+          "adr r3, 2f+1    \n\t"
+          "bx  r3          \n\t"
+          ".THUMB          \n"
+      "2:                  \n\t"
+          : "=r" (beg)
+          : "0" (beg), "r" (end), "r" (flg), "r" (scno)
+          : "r3");
+    #endif
+  #else
+    #if defined (__arm__) && !defined(__thumb__)
+      // __arm__ may be defined in thumb mode.
+      asm volatile(
+          "swi %1"
+          : "=r" (beg)
+          : "i" (__ARM_NR_cacheflush), "0" (beg), "r" (end), "r" (flg));
+    #else
+      // Do not use the value of __ARM_NR_cacheflush in the inline assembly
+      // below, because the thumb mode value would be used, which would be
+      // wrong, since we switch to ARM mode before executing the swi instruction
+      asm volatile(
+      "@   Enter ARM Mode  \n\t"
+          "adr r3, 1f      \n\t"
+          "bx  r3          \n\t"
+          ".ALIGN 4        \n\t"
+          ".ARM            \n"
+      "1:  swi 0x9f0002    \n"
+      "@   Enter THUMB Mode\n\t"
+          "adr r3, 2f+1    \n\t"
+          "bx  r3          \n\t"
+          ".THUMB          \n"
+      "2:                  \n\t"
+          : "=r" (beg)
+          : "0" (beg), "r" (end), "r" (flg)
+          : "r3");
+    #endif
+  #endif
+#endif
+}
+
+
+void CPU::DebugBreak() {
+#if !defined (__arm__)
+  UNIMPLEMENTED();  // when building ARM emulator target
+#else
+  asm volatile("bkpt 0");
+#endif
+}
+
+} }  // namespace v8::internal
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
new file mode 100644
index 0000000..4f45175
--- /dev/null
+++ b/src/arm/debug-arm.cc
@@ -0,0 +1,213 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "debug.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+bool BreakLocationIterator::IsDebugBreakAtReturn() {
+  return Debug::IsDebugBreakAtReturn(rinfo());
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtReturn() {
+  // Patch the code changing the return from JS function sequence from
+  //   mov sp, fp
+  //   ldmia sp!, {fp, lr}
+  //   add sp, sp, #4
+  //   bx lr
+  // to a call to the debug break return code.
+  //   mov lr, pc
+  //   ldr pc, [pc, #-4]
+  //   <debug break return code entry point address>
+  //   bkpt 0
+  CodePatcher patcher(rinfo()->pc(), 4);
+  patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
+  patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
+  patcher.Emit(Debug::debug_break_return()->entry());
+  patcher.masm()->bkpt(0);
+}
+
+
+// Restore the JS frame exit code.
+void BreakLocationIterator::ClearDebugBreakAtReturn() {
+  rinfo()->PatchCode(original_rinfo()->pc(),
+                     CodeGenerator::kJSReturnSequenceLength);
+}
+
+
+// A debug break in the exit code is identified by a call.
+bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
+  ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+  return rinfo->IsCallInstruction();
+}
+
+
+#define __ ACCESS_MASM(masm)
+
+
+static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
+                                          RegList pointer_regs) {
+  // Save the content of all general purpose registers in memory. This copy
+  // in memory is later pushed onto the JS expression stack of the fake JS
+  // frame that is generated, and also onto the C frame generated on top of
+  // that. In the JS frame ONLY the registers containing pointers are pushed
+  // on the expression stack. This causes the GC to update these pointers so
+  // that they have the correct value when returning from the debugger.
+  __ SaveRegistersToMemory(kJSCallerSaved);
+
+  __ EnterInternalFrame();
+
+  // Store the registers containing object pointers on the expression stack to
+  // make sure that these are correctly updated during GC.
+  // Use sp as base to push.
+  __ CopyRegistersFromMemoryToStack(sp, pointer_regs);
+
+#ifdef DEBUG
+  __ RecordComment("// Calling from debug break to runtime - come in - over");
+#endif
+  __ mov(r0, Operand(0));  // no arguments
+  __ mov(r1, Operand(ExternalReference::debug_break()));
+
+  CEntryDebugBreakStub ceb;
+  __ CallStub(&ceb);
+
+  // Restore the register values containing object pointers from the expression
+  // stack in the reverse order from which they were pushed.
+  // Use sp as base to pop.
+  __ CopyRegistersFromStackToMemory(sp, r3, pointer_regs);
+
+  __ LeaveInternalFrame();
+
+  // Finally restore all registers.
+  __ RestoreRegistersFromMemory(kJSCallerSaved);
+
+  // Now that the break point has been handled, resume normal execution by
+  // jumping to the target address intended by the caller, which was
+  // overwritten by the address of DebugBreakXXX.
+  __ mov(ip, Operand(ExternalReference(Debug_Address::AfterBreakTarget())));
+  __ ldr(ip, MemOperand(ip));
+  __ Jump(ip);
+}
+
+
+void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+  // Calling convention for IC load (from ic-arm.cc).
+  // ----------- S t a t e -------------
+  //  -- r0    : receiver
+  //  -- r2    : name
+  //  -- lr    : return address
+  //  -- [sp]  : receiver
+  // -----------------------------------
+  // Registers r0 and r2 contain objects that need to be pushed on the
+  // expression stack of the fake JS frame.
+  Generate_DebugBreakCallHelper(masm, r0.bit() | r2.bit());
+}
+
+
+void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+  // Calling convention for IC store (from ic-arm.cc).
+  // ----------- S t a t e -------------
+  //  -- r0    : receiver
+  //  -- r2    : name
+  //  -- lr    : return address
+  //  -- [sp]  : receiver
+  // -----------------------------------
+  // Registers r0 and r2 contain objects that need to be pushed on the
+  // expression stack of the fake JS frame.
+  Generate_DebugBreakCallHelper(masm, r0.bit() | r2.bit());
+}
+
+
+void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- lr     : return address
+  //  -- sp[0]  : key
+  //  -- sp[4]  : receiver
+  Generate_DebugBreakCallHelper(masm, 0);
+}
+
+
+void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- lr     : return address
+  //  -- sp[0]  : key
+  //  -- sp[4]  : receiver
+  Generate_DebugBreakCallHelper(masm, 0);
+}
+
+
+void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
+  // Calling convention for IC call (from ic-arm.cc)
+  // ----------- S t a t e -------------
+  //  -- r0: number of arguments
+  //  -- r1: receiver
+  //  -- lr: return address
+  // -----------------------------------
+  // Register r1 contains an object that needs to be pushed on the expression
+  // stack of the fake JS frame. r0 is the actual number of arguments not
+  // encoded as a smi, therefore it cannot be on the expression stack of the
+  // fake JS frame as it can easily be an invalid pointer (e.g. 1). r0 will be
+  // pushed on the stack of the C frame and restored from there.
+  Generate_DebugBreakCallHelper(masm, r1.bit());
+}
+
+
+void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
+  // In places other than IC call sites it is expected that r0 is TOS, which
+  // is an object. This is not generally the case, so this should be used
+  // with care.
+  Generate_DebugBreakCallHelper(masm, r0.bit());
+}
+
+
+void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
+  // In places other than IC call sites it is expected that r0 is TOS, which
+  // is an object. This is not generally the case, so this should be used
+  // with care.
+  Generate_DebugBreakCallHelper(masm, r0.bit());
+}
+
+
+void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  No registers used on entry.
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, 0);
+}
+
+
+#undef __
+
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
+} }  // namespace v8::internal
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
new file mode 100644
index 0000000..6431483
--- /dev/null
+++ b/src/arm/disasm-arm.cc
@@ -0,0 +1,978 @@
+// Copyright 2007-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// A Disassembler object is used to disassemble a block of code instruction by
+// instruction. The default implementation of the NameConverter object can be
+// overridden to modify register names or to do symbol lookup on addresses.
+//
+// The example below will disassemble a block of code and print it to stdout.
+//
+//   NameConverter converter;
+//   Disassembler d(converter);
+//   for (byte* pc = begin; pc < end;) {
+//     char buffer[128];
+//     buffer[0] = '\0';
+//     byte* prev_pc = pc;
+//     pc += d.InstructionDecode(buffer, sizeof buffer, pc);
+//     printf("%p    %08x      %s\n",
+//            prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
+//   }
+//
+// The Disassembler class also has a convenience method to disassemble a block
+// of code into a FILE*, meaning that the above functionality could also be
+// achieved by just calling Disassembler::Disassemble(stdout, begin, end);
+
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#ifndef WIN32
+#include <stdint.h>
+#endif
+
+#include "v8.h"
+
+#include "constants-arm.h"
+#include "disasm.h"
+#include "macro-assembler.h"
+#include "platform.h"
+
+
+namespace assembler {
+namespace arm {
+
+namespace v8i = v8::internal;
+
+
+//------------------------------------------------------------------------------
+
+// Decoder decodes and disassembles instructions into an output buffer.
+// It uses the converter to convert register names and call destinations into
+// a more informative description.
+class Decoder {
+ public:
+  Decoder(const disasm::NameConverter& converter,
+          v8::internal::Vector<char> out_buffer)
+    : converter_(converter),
+      out_buffer_(out_buffer),
+      out_buffer_pos_(0) {
+    out_buffer_[out_buffer_pos_] = '\0';
+  }
+
+  ~Decoder() {}
+
+  // Writes one disassembled instruction into 'buffer' (0-terminated).
+  // Returns the length of the disassembled machine instruction in bytes.
+  int InstructionDecode(byte* instruction);
+
+ private:
+  // Bottleneck functions to print into the out_buffer.
+  void PrintChar(const char ch);
+  void Print(const char* str);
+
+  // Printing of common values.
+  void PrintRegister(int reg);
+  void PrintCondition(Instr* instr);
+  void PrintShiftRm(Instr* instr);
+  void PrintShiftImm(Instr* instr);
+  void PrintPU(Instr* instr);
+  void PrintSoftwareInterrupt(SoftwareInterruptCodes swi);
+
+  // Handle formatting of instructions and their options.
+  int FormatRegister(Instr* instr, const char* option);
+  int FormatOption(Instr* instr, const char* option);
+  void Format(Instr* instr, const char* format);
+  void Unknown(Instr* instr);
+
+  // Each of these functions decodes one particular instruction type, a 3-bit
+  // field in the instruction encoding.
+  // Types 0 and 1 are combined as they are largely the same except for the way
+  // they interpret the shifter operand.
+  void DecodeType01(Instr* instr);
+  void DecodeType2(Instr* instr);
+  void DecodeType3(Instr* instr);
+  void DecodeType4(Instr* instr);
+  void DecodeType5(Instr* instr);
+  void DecodeType6(Instr* instr);
+  void DecodeType7(Instr* instr);
+  void DecodeUnconditional(Instr* instr);
+
+  const disasm::NameConverter& converter_;
+  v8::internal::Vector<char> out_buffer_;
+  int out_buffer_pos_;
+
+  DISALLOW_COPY_AND_ASSIGN(Decoder);
+};
+
+
+// Support for assertions in the Decoder formatting functions.
+#define STRING_STARTS_WITH(string, compare_string) \
+  (strncmp(string, compare_string, strlen(compare_string)) == 0)
+
+
+// Append the ch to the output buffer.
+void Decoder::PrintChar(const char ch) {
+  out_buffer_[out_buffer_pos_++] = ch;
+}
+
+
+// Append the str to the output buffer.
+void Decoder::Print(const char* str) {
+  char cur = *str++;
+  while (cur != '\0' && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+    PrintChar(cur);
+    cur = *str++;
+  }
+  out_buffer_[out_buffer_pos_] = 0;
+}
+
+
+// These condition names are defined in a way to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+static const char* cond_names[max_condition] = {
+  "eq", "ne", "cs" , "cc" , "mi" , "pl" , "vs" , "vc" ,
+  "hi", "ls", "ge", "lt", "gt", "le", "", "invalid",
+};
+
+
+// Print the condition guarding the instruction.
+void Decoder::PrintCondition(Instr* instr) {
+  Print(cond_names[instr->ConditionField()]);
+}
+
+
+// Print the register name according to the active name converter.
+void Decoder::PrintRegister(int reg) {
+  Print(converter_.NameOfCPURegister(reg));
+}
+
+
+// These shift names are defined in a way to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+static const char* shift_names[max_shift] = {
+  "lsl", "lsr", "asr", "ror"
+};
+
+
+// Print the register shift operands for the instruction. Generally used for
+// data processing instructions.
+void Decoder::PrintShiftRm(Instr* instr) {
+  Shift shift = instr->ShiftField();
+  int shift_amount = instr->ShiftAmountField();
+  int rm = instr->RmField();
+
+  PrintRegister(rm);
+
+  if ((instr->RegShiftField() == 0) && (shift == LSL) && (shift_amount == 0)) {
+    // Special case for using rm only.
+    return;
+  }
+  if (instr->RegShiftField() == 0) {
+    // by immediate
+    if ((shift == ROR) && (shift_amount == 0)) {
+      Print(", RRX");
+      return;
+    } else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
+      shift_amount = 32;
+    }
+    out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                         ", %s #%d",
+                                         shift_names[shift], shift_amount);
+  } else {
+    // by register
+    int rs = instr->RsField();
+    out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                         ", %s ", shift_names[shift]);
+    PrintRegister(rs);
+  }
+}
+
+
+// Print the immediate operand for the instruction. Generally used for data
+// processing instructions.
+void Decoder::PrintShiftImm(Instr* instr) {
+  int rotate = instr->RotateField() * 2;
+  int immed8 = instr->Immed8Field();
+  int imm = (immed8 >> rotate) | (immed8 << (32 - rotate));
+  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                       "#%d", imm);
+}
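+// For example, PrintShiftImm above decodes an 8-bit immediate of 1 with a
+// rotate field of 1 (a rotation by 2) to (1 >> 2) | (1 << 30) == 0x40000000,
+// which is printed as "#1073741824".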
+
+
+// Print PU formatting to reduce complexity of FormatOption.
+void Decoder::PrintPU(Instr* instr) {
+  switch (instr->PUField()) {
+    case 0: {
+      Print("da");
+      break;
+    }
+    case 1: {
+      Print("ia");
+      break;
+    }
+    case 2: {
+      Print("db");
+      break;
+    }
+    case 3: {
+      Print("ib");
+      break;
+    }
+    default: {
+      UNREACHABLE();
+      break;
+    }
+  }
+}
+
+
+// Print SoftwareInterrupt codes. Factoring this out reduces the complexity of
+// the FormatOption method.
+void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes swi) {
+  switch (swi) {
+    case call_rt_redirected:
+      Print("call_rt_redirected");
+      return;
+    case break_point:
+      Print("break_point");
+      return;
+    default:
+      out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                           "%d",
+                                           swi);
+      return;
+  }
+}
+
+
+// Handle all register based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatRegister(Instr* instr, const char* format) {
+  ASSERT(format[0] == 'r');
+  if (format[1] == 'n') {  // 'rn: Rn register
+    int reg = instr->RnField();
+    PrintRegister(reg);
+    return 2;
+  } else if (format[1] == 'd') {  // 'rd: Rd register
+    int reg = instr->RdField();
+    PrintRegister(reg);
+    return 2;
+  } else if (format[1] == 's') {  // 'rs: Rs register
+    int reg = instr->RsField();
+    PrintRegister(reg);
+    return 2;
+  } else if (format[1] == 'm') {  // 'rm: Rm register
+    int reg = instr->RmField();
+    PrintRegister(reg);
+    return 2;
+  } else if (format[1] == 'l') {
+    // 'rlist: register list for load and store multiple instructions
+    ASSERT(STRING_STARTS_WITH(format, "rlist"));
+    int rlist = instr->RlistField();
+    int reg = 0;
+    Print("{");
+    // Print register list in ascending order, by scanning the bit mask.
+    while (rlist != 0) {
+      if ((rlist & 1) != 0) {
+        PrintRegister(reg);
+        if ((rlist >> 1) != 0) {
+          Print(", ");
+        }
+      }
+      reg++;
+      rlist >>= 1;
+    }
+    Print("}");
+    return 5;
+  }
+  UNREACHABLE();
+  return -1;
+}
+
+
+// FormatOption takes a formatting string and interprets it based on
+// the current instruction. The format string points to the first
+// character of the option string (the option escape has already been
+// consumed by the caller.)  FormatOption returns the number of
+// characters that were consumed from the formatting string.
+int Decoder::FormatOption(Instr* instr, const char* format) {
+  switch (format[0]) {
+    case 'a': {  // 'a: accumulate multiplies
+      if (instr->Bit(21) == 0) {
+        Print("ul");
+      } else {
+        Print("la");
+      }
+      return 1;
+    }
+    case 'b': {  // 'b: byte loads or stores
+      if (instr->HasB()) {
+        Print("b");
+      }
+      return 1;
+    }
+    case 'c': {  // 'cond: conditional execution
+      ASSERT(STRING_STARTS_WITH(format, "cond"));
+      PrintCondition(instr);
+      return 4;
+    }
+    case 'h': {  // 'h: halfword operation for extra loads and stores
+      if (instr->HasH()) {
+        Print("h");
+      } else {
+        Print("b");
+      }
+      return 1;
+    }
+    case 'l': {  // 'l: branch and link
+      if (instr->HasLink()) {
+        Print("l");
+      }
+      return 1;
+    }
+    case 'm': {
+      if (format[1] == 'e') {  // 'memop: load/store instructions
+        ASSERT(STRING_STARTS_WITH(format, "memop"));
+        if (instr->HasL()) {
+          Print("ldr");
+        } else {
+          Print("str");
+        }
+        return 5;
+      }
+      // 'msg: for simulator break instructions
+      ASSERT(STRING_STARTS_WITH(format, "msg"));
+      byte* str =
+          reinterpret_cast<byte*>(instr->InstructionBits() & 0x0fffffff);
+      out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                           "%s", converter_.NameInCode(str));
+      return 3;
+    }
+    case 'o': {
+      if (format[3] == '1') {
+        // 'off12: 12-bit offset for load and store instructions
+        ASSERT(STRING_STARTS_WITH(format, "off12"));
+        out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                             "%d", instr->Offset12Field());
+        return 5;
+      }
+      // 'off8: 8-bit offset for extra load and store instructions
+      ASSERT(STRING_STARTS_WITH(format, "off8"));
+      int offs8 = (instr->ImmedHField() << 4) | instr->ImmedLField();
+      out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                           "%d", offs8);
+      return 4;
+    }
+    case 'p': {  // 'pu: P and U bits for load and store instructions
+      ASSERT(STRING_STARTS_WITH(format, "pu"));
+      PrintPU(instr);
+      return 2;
+    }
+    case 'r': {
+      return FormatRegister(instr, format);
+    }
+    case 's': {
+      if (format[1] == 'h') {  // 'shift_op or 'shift_rm
+        if (format[6] == 'o') {  // 'shift_op
+          ASSERT(STRING_STARTS_WITH(format, "shift_op"));
+          if (instr->TypeField() == 0) {
+            PrintShiftRm(instr);
+          } else {
+            ASSERT(instr->TypeField() == 1);
+            PrintShiftImm(instr);
+          }
+          return 8;
+        } else {  // 'shift_rm
+          ASSERT(STRING_STARTS_WITH(format, "shift_rm"));
+          PrintShiftRm(instr);
+          return 8;
+        }
+      } else if (format[1] == 'w') {  // 'swi
+        ASSERT(STRING_STARTS_WITH(format, "swi"));
+        PrintSoftwareInterrupt(instr->SwiField());
+        return 3;
+      } else if (format[1] == 'i') {  // 'sign: signed extra loads and stores
+        ASSERT(STRING_STARTS_WITH(format, "sign"));
+        if (instr->HasSign()) {
+          Print("s");
+        }
+        return 4;
+      }
+      // 's: S field of data processing instructions
+      if (instr->HasS()) {
+        Print("s");
+      }
+      return 1;
+    }
+    case 't': {  // 'target: target of branch instructions
+      ASSERT(STRING_STARTS_WITH(format, "target"));
+      int off = (instr->SImmed24Field() << 2) + 8;
+      out_buffer_pos_ += v8i::OS::SNPrintF(
+          out_buffer_ + out_buffer_pos_,
+          "%+d -> %s",
+          off,
+          converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + off));
+      return 6;
+    }
+    case 'u': {  // 'u: signed or unsigned multiplies
+      // The manual gets the meaning of bit 22 backwards in the multiply
+      // instruction overview on page A3.16.2.  The instructions that
+      // exist in u and s variants are the following:
+      // smull A4.1.87
+      // umull A4.1.129
+      // umlal A4.1.128
+      // smlal A4.1.76
+      // For these 0 means u and 1 means s.  As can be seen on their individual
+      // pages.  The other 18 mul instructions have the bit set or unset in
+      // arbitrary ways that are unrelated to the signedness of the instruction.
+      // None of these 18 instructions exist in both a 'u' and an 's' variant.
+
+      if (instr->Bit(22) == 0) {
+        Print("u");
+      } else {
+        Print("s");
+      }
+      return 1;
+    }
+    case 'w': {  // 'w: W field of load and store instructions
+      if (instr->HasW()) {
+        Print("!");
+      }
+      return 1;
+    }
+    default: {
+      UNREACHABLE();
+      break;
+    }
+  }
+  UNREACHABLE();
+  return -1;
+}
+
+
+// Format takes a formatting string for a whole instruction and prints it into
+// the output buffer. All escaped options are handed to FormatOption to be
+// parsed further.
+void Decoder::Format(Instr* instr, const char* format) {
+  char cur = *format++;
+  while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+    if (cur == '\'') {  // Single quote is used as the formatting escape.
+      format += FormatOption(instr, format);
+    } else {
+      out_buffer_[out_buffer_pos_++] = cur;
+    }
+    cur = *format++;
+  }
+  out_buffer_[out_buffer_pos_]  = '\0';
+}
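+// For example, a format string such as "add'cond's 'rd, 'rn, 'shift_op"
+// copies the literal "add", then hands "cond", "s", "rd", "rn" and
+// "shift_op" to FormatOption, producing output like "addeqs r0, r1, r2".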
+
+
+// For currently unimplemented decodings the disassembler calls Unknown(instr)
+// which just prints "unknown" instead of a disassembly of the instruction.
+void Decoder::Unknown(Instr* instr) {
+  Format(instr, "unknown");
+}
+
+
+void Decoder::DecodeType01(Instr* instr) {
+  int type = instr->TypeField();
+  if ((type == 0) && instr->IsSpecialType0()) {
+    // multiply instruction or extra loads and stores
+    if (instr->Bits(7, 4) == 9) {
+      if (instr->Bit(24) == 0) {
+        // multiply instructions
+        if (instr->Bit(23) == 0) {
+          if (instr->Bit(21) == 0) {
+            // The MUL instruction description (A 4.1.33) refers to Rd as being
+            // the destination for the operation, but it confusingly uses the
+            // Rn field to encode it.
+            Format(instr, "mul'cond's 'rn, 'rm, 'rs");
+          } else {
+            // The MLA instruction description (A 4.1.28) refers to the order
+            // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
+            // Rn field to encode the Rd register and the Rd field to encode
+            // the Rn register.
+            Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd");
+          }
+        } else {
+          // The signed/long multiply instructions use the terms RdHi and RdLo
+          // when referring to the target registers. They are mapped to the Rn
+          // and Rd fields as follows:
+          // RdLo == Rd field
+          // RdHi == Rn field
+          // The order of registers is: <RdLo>, <RdHi>, <Rm>, <Rs>
+          Format(instr, "'um'al'cond's 'rd, 'rn, 'rm, 'rs");
+        }
+      } else {
+        Unknown(instr);  // not used by V8
+      }
+    } else {
+      // extra load/store instructions
+      switch (instr->PUField()) {
+        case 0: {
+          if (instr->Bit(22) == 0) {
+            Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
+          } else {
+            Format(instr, "'memop'cond'sign'h 'rd, ['rn], #-'off8");
+          }
+          break;
+        }
+        case 1: {
+          if (instr->Bit(22) == 0) {
+            Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
+          } else {
+            Format(instr, "'memop'cond'sign'h 'rd, ['rn], #+'off8");
+          }
+          break;
+        }
+        case 2: {
+          if (instr->Bit(22) == 0) {
+            Format(instr, "'memop'cond'sign'h 'rd, ['rn, -'rm]'w");
+          } else {
+            Format(instr, "'memop'cond'sign'h 'rd, ['rn, #-'off8]'w");
+          }
+          break;
+        }
+        case 3: {
+          if (instr->Bit(22) == 0) {
+            Format(instr, "'memop'cond'sign'h 'rd, ['rn, +'rm]'w");
+          } else {
+            Format(instr, "'memop'cond'sign'h 'rd, ['rn, #+'off8]'w");
+          }
+          break;
+        }
+        default: {
+          // The PU field is a 2-bit field.
+          UNREACHABLE();
+          break;
+        }
+      }
+      return;
+    }
+  } else {
+    switch (instr->OpcodeField()) {
+      case AND: {
+        Format(instr, "and'cond's 'rd, 'rn, 'shift_op");
+        break;
+      }
+      case EOR: {
+        Format(instr, "eor'cond's 'rd, 'rn, 'shift_op");
+        break;
+      }
+      case SUB: {
+        Format(instr, "sub'cond's 'rd, 'rn, 'shift_op");
+        break;
+      }
+      case RSB: {
+        Format(instr, "rsb'cond's 'rd, 'rn, 'shift_op");
+        break;
+      }
+      case ADD: {
+        Format(instr, "add'cond's 'rd, 'rn, 'shift_op");
+        break;
+      }
+      case ADC: {
+        Format(instr, "adc'cond's 'rd, 'rn, 'shift_op");
+        break;
+      }
+      case SBC: {
+        Format(instr, "sbc'cond's 'rd, 'rn, 'shift_op");
+        break;
+      }
+      case RSC: {
+        Format(instr, "rsc'cond's 'rd, 'rn, 'shift_op");
+        break;
+      }
+      case TST: {
+        if (instr->HasS()) {
+          Format(instr, "tst'cond 'rn, 'shift_op");
+        } else {
+          Unknown(instr);  // not used by V8
+        }
+        break;
+      }
+      case TEQ: {
+        if (instr->HasS()) {
+          Format(instr, "teq'cond 'rn, 'shift_op");
+        } else {
+          switch (instr->Bits(7, 4)) {
+            case BX:
+              Format(instr, "bx'cond 'rm");
+              break;
+            case BLX:
+              Format(instr, "blx'cond 'rm");
+              break;
+            default:
+              Unknown(instr);  // not used by V8
+              break;
+          }
+        }
+        break;
+      }
+      case CMP: {
+        if (instr->HasS()) {
+          Format(instr, "cmp'cond 'rn, 'shift_op");
+        } else {
+          Unknown(instr);  // not used by V8
+        }
+        break;
+      }
+      case CMN: {
+        if (instr->HasS()) {
+          Format(instr, "cmn'cond 'rn, 'shift_op");
+        } else {
+          switch (instr->Bits(7, 4)) {
+            case CLZ:
+              Format(instr, "clz'cond 'rd, 'rm");
+              break;
+            default:
+              Unknown(instr);  // not used by V8
+              break;
+          }
+        }
+        break;
+      }
+      case ORR: {
+        Format(instr, "orr'cond's 'rd, 'rn, 'shift_op");
+        break;
+      }
+      case MOV: {
+        Format(instr, "mov'cond's 'rd, 'shift_op");
+        break;
+      }
+      case BIC: {
+        Format(instr, "bic'cond's 'rd, 'rn, 'shift_op");
+        break;
+      }
+      case MVN: {
+        Format(instr, "mvn'cond's 'rd, 'shift_op");
+        break;
+      }
+      default: {
+        // The Opcode field is a 4-bit field.
+        UNREACHABLE();
+        break;
+      }
+    }
+  }
+}
+
+
+void Decoder::DecodeType2(Instr* instr) {
+  switch (instr->PUField()) {
+    case 0: {
+      if (instr->HasW()) {
+        Unknown(instr);  // not used in V8
+      }
+      Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
+      break;
+    }
+    case 1: {
+      if (instr->HasW()) {
+        Unknown(instr);  // not used in V8
+      }
+      Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
+      break;
+    }
+    case 2: {
+      Format(instr, "'memop'cond'b 'rd, ['rn, #-'off12]'w");
+      break;
+    }
+    case 3: {
+      Format(instr, "'memop'cond'b 'rd, ['rn, #+'off12]'w");
+      break;
+    }
+    default: {
+      // The PU field is a 2-bit field.
+      UNREACHABLE();
+      break;
+    }
+  }
+}
+
+
+void Decoder::DecodeType3(Instr* instr) {
+  switch (instr->PUField()) {
+    case 0: {
+      ASSERT(!instr->HasW());
+      Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
+      break;
+    }
+    case 1: {
+      ASSERT(!instr->HasW());
+      Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
+      break;
+    }
+    case 2: {
+      Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
+      break;
+    }
+    case 3: {
+      Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
+      break;
+    }
+    default: {
+      // The PU field is a 2-bit field.
+      UNREACHABLE();
+      break;
+    }
+  }
+}
+
+
+void Decoder::DecodeType4(Instr* instr) {
+  ASSERT(instr->Bit(22) == 0);  // Privileged mode currently not supported.
+  if (instr->HasL()) {
+    Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
+  } else {
+    Format(instr, "stm'cond'pu 'rn'w, 'rlist");
+  }
+}
+
+
+void Decoder::DecodeType5(Instr* instr) {
+  Format(instr, "b'l'cond 'target");
+}
+
+
+void Decoder::DecodeType6(Instr* instr) {
+  // Coprocessor instructions currently not supported.
+  Unknown(instr);
+}
+
+
+void Decoder::DecodeType7(Instr* instr) {
+  if (instr->Bit(24) == 1) {
+    Format(instr, "swi'cond 'swi");
+  } else {
+    // Coprocessor instructions currently not supported.
+    Unknown(instr);
+  }
+}
+
+
+void Decoder::DecodeUnconditional(Instr* instr) {
+  if (instr->Bits(7, 4) == 0xB && instr->Bits(27, 25) == 0 && instr->HasL()) {
+    Format(instr, "'memop'h'pu 'rd, ");
+    bool immediate = instr->HasB();
+    switch (instr->PUField()) {
+      case 0: {
+        // Post index, negative.
+        if (instr->HasW()) {
+          Unknown(instr);
+          break;
+        }
+        if (immediate) {
+          Format(instr, "['rn], #-'imm12");
+        } else {
+          Format(instr, "['rn], -'rm");
+        }
+        break;
+      }
+      case 1: {
+        // Post index, positive.
+        if (instr->HasW()) {
+          Unknown(instr);
+          break;
+        }
+        if (immediate) {
+          Format(instr, "['rn], #+'imm12");
+        } else {
+          Format(instr, "['rn], +'rm");
+        }
+        break;
+      }
+      case 2: {
+        // Pre index or offset, negative.
+        if (immediate) {
+          Format(instr, "['rn, #-'imm12]'w");
+        } else {
+          Format(instr, "['rn, -'rm]'w");
+        }
+        break;
+      }
+      case 3: {
+        // Pre index or offset, positive.
+        if (immediate) {
+          Format(instr, "['rn, #+'imm12]'w");
+        } else {
+          Format(instr, "['rn, +'rm]'w");
+        }
+        break;
+      }
+      default: {
+        // The PU field is a 2-bit field.
+        UNREACHABLE();
+        break;
+      }
+    }
+    return;
+  }
+  Format(instr, "break 'msg");
+}
+
+
+// Disassemble the instruction at *instr_ptr into the output buffer.
+int Decoder::InstructionDecode(byte* instr_ptr) {
+  Instr* instr = Instr::At(instr_ptr);
+  // Print raw instruction bytes.
+  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                       "%08x       ",
+                                       instr->InstructionBits());
+  if (instr->ConditionField() == special_condition) {
+    DecodeUnconditional(instr);
+    return Instr::kInstrSize;
+  }
+  switch (instr->TypeField()) {
+    case 0:
+    case 1: {
+      DecodeType01(instr);
+      break;
+    }
+    case 2: {
+      DecodeType2(instr);
+      break;
+    }
+    case 3: {
+      DecodeType3(instr);
+      break;
+    }
+    case 4: {
+      DecodeType4(instr);
+      break;
+    }
+    case 5: {
+      DecodeType5(instr);
+      break;
+    }
+    case 6: {
+      DecodeType6(instr);
+      break;
+    }
+    case 7: {
+      DecodeType7(instr);
+      break;
+    }
+    default: {
+      // The type field is 3-bits in the ARM encoding.
+      UNREACHABLE();
+      break;
+    }
+  }
+  return Instr::kInstrSize;
+}
+
+
+} }  // namespace assembler::arm
+
+
+
+//------------------------------------------------------------------------------
+
+namespace disasm {
+
+namespace v8i = v8::internal;
+
+
+const char* NameConverter::NameOfAddress(byte* addr) const {
+  static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
+  v8::internal::OS::SNPrintF(tmp_buffer, "%p", addr);
+  return tmp_buffer.start();
+}
+
+
+const char* NameConverter::NameOfConstant(byte* addr) const {
+  return NameOfAddress(addr);
+}
+
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+  return assembler::arm::Registers::Name(reg);
+}
+
+
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+  UNREACHABLE();  // ARM does not have the concept of a byte register
+  return "nobytereg";
+}
+
+
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+  UNREACHABLE();  // ARM does not have any XMM registers
+  return "noxmmreg";
+}
+
+
+const char* NameConverter::NameInCode(byte* addr) const {
+  // The default name converter is called for unknown code, so we will not
+  // try to access any memory.
+  return "";
+}
+
+
+//------------------------------------------------------------------------------
+
+Disassembler::Disassembler(const NameConverter& converter)
+    : converter_(converter) {}
+
+
+Disassembler::~Disassembler() {}
+
+
+int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
+                                    byte* instruction) {
+  assembler::arm::Decoder d(converter_, buffer);
+  return d.InstructionDecode(instruction);
+}
+
+
+int Disassembler::ConstantPoolSizeAt(byte* instruction) {
+  int instruction_bits = *(reinterpret_cast<int*>(instruction));
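+  // A constant pool is marked by a dedicated instruction whose low 16 bits
+  // encode the pool size; -1 means there is no constant pool at this address.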
+  if ((instruction_bits & 0xfff00000) == 0x03000000) {
+    return instruction_bits & 0x0000ffff;
+  } else {
+    return -1;
+  }
+}
+
+
+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
+  NameConverter converter;
+  Disassembler d(converter);
+  for (byte* pc = begin; pc < end;) {
+    v8::internal::EmbeddedVector<char, 128> buffer;
+    buffer[0] = '\0';
+    byte* prev_pc = pc;
+    pc += d.InstructionDecode(buffer, pc);
+    fprintf(f, "%p    %08x      %s\n",
+            prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+  }
+}
+
+
+}  // namespace disasm
diff --git a/src/arm/frames-arm.cc b/src/arm/frames-arm.cc
new file mode 100644
index 0000000..6fde4b7
--- /dev/null
+++ b/src/arm/frames-arm.cc
@@ -0,0 +1,118 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "frames-inl.h"
+#include "arm/assembler-arm-inl.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+StackFrame::Type StackFrame::ComputeType(State* state) {
+  ASSERT(state->fp != NULL);
+  if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
+    return ARGUMENTS_ADAPTOR;
+  }
+  // The marker and function offsets overlap. If the marker isn't a
+  // smi then the frame is a JavaScript frame -- and the marker is
+  // really the function.
+  const int offset = StandardFrameConstants::kMarkerOffset;
+  Object* marker = Memory::Object_at(state->fp + offset);
+  if (!marker->IsSmi()) return JAVA_SCRIPT;
+  return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
+}
+
+
+StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
+  if (fp == 0) return NONE;
+  // Compute frame type and stack pointer.
+  Address sp = fp + ExitFrameConstants::kSPDisplacement;
+  Type type;
+  if (Memory::Address_at(fp + ExitFrameConstants::kDebugMarkOffset) != 0) {
+    type = EXIT_DEBUG;
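+    // A debug exit frame additionally holds the JS caller-saved registers,
+    // so the stack pointer lies that much lower.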
+    sp -= kNumJSCallerSaved * kPointerSize;
+  } else {
+    type = EXIT;
+  }
+  // Fill in the state.
+  state->sp = sp;
+  state->fp = fp;
+  state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
+  return type;
+}
+
+
+void ExitFrame::Iterate(ObjectVisitor* v) const {
+  // Do nothing
+}
+
+
+int JavaScriptFrame::GetProvidedParametersCount() const {
+  return ComputeParametersCount();
+}
+
+
+Address JavaScriptFrame::GetCallerStackPointer() const {
+  int arguments;
+  if (Heap::gc_state() != Heap::NOT_IN_GC || disable_heap_access_) {
+    // The arguments for cooked frames are traversed as if they were
+    // expression stack elements of the calling frame. The reason for
+    // this rather strange decision is that we cannot access the
+    // function during mark-compact GCs when the stack is cooked.
+    // In fact accessing heap objects (like function->shared() below)
+    // at all during GC is problematic.
+    arguments = 0;
+  } else {
+    // Compute the number of arguments by getting the number of formal
+    // parameters of the function. We must remember to take the
+    // receiver into account (+1).
+    JSFunction* function = JSFunction::cast(this->function());
+    arguments = function->shared()->formal_parameter_count() + 1;
+  }
+  const int offset = StandardFrameConstants::kCallerSPOffset;
+  return fp() + offset + (arguments * kPointerSize);
+}
+
+
+Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
+  const int arguments = Smi::cast(GetExpression(0))->value();
+  const int offset = StandardFrameConstants::kCallerSPOffset;
+  return fp() + offset + (arguments + 1) * kPointerSize;
+}
+
+
+Address InternalFrame::GetCallerStackPointer() const {
+  // Internal frames have no arguments. The stack pointer of the
+  // caller is at a fixed offset from the frame pointer.
+  return fp() + StandardFrameConstants::kCallerSPOffset;
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h
new file mode 100644
index 0000000..0874c09
--- /dev/null
+++ b/src/arm/frames-arm.h
@@ -0,0 +1,162 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_FRAMES_ARM_H_
+#define V8_ARM_FRAMES_ARM_H_
+
+namespace v8 {
+namespace internal {
+
+
+// The ARM ABI does not specify the usage of register r9, which may be reserved
+// as the static base or thread register on some platforms, in which case we
+// leave it alone. Adjust the value of kR9Available accordingly:
+static const int kR9Available = 1;  // 1 if available to us, 0 if reserved
+
+
+// Register list in load/store instructions
+// Note that the bit values must match those used in actual instruction encoding
+static const int kNumRegs = 16;
+
+
+// Caller-saved/arguments registers
+static const RegList kJSCallerSaved =
+  1 << 0 |  // r0 a1
+  1 << 1 |  // r1 a2
+  1 << 2 |  // r2 a3
+  1 << 3;   // r3 a4
+
+static const int kNumJSCallerSaved = 4;
+
+typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
+
+// Return the code of the n-th caller-saved register available to JavaScript
+// e.g. JSCallerSavedReg(0) returns r0.code() == 0
+int JSCallerSavedCode(int n);
+
+
+// Callee-saved registers preserved when switching from C to JavaScript
+static const RegList kCalleeSaved =
+  1 <<  4 |  //  r4 v1
+  1 <<  5 |  //  r5 v2
+  1 <<  6 |  //  r6 v3
+  1 <<  7 |  //  r7 v4
+  1 <<  8 |  //  r8 v5 (cp in JavaScript code)
+  kR9Available
+    <<  9 |  //  r9 v6
+  1 << 10 |  // r10 v7
+  1 << 11;   // r11 v8 (fp in JavaScript code)
+
+static const int kNumCalleeSaved = 7 + kR9Available;
+
+
+// ----------------------------------------------------
+
+
+class StackHandlerConstants : public AllStatic {
+ public:
+  static const int kNextOffset  = 0 * kPointerSize;
+  static const int kStateOffset = 1 * kPointerSize;
+  static const int kFPOffset    = 2 * kPointerSize;
+  static const int kPCOffset    = 3 * kPointerSize;
+
+  static const int kSize = kPCOffset + kPointerSize;
+};
+
+
+class EntryFrameConstants : public AllStatic {
+ public:
+  static const int kCallerFPOffset      = -3 * kPointerSize;
+};
+
+
+class ExitFrameConstants : public AllStatic {
+ public:
+  // Exit frames have a debug marker on the stack.
+  static const int kSPDisplacement = -1 * kPointerSize;
+
+  // The debug marker is just above the frame pointer.
+  static const int kDebugMarkOffset = -1 * kPointerSize;
+
+  static const int kSavedRegistersOffset = 0 * kPointerSize;
+
+  // The caller fields are below the frame pointer on the stack.
+  static const int kCallerFPOffset = +0 * kPointerSize;
+  // The calling JS function is between FP and PC.
+  static const int kCallerPCOffset = +2 * kPointerSize;
+
+  // FP-relative displacement of the caller's SP.  It points just
+  // below the saved PC.
+  static const int kCallerSPDisplacement = +3 * kPointerSize;
+};
+
+
+class StandardFrameConstants : public AllStatic {
+ public:
+  static const int kExpressionsOffset = -3 * kPointerSize;
+  static const int kMarkerOffset      = -2 * kPointerSize;
+  static const int kContextOffset     = -1 * kPointerSize;
+  static const int kCallerFPOffset    =  0 * kPointerSize;
+  static const int kCallerPCOffset    = +1 * kPointerSize;
+  static const int kCallerSPOffset    = +2 * kPointerSize;
+};
+
+
+class JavaScriptFrameConstants : public AllStatic {
+ public:
+  // FP-relative.
+  static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
+  static const int kSavedRegistersOffset = +2 * kPointerSize;
+  static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+
+  // Caller SP-relative.
+  static const int kParam0Offset   = -2 * kPointerSize;
+  static const int kReceiverOffset = -1 * kPointerSize;
+};
+
+
+class ArgumentsAdaptorFrameConstants : public AllStatic {
+ public:
+  static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+class InternalFrameConstants : public AllStatic {
+ public:
+  static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+inline Object* JavaScriptFrame::function_slot_object() const {
+  const int offset = JavaScriptFrameConstants::kFunctionOffset;
+  return Memory::Object_at(fp() + offset);
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_ARM_FRAMES_ARM_H_
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
new file mode 100644
index 0000000..d230b45
--- /dev/null
+++ b/src/arm/ic-arm.cc
@@ -0,0 +1,824 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "ic-inl.h"
+#include "runtime.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+#define __ ACCESS_MASM(masm)
+
+
+// Helper function used from LoadIC/CallIC GenerateNormal.
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+                                   Label* miss,
+                                   Register t0,
+                                   Register t1) {
+  // Register use:
+  //
+  // t0 - used to hold the property dictionary.
+  //
+  // t1 - initially the receiver
+  //    - used for the index into the property dictionary
+  //    - holds the result on exit.
+  //
+  // r3 - used as temporary and to hold the capacity of the property
+  //      dictionary.
+  //
+  // r2 - holds the name of the property and is unchanged.
+
+  Label done;
+
+  // Check for the absence of an interceptor.
+  // Load the map into t0.
+  __ ldr(t0, FieldMemOperand(t1, JSObject::kMapOffset));
+  // Test the has_named_interceptor bit in the map.
+  __ ldr(r3, FieldMemOperand(t0, Map::kInstanceAttributesOffset));
+  __ tst(r3, Operand(1 << (Map::kHasNamedInterceptor + (3 * 8))));
+  // Jump to miss if the interceptor bit is set.
+  __ b(ne, miss);
+
+  // Bail out if we have a JS global proxy object.
+  __ ldrb(r3, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+  __ cmp(r3, Operand(JS_GLOBAL_PROXY_TYPE));
+  __ b(eq, miss);
+
+  // Possible work-around for http://crbug.com/16276.
+  // See also: http://codereview.chromium.org/155418.
+  __ cmp(r3, Operand(JS_GLOBAL_OBJECT_TYPE));
+  __ b(eq, miss);
+  __ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
+  __ b(eq, miss);
+
+  // Check that the properties array is a dictionary.
+  __ ldr(t0, FieldMemOperand(t1, JSObject::kPropertiesOffset));
+  __ ldr(r3, FieldMemOperand(t0, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+  __ cmp(r3, ip);
+  __ b(ne, miss);
+
+  // Compute the capacity mask.
+  const int kCapacityOffset = StringDictionary::kHeaderSize +
+      StringDictionary::kCapacityIndex * kPointerSize;
+  __ ldr(r3, FieldMemOperand(t0, kCapacityOffset));
+  __ mov(r3, Operand(r3, ASR, kSmiTagSize));  // convert smi to int
+  __ sub(r3, r3, Operand(1));
+
+  const int kElementsStartOffset = StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
+
+  // Generate an unrolled loop that performs a few probes before
+  // giving up. Measurements done on Gmail indicate that 2 probes
+  // cover ~93% of loads from dictionaries.
+  static const int kProbes = 4;
+  for (int i = 0; i < kProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    __ ldr(t1, FieldMemOperand(r2, String::kLengthOffset));
+    __ mov(t1, Operand(t1, LSR, String::kHashShift));
+    if (i > 0) {
+      __ add(t1, t1, Operand(StringDictionary::GetProbeOffset(i)));
+    }
+    __ and_(t1, t1, Operand(r3));
+
+    // Scale the index by multiplying by the element size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    __ add(t1, t1, Operand(t1, LSL, 1));  // t1 = t1 * 3
+
+    // Check if the key is identical to the name.
+    __ add(t1, t0, Operand(t1, LSL, 2));
+    __ ldr(ip, FieldMemOperand(t1, kElementsStartOffset));
+    __ cmp(r2, Operand(ip));
+    if (i != kProbes - 1) {
+      __ b(eq, &done);
+    } else {
+      __ b(ne, miss);
+    }
+  }
+
+  // Check that the value is a normal property.
+  __ bind(&done);  // t1 == t0 + 4*index
+  __ ldr(r3, FieldMemOperand(t1, kElementsStartOffset + 2 * kPointerSize));
+  __ tst(r3, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
+  __ b(ne, miss);
+
+  // Get the value at the masked, scaled index and return.
+  __ ldr(t1, FieldMemOperand(t1, kElementsStartOffset + 1 * kPointerSize));
+}
+
+
+// Helper function used to check that a value is either not an object
+// or is loaded if it is an object.
+static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm,
+                                           Label* miss,
+                                           Register value,
+                                           Register scratch) {
+  Label done;
+  // Check if the value is a Smi.
+  __ tst(value, Operand(kSmiTagMask));
+  __ b(eq, &done);
+  // Check if the object has been loaded.
+  __ ldr(scratch, FieldMemOperand(value, JSObject::kMapOffset));
+  __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
+  __ tst(scratch, Operand(1 << Map::kNeedsLoading));
+  __ b(ne, miss);
+  __ bind(&done);
+}
+
+
+void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2    : name
+  //  -- lr    : return address
+  //  -- [sp]  : receiver
+  // -----------------------------------
+
+  Label miss;
+
+  __ ldr(r0, MemOperand(sp, 0));
+
+  StubCompiler::GenerateLoadArrayLength(masm, r0, r3, &miss);
+  __ bind(&miss);
+  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
+}
+
+
+void LoadIC::GenerateStringLength(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2    : name
+  //  -- lr    : return address
+  //  -- [sp]  : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ ldr(r0, MemOperand(sp, 0));
+
+  StubCompiler::GenerateLoadStringLength2(masm, r0, r1, r3, &miss);
+  // Cache miss: Jump to runtime.
+  __ bind(&miss);
+  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
+}
+
+
+void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2    : name
+  //  -- lr    : return address
+  //  -- [sp]  : receiver
+  // -----------------------------------
+
+  Label miss;
+
+  // Load receiver.
+  __ ldr(r0, MemOperand(sp, 0));
+
+  StubCompiler::GenerateLoadFunctionPrototype(masm, r0, r1, r3, &miss);
+  __ bind(&miss);
+  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
+}
+
+
+// Defined in ic.cc.
+Object* CallIC_Miss(Arguments args);
+
+void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+  // ----------- S t a t e -------------
+  //  -- lr: return address
+  // -----------------------------------
+  Label number, non_number, non_string, boolean, probe, miss;
+
+  // Get the receiver of the function from the stack into r1.
+  __ ldr(r1, MemOperand(sp, argc * kPointerSize));
+  // Get the name of the function from the stack (the +1 skips the receiver).
+  __ ldr(r2, MemOperand(sp, (argc + 1) * kPointerSize));
+
+  // Probe the stub cache.
+  Code::Flags flags =
+      Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
+  StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg);
+
+  // If the stub cache probing failed, the receiver might be a value.
+  // For value objects, the cache is keyed on the map of the prototype
+  // object of the corresponding JSValue, so that is the map we probe with.
+  //
+  // Check for number.
+  __ tst(r1, Operand(kSmiTagMask));
+  __ b(eq, &number);
+  __ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE);
+  __ b(ne, &non_number);
+  __ bind(&number);
+  StubCompiler::GenerateLoadGlobalFunctionPrototype(
+      masm, Context::NUMBER_FUNCTION_INDEX, r1);
+  __ b(&probe);
+
+  // Check for string.
+  __ bind(&non_number);
+  __ cmp(r3, Operand(FIRST_NONSTRING_TYPE));
+  __ b(hs, &non_string);
+  StubCompiler::GenerateLoadGlobalFunctionPrototype(
+      masm, Context::STRING_FUNCTION_INDEX, r1);
+  __ b(&probe);
+
+  // Check for boolean.
+  __ bind(&non_string);
+  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+  __ cmp(r1, ip);
+  __ b(eq, &boolean);
+  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+  __ cmp(r1, ip);
+  __ b(ne, &miss);
+  __ bind(&boolean);
+  StubCompiler::GenerateLoadGlobalFunctionPrototype(
+      masm, Context::BOOLEAN_FUNCTION_INDEX, r1);
+
+  // Probe the stub cache for the value object.
+  __ bind(&probe);
+  StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg);
+
+  // Cache miss: Jump to runtime.
+  __ bind(&miss);
+  Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+}
+
+
+static void GenerateNormalHelper(MacroAssembler* masm,
+                                 int argc,
+                                 bool is_global_object,
+                                 Label* miss) {
+  // Search dictionary - put result in register r1.
+  GenerateDictionaryLoad(masm, miss, r0, r1);
+
+  // Check that the value isn't a smi.
+  __ tst(r1, Operand(kSmiTagMask));
+  __ b(eq, miss);
+
+  // Check that the value is a JSFunction.
+  __ CompareObjectType(r1, r0, r0, JS_FUNCTION_TYPE);
+  __ b(ne, miss);
+
+  // Check that the function has been loaded.
+  __ ldr(r0, FieldMemOperand(r1, JSObject::kMapOffset));
+  __ ldrb(r0, FieldMemOperand(r0, Map::kBitField2Offset));
+  __ tst(r0, Operand(1 << Map::kNeedsLoading));
+  __ b(ne, miss);
+
+  // Patch the receiver with the global proxy if necessary.
+  if (is_global_object) {
+    __ ldr(r2, MemOperand(sp, argc * kPointerSize));
+    __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
+    __ str(r2, MemOperand(sp, argc * kPointerSize));
+  }
+
+  // Invoke the function.
+  ParameterCount actual(argc);
+  __ InvokeFunction(r1, actual, JUMP_FUNCTION);
+}
+
+
+void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+  // ----------- S t a t e -------------
+  //  -- lr: return address
+  // -----------------------------------
+
+  Label miss, global_object, non_global_object;
+
+  // Get the receiver of the function from the stack into r1.
+  __ ldr(r1, MemOperand(sp, argc * kPointerSize));
+  // Get the name of the function from the stack (the +1 skips the receiver).
+  __ ldr(r2, MemOperand(sp, (argc + 1) * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  __ tst(r1, Operand(kSmiTagMask));
+  __ b(eq, &miss);
+
+  // Check that the receiver is a valid JS object.  Put the map in r3.
+  __ CompareObjectType(r1, r3, r0, FIRST_JS_OBJECT_TYPE);
+  __ b(lt, &miss);
+
+  // If this assert fails, we have to check upper bound too.
+  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+
+  // Check for access to global object.
+  __ cmp(r0, Operand(JS_GLOBAL_OBJECT_TYPE));
+  __ b(eq, &global_object);
+  __ cmp(r0, Operand(JS_BUILTINS_OBJECT_TYPE));
+  __ b(ne, &non_global_object);
+
+  // Accessing global object: Load and invoke.
+  __ bind(&global_object);
+  // Check that the global object does not require access checks.
+  __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
+  __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
+  __ b(ne, &miss);
+  GenerateNormalHelper(masm, argc, true, &miss);
+
+  // Accessing non-global object: Check for access to global proxy.
+  Label global_proxy, invoke;
+  __ bind(&non_global_object);
+  __ cmp(r0, Operand(JS_GLOBAL_PROXY_TYPE));
+  __ b(eq, &global_proxy);
+  // Check that the non-global, non-global-proxy object does not
+  // require access checks.
+  __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
+  __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
+  __ b(ne, &miss);
+  __ bind(&invoke);
+  GenerateNormalHelper(masm, argc, false, &miss);
+
+  // Global object access: Check access rights.
+  __ bind(&global_proxy);
+  __ CheckAccessGlobalProxy(r1, r0, &miss);
+  __ b(&invoke);
+
+  // Cache miss: Jump to runtime.
+  __ bind(&miss);
+  Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+}
+
+
+void CallIC::Generate(MacroAssembler* masm,
+                      int argc,
+                      const ExternalReference& f) {
+  // ----------- S t a t e -------------
+  //  -- lr: return address
+  // -----------------------------------
+
+  // Get the receiver of the function from the stack.
+  __ ldr(r2, MemOperand(sp, argc * kPointerSize));
+  // Get the name of the function to call from the stack.
+  __ ldr(r1, MemOperand(sp, (argc + 1) * kPointerSize));
+
+  __ EnterInternalFrame();
+
+  // Push the receiver and the name of the function.
+  __ stm(db_w, sp, r1.bit() | r2.bit());
+
+  // Call the entry.
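+  // Pass the argument count in r0 and the entry to call in r1 for CEntryStub.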
+  __ mov(r0, Operand(2));
+  __ mov(r1, Operand(f));
+
+  CEntryStub stub(1);
+  __ CallStub(&stub);
+
+  // Move result to r1 and leave the internal frame.
+  __ mov(r1, Operand(r0));
+  __ LeaveInternalFrame();
+
+  // Check if the receiver is a global object of some sort.
+  Label invoke, global;
+  __ ldr(r2, MemOperand(sp, argc * kPointerSize));  // receiver
+  __ tst(r2, Operand(kSmiTagMask));
+  __ b(eq, &invoke);
+  __ CompareObjectType(r2, r3, r3, JS_GLOBAL_OBJECT_TYPE);
+  __ b(eq, &global);
+  __ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
+  __ b(ne, &invoke);
+
+  // Patch the receiver on the stack.
+  __ bind(&global);
+  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
+  __ str(r2, MemOperand(sp, argc * kPointerSize));
+
+  // Invoke the function.
+  ParameterCount actual(argc);
+  __ bind(&invoke);
+  __ InvokeFunction(r1, actual, JUMP_FUNCTION);
+}
+
+
+// Defined in ic.cc.
+Object* LoadIC_Miss(Arguments args);
+
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2    : name
+  //  -- lr    : return address
+  //  -- [sp]  : receiver
+  // -----------------------------------
+
+  __ ldr(r0, MemOperand(sp, 0));
+  // Probe the stub cache.
+  Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
+                                         NOT_IN_LOOP,
+                                         MONOMORPHIC);
+  StubCache::GenerateProbe(masm, flags, r0, r2, r3, no_reg);
+
+  // Cache miss: Jump to runtime.
+  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+}
+
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2    : name
+  //  -- lr    : return address
+  //  -- [sp]  : receiver
+  // -----------------------------------
+
+  Label miss, probe, global;
+
+  __ ldr(r0, MemOperand(sp, 0));
+  // Check that the receiver isn't a smi.
+  __ tst(r0, Operand(kSmiTagMask));
+  __ b(eq, &miss);
+
+  // Check that the receiver is a valid JS object.  Put the map in r3.
+  __ CompareObjectType(r0, r3, r1, FIRST_JS_OBJECT_TYPE);
+  __ b(lt, &miss);
+  // If this assert fails, we have to check upper bound too.
+  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+
+  // Check for access to global object (unlikely).
+  __ cmp(r1, Operand(JS_GLOBAL_PROXY_TYPE));
+  __ b(eq, &global);
+
+  // Check for non-global object that requires access check.
+  __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
+  __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
+  __ b(ne, &miss);
+
+  __ bind(&probe);
+  GenerateDictionaryLoad(masm, &miss, r1, r0);
+  GenerateCheckNonObjectOrLoaded(masm, &miss, r0, r1);
+  __ Ret();
+
+  // Global object access: Check access rights.
+  __ bind(&global);
+  __ CheckAccessGlobalProxy(r0, r1, &miss);
+  __ b(&probe);
+
+  // Cache miss: Restore receiver from stack and jump to runtime.
+  __ bind(&miss);
+  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+}
+
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+}
+
+
+void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+  // ----------- S t a t e -------------
+  //  -- r2    : name
+  //  -- lr    : return address
+  //  -- [sp]  : receiver
+  // -----------------------------------
+
+  __ ldr(r3, MemOperand(sp, 0));
+  __ stm(db_w, sp, r2.bit() | r3.bit());
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(f, 2, 1);
+}
+
+
+// TODO(181): Implement map patching once loop nesting is tracked on the
+// ARM platform so we can generate inlined fast-case code loads in
+// loops.
+void LoadIC::ClearInlinedVersion(Address address) {}
+bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
+  return false;
+}
+
+void KeyedLoadIC::ClearInlinedVersion(Address address) {}
+bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
+  return false;
+}
+
+void KeyedStoreIC::ClearInlinedVersion(Address address) {}
+void KeyedStoreIC::RestoreInlinedVersion(Address address) {}
+bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
+  return false;
+}
+
+
+Object* KeyedLoadIC_Miss(Arguments args);
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+  Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
+}
+
+
+void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+  // ---------- S t a t e --------------
+  //  -- lr     : return address
+  //  -- sp[0]  : key
+  //  -- sp[4]  : receiver
+  __ ldm(ia, sp, r2.bit() | r3.bit());
+  __ stm(db_w, sp, r2.bit() | r3.bit());
+
+  __ TailCallRuntime(f, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- lr     : return address
+  //  -- sp[0]  : key
+  //  -- sp[4]  : receiver
+  Label slow, fast;
+
+  // Get the key and receiver object from the stack.
+  __ ldm(ia, sp, r0.bit() | r1.bit());
+  // Check that the key is a smi.
+  __ tst(r0, Operand(kSmiTagMask));
+  __ b(ne, &slow);
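+  // Untag the key to get the element index.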
+  __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+  // Check that the object isn't a smi.
+  __ tst(r1, Operand(kSmiTagMask));
+  __ b(eq, &slow);
+
+  // Get the map of the receiver.
+  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+  // Check that the receiver does not require access checks.  We need
+  // to check this explicitly since this generic stub does not perform
+  // map checks.
+  __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
+  __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
+  __ b(ne, &slow);
+  // Check that the object is some kind of JS object EXCEPT JS Value type.
+  // In the case that the object is a value-wrapper object,
+  // we enter the runtime system to make sure that indexing into string
+  // objects works as intended.
+  ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+  __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+  __ cmp(r2, Operand(JS_OBJECT_TYPE));
+  __ b(lt, &slow);
+
+  // Get the elements array of the object.
+  __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
+  // Check that the object is in fast mode (not dictionary).
+  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+  __ cmp(r3, ip);
+  __ b(ne, &slow);
+  // Check that the key (index) is within bounds.
+  __ ldr(r3, FieldMemOperand(r1, Array::kLengthOffset));
+  __ cmp(r0, Operand(r3));
+  __ b(lo, &fast);
+
+  // Slow case: Push extra copies of the arguments (2).
+  __ bind(&slow);
+  __ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r0, r1);
+  __ ldm(ia, sp, r0.bit() | r1.bit());
+  __ stm(db_w, sp, r0.bit() | r1.bit());
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(ExternalReference(Runtime::kGetProperty), 2, 1);
+
+  // Fast case: Do the load.
+  __ bind(&fast);
+  __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2));
+  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+  __ cmp(r0, ip);
+  // In case the loaded value is the_hole we have to consult GetProperty
+  // to ensure the prototype chain is searched.
+  __ b(eq, &slow);
+
+  __ Ret();
+}
+
+
+void KeyedStoreIC::Generate(MacroAssembler* masm,
+                            const ExternalReference& f) {
+  // ---------- S t a t e --------------
+  //  -- r0     : value
+  //  -- lr     : return address
+  //  -- sp[0]  : key
+  //  -- sp[1]  : receiver
+
+  __ ldm(ia, sp, r2.bit() | r3.bit());
+  __ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
+
+  __ TailCallRuntime(f, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- r0     : value
+  //  -- lr     : return address
+  //  -- sp[0]  : key
+  //  -- sp[1]  : receiver
+  Label slow, fast, array, extra, exit;
+  // Get the key and the object from the stack.
+  __ ldm(ia, sp, r1.bit() | r3.bit());  // r1 = key, r3 = receiver
+  // Check that the key is a smi.
+  __ tst(r1, Operand(kSmiTagMask));
+  __ b(ne, &slow);
+  // Check that the object isn't a smi.
+  __ tst(r3, Operand(kSmiTagMask));
+  __ b(eq, &slow);
+  // Get the map of the object.
+  __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+  // Check that the receiver does not require access checks.  We need
+  // to do this because this generic stub does not perform map checks.
+  __ ldrb(ip, FieldMemOperand(r2, Map::kBitFieldOffset));
+  __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
+  __ b(ne, &slow);
+  // Check if the object is a JS array or not.
+  __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+  __ cmp(r2, Operand(JS_ARRAY_TYPE));
+  // r1 == key.
+  __ b(eq, &array);
+  // Check that the object is some kind of JS object.
+  __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
+  __ b(lt, &slow);
+
+
+  // Object case: Check key against length in the elements array.
+  __ ldr(r3, FieldMemOperand(r3, JSObject::kElementsOffset));
+  // Check that the object is in fast mode (not dictionary).
+  __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+  __ cmp(r2, ip);
+  __ b(ne, &slow);
+  // Untag the key (for checking against untagged length in the fixed array).
+  __ mov(r1, Operand(r1, ASR, kSmiTagSize));
+  // Compute address to store into and check array bounds.
+  __ add(r2, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2));
+  __ ldr(ip, FieldMemOperand(r3, FixedArray::kLengthOffset));
+  __ cmp(r1, Operand(ip));
+  __ b(lo, &fast);
+
+
+  // Slow case: Push extra copies of the arguments (3).
+  __ bind(&slow);
+  __ ldm(ia, sp, r1.bit() | r3.bit());  // r0 == value, r1 == key, r3 == object
+  __ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit());
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
+
+  // Extra capacity case: Check if there is extra capacity to
+  // perform the store and update the length. Used for adding one
+  // element to the array by writing to array[array.length].
+  // r0 == value, r1 == key, r2 == elements, r3 == object
+  __ bind(&extra);
+  __ b(ne, &slow);  // do not leave holes in the array
+  __ mov(r1, Operand(r1, ASR, kSmiTagSize));  // untag
+  __ ldr(ip, FieldMemOperand(r2, Array::kLengthOffset));
+  __ cmp(r1, Operand(ip));
+  __ b(hs, &slow);
+  __ mov(r1, Operand(r1, LSL, kSmiTagSize));  // restore tag
+  __ add(r1, r1, Operand(1 << kSmiTagSize));  // and increment
+  __ str(r1, FieldMemOperand(r3, JSArray::kLengthOffset));
+  __ mov(r3, Operand(r2));
+  // NOTE: Computing the address to store into must take into account
+  // that the key has been incremented.
+  int displacement = FixedArray::kHeaderSize - kHeapObjectTag -
+      ((1 << kSmiTagSize) * 2);
+  __ add(r2, r2, Operand(displacement));
+  __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ b(&fast);
+
+
+  // Array case: Get the length and the elements array from the JS
+  // array. Check that the array is in fast mode; if it is the
+  // length is always a smi.
+  // r0 == value, r3 == object
+  __ bind(&array);
+  __ ldr(r2, FieldMemOperand(r3, JSObject::kElementsOffset));
+  __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+  __ cmp(r1, ip);
+  __ b(ne, &slow);
+
+  // Check the key against the length in the array, compute the
+  // address to store into and fall through to fast case.
+  __ ldr(r1, MemOperand(sp));  // restore key
+  // r0 == value, r1 == key, r2 == elements, r3 == object.
+  __ ldr(ip, FieldMemOperand(r3, JSArray::kLengthOffset));
+  __ cmp(r1, Operand(ip));
+  __ b(hs, &extra);
+  __ mov(r3, Operand(r2));
+  __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
+
+
+  // Fast case: Do the store.
+  // r0 == value, r2 == address to store into, r3 == elements
+  __ bind(&fast);
+  __ str(r0, MemOperand(r2));
+  // Skip write barrier if the written value is a smi.
+  __ tst(r0, Operand(kSmiTagMask));
+  __ b(eq, &exit);
+  // Update write barrier for the elements array address.
+  __ sub(r1, r2, Operand(r3));
+  __ RecordWrite(r3, r1, r2);
+
+  __ bind(&exit);
+  __ Ret();
+}
+
+
+void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- r0     : value
+  //  -- lr     : return address
+  //  -- sp[0]  : key
+  //  -- sp[1]  : receiver
+  // -----------------------------------
+
+  __ ldm(ia, sp, r2.bit() | r3.bit());
+  __ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(
+      ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
+}
+
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r2    : name
+  //  -- lr    : return address
+  //  -- [sp]  : receiver
+  // -----------------------------------
+
+  // Get the receiver from the stack and probe the stub cache.
+  __ ldr(r1, MemOperand(sp));
+  Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
+                                         NOT_IN_LOOP,
+                                         MONOMORPHIC);
+  StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg);
+
+  // Cache miss: Jump to runtime.
+  Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
+}
+
+
+void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r2    : name
+  //  -- lr    : return address
+  //  -- [sp]  : receiver
+  // -----------------------------------
+
+  __ ldr(r3, MemOperand(sp));  // copy receiver
+  __ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(
+      ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
+}
+
+
+void StoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r2    : name
+  //  -- lr    : return address
+  //  -- [sp]  : receiver
+  // -----------------------------------
+
+  __ ldr(r3, MemOperand(sp));  // copy receiver
+  __ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(f, 3, 1);
+}
+
+
+#undef __
+
+
+} }  // namespace v8::internal
diff --git a/src/arm/jump-target-arm.cc b/src/arm/jump-target-arm.cc
new file mode 100644
index 0000000..3315f83
--- /dev/null
+++ b/src/arm/jump-target-arm.cc
@@ -0,0 +1,238 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "jump-target-inl.h"
+#include "register-allocator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// JumpTarget implementation.
+
+#define __ ACCESS_MASM(cgen()->masm())
+
+void JumpTarget::DoJump() {
+  ASSERT(cgen()->has_valid_frame());
+  // Live non-frame registers are not allowed at unconditional jumps
+  // because we have no way of invalidating the corresponding results
+  // which are still live in the C++ code.
+  ASSERT(cgen()->HasValidEntryRegisters());
+
+  if (is_bound()) {
+    // Backward jump.  There is already a frame expectation at the target.
+    ASSERT(direction_ == BIDIRECTIONAL);
+    cgen()->frame()->MergeTo(entry_frame_);
+    cgen()->DeleteFrame();
+  } else {
+    // Use the current frame as the expected one at the target if necessary.
+    if (entry_frame_ == NULL) {
+      entry_frame_ = cgen()->frame();
+      RegisterFile empty;
+      cgen()->SetFrame(NULL, &empty);
+    } else {
+      cgen()->frame()->MergeTo(entry_frame_);
+      cgen()->DeleteFrame();
+    }
+
+    // The predicate is_linked() should be made true.  Its implementation
+    // detects the presence of a frame pointer in the reaching_frames_ list.
+    if (!is_linked()) {
+      reaching_frames_.Add(NULL);
+      ASSERT(is_linked());
+    }
+  }
+  __ jmp(&entry_label_);
+}
+
+
+void JumpTarget::DoBranch(Condition cc, Hint ignored) {
+  ASSERT(cgen()->has_valid_frame());
+
+  if (is_bound()) {
+    ASSERT(direction_ == BIDIRECTIONAL);
+    // Backward branch.  We have an expected frame to merge to on the
+    // backward edge.
+    cgen()->frame()->MergeTo(entry_frame_);
+  } else {
+    // Clone the current frame to use as the expected one at the target if
+    // necessary.
+    if (entry_frame_ == NULL) {
+      entry_frame_ = new VirtualFrame(cgen()->frame());
+    }
+    // The predicate is_linked() should be made true.  Its implementation
+    // detects the presence of a frame pointer in the reaching_frames_ list.
+    if (!is_linked()) {
+      reaching_frames_.Add(NULL);
+      ASSERT(is_linked());
+    }
+  }
+  __ b(cc, &entry_label_);
+}
+
+
+void JumpTarget::Call() {
+  // Call is used to push the address of the catch block on the stack as
+  // a return address when compiling try/catch and try/finally.  We
+  // fully spill the frame before making the call.  The expected frame
+  // at the label (which should be the only one) is the spilled current
+  // frame plus an in-memory return address.  The "fall-through" frame
+  // at the return site is the spilled current frame.
+  ASSERT(cgen()->has_valid_frame());
+  // There are no non-frame references across the call.
+  ASSERT(cgen()->HasValidEntryRegisters());
+  ASSERT(!is_linked());
+
+  // Calls are always 'forward' so we use a copy of the current frame (plus
+  // one for a return address) as the expected frame.
+  ASSERT(entry_frame_ == NULL);
+  VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
+  target_frame->Adjust(1);
+  entry_frame_ = target_frame;
+
+  // The predicate is_linked() should now be made true.  Its implementation
+  // detects the presence of a frame pointer in the reaching_frames_ list.
+  reaching_frames_.Add(NULL);
+  ASSERT(is_linked());
+
+  __ bl(&entry_label_);
+}
+
+
+void JumpTarget::DoBind() {
+  ASSERT(!is_bound());
+
+  // Live non-frame registers are not allowed at the start of a basic
+  // block.
+  ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
+
+  if (cgen()->has_valid_frame()) {
+    // If there is a current frame we can use it on the fall through.
+    if (entry_frame_ == NULL) {
+      entry_frame_ = new VirtualFrame(cgen()->frame());
+    } else {
+      ASSERT(cgen()->frame()->Equals(entry_frame_));
+    }
+  } else {
+    // If there is no current frame we must have an entry frame which we can
+    // copy.
+    ASSERT(entry_frame_ != NULL);
+    RegisterFile empty;
+    cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
+  }
+
+  // The predicate is_linked() should be made false.  Its implementation
+  // detects the presence (or absence) of frame pointers in the
+  // reaching_frames_ list.  If we inserted a bogus frame to make
+  // is_linked() true, remove it now.
+  if (is_linked()) {
+    reaching_frames_.Clear();
+  }
+
+  __ bind(&entry_label_);
+}
+
+
+void BreakTarget::Jump() {
+  // On ARM we do not currently emit merge code for jumps, so we need to do
+  // it explicitly here.  The only merging necessary is to drop extra
+  // statement state from the stack.
+  ASSERT(cgen()->has_valid_frame());
+  int count = cgen()->frame()->height() - expected_height_;
+  cgen()->frame()->Drop(count);
+  DoJump();
+}
+
+
+void BreakTarget::Jump(Result* arg) {
+  // On ARM we do not currently emit merge code for jumps, so we need to do
+  // it explicitly here.  The only merging necessary is to drop extra
+  // statement state from the stack.
+  ASSERT(cgen()->has_valid_frame());
+  int count = cgen()->frame()->height() - expected_height_;
+  cgen()->frame()->Drop(count);
+  cgen()->frame()->Push(arg);
+  DoJump();
+}
+
+
+void BreakTarget::Bind() {
+#ifdef DEBUG
+  // All the forward-reaching frames should have been adjusted at the
+  // jumps to this target.
+  for (int i = 0; i < reaching_frames_.length(); i++) {
+    ASSERT(reaching_frames_[i] == NULL ||
+           reaching_frames_[i]->height() == expected_height_);
+  }
+#endif
+  // Drop leftover statement state from the frame before merging, even
+  // on the fall through.  This is so we can bind the return target
+  // with state on the frame.
+  if (cgen()->has_valid_frame()) {
+    int count = cgen()->frame()->height() - expected_height_;
+    // On ARM we do not currently emit merge code at binding sites, so we need
+    // to do it explicitly here.  The only merging necessary is to drop extra
+    // statement state from the stack.
+    cgen()->frame()->Drop(count);
+  }
+
+  DoBind();
+}
+
+
+void BreakTarget::Bind(Result* arg) {
+#ifdef DEBUG
+  // All the forward-reaching frames should have been adjusted at the
+  // jumps to this target.
+  for (int i = 0; i < reaching_frames_.length(); i++) {
+    ASSERT(reaching_frames_[i] == NULL ||
+           reaching_frames_[i]->height() == expected_height_ + 1);
+  }
+#endif
+  // Drop leftover statement state from the frame before merging, even
+  // on the fall through.  This is so we can bind the return target
+  // with state on the frame.
+  if (cgen()->has_valid_frame()) {
+    int count = cgen()->frame()->height() - expected_height_;
+    // On ARM we do not currently emit merge code at binding sites, so we need
+    // to do it explicitly here.  The only merging necessary is to drop extra
+    // statement state from the stack.
+    cgen()->frame()->ForgetElements(count);
+    cgen()->frame()->Push(arg);
+  }
+  DoBind();
+  *arg = cgen()->frame()->Pop();
+}
+
+
+#undef __
+
+
+} }  // namespace v8::internal
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
new file mode 100644
index 0000000..cf46773
--- /dev/null
+++ b/src/arm/macro-assembler-arm.cc
@@ -0,0 +1,1189 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "debug.h"
+#include "runtime.h"
+
+namespace v8 {
+namespace internal {
+
+MacroAssembler::MacroAssembler(void* buffer, int size)
+    : Assembler(buffer, size),
+      unresolved_(0),
+      generating_stub_(false),
+      allow_stub_calls_(true),
+      code_object_(Heap::undefined_value()) {
+}
+
+
+// We always generate arm code, never thumb code, even if V8 is compiled to
+// thumb, so we require inter-working support
+#if defined(__thumb__) && !defined(USE_THUMB_INTERWORK)
+#error "flag -mthumb-interwork missing"
+#endif
+
+
+// We do not support thumb inter-working on ARM architectures that lack the
+// blx instruction (below v5t).  If you know what CPU you are compiling for
+// you can use -march=armv7 or similar.
+#if defined(USE_THUMB_INTERWORK) && !defined(CAN_USE_THUMB_INSTRUCTIONS)
+# error "For thumb inter-working we require an architecture which supports blx"
+#endif
+
+
+// Using blx may yield better code, so use it when required or when available
+#if defined(USE_THUMB_INTERWORK) || defined(CAN_USE_ARMV5_INSTRUCTIONS)
+#define USE_BLX 1
+#endif
+
+// Using bx does not yield better code, so use it only when required
+#if defined(USE_THUMB_INTERWORK)
+#define USE_BX 1
+#endif
+
+
+void MacroAssembler::Jump(Register target, Condition cond) {
+#if USE_BX
+  bx(target, cond);
+#else
+  mov(pc, Operand(target), LeaveCC, cond);
+#endif
+}
+
+
+void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+                          Condition cond) {
+#if USE_BX
+  mov(ip, Operand(target, rmode), LeaveCC, cond);
+  bx(ip, cond);
+#else
+  mov(pc, Operand(target, rmode), LeaveCC, cond);
+#endif
+}
+
+
+void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode,
+                          Condition cond) {
+  ASSERT(!RelocInfo::IsCodeTarget(rmode));
+  Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
+}
+
+
+void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+                          Condition cond) {
+  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  // 'code' is always generated ARM code, never THUMB code
+  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
+}
+
+
+void MacroAssembler::Call(Register target, Condition cond) {
+#if USE_BLX
+  blx(target, cond);
+#else
+  // set lr for return at current pc + 8
+  mov(lr, Operand(pc), LeaveCC, cond);
+  mov(pc, Operand(target), LeaveCC, cond);
+#endif
+}
+
+
+void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
+                          Condition cond) {
+  // Set lr for return at current pc + 8.
+  mov(lr, Operand(pc), LeaveCC, cond);
+  // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
+  mov(pc, Operand(target, rmode), LeaveCC, cond);
+  // If USE_BLX is defined, we could emit a 'mov ip, target', followed by a
+  // 'blx ip'; however, the code would not be shorter than the above sequence
+  // and the target address of the call would be referenced by the first
+  // instruction rather than the second one, which would make it harder to patch
+  // (two instructions before the return address, instead of one).
+  ASSERT(kCallTargetAddressOffset == kInstrSize);
+}
+
+
+void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode,
+                          Condition cond) {
+  ASSERT(!RelocInfo::IsCodeTarget(rmode));
+  Call(reinterpret_cast<intptr_t>(target), rmode, cond);
+}
+
+
+void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+                          Condition cond) {
+  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  // 'code' is always generated ARM code, never THUMB code
+  Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
+}
+
+
+void MacroAssembler::Ret(Condition cond) {
+#if USE_BX
+  bx(lr, cond);
+#else
+  mov(pc, Operand(lr), LeaveCC, cond);
+#endif
+}
+
+
+void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
+  // Empty the const pool.
+  CheckConstPool(true, true);
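+  // The index arrives as a smi; shifting it by kInstrSizeLog2 - kSmiTagSize
+  // scales it to a byte offset into the table of branch instructions below.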
+  add(pc, pc, Operand(index,
+                      LSL,
+                      assembler::arm::Instr::kInstrSizeLog2 - kSmiTagSize));
+  BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize);
+  nop();  // Jump table alignment.
+  for (int i = 0; i < targets.length(); i++) {
+    b(targets[i]);
+  }
+}
+
+
+void MacroAssembler::LoadRoot(Register destination,
+                              Heap::RootListIndex index,
+                              Condition cond) {
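+  // Each root is one pointer-sized slot in the roots array pointed to by r10.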
+  ldr(destination, MemOperand(r10, index << kPointerSizeLog2), cond);
+}
+
+
+// Will clobber 4 registers: object, offset, scratch, ip.  The
+// register 'object' contains a heap object pointer.  The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(Register object, Register offset,
+                                 Register scratch) {
+  // This is how much we shift the remembered set bit offset to get the
+  // offset of the word in the remembered set.  We divide by kBitsPerInt (32,
+  // shift right 5) and then multiply by kIntSize (4, shift left 2).
+  const int kRSetWordShift = 3;
+
+  Label fast, done;
+
+  // First, test that the object is not in the new space.  We cannot set
+  // remembered set bits in the new space.
+  // object: heap object pointer (with tag)
+  // offset: offset to store location from the object
+  and_(scratch, object, Operand(Heap::NewSpaceMask()));
+  cmp(scratch, Operand(ExternalReference::new_space_start()));
+  b(eq, &done);
+
+  // Compute the bit offset in the remembered set.
+  // object: heap object pointer (with tag)
+  // offset: offset to store location from the object
+  mov(ip, Operand(Page::kPageAlignmentMask));  // load mask only once
+  and_(scratch, object, Operand(ip));  // offset into page of the object
+  add(offset, scratch, Operand(offset));  // add offset into the object
+  mov(offset, Operand(offset, LSR, kObjectAlignmentBits));
+
+  // Compute the page address from the heap object pointer.
+  // object: heap object pointer (with tag)
+  // offset: bit offset of store position in the remembered set
+  bic(object, object, Operand(ip));
+
+  // If the bit offset lies beyond the normal remembered set range, it is in
+  // the extra remembered set area of a large object.
+  // object: page start
+  // offset: bit offset of store position in the remembered set
+  cmp(offset, Operand(Page::kPageSize / kPointerSize));
+  b(lt, &fast);
+
+  // Adjust the bit offset to be relative to the start of the extra
+  // remembered set and the start address to be the address of the extra
+  // remembered set.
+  sub(offset, offset, Operand(Page::kPageSize / kPointerSize));
+  // Load the array length into 'scratch' and multiply by four to get the
+  // size in bytes of the elements.
+  ldr(scratch, MemOperand(object, Page::kObjectStartOffset
+                                  + FixedArray::kLengthOffset));
+  mov(scratch, Operand(scratch, LSL, kObjectAlignmentBits));
+  // Add the page header (including remembered set), array header, and array
+  // body size to the page address.
+  add(object, object, Operand(Page::kObjectStartOffset
+                              + FixedArray::kHeaderSize));
+  add(object, object, Operand(scratch));
+
+  bind(&fast);
+  // Get address of the rset word.
+  // object: start of the remembered set (page start for the fast case)
+  // offset: bit offset of store position in the remembered set
+  bic(scratch, offset, Operand(kBitsPerInt - 1));  // clear the bit offset
+  add(object, object, Operand(scratch, LSR, kRSetWordShift));
+  // Get bit offset in the rset word.
+  // object: address of remembered set word
+  // offset: bit offset of store position
+  and_(offset, offset, Operand(kBitsPerInt - 1));
+
+  ldr(scratch, MemOperand(object));
+  mov(ip, Operand(1));
+  orr(scratch, scratch, Operand(ip, LSL, offset));
+  str(scratch, MemOperand(object));
+
+  bind(&done);
+}
+
+
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
+  // r0-r3: preserved
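+  // Resulting frame, from sp upwards: code object, frame type (Smi), cp,
+  // caller fp, lr. fp ends up pointing at the saved caller fp slot.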
+  stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+  mov(ip, Operand(Smi::FromInt(type)));
+  push(ip);
+  mov(ip, Operand(CodeObject()));
+  push(ip);
+  add(fp, sp, Operand(3 * kPointerSize));  // Adjust FP to point to saved FP.
+}
+
+
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
+  // r0: preserved
+  // r1: preserved
+  // r2: preserved
+
+  // Drop the execution stack down to the frame pointer and restore
+  // the caller frame pointer and return address.
+  mov(sp, fp);
+  ldm(ia_w, sp, fp.bit() | lr.bit());
+}
+
+
+void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
+  ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
+
+  // Compute the argv pointer and keep it in a callee-saved register.
+  // r0 is argc.
+  add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
+  sub(r6, r6, Operand(kPointerSize));
+
+  // Compute callee's stack pointer before making changes and save it as
+  // ip register so that it is restored as sp register on exit, thereby
+  // popping the args.
+
+  // ip = sp + kPointerSize * #args;
+  add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
+
+  // Align the stack at this point.  After this point we have 5 pushes,
+  // so in fact we have to unalign here!  See also the assert on the
+  // alignment in AlignStack.
+  AlignStack(1);
+
+  // Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
+  stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
+  mov(fp, Operand(sp));  // setup new frame pointer
+
+  // Push debug marker.
+  mov(ip, Operand(type == StackFrame::EXIT_DEBUG ? 1 : 0));
+  push(ip);
+
+  // Save the frame pointer and the context in top.
+  mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
+  str(fp, MemOperand(ip));
+  mov(ip, Operand(ExternalReference(Top::k_context_address)));
+  str(cp, MemOperand(ip));
+
+  // Setup argc and the builtin function in callee-saved registers.
+  mov(r4, Operand(r0));
+  mov(r5, Operand(r1));
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Save the state of all registers to the stack from the memory
+  // location. This is needed to allow nested break points.
+  if (type == StackFrame::EXIT_DEBUG) {
+    // Use sp as base to push.
+    CopyRegistersFromMemoryToStack(sp, kJSCallerSaved);
+  }
+#endif
+}
+
+
+void MacroAssembler::AlignStack(int offset) {
+#if defined(V8_HOST_ARCH_ARM)
+  // Running on the real platform. Use the alignment as mandated by the local
+  // environment.
+  // Note: This will break if we ever start generating snapshots on one ARM
+  // platform for another ARM platform with a different alignment.
+  int activation_frame_alignment = OS::ActivationFrameAlignment();
+#else  // defined(V8_HOST_ARCH_ARM)
+  // If we are using the simulator then we should always align to the expected
+  // alignment. As the simulator is used to generate snapshots we do not know
+  // if the target platform will need alignment, so we will always align at
+  // this point here.
+  int activation_frame_alignment = 2 * kPointerSize;
+#endif  // defined(V8_HOST_ARCH_ARM)
+  if (activation_frame_alignment != kPointerSize) {
+    // This code needs to be made more general if this assert doesn't hold.
+    ASSERT(activation_frame_alignment == 2 * kPointerSize);
+    mov(r7, Operand(Smi::FromInt(0)));
+    tst(sp, Operand(activation_frame_alignment - offset));
+    push(r7, eq);  // Conditional push instruction.
+  }
+}
+
+
+void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Restore the memory copy of the registers by digging them out from
+  // the stack. This is needed to allow nested break points.
+  if (type == StackFrame::EXIT_DEBUG) {
+    // This code intentionally clobbers r2 and r3.
+    const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
+    const int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
+    add(r3, fp, Operand(kOffset));
+    CopyRegistersFromStackToMemory(r3, r2, kJSCallerSaved);
+  }
+#endif
+
+  // Clear top frame.
+  mov(r3, Operand(0));
+  mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
+  str(r3, MemOperand(ip));
+
+  // Restore current context from top and clear it in debug mode.
+  mov(ip, Operand(ExternalReference(Top::k_context_address)));
+  ldr(cp, MemOperand(ip));
+#ifdef DEBUG
+  str(r3, MemOperand(ip));
+#endif
+
+  // Pop the arguments, restore registers, and return.
+  mov(sp, Operand(fp));  // respect ABI stack constraint
+  ldm(ia, sp, fp.bit() | sp.bit() | pc.bit());
+}
+
+
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+                                    const ParameterCount& actual,
+                                    Handle<Code> code_constant,
+                                    Register code_reg,
+                                    Label* done,
+                                    InvokeFlag flag) {
+  bool definitely_matches = false;
+  Label regular_invoke;
+
+  // Check whether the expected and actual arguments count match. If not,
+  // setup registers according to contract with ArgumentsAdaptorTrampoline:
+  //  r0: actual arguments count
+  //  r1: function (passed through to callee)
+  //  r2: expected arguments count
+  //  r3: callee code entry
+
+  // The code below is made a lot easier because the calling code already sets
+  // up actual and expected registers according to the contract if values are
+  // passed in registers.
+  ASSERT(actual.is_immediate() || actual.reg().is(r0));
+  ASSERT(expected.is_immediate() || expected.reg().is(r2));
+  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));
+
+  if (expected.is_immediate()) {
+    ASSERT(actual.is_immediate());
+    if (expected.immediate() == actual.immediate()) {
+      definitely_matches = true;
+    } else {
+      mov(r0, Operand(actual.immediate()));
+      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+      if (expected.immediate() == sentinel) {
+        // Don't worry about adapting arguments for builtins that
+        // don't want that done. Skip the adaptation code by making it look
+        // like we have a match between expected and actual number of
+        // arguments.
+        definitely_matches = true;
+      } else {
+        mov(r2, Operand(expected.immediate()));
+      }
+    }
+  } else {
+    if (actual.is_immediate()) {
+      cmp(expected.reg(), Operand(actual.immediate()));
+      b(eq, &regular_invoke);
+      mov(r0, Operand(actual.immediate()));
+    } else {
+      cmp(expected.reg(), Operand(actual.reg()));
+      b(eq, &regular_invoke);
+    }
+  }
+
+  if (!definitely_matches) {
+    if (!code_constant.is_null()) {
+      mov(r3, Operand(code_constant));
+      add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
+    }
+
+    Handle<Code> adaptor =
+        Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+    if (flag == CALL_FUNCTION) {
+      Call(adaptor, RelocInfo::CODE_TARGET);
+      b(done);
+    } else {
+      Jump(adaptor, RelocInfo::CODE_TARGET);
+    }
+    bind(&regular_invoke);
+  }
+}
+
+
+void MacroAssembler::InvokeCode(Register code,
+                                const ParameterCount& expected,
+                                const ParameterCount& actual,
+                                InvokeFlag flag) {
+  Label done;
+
+  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
+  if (flag == CALL_FUNCTION) {
+    Call(code);
+  } else {
+    ASSERT(flag == JUMP_FUNCTION);
+    Jump(code);
+  }
+
+  // Continue here if InvokePrologue handled the invocation itself because of
+  // mismatched parameter counts.
+  bind(&done);
+}
+
+
+void MacroAssembler::InvokeCode(Handle<Code> code,
+                                const ParameterCount& expected,
+                                const ParameterCount& actual,
+                                RelocInfo::Mode rmode,
+                                InvokeFlag flag) {
+  Label done;
+
+  InvokePrologue(expected, actual, code, no_reg, &done, flag);
+  if (flag == CALL_FUNCTION) {
+    Call(code, rmode);
+  } else {
+    Jump(code, rmode);
+  }
+
+  // Continue here if InvokePrologue handled the invocation itself because of
+  // mismatched parameter counts.
+  bind(&done);
+}
+
+
+void MacroAssembler::InvokeFunction(Register fun,
+                                    const ParameterCount& actual,
+                                    InvokeFlag flag) {
+  // Contract with called JS functions requires that function is passed in r1.
+  ASSERT(fun.is(r1));
+
+  Register expected_reg = r2;
+  Register code_reg = r3;
+
+  ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+  ldr(expected_reg,
+      FieldMemOperand(code_reg,
+                      SharedFunctionInfo::kFormalParameterCountOffset));
+  ldr(code_reg,
+      MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
+  add(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  ParameterCount expected(expected_reg);
+  InvokeCode(code_reg, expected, actual, flag);
+}
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+void MacroAssembler::SaveRegistersToMemory(RegList regs) {
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  // Copy the content of registers to memory location.
+  for (int i = 0; i < kNumJSCallerSaved; i++) {
+    int r = JSCallerSavedCode(i);
+    if ((regs & (1 << r)) != 0) {
+      Register reg = { r };
+      mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
+      str(reg, MemOperand(ip));
+    }
+  }
+}
+
+
+void MacroAssembler::RestoreRegistersFromMemory(RegList regs) {
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  // Copy the content of memory location to registers.
+  for (int i = kNumJSCallerSaved; --i >= 0;) {
+    int r = JSCallerSavedCode(i);
+    if ((regs & (1 << r)) != 0) {
+      Register reg = { r };
+      mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
+      ldr(reg, MemOperand(ip));
+    }
+  }
+}
+
+
+void MacroAssembler::CopyRegistersFromMemoryToStack(Register base,
+                                                    RegList regs) {
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  // Copy the content of the memory location to the stack and adjust base.
+  for (int i = kNumJSCallerSaved; --i >= 0;) {
+    int r = JSCallerSavedCode(i);
+    if ((regs & (1 << r)) != 0) {
+      mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
+      ldr(ip, MemOperand(ip));
+      str(ip, MemOperand(base, 4, NegPreIndex));
+    }
+  }
+}
+
+
+void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
+                                                    Register scratch,
+                                                    RegList regs) {
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  // Copy the content of the stack to the memory location and adjust base.
+  for (int i = 0; i < kNumJSCallerSaved; i++) {
+    int r = JSCallerSavedCode(i);
+    if ((regs & (1 << r)) != 0) {
+      mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
+      ldr(scratch, MemOperand(base, 4, PostIndex));
+      str(scratch, MemOperand(ip));
+    }
+  }
+}
+#endif
+
+
+void MacroAssembler::PushTryHandler(CodeLocation try_location,
+                                    HandlerType type) {
+  // Adjust this code if not the case.
+  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+  // The pc (return address) is passed in register lr.
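+  // A stack handler occupies four words, from low to high address: next
+  // handler, state, saved fp and return pc (see the offset asserts below).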
+  if (try_location == IN_JAVASCRIPT) {
+    if (type == TRY_CATCH_HANDLER) {
+      mov(r3, Operand(StackHandler::TRY_CATCH));
+    } else {
+      mov(r3, Operand(StackHandler::TRY_FINALLY));
+    }
+    ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
+           && StackHandlerConstants::kFPOffset == 2 * kPointerSize
+           && StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+    stm(db_w, sp, r3.bit() | fp.bit() | lr.bit());
+    // Save the current handler as the next handler.
+    mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+    ldr(r1, MemOperand(r3));
+    ASSERT(StackHandlerConstants::kNextOffset == 0);
+    push(r1);
+    // Link this handler as the new current one.
+    str(sp, MemOperand(r3));
+  } else {
+    // Must preserve r0-r4; r5-r7 are available.
+    ASSERT(try_location == IN_JS_ENTRY);
+    // The frame pointer does not point to a JS frame so we save NULL
+    // for fp. We expect the code throwing an exception to check fp
+    // before dereferencing it to restore the context.
+    mov(ip, Operand(0));  // To save a NULL frame pointer.
+    mov(r6, Operand(StackHandler::ENTRY));
+    ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
+           && StackHandlerConstants::kFPOffset == 2 * kPointerSize
+           && StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+    stm(db_w, sp, r6.bit() | ip.bit() | lr.bit());
+    // Save the current handler as the next handler.
+    mov(r7, Operand(ExternalReference(Top::k_handler_address)));
+    ldr(r6, MemOperand(r7));
+    ASSERT(StackHandlerConstants::kNextOffset == 0);
+    push(r6);
+    // Link this handler as the new current one.
+    str(sp, MemOperand(r7));
+  }
+}
+
+
+Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
+                                   JSObject* holder, Register holder_reg,
+                                   Register scratch,
+                                   Label* miss) {
+  // Make sure there's no overlap between scratch and the other
+  // registers.
+  ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));
+
+  // Keep track of the current object in register reg.
+  Register reg = object_reg;
+  int depth = 1;
+
+  // Check the maps in the prototype chain.
+  // Traverse the prototype chain from the object and do map checks.
+  while (object != holder) {
+    depth++;
+
+    // Only global objects and objects that do not require access
+    // checks are allowed in stubs.
+    ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+    // Get the map of the current object.
+    ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
+    cmp(scratch, Operand(Handle<Map>(object->map())));
+
+    // Branch on the result of the map check.
+    b(ne, miss);
+
+    // Check access rights to the global object.  This has to happen
+    // after the map check so that we know that the object is
+    // actually a global object.
+    if (object->IsJSGlobalProxy()) {
+      CheckAccessGlobalProxy(reg, scratch, miss);
+      // Restore scratch register to be the map of the object.  In the
+      // new space case below, we load the prototype from the map in
+      // the scratch register.
+      ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
+    }
+
+    reg = holder_reg;  // from now the object is in holder_reg
+    JSObject* prototype = JSObject::cast(object->GetPrototype());
+    if (Heap::InNewSpace(prototype)) {
+      // The prototype is in new space; we cannot store a reference
+      // to it in the code. Load it from the map.
+      ldr(reg, FieldMemOperand(scratch, Map::kPrototypeOffset));
+    } else {
+      // The prototype is in old space; load it directly.
+      mov(reg, Operand(Handle<JSObject>(prototype)));
+    }
+
+    // Go to the next object in the prototype chain.
+    object = prototype;
+  }
+
+  // Check the holder map.
+  ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
+  cmp(scratch, Operand(Handle<Map>(object->map())));
+  b(ne, miss);
+
+  // Log the check depth.
+  LOG(IntEvent("check-maps-depth", depth));
+
+  // Perform security check for access to the global object and return
+  // the holder register.
+  ASSERT(object == holder);
+  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+  if (object->IsJSGlobalProxy()) {
+    CheckAccessGlobalProxy(reg, scratch, miss);
+  }
+  return reg;
+}
+
+
+void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
+                                            Register scratch,
+                                            Label* miss) {
+  Label same_contexts;
+
+  ASSERT(!holder_reg.is(scratch));
+  ASSERT(!holder_reg.is(ip));
+  ASSERT(!scratch.is(ip));
+
+  // Load current lexical context from the stack frame.
+  ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  // In debug mode, make sure the lexical context is set.
+#ifdef DEBUG
+  cmp(scratch, Operand(0));
+  Check(ne, "we should not have an empty lexical context");
+#endif
+
+  // Load the global context of the current context.
+  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+  ldr(scratch, FieldMemOperand(scratch, offset));
+  ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
+
+  // Check the context is a global context.
+  if (FLAG_debug_code) {
+    // TODO(119): avoid push(holder_reg)/pop(holder_reg)
+    // Cannot use ip as a temporary in this verification code because ip is
+    // clobbered as part of cmp with an object Operand.
+    push(holder_reg);  // Temporarily save holder on the stack.
+    // Read the first word and compare to the global_context_map.
+    ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
+    LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
+    cmp(holder_reg, ip);
+    Check(eq, "JSGlobalObject::global_context should be a global context.");
+    pop(holder_reg);  // Restore holder.
+  }
+
+  // Check if both contexts are the same.
+  ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
+  cmp(scratch, Operand(ip));
+  b(eq, &same_contexts);
+
+  // Check the context is a global context.
+  if (FLAG_debug_code) {
+    // TODO(119): avoid push(holder_reg)/pop(holder_reg)
+    // Cannot use ip as a temporary in this verification code because ip is
+    // clobbered as part of cmp with an object Operand.
+    push(holder_reg);  // Temporarily save holder on the stack.
+    mov(holder_reg, ip);  // Move ip to its holding place.
+    LoadRoot(ip, Heap::kNullValueRootIndex);
+    cmp(holder_reg, ip);
+    Check(ne, "JSGlobalProxy::context() should not be null.");
+
+    ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
+    LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
+    cmp(holder_reg, ip);
+    Check(eq, "JSGlobalObject::global_context should be a global context.");
+    // Restoring ip is not needed; it is reloaded below.
+    pop(holder_reg);  // Restore holder.
+    // Restore ip to holder's context.
+    ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
+  }
+
+  // Check that the security token in the calling global object is
+  // compatible with the security token in the receiving global
+  // object.
+  int token_offset = Context::kHeaderSize +
+                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
+
+  ldr(scratch, FieldMemOperand(scratch, token_offset));
+  ldr(ip, FieldMemOperand(ip, token_offset));
+  cmp(scratch, Operand(ip));
+  b(ne, miss);
+
+  bind(&same_contexts);
+}
+
+
+void MacroAssembler::AllocateInNewSpace(int object_size,
+                                        Register result,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Label* gc_required,
+                                        AllocationFlags flags) {
+  ASSERT(!result.is(scratch1));
+  ASSERT(!scratch1.is(scratch2));
+
+  // Load address of new object into result and allocation top address into
+  // scratch1.
+  ExternalReference new_space_allocation_top =
+      ExternalReference::new_space_allocation_top_address();
+  mov(scratch1, Operand(new_space_allocation_top));
+  if ((flags & RESULT_CONTAINS_TOP) == 0) {
+    ldr(result, MemOperand(scratch1));
+  } else {
+#ifdef DEBUG
+    // Assert that result actually contains top on entry. scratch2 is used
+    // immediately below, so this use of scratch2 does not cause a difference
+    // in register content between debug and release mode.
+    ldr(scratch2, MemOperand(scratch1));
+    cmp(result, scratch2);
+    Check(eq, "Unexpected allocation top");
+#endif
+  }
+
+  // Calculate new top and bail out if new space is exhausted. Use result
+  // to calculate the new top.
+  ExternalReference new_space_allocation_limit =
+      ExternalReference::new_space_allocation_limit_address();
+  mov(scratch2, Operand(new_space_allocation_limit));
+  ldr(scratch2, MemOperand(scratch2));
+  add(result, result, Operand(object_size * kPointerSize));
+  cmp(result, Operand(scratch2));
+  b(hi, gc_required);
+
+  // Update allocation top. result temporarily holds the new top.
+  str(result, MemOperand(scratch1));
+
+  // Tag and adjust back to start of new object.
+  if ((flags & TAG_OBJECT) != 0) {
+    sub(result, result, Operand((object_size * kPointerSize) -
+                                kHeapObjectTag));
+  } else {
+    sub(result, result, Operand(object_size * kPointerSize));
+  }
+}
+
+
+void MacroAssembler::AllocateInNewSpace(Register object_size,
+                                        Register result,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Label* gc_required,
+                                        AllocationFlags flags) {
+  ASSERT(!result.is(scratch1));
+  ASSERT(!scratch1.is(scratch2));
+
+  // Load address of new object into result and allocation top address into
+  // scratch1.
+  ExternalReference new_space_allocation_top =
+      ExternalReference::new_space_allocation_top_address();
+  mov(scratch1, Operand(new_space_allocation_top));
+  if ((flags & RESULT_CONTAINS_TOP) == 0) {
+    ldr(result, MemOperand(scratch1));
+  } else {
+#ifdef DEBUG
+    // Assert that result actually contains top on entry. scratch2 is used
+    // immediately below, so this use of scratch2 does not cause a difference
+    // in register content between debug and release mode.
+    ldr(scratch2, MemOperand(scratch1));
+    cmp(result, scratch2);
+    Check(eq, "Unexpected allocation top");
+#endif
+  }
+
+  // Calculate new top and bail out if new space is exhausted. Use result
+  // to calculate the new top. Object size is in words, so a shift is required
+  // to get the number of bytes.
+  ExternalReference new_space_allocation_limit =
+      ExternalReference::new_space_allocation_limit_address();
+  mov(scratch2, Operand(new_space_allocation_limit));
+  ldr(scratch2, MemOperand(scratch2));
+  add(result, result, Operand(object_size, LSL, kPointerSizeLog2));
+  cmp(result, Operand(scratch2));
+  b(hi, gc_required);
+
+  // Update allocation top. result temporarily holds the new top.
+  str(result, MemOperand(scratch1));
+
+  // Adjust back to start of new object.
+  sub(result, result, Operand(object_size, LSL, kPointerSizeLog2));
+
+  // Tag object if requested.
+  if ((flags & TAG_OBJECT) != 0) {
+    add(result, result, Operand(kHeapObjectTag));
+  }
+}
+
+
+void MacroAssembler::UndoAllocationInNewSpace(Register object,
+                                              Register scratch) {
+  ExternalReference new_space_allocation_top =
+      ExternalReference::new_space_allocation_top_address();
+
+  // Make sure the object has no tag before resetting top.
+  and_(object, object, Operand(~kHeapObjectTagMask));
+#ifdef DEBUG
+  // Check that the object being un-allocated is below the current top.
+  mov(scratch, Operand(new_space_allocation_top));
+  ldr(scratch, MemOperand(scratch));
+  cmp(object, scratch);
+  Check(lt, "Undo allocation of non allocated memory");
+#endif
+  // Write the address of the object to un-allocate as the current top.
+  mov(scratch, Operand(new_space_allocation_top));
+  str(object, MemOperand(scratch));
+}
+
+
+void MacroAssembler::CompareObjectType(Register function,
+                                       Register map,
+                                       Register type_reg,
+                                       InstanceType type) {
+  ldr(map, FieldMemOperand(function, HeapObject::kMapOffset));
+  CompareInstanceType(map, type_reg, type);
+}
+
+
+void MacroAssembler::CompareInstanceType(Register map,
+                                         Register type_reg,
+                                         InstanceType type) {
+  ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  cmp(type_reg, Operand(type));
+}
+
+
+void MacroAssembler::TryGetFunctionPrototype(Register function,
+                                             Register result,
+                                             Register scratch,
+                                             Label* miss) {
+  // Check that the receiver isn't a smi.
+  BranchOnSmi(function, miss);
+
+  // Check that the function really is a function.  Load map into result reg.
+  CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
+  b(ne, miss);
+
+  // Make sure that the function has an instance prototype.
+  Label non_instance;
+  ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+  tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
+  b(ne, &non_instance);
+
+  // Get the prototype or initial map from the function.
+  ldr(result,
+      FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // If the prototype or initial map is the hole, don't return it and
+  // simply miss the cache instead. This will allow us to allocate a
+  // prototype object on-demand in the runtime system.
+  LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+  cmp(result, ip);
+  b(eq, miss);
+
+  // If the function does not have an initial map, we're done.
+  Label done;
+  CompareObjectType(result, scratch, scratch, MAP_TYPE);
+  b(ne, &done);
+
+  // Get the prototype from the initial map.
+  ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
+  jmp(&done);
+
+  // Non-instance prototype: Fetch prototype from constructor field
+  // in initial map.
+  bind(&non_instance);
+  ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
+
+  // All done.
+  bind(&done);
+}
+
+
+void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
+  ASSERT(allow_stub_calls());  // stub calls are not allowed in some stubs
+  Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
+}
+
+
+void MacroAssembler::StubReturn(int argc) {
+  ASSERT(argc >= 1 && generating_stub());
+  if (argc > 1)
+    add(sp, sp, Operand((argc - 1) * kPointerSize));
+  Ret();
+}
+
+
+void MacroAssembler::IllegalOperation(int num_arguments) {
+  if (num_arguments > 0) {
+    add(sp, sp, Operand(num_arguments * kPointerSize));
+  }
+  LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+}
+
+
+void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
+  // All parameters are on the stack.  r0 has the return value after call.
+
+  // If the expected number of arguments of the runtime function is
+  // constant, we check that the actual number of arguments matches the
+  // expectation.
+  if (f->nargs >= 0 && f->nargs != num_arguments) {
+    IllegalOperation(num_arguments);
+    return;
+  }
+
+  Runtime::FunctionId function_id =
+      static_cast<Runtime::FunctionId>(f->stub_id);
+  RuntimeStub stub(function_id, num_arguments);
+  CallStub(&stub);
+}
+
+
+void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
+  CallRuntime(Runtime::FunctionForId(fid), num_arguments);
+}
+
+
+void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
+                                     int num_arguments,
+                                     int result_size) {
+  // TODO(1236192): Most runtime routines don't need the number of
+  // arguments passed in because it is constant. At some point we
+  // should remove this need and make the runtime routine entry code
+  // smarter.
+  mov(r0, Operand(num_arguments));
+  JumpToRuntime(ext);
+}
+
+
+void MacroAssembler::JumpToRuntime(const ExternalReference& builtin) {
+#if defined(__thumb__)
+  // Thumb mode builtin.
+  ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
+#endif
+  mov(r1, Operand(builtin));
+  CEntryStub stub(1);
+  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
+                                            bool* resolved) {
+  // Contract with compiled functions is that the function is passed in r1.
+  int builtins_offset =
+      JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
+  ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  ldr(r1, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
+  ldr(r1, FieldMemOperand(r1, builtins_offset));
+
+  return Builtins::GetCode(id, resolved);
+}
+
+
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
+                                   InvokeJSFlags flags) {
+  bool resolved;
+  Handle<Code> code = ResolveBuiltin(id, &resolved);
+
+  if (flags == CALL_JS) {
+    Call(code, RelocInfo::CODE_TARGET);
+  } else {
+    ASSERT(flags == JUMP_JS);
+    Jump(code, RelocInfo::CODE_TARGET);
+  }
+
+  if (!resolved) {
+    const char* name = Builtins::GetName(id);
+    int argc = Builtins::GetArgumentsCount(id);
+    uint32_t flags =
+        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
+        Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
+        Bootstrapper::FixupFlagsUseCodeObject::encode(false);
+    Unresolved entry = { pc_offset() - kInstrSize, flags, name };
+    unresolved_.Add(entry);
+  }
+}
+
+
+void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+  bool resolved;
+  Handle<Code> code = ResolveBuiltin(id, &resolved);
+
+  mov(target, Operand(code));
+  if (!resolved) {
+    const char* name = Builtins::GetName(id);
+    int argc = Builtins::GetArgumentsCount(id);
+    uint32_t flags =
+        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
+        Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
+        Bootstrapper::FixupFlagsUseCodeObject::encode(true);
+    Unresolved entry = { pc_offset() - kInstrSize, flags, name };
+    unresolved_.Add(entry);
+  }
+
+  add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+}
+
+
+void MacroAssembler::SetCounter(StatsCounter* counter, int value,
+                                Register scratch1, Register scratch2) {
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    mov(scratch1, Operand(value));
+    mov(scratch2, Operand(ExternalReference(counter)));
+    str(scratch1, MemOperand(scratch2));
+  }
+}
+
+
+void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
+                                      Register scratch1, Register scratch2) {
+  ASSERT(value > 0);
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    mov(scratch2, Operand(ExternalReference(counter)));
+    ldr(scratch1, MemOperand(scratch2));
+    add(scratch1, scratch1, Operand(value));
+    str(scratch1, MemOperand(scratch2));
+  }
+}
+
+
+void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
+                                      Register scratch1, Register scratch2) {
+  ASSERT(value > 0);
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    mov(scratch2, Operand(ExternalReference(counter)));
+    ldr(scratch1, MemOperand(scratch2));
+    sub(scratch1, scratch1, Operand(value));
+    str(scratch1, MemOperand(scratch2));
+  }
+}
+
+
+void MacroAssembler::Assert(Condition cc, const char* msg) {
+  if (FLAG_debug_code)
+    Check(cc, msg);
+}
+
+
+void MacroAssembler::Check(Condition cc, const char* msg) {
+  Label L;
+  b(cc, &L);
+  Abort(msg);
+  // will not return here
+  bind(&L);
+}
+
+
+void MacroAssembler::Abort(const char* msg) {
+  // We want to pass the msg string like a smi to avoid GC
+  // problems, however msg is not guaranteed to be aligned
+  // properly. Instead, we pass an aligned pointer that is
+  // a proper v8 smi, but also pass the alignment difference
+  // from the real pointer as a smi.
+  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
+  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
+  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
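+  // p0 is msg with the low tag bit cleared (a valid smi); p1 - p0 is 0 or 1
+  // and is passed as a second smi so the runtime can reconstruct the pointer.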
+#ifdef DEBUG
+  if (msg != NULL) {
+    RecordComment("Abort message: ");
+    RecordComment(msg);
+  }
+#endif
+  mov(r0, Operand(p0));
+  push(r0);
+  mov(r0, Operand(Smi::FromInt(p1 - p0)));
+  push(r0);
+  CallRuntime(Runtime::kAbort, 2);
+  // will not return here
+}
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+CodePatcher::CodePatcher(byte* address, int instructions)
+    : address_(address),
+      instructions_(instructions),
+      size_(instructions * Assembler::kInstrSize),
+      masm_(address, size_ + Assembler::kGap) {
+  // Create a new macro assembler pointing to the address of the code to patch.
+  // The size is adjusted with kGap in order for the assembler to generate size
+  // bytes of instructions without failing with buffer size constraints.
+  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+CodePatcher::~CodePatcher() {
+  // Indicate that code has changed.
+  CPU::FlushICache(address_, size_);
+
+  // Check that the code was patched as expected.
+  ASSERT(masm_.pc_ == address_ + size_);
+  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+void CodePatcher::Emit(Instr x) {
+  masm()->emit(x);
+}
+
+
+void CodePatcher::Emit(Address addr) {
+  masm()->emit(reinterpret_cast<Instr>(addr));
+}
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
+
+} }  // namespace v8::internal
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
new file mode 100644
index 0000000..ee9d70d
--- /dev/null
+++ b/src/arm/macro-assembler-arm.h
@@ -0,0 +1,402 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
+#define V8_ARM_MACRO_ASSEMBLER_ARM_H_
+
+#include "assembler.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Give alias names to registers
+const Register cp = { 8 };  // JavaScript context pointer
+
+
+enum InvokeJSFlags {
+  CALL_JS,
+  JUMP_JS
+};
+
+
+// MacroAssembler implements a collection of frequently used macros.
+class MacroAssembler: public Assembler {
+ public:
+  MacroAssembler(void* buffer, int size);
+
+  // ---------------------------------------------------------------------------
+  // Low-level helpers for compiler
+
+  // Jump, Call, and Ret pseudo instructions implementing inter-working
+ private:
+  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
+  void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
+ public:
+  void Jump(Register target, Condition cond = al);
+  void Jump(byte* target, RelocInfo::Mode rmode, Condition cond = al);
+  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+  void Call(Register target, Condition cond = al);
+  void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al);
+  void Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+  void Ret(Condition cond = al);
+  // Jumps to the label at the index given by the Smi in "index".
+  void SmiJumpTable(Register index, Vector<Label*> targets);
+  // Load an object from the root table.
+  void LoadRoot(Register destination,
+                Heap::RootListIndex index,
+                Condition cond = al);
+
+  // Sets the remembered set bit for [address+offset], where address is the
+  // address of the heap object 'object'.  The address must be in the first 8K
+  // of an allocated page. The 'scratch' register is used in the
+  // implementation and all 3 registers are clobbered by the operation, as
+  // well as the ip register.
+  void RecordWrite(Register object, Register offset, Register scratch);
+
+  // ---------------------------------------------------------------------------
+  // Activation frames
+
+  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
+  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
+
+  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
+  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
+
+  // Enter specific kind of exit frame; either EXIT or
+  // EXIT_DEBUG. Expects the number of arguments in register r0 and
+  // the builtin function to call in register r1. Exits with argc in
+  // r4, argv in r6, and the builtin function to call in r5.
+  void EnterExitFrame(StackFrame::Type type);
+
+  // Leave the current exit frame. Expects the return value in r0.
+  void LeaveExitFrame(StackFrame::Type type);
+
+  // Align the stack by optionally pushing a Smi zero.
+  void AlignStack(int offset);
+
+  // ---------------------------------------------------------------------------
+  // JavaScript invokes
+
+  // Invoke the JavaScript function code by either calling or jumping.
+  void InvokeCode(Register code,
+                  const ParameterCount& expected,
+                  const ParameterCount& actual,
+                  InvokeFlag flag);
+
+  void InvokeCode(Handle<Code> code,
+                  const ParameterCount& expected,
+                  const ParameterCount& actual,
+                  RelocInfo::Mode rmode,
+                  InvokeFlag flag);
+
+  // Invoke the JavaScript function in the given register. Changes the
+  // current context to the context in the function before invoking.
+  void InvokeFunction(Register function,
+                      const ParameterCount& actual,
+                      InvokeFlag flag);
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // ---------------------------------------------------------------------------
+  // Debugger Support
+
+  void SaveRegistersToMemory(RegList regs);
+  void RestoreRegistersFromMemory(RegList regs);
+  void CopyRegistersFromMemoryToStack(Register base, RegList regs);
+  void CopyRegistersFromStackToMemory(Register base,
+                                      Register scratch,
+                                      RegList regs);
+#endif
+
+  // ---------------------------------------------------------------------------
+  // Exception handling
+
+  // Push a new try handler and link into try handler chain.
+  // The return address must be passed in register lr.
+  // On exit, r0 contains TOS (code slot).
+  void PushTryHandler(CodeLocation try_location, HandlerType type);
+
+
+  // ---------------------------------------------------------------------------
+  // Inline caching support
+
+  // Generates code that verifies that the maps of objects in the
+  // prototype chain of object haven't changed since the code was
+  // generated and branches to the miss label if any map has. If
+  // necessary the function also generates code for a security check
+  // in case of global object holders. The scratch and holder
+  // registers are always clobbered, but the object register is only
+  // clobbered if it is the same as the holder register. The function
+  // returns a register containing the holder - either object_reg or
+  // holder_reg.
+  Register CheckMaps(JSObject* object, Register object_reg,
+                     JSObject* holder, Register holder_reg,
+                     Register scratch, Label* miss);
+
+  // Generate code for checking access rights - used for security checks
+  // on access to global objects across environments. The holder register
+  // is left untouched, whereas both scratch registers are clobbered.
+  void CheckAccessGlobalProxy(Register holder_reg,
+                              Register scratch,
+                              Label* miss);
+
+
+  // ---------------------------------------------------------------------------
+  // Allocation support
+
+  // Allocate an object in new space. The object_size is specified in words
+  // (not bytes). If the new space is exhausted, control continues at the
+  // gc_required label. The allocated object is returned in result. If the
+  // TAG_OBJECT flag is set, the result is tagged as a heap object.
+  void AllocateInNewSpace(int object_size,
+                          Register result,
+                          Register scratch1,
+                          Register scratch2,
+                          Label* gc_required,
+                          AllocationFlags flags);
+  void AllocateInNewSpace(Register object_size,
+                          Register result,
+                          Register scratch1,
+                          Register scratch2,
+                          Label* gc_required,
+                          AllocationFlags flags);
+
+  // Undo allocation in new space. The object passed and objects allocated after
+  // it will no longer be allocated. The caller must make sure that no pointers
+  // are left to the object(s) no longer allocated as they would be invalid when
+  // allocation is undone.
+  void UndoAllocationInNewSpace(Register object, Register scratch);
+
+  // ---------------------------------------------------------------------------
+  // Support functions.
+
+  // Try to get function prototype of a function and puts the value in
+  // the result register. Checks that the function really is a
+  // function and jumps to the miss label if the fast checks fail. The
+  // function register will be untouched; the other registers may be
+  // clobbered.
+  void TryGetFunctionPrototype(Register function,
+                               Register result,
+                               Register scratch,
+                               Label* miss);
+
+  // Compare object type for heap object.  heap_object contains a non-Smi
+  // whose object type should be compared with the given type.  This both
+  // sets the flags and leaves the object type in the type_reg register.
+  // It leaves the map in the map register (unless the type_reg and map register
+  // are the same register).  It leaves the heap object in the heap_object
+  // register unless the heap_object register is the same register as one of the
+  // other registers.
+  void CompareObjectType(Register heap_object,
+                         Register map,
+                         Register type_reg,
+                         InstanceType type);
+
+  // Compare instance type in a map.  map contains a valid map object whose
+  // object type should be compared with the given type.  This both
+  // sets the flags and leaves the object type in the type_reg register.  It
+  // leaves the heap object in the heap_object register unless the heap_object
+  // register is the same register as type_reg.
+  void CompareInstanceType(Register map,
+                           Register type_reg,
+                           InstanceType type);
+
+  inline void BranchOnSmi(Register value, Label* smi_label) {
+    tst(value, Operand(kSmiTagMask));
+    b(eq, smi_label);
+  }
+
+  inline void BranchOnNotSmi(Register value, Label* not_smi_label) {
+    tst(value, Operand(kSmiTagMask));
+    b(ne, not_smi_label);
+  }
+
+  // Generates code for reporting that an illegal operation has
+  // occurred.
+  void IllegalOperation(int num_arguments);
+
+
+  // ---------------------------------------------------------------------------
+  // Runtime calls
+
+  // Call a code stub.
+  void CallStub(CodeStub* stub, Condition cond = al);
+  void CallJSExitStub(CodeStub* stub);
+
+  // Return from a code stub after popping its arguments.
+  void StubReturn(int argc);
+
+  // Call a runtime routine.
+  // Eventually this should be used for all C calls.
+  void CallRuntime(Runtime::Function* f, int num_arguments);
+
+  // Convenience function: Same as above, but takes the fid instead.
+  void CallRuntime(Runtime::FunctionId fid, int num_arguments);
+
+  // Tail call of a runtime routine (jump).
+  // Like JumpToRuntime, but also takes care of passing the number
+  // of parameters.
+  void TailCallRuntime(const ExternalReference& ext,
+                       int num_arguments,
+                       int result_size);
+
+  // Jump to a runtime routine.
+  void JumpToRuntime(const ExternalReference& builtin);
+
+  // Invoke specified builtin JavaScript function. Adds an entry to
+  // the unresolved list if the name does not resolve.
+  void InvokeBuiltin(Builtins::JavaScript id, InvokeJSFlags flags);
+
+  // Store the code object for the given builtin in the target register and
+  // set up the function in r1.
+  void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+
+  struct Unresolved {
+    int pc;
+    uint32_t flags;  // see Bootstrapper::FixupFlags decoders/encoders.
+    const char* name;
+  };
+  List<Unresolved>* unresolved() { return &unresolved_; }
+
+  Handle<Object> CodeObject() { return code_object_; }
+
+
+  // ---------------------------------------------------------------------------
+  // StatsCounter support
+
+  void SetCounter(StatsCounter* counter, int value,
+                  Register scratch1, Register scratch2);
+  void IncrementCounter(StatsCounter* counter, int value,
+                        Register scratch1, Register scratch2);
+  void DecrementCounter(StatsCounter* counter, int value,
+                        Register scratch1, Register scratch2);
+
+
+  // ---------------------------------------------------------------------------
+  // Debugging
+
+  // Calls Abort(msg) if the condition cc is not satisfied.
+  // Use --debug_code to enable.
+  void Assert(Condition cc, const char* msg);
+
+  // Like Assert(), but always enabled.
+  void Check(Condition cc, const char* msg);
+
+  // Print a message to stdout and abort execution.
+  void Abort(const char* msg);
+
+  // Verify restrictions about code generated in stubs.
+  void set_generating_stub(bool value) { generating_stub_ = value; }
+  bool generating_stub() { return generating_stub_; }
+  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
+  bool allow_stub_calls() { return allow_stub_calls_; }
+
+ private:
+  List<Unresolved> unresolved_;
+  bool generating_stub_;
+  bool allow_stub_calls_;
+  Handle<Object> code_object_;  // This handle will be patched with the code
+                                // object on installation.
+
+  // Helper functions for generating invokes.
+  void InvokePrologue(const ParameterCount& expected,
+                      const ParameterCount& actual,
+                      Handle<Code> code_constant,
+                      Register code_reg,
+                      Label* done,
+                      InvokeFlag flag);
+
+  // Prepares for a call or jump to a builtin by doing two things:
+  // 1. Emits code that fetches the builtin's function object from the context
+  //    at runtime, and puts it in register r1.
+  // 2. Fetches the builtin's code object, and returns it in a handle, at
+  //    compile time, so that later code can emit instructions to jump or call
+  //    the builtin directly.  If the code object has not yet been created, it
+  //    returns the builtin code object for IllegalFunction, and sets the
+  //    output parameter "resolved" to false.  Code that uses the return value
+  //    should then add the address and the builtin name to the list of fixups
+  //    called unresolved_, which is fixed up by the bootstrapper.
+  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
+
+  // Activation support.
+  void EnterFrame(StackFrame::Type type);
+  void LeaveFrame(StackFrame::Type type);
+};
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+// The code patcher is used to patch (typically) small parts of code e.g. for
+// debugging and other types of instrumentation. When using the code patcher
+// the exact number of instructions specified must be emitted. It is not legal
+// to emit relocation information. If any of these constraints are violated it
+// causes an assertion to fail.
+class CodePatcher {
+ public:
+  CodePatcher(byte* address, int instructions);
+  virtual ~CodePatcher();
+
+  // Macro assembler to emit code.
+  MacroAssembler* masm() { return &masm_; }
+
+  // Emit an instruction directly.
+  void Emit(Instr x);
+
+  // Emit an address directly.
+  void Emit(Address addr);
+
+ private:
+  byte* address_;  // The address of the code being patched.
+  int instructions_;  // Number of instructions of the expected patch size.
+  int size_;  // Number of bytes of the expected patch size.
+  MacroAssembler masm_;  // Macro assembler used to generate the code.
+};
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
+
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+// Generate a MemOperand for loading a field from an object.
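+// Heap object pointers carry kHeapObjectTag, so the tag is folded into the
+// offset here instead of untagging the object pointer separately.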
+static inline MemOperand FieldMemOperand(Register object, int offset) {
+  return MemOperand(object, offset - kHeapObjectTag);
+}
+
+
+#ifdef GENERATED_CODE_COVERAGE
+#define CODE_COVERAGE_STRINGIFY(x) #x
+#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
+#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
+#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
+#else
+#define ACCESS_MASM(masm) masm->
+#endif
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_ARM_MACRO_ASSEMBLER_ARM_H_
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
new file mode 100644
index 0000000..2e75a61
--- /dev/null
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -0,0 +1,1230 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+#include "unicode.h"
+#include "log.h"
+#include "ast.h"
+#include "regexp-stack.h"
+#include "macro-assembler.h"
+#include "regexp-macro-assembler.h"
+#include "arm/macro-assembler-arm.h"
+#include "arm/regexp-macro-assembler-arm.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef V8_NATIVE_REGEXP
+/*
+ * This assembler uses the following register assignment convention
+ * - r5 : Pointer to current code object (Code*) including heap object tag.
+ * - r6 : Current position in input, as negative offset from end of string.
+ *        Please notice that this is the byte offset, not the character offset!
+ * - r7 : Currently loaded character. Must be loaded using
+ *        LoadCurrentCharacter before using any of the dispatch methods.
+ * - r8 : points to tip of backtrack stack
+ * - r9 : Unused, might be used by C code and expected unchanged.
+ * - r10 : End of input (points to byte after last character in input).
+ * - r11 : Frame pointer. Used to access arguments, local variables and
+ *         RegExp registers.
+ * - r12 : IP register, used by assembler. Very volatile.
+ * - r13/sp : points to tip of C stack.
+ *
+ * The remaining registers are free for computations.
+ *
+ * Each call to a public method should retain this convention.
+ * The stack will have the following structure:
+ *       - stack_area_base    (High end of the memory area to use as
+ *                             backtracking stack)
+ *       - at_start           (if 1, start at start of string, if 0, don't)
+ *       --- sp when called ---
+ *       - link address
+ *       - backup of registers r4..r11
+ *       - int* capture_array (int[num_saved_registers_], for output).
+ *       - end of input       (Address of end of string)
+ *       - start of input     (Address of first character in string)
+ *       --- frame pointer ----
+ *       - void* input_string (location of a handle containing the string)
+ *       - Offset of location before start of input (effectively character
+ *         position -1). Used to initialize capture registers to a non-position.
+ *       - register 0         (Only positions must be stored in the first
+ *       - register 1          num_saved_registers_ registers)
+ *       - ...
+ *       - register num_registers-1
+ *       --- sp ---
+ *
+ * The first num_saved_registers_ registers are initialized to point to
+ * "character -1" in the string (i.e., char_size() bytes before the first
+ * character of the string). The remaining registers start out as garbage.
+ *
+ * The data up to the return address must be placed there by the calling
+ * code, by calling the code entry as cast to a function with the signature:
+ * int (*match)(String* input_string,
+ *              Address start,
+ *              Address end,
+ *              int* capture_output_array,
+ *              bool at_start,
+ *              byte* stack_area_base)
+ * The call is performed by NativeRegExpMacroAssembler::Execute()
+ * (in regexp-macro-assembler.cc).
+ */
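+
+// Illustrative sketch only (local names here are hypothetical): the Code
+// object produced by GetCode() is entered by casting its instruction start
+// to the signature above, roughly the way
+// NativeRegExpMacroAssembler::Execute() in regexp-macro-assembler.cc does:
+//
+//   typedef int (*RegExpEntry)(String* input_string,
+//                              Address start,
+//                              Address end,
+//                              int* capture_output_array,
+//                              bool at_start,
+//                              byte* stack_area_base);
+//   RegExpEntry entry =
+//       reinterpret_cast<RegExpEntry>(code->instruction_start());
+//   int result = entry(input_string, start, end,
+//                      capture_output_array, at_start, stack_area_base);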
+
+#define __ ACCESS_MASM(masm_)
+
+RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(
+    Mode mode,
+    int registers_to_save)
+    : masm_(new MacroAssembler(NULL, kRegExpCodeSize)),
+      mode_(mode),
+      num_registers_(registers_to_save),
+      num_saved_registers_(registers_to_save),
+      entry_label_(),
+      start_label_(),
+      success_label_(),
+      backtrack_label_(),
+      exit_label_() {
+  ASSERT_EQ(0, registers_to_save % 2);
+  __ jmp(&entry_label_);   // We'll write the entry code later.
+  EmitBacktrackConstantPool();
+  __ bind(&start_label_);  // And then continue from here.
+}
+
+
+RegExpMacroAssemblerARM::~RegExpMacroAssemblerARM() {
+  delete masm_;
+  // Unuse labels in case we throw away the assembler without calling GetCode.
+  entry_label_.Unuse();
+  start_label_.Unuse();
+  success_label_.Unuse();
+  backtrack_label_.Unuse();
+  exit_label_.Unuse();
+  check_preempt_label_.Unuse();
+  stack_overflow_label_.Unuse();
+}
+
+
+int RegExpMacroAssemblerARM::stack_limit_slack() {
+  return RegExpStack::kStackLimitSlack;
+}
+
+
+void RegExpMacroAssemblerARM::AdvanceCurrentPosition(int by) {
+  if (by != 0) {
+    __ add(current_input_offset(),
+           current_input_offset(), Operand(by * char_size()));
+  }
+}
+
+
+void RegExpMacroAssemblerARM::AdvanceRegister(int reg, int by) {
+  ASSERT(reg >= 0);
+  ASSERT(reg < num_registers_);
+  if (by != 0) {
+    __ ldr(r0, register_location(reg));
+    __ add(r0, r0, Operand(by));
+    __ str(r0, register_location(reg));
+  }
+}
+
+
+void RegExpMacroAssemblerARM::Backtrack() {
+  CheckPreemption();
+  // Pop Code* offset from backtrack stack, add Code* and jump to location.
+  Pop(r0);
+  __ add(pc, r0, Operand(r5));
+}
+
+
+void RegExpMacroAssemblerARM::Bind(Label* label) {
+  __ bind(label);
+}
+
+
+void RegExpMacroAssemblerARM::CheckCharacter(uint32_t c, Label* on_equal) {
+  __ cmp(current_character(), Operand(c));
+  BranchOrBacktrack(eq, on_equal);
+}
+
+
+void RegExpMacroAssemblerARM::CheckCharacterGT(uc16 limit, Label* on_greater) {
+  __ cmp(current_character(), Operand(limit));
+  BranchOrBacktrack(gt, on_greater);
+}
+
+
+void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
+  Label not_at_start;
+  // Did we start the match at the start of the string at all?
+  __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
+  __ cmp(r0, Operand(0));
+  BranchOrBacktrack(eq, &not_at_start);
+
+  // If we did, are we still at the start of the input?
+  __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
+  __ add(r0, end_of_input_address(), Operand(current_input_offset()));
+  __ cmp(r0, r1);
+  BranchOrBacktrack(eq, on_at_start);
+  __ bind(&not_at_start);
+}
+
+
+void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) {
+  // Did we start the match at the start of the string at all?
+  __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
+  __ cmp(r0, Operand(0));
+  BranchOrBacktrack(eq, on_not_at_start);
+  // If we did, are we still at the start of the input?
+  __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
+  __ add(r0, end_of_input_address(), Operand(current_input_offset()));
+  __ cmp(r0, r1);
+  BranchOrBacktrack(ne, on_not_at_start);
+}
+
+
+void RegExpMacroAssemblerARM::CheckCharacterLT(uc16 limit, Label* on_less) {
+  __ cmp(current_character(), Operand(limit));
+  BranchOrBacktrack(lt, on_less);
+}
+
+
+void RegExpMacroAssemblerARM::CheckCharacters(Vector<const uc16> str,
+                                              int cp_offset,
+                                              Label* on_failure,
+                                              bool check_end_of_string) {
+  if (on_failure == NULL) {
+    // Instead of inlining a backtrack for each test, (re)use the global
+    // backtrack target.
+    on_failure = &backtrack_label_;
+  }
+
+  if (check_end_of_string) {
+    // Is the last character of the required match inside the string?
+    CheckPosition(cp_offset + str.length() - 1, on_failure);
+  }
+
+  __ add(r0, end_of_input_address(), Operand(current_input_offset()));
+  if (cp_offset != 0) {
+    int byte_offset = cp_offset * char_size();
+    __ add(r0, r0, Operand(byte_offset));
+  }
+
+  // r0 : Address of characters to match against str.
+  int stored_high_byte = 0;
+  for (int i = 0; i < str.length(); i++) {
+    if (mode_ == ASCII) {
+      __ ldrb(r1, MemOperand(r0, char_size(), PostIndex));
+      ASSERT(str[i] <= String::kMaxAsciiCharCode);
+      __ cmp(r1, Operand(str[i]));
+    } else {
+      __ ldrh(r1, MemOperand(r0, char_size(), PostIndex));
+      uc16 match_char = str[i];
+      int match_high_byte = (match_char >> 8);
+      if (match_high_byte == 0) {
+        __ cmp(r1, Operand(str[i]));
+      } else {
+        if (match_high_byte != stored_high_byte) {
+          __ mov(r2, Operand(match_high_byte));
+          stored_high_byte = match_high_byte;
+        }
+        __ add(r3, r2, Operand(match_char & 0xff));
+        __ cmp(r1, r3);
+      }
+    }
+    BranchOrBacktrack(ne, on_failure);
+  }
+}
+
+
+void RegExpMacroAssemblerARM::CheckGreedyLoop(Label* on_equal) {
+  __ ldr(r0, MemOperand(backtrack_stackpointer(), 0));
+  __ cmp(current_input_offset(), r0);
+  __ add(backtrack_stackpointer(),
+         backtrack_stackpointer(), Operand(kPointerSize), LeaveCC, eq);
+  BranchOrBacktrack(eq, on_equal);
+}
+
+
+void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
+    int start_reg,
+    Label* on_no_match) {
+  Label fallthrough;
+  __ ldr(r0, register_location(start_reg));  // Index of start of capture
+  __ ldr(r1, register_location(start_reg + 1));  // Index of end of capture
+  __ sub(r1, r1, r0, SetCC);  // Length of capture.
+
+  // If length is zero, either the capture is empty or it is not participating.
+  // In either case succeed immediately.
+  __ b(eq, &fallthrough);
+
+  // Check that there are enough characters left in the input.
+  __ cmn(r1, Operand(current_input_offset()));
+  BranchOrBacktrack(gt, on_no_match);
+
+  if (mode_ == ASCII) {
+    Label success;
+    Label fail;
+    Label loop_check;
+
+    // r0 - offset of start of capture
+    // r1 - length of capture
+    __ add(r0, r0, Operand(end_of_input_address()));
+    __ add(r2, end_of_input_address(), Operand(current_input_offset()));
+    __ add(r1, r0, Operand(r1));
+
+    // r0 - Address of start of capture.
+    // r1 - Address of end of capture
+    // r2 - Address of current input position.
+
+    Label loop;
+    __ bind(&loop);
+    __ ldrb(r3, MemOperand(r0, char_size(), PostIndex));
+    __ ldrb(r4, MemOperand(r2, char_size(), PostIndex));
+    __ cmp(r4, r3);
+    __ b(eq, &loop_check);
+
+    // Mismatch, try case-insensitive match (converting letters to lower-case).
+    __ orr(r3, r3, Operand(0x20));  // Convert capture character to lower-case.
+    __ orr(r4, r4, Operand(0x20));  // Also convert input character.
+    __ cmp(r4, r3);
+    __ b(ne, &fail);
+    __ sub(r3, r3, Operand('a'));
+    __ cmp(r3, Operand('z' - 'a'));  // Is r3 a lowercase letter?
+    __ b(hi, &fail);
+
+    __ bind(&loop_check);
+    __ cmp(r0, r1);
+    __ b(lt, &loop);
+    __ jmp(&success);
+
+    __ bind(&fail);
+    BranchOrBacktrack(al, on_no_match);
+
+    __ bind(&success);
+    // Compute new value of character position after the matched part.
+    __ sub(current_input_offset(), r2, end_of_input_address());
+  } else {
+    ASSERT(mode_ == UC16);
+    int argument_count = 3;
+    FrameAlign(argument_count, r2);
+
+    // r0 - offset of start of capture
+    // r1 - length of capture
+
+    // Put arguments into arguments registers.
+    // Parameters are
+    //   r0: Address byte_offset1 - Address captured substring's start.
+    //   r1: Address byte_offset2 - Address of current character position.
+    //   r2: size_t byte_length - length of capture in bytes(!)
+
+    // Address of start of capture.
+    __ add(r0, r0, Operand(end_of_input_address()));
+    // Length of capture.
+    __ mov(r2, Operand(r1));
+    // Save length in callee-save register for use on return.
+    __ mov(r4, Operand(r1));
+    // Address of current input position.
+    __ add(r1, current_input_offset(), Operand(end_of_input_address()));
+
+    ExternalReference function =
+        ExternalReference::re_case_insensitive_compare_uc16();
+    CallCFunction(function, argument_count);
+
+    // Check if function returned non-zero for success or zero for failure.
+    __ cmp(r0, Operand(0));
+    BranchOrBacktrack(eq, on_no_match);
+    // On success, increment position by length of capture.
+    __ add(current_input_offset(), current_input_offset(), Operand(r4));
+  }
+
+  __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerARM::CheckNotBackReference(
+    int start_reg,
+    Label* on_no_match) {
+  Label fallthrough;
+  Label success;
+
+  // Find length of back-referenced capture.
+  __ ldr(r0, register_location(start_reg));
+  __ ldr(r1, register_location(start_reg + 1));
+  __ sub(r1, r1, r0, SetCC);  // Length to check.
+  // Succeed on empty capture (including no capture).
+  __ b(eq, &fallthrough);
+
+  // Check that there are enough characters left in the input.
+  __ cmn(r1, Operand(current_input_offset()));
+  BranchOrBacktrack(gt, on_no_match);
+
+  // Compute pointers to match string and capture string
+  __ add(r0, r0, Operand(end_of_input_address()));
+  __ add(r2, end_of_input_address(), Operand(current_input_offset()));
+  __ add(r1, r1, Operand(r0));
+
+  Label loop;
+  __ bind(&loop);
+  if (mode_ == ASCII) {
+    __ ldrb(r3, MemOperand(r0, char_size(), PostIndex));
+    __ ldrb(r4, MemOperand(r2, char_size(), PostIndex));
+  } else {
+    ASSERT(mode_ == UC16);
+    __ ldrh(r3, MemOperand(r0, char_size(), PostIndex));
+    __ ldrh(r4, MemOperand(r2, char_size(), PostIndex));
+  }
+  __ cmp(r3, r4);
+  BranchOrBacktrack(ne, on_no_match);
+  __ cmp(r0, r1);
+  __ b(lt, &loop);
+
+  // Move current character position to position after match.
+  __ sub(current_input_offset(), r2, end_of_input_address());
+  __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerARM::CheckNotRegistersEqual(int reg1,
+                                                      int reg2,
+                                                      Label* on_not_equal) {
+  __ ldr(r0, register_location(reg1));
+  __ ldr(r1, register_location(reg2));
+  __ cmp(r0, r1);
+  BranchOrBacktrack(ne, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerARM::CheckNotCharacter(uint32_t c,
+                                                Label* on_not_equal) {
+  __ cmp(current_character(), Operand(c));
+  BranchOrBacktrack(ne, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerARM::CheckCharacterAfterAnd(uint32_t c,
+                                                     uint32_t mask,
+                                                     Label* on_equal) {
+  __ and_(r0, current_character(), Operand(mask));
+  __ cmp(r0, Operand(c));
+  BranchOrBacktrack(eq, on_equal);
+}
+
+
+void RegExpMacroAssemblerARM::CheckNotCharacterAfterAnd(uint32_t c,
+                                                        uint32_t mask,
+                                                        Label* on_not_equal) {
+  __ and_(r0, current_character(), Operand(mask));
+  __ cmp(r0, Operand(c));
+  BranchOrBacktrack(ne, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerARM::CheckNotCharacterAfterMinusAnd(
+    uc16 c,
+    uc16 minus,
+    uc16 mask,
+    Label* on_not_equal) {
+  ASSERT(minus < String::kMaxUC16CharCode);
+  __ sub(r0, current_character(), Operand(minus));
+  __ and_(r0, r0, Operand(mask));
+  __ cmp(r0, Operand(c));
+  BranchOrBacktrack(ne, on_not_equal);
+}
+
+
+bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
+                                                         int cp_offset,
+                                                         bool check_offset,
+                                                         Label* on_no_match) {
+  // Range checks (c in min..max) are generally implemented by an unsigned
+  // (c - min) <= (max - min) check
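+  // For example, the ASCII digit check below subtracts '0' from the current
+  // character and then does a single unsigned compare against '9' - '0';
+  // the hi/ls conditions used here are the unsigned variants of gt/le.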
+  switch (type) {
+  case 's':
+    // Match space-characters
+    if (mode_ == ASCII) {
+      // ASCII space characters are '\t'..'\r' and ' '.
+      if (check_offset) {
+        LoadCurrentCharacter(cp_offset, on_no_match);
+      } else {
+        LoadCurrentCharacterUnchecked(cp_offset, 1);
+      }
+      Label success;
+      __ cmp(current_character(), Operand(' '));
+      __ b(eq, &success);
+      // Check range 0x09..0x0d
+      __ sub(r0, current_character(), Operand('\t'));
+      __ cmp(r0, Operand('\r' - '\t'));
+      BranchOrBacktrack(hi, on_no_match);
+      __ bind(&success);
+      return true;
+    }
+    return false;
+  case 'S':
+    // Match non-space characters.
+    if (check_offset) {
+      LoadCurrentCharacter(cp_offset, on_no_match, 1);
+    } else {
+      LoadCurrentCharacterUnchecked(cp_offset, 1);
+    }
+    if (mode_ == ASCII) {
+      // ASCII space characters are '\t'..'\r' and ' '.
+      __ cmp(current_character(), Operand(' '));
+      BranchOrBacktrack(eq, on_no_match);
+      __ sub(r0, current_character(), Operand('\t'));
+      __ cmp(r0, Operand('\r' - '\t'));
+      BranchOrBacktrack(ls, on_no_match);
+      return true;
+    }
+    return false;
+  case 'd':
+    // Match ASCII digits ('0'..'9')
+    if (check_offset) {
+      LoadCurrentCharacter(cp_offset, on_no_match, 1);
+    } else {
+      LoadCurrentCharacterUnchecked(cp_offset, 1);
+    }
+    __ sub(r0, current_character(), Operand('0'));
+    __ cmp(r0, Operand('9' - '0'));
+    BranchOrBacktrack(hi, on_no_match);
+    return true;
+  case 'D':
+    // Match non-digits (anything but ASCII '0'..'9').
+    if (check_offset) {
+      LoadCurrentCharacter(cp_offset, on_no_match, 1);
+    } else {
+      LoadCurrentCharacterUnchecked(cp_offset, 1);
+    }
+    __ sub(r0, current_character(), Operand('0'));
+    __ cmp(r0, Operand('9' - '0'));
+    BranchOrBacktrack(ls, on_no_match);
+    return true;
+  case '.': {
+    // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+    if (check_offset) {
+      LoadCurrentCharacter(cp_offset, on_no_match, 1);
+    } else {
+      LoadCurrentCharacterUnchecked(cp_offset, 1);
+    }
+    __ eor(r0, current_character(), Operand(0x01));
+    // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
+    __ sub(r0, r0, Operand(0x0b));
+    __ cmp(r0, Operand(0x0c - 0x0b));
+    BranchOrBacktrack(ls, on_no_match);
+    if (mode_ == UC16) {
+      // Compare original value to 0x2028 and 0x2029, using the already
+      // computed ((current_char ^ 0x01) - 0x0b). I.e., check for
+      // 0x201d (0x2028 - 0x0b) or 0x201e.
+      __ sub(r0, r0, Operand(0x2028 - 0x0b));
+      __ cmp(r0, Operand(1));
+      BranchOrBacktrack(ls, on_no_match);
+    }
+    return true;
+  }
+  case '*':
+    // Match any character.
+    if (check_offset) {
+      CheckPosition(cp_offset, on_no_match);
+    }
+    return true;
+  // No custom implementation (yet): w, W, s(UC16), S(UC16).
+  default:
+    return false;
+  }
+}
+
+
+void RegExpMacroAssemblerARM::Fail() {
+  __ mov(r0, Operand(FAILURE));
+  __ jmp(&exit_label_);
+}
+
+
+Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
+  // Finalize code - write the entry point code now that we know how many
+  // registers we need.
+
+  // Entry code:
+  __ bind(&entry_label_);
+  // Push Link register.
+  // Push arguments
+  // Save callee-save registers.
+  // Start new stack frame.
+  // Order here should correspond to order of offset constants in header file.
+  RegList registers_to_retain = r4.bit() | r5.bit() | r6.bit() |
+      r7.bit() | r8.bit() | r9.bit() | r10.bit() | fp.bit();
+  RegList argument_registers = r0.bit() | r1.bit() | r2.bit() | r3.bit();
+  __ stm(db_w, sp, argument_registers | registers_to_retain | lr.bit());
+  // Set frame pointer just above the arguments.
+  __ add(frame_pointer(), sp, Operand(4 * kPointerSize));
+  __ push(r0);  // Make room for "position - 1" constant (value is irrelevant).
+
+  // Check if we have space on the stack for registers.
+  Label stack_limit_hit;
+  Label stack_ok;
+
+  ExternalReference stack_guard_limit =
+      ExternalReference::address_of_stack_guard_limit();
+  __ mov(r0, Operand(stack_guard_limit));
+  __ ldr(r0, MemOperand(r0));
+  __ sub(r0, sp, r0, SetCC);
+  // Handle it if the stack pointer is already below the stack limit.
+  __ b(ls, &stack_limit_hit);
+  // Check if there is room for the variable number of registers above
+  // the stack limit.
+  __ cmp(r0, Operand(num_registers_ * kPointerSize));
+  __ b(hs, &stack_ok);
+  // Exit with OutOfMemory exception. There is not enough space on the stack
+  // for our working registers.
+  __ mov(r0, Operand(EXCEPTION));
+  __ jmp(&exit_label_);
+
+  __ bind(&stack_limit_hit);
+  CallCheckStackGuardState(r0);
+  __ cmp(r0, Operand(0));
+  // If returned value is non-zero, we exit with the returned value as result.
+  __ b(ne, &exit_label_);
+
+  __ bind(&stack_ok);
+
+  // Allocate space on stack for registers.
+  __ sub(sp, sp, Operand(num_registers_ * kPointerSize));
+  // Load string end.
+  __ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+  // Load input start.
+  __ ldr(r0, MemOperand(frame_pointer(), kInputStart));
+  // Find negative length (offset of start relative to end).
+  __ sub(current_input_offset(), r0, end_of_input_address());
+  // Set r0 to address of char before start of input
+  // (effectively string position -1).
+  __ sub(r0, current_input_offset(), Operand(char_size()));
+  // Store this value in a local variable, for use when clearing
+  // position registers.
+  __ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
+  if (num_saved_registers_ > 0) {  // Always is, if generated from a regexp.
+    // Fill saved registers with initial value = start offset - 1
+
+    // Address of register 0.
+    __ add(r1, frame_pointer(), Operand(kRegisterZero));
+    __ mov(r2, Operand(num_saved_registers_));
+    Label init_loop;
+    __ bind(&init_loop);
+    __ str(r0, MemOperand(r1, kPointerSize, NegPostIndex));
+    __ sub(r2, r2, Operand(1), SetCC);
+    __ b(ne, &init_loop);
+  }
+
+  // Initialize backtrack stack pointer.
+  __ ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
+  // Initialize code pointer register
+  __ mov(code_pointer(), Operand(masm_->CodeObject()));
+  // Load previous char as initial value of current character register.
+  Label at_start;
+  __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
+  __ cmp(r0, Operand(0));
+  __ b(ne, &at_start);
+  LoadCurrentCharacterUnchecked(-1, 1);  // Load previous char.
+  __ jmp(&start_label_);
+  __ bind(&at_start);
+  __ mov(current_character(), Operand('\n'));
+  __ jmp(&start_label_);
+
+
+  // Exit code:
+  if (success_label_.is_linked()) {
+    // Save captures when successful.
+    __ bind(&success_label_);
+    if (num_saved_registers_ > 0) {
+      // copy captures to output
+      __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
+      __ ldr(r0, MemOperand(frame_pointer(), kRegisterOutput));
+      __ sub(r1, end_of_input_address(), r1);
+      // r1 is length of input in bytes.
+      if (mode_ == UC16) {
+        __ mov(r1, Operand(r1, LSR, 1));
+      }
+      // r1 is length of input in characters.
+
+      ASSERT_EQ(0, num_saved_registers_ % 2);
+      // Always an even number of capture registers. This allows us to
+      // unroll the loop once to add an operation between a load of a register
+      // and the following use of that register.
+      for (int i = 0; i < num_saved_registers_; i += 2) {
+        __ ldr(r2, register_location(i));
+        __ ldr(r3, register_location(i + 1));
+        if (mode_ == UC16) {
+          __ add(r2, r1, Operand(r2, ASR, 1));
+          __ add(r3, r1, Operand(r3, ASR, 1));
+        } else {
+          __ add(r2, r1, Operand(r2));
+          __ add(r3, r1, Operand(r3));
+        }
+        __ str(r2, MemOperand(r0, kPointerSize, PostIndex));
+        __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
+      }
+    }
+    __ mov(r0, Operand(SUCCESS));
+  }
+  // Exit and return r0
+  __ bind(&exit_label_);
+  // Skip sp past regexp registers and local variables.
+  __ mov(sp, frame_pointer());
+  // Restore registers r4..r11 and return (restoring lr to pc).
+  __ ldm(ia_w, sp, registers_to_retain | pc.bit());
+
+  // Backtrack code (branch target for conditional backtracks).
+  if (backtrack_label_.is_linked()) {
+    __ bind(&backtrack_label_);
+    Backtrack();
+  }
+
+  Label exit_with_exception;
+
+  // Preempt-code
+  if (check_preempt_label_.is_linked()) {
+    SafeCallTarget(&check_preempt_label_);
+
+    CallCheckStackGuardState(r0);
+    __ cmp(r0, Operand(0));
+    // If returning non-zero, we should end execution with the given
+    // result as return value.
+    __ b(ne, &exit_label_);
+
+    // String might have moved: Reload end of string from frame.
+    __ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+    SafeReturn();
+  }
+
+  // Backtrack stack overflow code.
+  if (stack_overflow_label_.is_linked()) {
+    SafeCallTarget(&stack_overflow_label_);
+    // Reached if the backtrack-stack limit has been hit.
+
+    Label grow_failed;
+
+    // Call GrowStack(backtrack_stackpointer())
+    int num_arguments = 2;
+    FrameAlign(num_arguments, r0);
+    __ mov(r0, backtrack_stackpointer());
+    __ add(r1, frame_pointer(), Operand(kStackHighEnd));
+    ExternalReference grow_stack =
+      ExternalReference::re_grow_stack();
+    CallCFunction(grow_stack, num_arguments);
+    // If it returns NULL, we have failed to grow the stack, and
+    // must exit with a stack-overflow exception.
+    __ cmp(r0, Operand(0));
+    __ b(eq, &exit_with_exception);
+    // Otherwise use return value as new stack pointer.
+    __ mov(backtrack_stackpointer(), r0);
+    // Restore saved registers and continue.
+    SafeReturn();
+  }
+
+  if (exit_with_exception.is_linked()) {
+    // If any of the code above needed to exit with an exception.
+    __ bind(&exit_with_exception);
+    // Exit with Result EXCEPTION(-1) to signal thrown exception.
+    __ mov(r0, Operand(EXCEPTION));
+    __ jmp(&exit_label_);
+  }
+
+  CodeDesc code_desc;
+  masm_->GetCode(&code_desc);
+  Handle<Code> code = Factory::NewCode(code_desc,
+                                       NULL,
+                                       Code::ComputeFlags(Code::REGEXP),
+                                       masm_->CodeObject());
+  LOG(RegExpCodeCreateEvent(*code, *source));
+  return Handle<Object>::cast(code);
+}
+
+
+void RegExpMacroAssemblerARM::GoTo(Label* to) {
+  BranchOrBacktrack(al, to);
+}
+
+
+void RegExpMacroAssemblerARM::IfRegisterGE(int reg,
+                                           int comparand,
+                                           Label* if_ge) {
+  __ ldr(r0, register_location(reg));
+  __ cmp(r0, Operand(comparand));
+  BranchOrBacktrack(ge, if_ge);
+}
+
+
+void RegExpMacroAssemblerARM::IfRegisterLT(int reg,
+                                           int comparand,
+                                           Label* if_lt) {
+  __ ldr(r0, register_location(reg));
+  __ cmp(r0, Operand(comparand));
+  BranchOrBacktrack(lt, if_lt);
+}
+
+
+void RegExpMacroAssemblerARM::IfRegisterEqPos(int reg,
+                                              Label* if_eq) {
+  __ ldr(r0, register_location(reg));
+  __ cmp(r0, Operand(current_input_offset()));
+  BranchOrBacktrack(eq, if_eq);
+}
+
+
+RegExpMacroAssembler::IrregexpImplementation
+    RegExpMacroAssemblerARM::Implementation() {
+  return kARMImplementation;
+}
+
+
+void RegExpMacroAssemblerARM::LoadCurrentCharacter(int cp_offset,
+                                                   Label* on_end_of_input,
+                                                   bool check_bounds,
+                                                   int characters) {
+  ASSERT(cp_offset >= -1);      // ^ and \b can look behind one character.
+  ASSERT(cp_offset < (1<<30));  // Be sane! (And ensure negation works)
+  if (check_bounds) {
+    CheckPosition(cp_offset + characters - 1, on_end_of_input);
+  }
+  LoadCurrentCharacterUnchecked(cp_offset, characters);
+}
+
+
+void RegExpMacroAssemblerARM::PopCurrentPosition() {
+  Pop(current_input_offset());
+}
+
+
+void RegExpMacroAssemblerARM::PopRegister(int register_index) {
+  Pop(r0);
+  __ str(r0, register_location(register_index));
+}
+
+
+static bool is_valid_memory_offset(int value) {
+  if (value < 0) value = -value;
+  return value < (1<<12);
+}
+
+
+void RegExpMacroAssemblerARM::PushBacktrack(Label* label) {
+  if (label->is_bound()) {
+    int target = label->pos();
+    __ mov(r0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
+  } else {
+    int constant_offset = GetBacktrackConstantPoolEntry();
+    masm_->label_at_put(label, constant_offset);
+    // Reading pc-relative is based on the address 8 bytes ahead of
+    // the current opcode.
+    unsigned int offset_of_pc_register_read =
+      masm_->pc_offset() + Assembler::kPcLoadDelta;
+    int pc_offset_of_constant =
+      constant_offset - offset_of_pc_register_read;
+    ASSERT(pc_offset_of_constant < 0);
+    if (is_valid_memory_offset(pc_offset_of_constant)) {
+      masm_->BlockConstPoolBefore(masm_->pc_offset() + Assembler::kInstrSize);
+      __ ldr(r0, MemOperand(pc, pc_offset_of_constant));
+    } else {
+      // Not a 12-bit offset, so it needs to be loaded from the constant
+      // pool.
+      masm_->BlockConstPoolBefore(
+          masm_->pc_offset() + 2 * Assembler::kInstrSize);
+      __ mov(r0, Operand(pc_offset_of_constant + Assembler::kInstrSize));
+      __ ldr(r0, MemOperand(pc, r0));
+    }
+  }
+  Push(r0);
+  CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerARM::PushCurrentPosition() {
+  Push(current_input_offset());
+}
+
+
+void RegExpMacroAssemblerARM::PushRegister(int register_index,
+                                           StackCheckFlag check_stack_limit) {
+  __ ldr(r0, register_location(register_index));
+  Push(r0);
+  if (check_stack_limit) CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerARM::ReadCurrentPositionFromRegister(int reg) {
+  __ ldr(current_input_offset(), register_location(reg));
+}
+
+
+void RegExpMacroAssemblerARM::ReadStackPointerFromRegister(int reg) {
+  __ ldr(backtrack_stackpointer(), register_location(reg));
+  __ ldr(r0, MemOperand(frame_pointer(), kStackHighEnd));
+  __ add(backtrack_stackpointer(), backtrack_stackpointer(), Operand(r0));
+}
+
+
+void RegExpMacroAssemblerARM::SetRegister(int register_index, int to) {
+  ASSERT(register_index >= num_saved_registers_);  // Reserved for positions!
+  __ mov(r0, Operand(to));
+  __ str(r0, register_location(register_index));
+}
+
+
+void RegExpMacroAssemblerARM::Succeed() {
+  __ jmp(&success_label_);
+}
+
+
+void RegExpMacroAssemblerARM::WriteCurrentPositionToRegister(int reg,
+                                                             int cp_offset) {
+  if (cp_offset == 0) {
+    __ str(current_input_offset(), register_location(reg));
+  } else {
+    __ add(r0, current_input_offset(), Operand(cp_offset * char_size()));
+    __ str(r0, register_location(reg));
+  }
+}
+
+
+void RegExpMacroAssemblerARM::ClearRegisters(int reg_from, int reg_to) {
+  ASSERT(reg_from <= reg_to);
+  __ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
+  for (int reg = reg_from; reg <= reg_to; reg++) {
+    __ str(r0, register_location(reg));
+  }
+}
+
+
+void RegExpMacroAssemblerARM::WriteStackPointerToRegister(int reg) {
+  __ ldr(r1, MemOperand(frame_pointer(), kStackHighEnd));
+  __ sub(r0, backtrack_stackpointer(), r1);
+  __ str(r0, register_location(reg));
+}
+
+
+// Private methods:
+
+void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) {
+  int num_arguments = 3;
+  FrameAlign(num_arguments, scratch);
+  // RegExp code frame pointer.
+  __ mov(r2, frame_pointer());
+  // Code* of self.
+  __ mov(r1, Operand(masm_->CodeObject()));
+  // r0 becomes return address pointer.
+  ExternalReference stack_guard_check =
+      ExternalReference::re_check_stack_guard_state();
+  CallCFunctionUsingStub(stack_guard_check, num_arguments);
+}
+
+
+// Helper function for reading a value out of a stack frame.
+template <typename T>
+static T& frame_entry(Address re_frame, int frame_offset) {
+  return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
+}
+
+
+int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
+                                                  Code* re_code,
+                                                  Address re_frame) {
+  if (StackGuard::IsStackOverflow()) {
+    Top::StackOverflow();
+    return EXCEPTION;
+  }
+
+  // If it was not a real stack overflow, the stack guard was used to
+  // interrupt execution for another purpose.
+
+  // Prepare for possible GC.
+  HandleScope handles;
+  Handle<Code> code_handle(re_code);
+
+  Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
+  // Current string.
+  bool is_ascii = subject->IsAsciiRepresentation();
+
+  ASSERT(re_code->instruction_start() <= *return_address);
+  ASSERT(*return_address <=
+      re_code->instruction_start() + re_code->instruction_size());
+
+  Object* result = Execution::HandleStackGuardInterrupt();
+
+  if (*code_handle != re_code) {  // Return address no longer valid
+    int delta = *code_handle - re_code;
+    // Overwrite the return address on the stack.
+    *return_address += delta;
+  }
+
+  if (result->IsException()) {
+    return EXCEPTION;
+  }
+
+  // String might have changed.
+  if (subject->IsAsciiRepresentation() != is_ascii) {
+    // If we changed between an ASCII and a UC16 string, the specialized
+    // code cannot be used, and we need to restart regexp matching from
+    // scratch (including, potentially, compiling a new version of the code).
+    return RETRY;
+  }
+
+  // Otherwise, the content of the string might have moved. It must still
+  // be a sequential or external string with the same content.
+  // Update the start and end pointers in the stack frame to the current
+  // location (whether it has actually moved or not).
+  ASSERT(StringShape(*subject).IsSequential() ||
+      StringShape(*subject).IsExternal());
+
+  // The original start address of the characters to match.
+  const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
+
+  // Find the current start address of the same character at the current string
+  // position.
+  int start_index = frame_entry<int>(re_frame, kStartIndex);
+  const byte* new_address = StringCharacterPosition(*subject, start_index);
+
+  if (start_address != new_address) {
+    // If there is a difference, update the object pointer and start and end
+    // addresses in the RegExp stack frame to match the new value.
+    const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
+    int byte_length = end_address - start_address;
+    frame_entry<const String*>(re_frame, kInputString) = *subject;
+    frame_entry<const byte*>(re_frame, kInputStart) = new_address;
+    frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+  }
+
+  return 0;
+}
+
+
+MemOperand RegExpMacroAssemblerARM::register_location(int register_index) {
+  ASSERT(register_index < (1<<30));
+  if (num_registers_ <= register_index) {
+    num_registers_ = register_index + 1;
+  }
+  return MemOperand(frame_pointer(),
+                    kRegisterZero - register_index * kPointerSize);
+}
+
+
+void RegExpMacroAssemblerARM::CheckPosition(int cp_offset,
+                                            Label* on_outside_input) {
+  __ cmp(current_input_offset(), Operand(-cp_offset * char_size()));
+  BranchOrBacktrack(ge, on_outside_input);
+}
+
+
+void RegExpMacroAssemblerARM::BranchOrBacktrack(Condition condition,
+                                                Label* to) {
+  if (condition == al) {  // Unconditional.
+    if (to == NULL) {
+      Backtrack();
+      return;
+    }
+    __ jmp(to);
+    return;
+  }
+  if (to == NULL) {
+    __ b(condition, &backtrack_label_);
+    return;
+  }
+  __ b(condition, to);
+}
+
+
+void RegExpMacroAssemblerARM::SafeCall(Label* to, Condition cond) {
+  __ bl(to, cond);
+}
+
+
+void RegExpMacroAssemblerARM::SafeReturn() {
+  __ pop(lr);
+  __ add(pc, lr, Operand(masm_->CodeObject()));
+}
+
+
+void RegExpMacroAssemblerARM::SafeCallTarget(Label* name) {
+  __ bind(name);
+  __ sub(lr, lr, Operand(masm_->CodeObject()));
+  __ push(lr);
+}
+
+
+void RegExpMacroAssemblerARM::Push(Register source) {
+  ASSERT(!source.is(backtrack_stackpointer()));
+  __ str(source,
+         MemOperand(backtrack_stackpointer(), kPointerSize, NegPreIndex));
+}
+
+
+void RegExpMacroAssemblerARM::Pop(Register target) {
+  ASSERT(!target.is(backtrack_stackpointer()));
+  __ ldr(target,
+         MemOperand(backtrack_stackpointer(), kPointerSize, PostIndex));
+}
+
+
+void RegExpMacroAssemblerARM::CheckPreemption() {
+  // Check for preemption.
+  ExternalReference stack_guard_limit =
+      ExternalReference::address_of_stack_guard_limit();
+  __ mov(r0, Operand(stack_guard_limit));
+  __ ldr(r0, MemOperand(r0));
+  __ cmp(sp, r0);
+  SafeCall(&check_preempt_label_, ls);
+}
+
+
+void RegExpMacroAssemblerARM::CheckStackLimit() {
+  if (FLAG_check_stack) {
+    ExternalReference stack_limit =
+        ExternalReference::address_of_regexp_stack_limit();
+    __ mov(r0, Operand(stack_limit));
+    __ ldr(r0, MemOperand(r0));
+    __ cmp(backtrack_stackpointer(), Operand(r0));
+    SafeCall(&stack_overflow_label_, ls);
+  }
+}
+
+
+void RegExpMacroAssemblerARM::EmitBacktrackConstantPool() {
+  __ CheckConstPool(false, false);
+  __ BlockConstPoolBefore(
+      masm_->pc_offset() + kBacktrackConstantPoolSize * Assembler::kInstrSize);
+  backtrack_constant_pool_offset_ = masm_->pc_offset();
+  for (int i = 0; i < kBacktrackConstantPoolSize; i++) {
+    __ emit(0);
+  }
+
+  backtrack_constant_pool_capacity_ = kBacktrackConstantPoolSize;
+}
+
+
+int RegExpMacroAssemblerARM::GetBacktrackConstantPoolEntry() {
+  while (backtrack_constant_pool_capacity_ > 0) {
+    int offset = backtrack_constant_pool_offset_;
+    backtrack_constant_pool_offset_ += kPointerSize;
+    backtrack_constant_pool_capacity_--;
+    if (masm_->pc_offset() - offset < 2 * KB) {
+      return offset;
+    }
+  }
+  Label new_pool_skip;
+  __ jmp(&new_pool_skip);
+  EmitBacktrackConstantPool();
+  __ bind(&new_pool_skip);
+  int offset = backtrack_constant_pool_offset_;
+  backtrack_constant_pool_offset_ += kPointerSize;
+  backtrack_constant_pool_capacity_--;
+  return offset;
+}
+
+
+void RegExpMacroAssemblerARM::FrameAlign(int num_arguments, Register scratch) {
+  int frameAlignment = OS::ActivationFrameAlignment();
+  // Up to four simple arguments are passed in registers r0..r3.
+  int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4;
+  if (frameAlignment != 0) {
+    // Make stack end at alignment and make room for num_arguments - 4 words
+    // and the original value of sp.
+    __ mov(scratch, sp);
+    __ sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
+    ASSERT(IsPowerOf2(frameAlignment));
+    __ and_(sp, sp, Operand(-frameAlignment));
+    __ str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+  } else {
+    __ sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
+  }
+}
+
+
+void RegExpMacroAssemblerARM::CallCFunction(ExternalReference function,
+                                            int num_arguments) {
+  __ mov(r5, Operand(function));
+  // Just call directly. The function called cannot cause a GC, or
+  // allow preemption, so the return address in the link register
+  // stays correct.
+  __ Call(r5);
+  int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4;
+  if (OS::ActivationFrameAlignment() > kIntSize) {
+    __ ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
+  } else {
+    __ add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
+  }
+  __ mov(code_pointer(), Operand(masm_->CodeObject()));
+}
+
+
+void RegExpMacroAssemblerARM::CallCFunctionUsingStub(
+    ExternalReference function,
+    int num_arguments) {
+  // Must pass all arguments in registers. The stub pushes on the stack.
+  ASSERT(num_arguments <= 4);
+  __ mov(r5, Operand(function));
+  RegExpCEntryStub stub;
+  __ CallStub(&stub);
+  if (OS::ActivationFrameAlignment() != 0) {
+    __ ldr(sp, MemOperand(sp, 0));
+  }
+  __ mov(code_pointer(), Operand(masm_->CodeObject()));
+}
+
+
+void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
+                                                            int characters) {
+  Register offset = current_input_offset();
+  if (cp_offset != 0) {
+    __ add(r0, current_input_offset(), Operand(cp_offset * char_size()));
+    offset = r0;
+  }
+  // We assume that we cannot do unaligned loads on ARM, so this function
+  // must only be used to load a single character at a time.
+  ASSERT(characters == 1);
+  if (mode_ == ASCII) {
+    __ ldrb(current_character(), MemOperand(end_of_input_address(), offset));
+  } else {
+    ASSERT(mode_ == UC16);
+    __ ldrh(current_character(), MemOperand(end_of_input_address(), offset));
+  }
+}
+
+
+void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
+  int stack_alignment = OS::ActivationFrameAlignment();
+  if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
+  // Stack is already aligned for call, so decrement by alignment
+  // to make room for storing the link register.
+  __ str(lr, MemOperand(sp, stack_alignment, NegPreIndex));
+  __ mov(r0, sp);
+  __ Call(r5);
+  __ ldr(pc, MemOperand(sp, stack_alignment, PostIndex));
+}
+
+#undef __
+
+#endif  // V8_NATIVE_REGEXP
+
+}}  // namespace v8::internal
diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h
new file mode 100644
index 0000000..0711ac1
--- /dev/null
+++ b/src/arm/regexp-macro-assembler-arm.h
@@ -0,0 +1,268 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
+#define V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
+
+namespace v8 {
+namespace internal {
+
+
+#ifndef V8_NATIVE_REGEXP
+class RegExpMacroAssemblerARM: public RegExpMacroAssembler {
+ public:
+  RegExpMacroAssemblerARM();
+  virtual ~RegExpMacroAssemblerARM();
+};
+
+#else
+class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
+ public:
+  RegExpMacroAssemblerARM(Mode mode, int registers_to_save);
+  virtual ~RegExpMacroAssemblerARM();
+  virtual int stack_limit_slack();
+  virtual void AdvanceCurrentPosition(int by);
+  virtual void AdvanceRegister(int reg, int by);
+  virtual void Backtrack();
+  virtual void Bind(Label* label);
+  virtual void CheckAtStart(Label* on_at_start);
+  virtual void CheckCharacter(uint32_t c, Label* on_equal);
+  virtual void CheckCharacterAfterAnd(uint32_t c,
+                                      uint32_t mask,
+                                      Label* on_equal);
+  virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
+  virtual void CheckCharacterLT(uc16 limit, Label* on_less);
+  virtual void CheckCharacters(Vector<const uc16> str,
+                               int cp_offset,
+                               Label* on_failure,
+                               bool check_end_of_string);
+  // A "greedy loop" is a loop that is both greedy and has a simple
+  // body. It has a particularly simple implementation.
+  virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+  virtual void CheckNotAtStart(Label* on_not_at_start);
+  virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+  virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+                                               Label* on_no_match);
+  virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
+  virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
+  virtual void CheckNotCharacterAfterAnd(uint32_t c,
+                                         uint32_t mask,
+                                         Label* on_not_equal);
+  virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
+                                              uc16 minus,
+                                              uc16 mask,
+                                              Label* on_not_equal);
+  // Checks whether the given offset from the current position is before
+  // the end of the string.
+  virtual void CheckPosition(int cp_offset, Label* on_outside_input);
+  virtual bool CheckSpecialCharacterClass(uc16 type,
+                                          int cp_offset,
+                                          bool check_offset,
+                                          Label* on_no_match);
+  virtual void Fail();
+  virtual Handle<Object> GetCode(Handle<String> source);
+  virtual void GoTo(Label* label);
+  virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+  virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+  virtual void IfRegisterEqPos(int reg, Label* if_eq);
+  virtual IrregexpImplementation Implementation();
+  virtual void LoadCurrentCharacter(int cp_offset,
+                                    Label* on_end_of_input,
+                                    bool check_bounds = true,
+                                    int characters = 1);
+  virtual void PopCurrentPosition();
+  virtual void PopRegister(int register_index);
+  virtual void PushBacktrack(Label* label);
+  virtual void PushCurrentPosition();
+  virtual void PushRegister(int register_index,
+                            StackCheckFlag check_stack_limit);
+  virtual void ReadCurrentPositionFromRegister(int reg);
+  virtual void ReadStackPointerFromRegister(int reg);
+  virtual void SetRegister(int register_index, int to);
+  virtual void Succeed();
+  virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+  virtual void ClearRegisters(int reg_from, int reg_to);
+  virtual void WriteStackPointerToRegister(int reg);
+
+  // Called from RegExp if the stack-guard is triggered.
+  // If the code object is relocated, the return address is fixed before
+  // returning.
+  static int CheckStackGuardState(Address* return_address,
+                                  Code* re_code,
+                                  Address re_frame);
+ private:
+  // Offsets from frame_pointer() of function parameters and stored registers.
+  static const int kFramePointer = 0;
+
+  // Above the frame pointer - Stored registers and stack passed parameters.
+  // Register 4..11.
+  static const int kStoredRegisters = kFramePointer;
+  // Return address (stored from link register, read into pc on return).
+  static const int kReturnAddress = kStoredRegisters + 8 * kPointerSize;
+  // Stack parameters placed by caller.
+  static const int kRegisterOutput = kReturnAddress + kPointerSize;
+  static const int kAtStart = kRegisterOutput + kPointerSize;
+  static const int kStackHighEnd = kAtStart + kPointerSize;
+
+  // Below the frame pointer.
+  // Register parameters stored by setup code.
+  static const int kInputEnd = kFramePointer - kPointerSize;
+  static const int kInputStart = kInputEnd - kPointerSize;
+  static const int kStartIndex = kInputStart - kPointerSize;
+  static const int kInputString = kStartIndex - kPointerSize;
+  // When adding local variables remember to push space for them in
+  // the frame in GetCode.
+  static const int kInputStartMinusOne = kInputString - kPointerSize;
+  // First register address. Following registers are below it on the stack.
+  static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+
+  // Initial size of code buffer.
+  static const size_t kRegExpCodeSize = 1024;
+
+  static const int kBacktrackConstantPoolSize = 4;
+
+  // Load a number of characters at the given offset from the
+  // current position, into the current-character register.
+  void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
+
+  // Check whether preemption has been requested.
+  void CheckPreemption();
+
+  // Check whether we are exceeding the stack limit on the backtrack stack.
+  void CheckStackLimit();
+
+  void EmitBacktrackConstantPool();
+  int GetBacktrackConstantPoolEntry();
+
+
+  // Generate a call to CheckStackGuardState.
+  void CallCheckStackGuardState(Register scratch);
+
+  // The frame-pointer-relative location of a regexp register.
+  MemOperand register_location(int register_index);
+
+  // Register holding the current input position as negative offset from
+  // the end of the string.
+  inline Register current_input_offset() { return r6; }
+
+  // The register containing the current character after LoadCurrentCharacter.
+  inline Register current_character() { return r7; }
+
+  // Register holding address of the end of the input string.
+  inline Register end_of_input_address() { return r10; }
+
+  // Register holding the frame address. Local variables, parameters and
+  // regexp registers are addressed relative to this.
+  inline Register frame_pointer() { return fp; }
+
+  // The register containing the backtrack stack top. Provides a meaningful
+  // name to the register.
+  inline Register backtrack_stackpointer() { return r8; }
+
+  // Register holding pointer to the current code object.
+  inline Register code_pointer() { return r5; }
+
+  // Byte size of chars in the string to match (decided by the Mode argument)
+  inline int char_size() { return static_cast<int>(mode_); }
+
+  // Equivalent to a conditional branch to the label, unless the label
+  // is NULL, in which case it is a conditional Backtrack.
+  void BranchOrBacktrack(Condition condition, Label* to);
+
+  // Call and return internally in the generated code in a way that
+  // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
+  inline void SafeCall(Label* to, Condition cond = al);
+  inline void SafeReturn();
+  inline void SafeCallTarget(Label* name);
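+  // (SafeCallTarget subtracts the code object address from lr before pushing
+  // it, and SafeReturn pops it and adds the code object back into pc, so
+  // only a relocation-safe offset is ever stored on the stack.)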
+
+  // Pushes the value of a register on the backtrack stack. Decrements the
+  // stack pointer by a word size and stores the register's value there.
+  inline void Push(Register source);
+
+  // Pops a value from the backtrack stack. Reads the word at the stack pointer
+  // and increments it by a word size.
+  inline void Pop(Register target);
+
+  // Before calling a C-function from generated code, align arguments on stack.
+  // After aligning the frame, non-register arguments must be stored in
+  // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
+  // are word sized.
+  // Some compilers/platforms require the stack to be aligned when calling
+  // C++ code.
+  // Needs a scratch register to do some arithmetic. This register will be
+  // trashed.
+  inline void FrameAlign(int num_arguments, Register scratch);
+
+  // Calls a C function and cleans up the space for arguments allocated
+  // by FrameAlign. The called function is not allowed to trigger a garbage
+  // collection.
+  inline void CallCFunction(ExternalReference function,
+                            int num_arguments);
+
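+  // Sketch of the typical pattern in this file (see the GrowStack call in
+  // GetCode), assuming two word-sized arguments:
+  //
+  //   int num_arguments = 2;
+  //   FrameAlign(num_arguments, r0);          // Align sp; remember old sp.
+  //   __ mov(r0, backtrack_stackpointer());   // Arguments go in r0..r3.
+  //   __ add(r1, frame_pointer(), Operand(kStackHighEnd));
+  //   CallCFunction(ExternalReference::re_grow_stack(), num_arguments);
+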
+  // Calls a C function and cleans up the frame alignment done by
+  // FrameAlign. The called function *is* allowed to trigger a garbage
+  // collection, but may not take more than four arguments (no arguments
+  // passed on the stack), and the first argument will be a pointer to the
+  // return address.
+  inline void CallCFunctionUsingStub(ExternalReference function,
+                                     int num_arguments);
+
+
+  MacroAssembler* masm_;
+
+  // Which mode to generate code for (ASCII or UC16).
+  Mode mode_;
+
+  // One greater than maximal register index actually used.
+  int num_registers_;
+
+  // Number of registers to output at the end (the saved registers
+  // are always 0..num_saved_registers_-1)
+  int num_saved_registers_;
+
+  // Manage a small pre-allocated pool used to write label targets to
+  // when pushing backtrack addresses.
+  int backtrack_constant_pool_offset_;
+  int backtrack_constant_pool_capacity_;
+
+  // Labels used internally.
+  Label entry_label_;
+  Label start_label_;
+  Label success_label_;
+  Label backtrack_label_;
+  Label exit_label_;
+  Label check_preempt_label_;
+  Label stack_overflow_label_;
+};
+
+
+#endif  // V8_NATIVE_REGEXP
+
+
+}}  // namespace v8::internal
+
+#endif  // V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
diff --git a/src/arm/register-allocator-arm-inl.h b/src/arm/register-allocator-arm-inl.h
new file mode 100644
index 0000000..4691f29
--- /dev/null
+++ b/src/arm/register-allocator-arm-inl.h
@@ -0,0 +1,103 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
+#define V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
+
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+bool RegisterAllocator::IsReserved(Register reg) {
+  return reg.is(cp) || reg.is(fp) || reg.is(sp) || reg.is(pc);
+}
+
+
+// The register allocator uses small integers to represent the
+// non-reserved assembler registers.  The mapping is:
+//
+// r0 <-> 0
+// r1 <-> 1
+// r2 <-> 2
+// r3 <-> 3
+// r4 <-> 4
+// r5 <-> 5
+// r6 <-> 6
+// r7 <-> 7
+// r9 <-> 8
+// r10 <-> 9
+// ip <-> 10
+// lr <-> 11
+
+int RegisterAllocator::ToNumber(Register reg) {
+  ASSERT(reg.is_valid() && !IsReserved(reg));
+  const int kNumbers[] = {
+    0,   // r0
+    1,   // r1
+    2,   // r2
+    3,   // r3
+    4,   // r4
+    5,   // r5
+    6,   // r6
+    7,   // r7
+    -1,  // cp
+    8,   // r9
+    9,   // r10
+    -1,  // fp
+    10,  // ip
+    -1,  // sp
+    11,  // lr
+    -1   // pc
+  };
+  return kNumbers[reg.code()];
+}
+
+
+Register RegisterAllocator::ToRegister(int num) {
+  ASSERT(num >= 0 && num < kNumRegisters);
+  const Register kRegisters[] =
+      { r0, r1, r2, r3, r4, r5, r6, r7, r9, r10, ip, lr };
+  return kRegisters[num];
+}
+
+
+void RegisterAllocator::Initialize() {
+  Reset();
+  // The non-reserved r1 and lr registers are live on JS function entry.
+  Use(r1);  // JS function.
+  Use(lr);  // Return address.
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
diff --git a/src/arm/register-allocator-arm.cc b/src/arm/register-allocator-arm.cc
new file mode 100644
index 0000000..ad0c7f9
--- /dev/null
+++ b/src/arm/register-allocator-arm.cc
@@ -0,0 +1,59 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "register-allocator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// Result implementation.
+
+void Result::ToRegister() {
+  UNIMPLEMENTED();
+}
+
+
+void Result::ToRegister(Register target) {
+  UNIMPLEMENTED();
+}
+
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
+  // No byte registers on ARM.
+  UNREACHABLE();
+  return Result();
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/arm/register-allocator-arm.h b/src/arm/register-allocator-arm.h
new file mode 100644
index 0000000..f953ed9
--- /dev/null
+++ b/src/arm/register-allocator-arm.h
@@ -0,0 +1,43 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_REGISTER_ALLOCATOR_ARM_H_
+#define V8_ARM_REGISTER_ALLOCATOR_ARM_H_
+
+namespace v8 {
+namespace internal {
+
+class RegisterAllocatorConstants : public AllStatic {
+ public:
+  static const int kNumRegisters = 12;
+  static const int kInvalidRegister = -1;
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_ARM_REGISTER_ALLOCATOR_ARM_H_
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
new file mode 100644
index 0000000..22bec82
--- /dev/null
+++ b/src/arm/simulator-arm.cc
@@ -0,0 +1,1929 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+#include <cstdarg>
+#include "v8.h"
+
+#include "disasm.h"
+#include "assembler.h"
+#include "arm/constants-arm.h"
+#include "arm/simulator-arm.h"
+
+#if !defined(__arm__)
+
+// Only build the simulator if not compiling for real ARM hardware.
+namespace assembler {
+namespace arm {
+
+using ::v8::internal::Object;
+using ::v8::internal::PrintF;
+using ::v8::internal::OS;
+using ::v8::internal::ReadLine;
+using ::v8::internal::DeleteArray;
+
+// This macro provides a platform independent use of sscanf. The reason for
+// SScanF not being implemented in a platform independent way through
+// ::v8::internal::OS in the same way as SNPrintF is that the Windows C Run-Time
+// Library does not provide vsscanf.
+#define SScanF sscanf  // NOLINT
+
+// The Debugger class is used by the simulator while debugging simulated ARM
+// code.
+class Debugger {
+ public:
+  explicit Debugger(Simulator* sim);
+  ~Debugger();
+
+  void Stop(Instr* instr);
+  void Debug();
+
+ private:
+  static const instr_t kBreakpointInstr =
+      ((AL << 28) | (7 << 25) | (1 << 24) | break_point);
+  static const instr_t kNopInstr =
+      ((AL << 28) | (13 << 21));
+
+  Simulator* sim_;
+
+  int32_t GetRegisterValue(int regnum);
+  bool GetValue(const char* desc, int32_t* value);
+
+  // Set or delete a breakpoint. Returns true if successful.
+  bool SetBreakpoint(Instr* breakpc);
+  bool DeleteBreakpoint(Instr* breakpc);
+
+  // Undo and redo all breakpoints. This is needed to bracket disassembly and
+  // execution to skip past breakpoints when run from the debugger.
+  void UndoBreakpoints();
+  void RedoBreakpoints();
+};
+
+
+Debugger::Debugger(Simulator* sim) {
+  sim_ = sim;
+}
+
+
+Debugger::~Debugger() {
+}
+
+
+
+#ifdef GENERATED_CODE_COVERAGE
+static FILE* coverage_log = NULL;
+
+
+static void InitializeCoverage() {
+  char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
+  if (file_name != NULL) {
+    coverage_log = fopen(file_name, "aw+");
+  }
+}
+
+
+void Debugger::Stop(Instr* instr) {
+  char* str = reinterpret_cast<char*>(instr->InstructionBits() & 0x0fffffff);
+  if (strlen(str) > 0) {
+    if (coverage_log != NULL) {
+      fprintf(coverage_log, "%s\n", str);
+      fflush(coverage_log);
+    }
+    instr->SetInstructionBits(0xe1a00000);  // Overwrite with nop.
+  }
+  sim_->set_pc(sim_->get_pc() + Instr::kInstrSize);
+}
+
+#else  // ndef GENERATED_CODE_COVERAGE
+
+static void InitializeCoverage() {
+}
+
+
+void Debugger::Stop(Instr* instr) {
+  const char* str = (const char*)(instr->InstructionBits() & 0x0fffffff);
+  PrintF("Simulator hit %s\n", str);
+  sim_->set_pc(sim_->get_pc() + Instr::kInstrSize);
+  Debug();
+}
+#endif
+
+
+int32_t Debugger::GetRegisterValue(int regnum) {
+  if (regnum == kPCRegister) {
+    return sim_->get_pc();
+  } else {
+    return sim_->get_register(regnum);
+  }
+}
+
+
+bool Debugger::GetValue(const char* desc, int32_t* value) {
+  int regnum = Registers::Number(desc);
+  if (regnum != kNoRegister) {
+    *value = GetRegisterValue(regnum);
+    return true;
+  } else {
+    return SScanF(desc, "%i", value) == 1;
+  }
+  return false;
+}
+
+
+bool Debugger::SetBreakpoint(Instr* breakpc) {
+  // Check if a breakpoint can be set. If not return without any side-effects.
+  if (sim_->break_pc_ != NULL) {
+    return false;
+  }
+
+  // Set the breakpoint.
+  sim_->break_pc_ = breakpc;
+  sim_->break_instr_ = breakpc->InstructionBits();
+  // Not setting the breakpoint instruction in the code itself. It will be set
+  // when the debugger shell continues.
+  return true;
+}
+
+
+bool Debugger::DeleteBreakpoint(Instr* breakpc) {
+  if (sim_->break_pc_ != NULL) {
+    sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+  }
+
+  sim_->break_pc_ = NULL;
+  sim_->break_instr_ = 0;
+  return true;
+}
+
+
+void Debugger::UndoBreakpoints() {
+  if (sim_->break_pc_ != NULL) {
+    sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+  }
+}
+
+
+void Debugger::RedoBreakpoints() {
+  if (sim_->break_pc_ != NULL) {
+    sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
+  }
+}
+
+
+void Debugger::Debug() {
+  intptr_t last_pc = -1;
+  bool done = false;
+
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+  char cmd[COMMAND_SIZE + 1];
+  char arg1[ARG_SIZE + 1];
+  char arg2[ARG_SIZE + 1];
+
+  // make sure to have a proper terminating character if reaching the limit
+  cmd[COMMAND_SIZE] = 0;
+  arg1[ARG_SIZE] = 0;
+  arg2[ARG_SIZE] = 0;
+
+  // Undo all set breakpoints while running in the debugger shell. This will
+  // make them invisible to all commands.
+  UndoBreakpoints();
+
+  while (!done) {
+    if (last_pc != sim_->get_pc()) {
+      disasm::NameConverter converter;
+      disasm::Disassembler dasm(converter);
+      // use a reasonably large buffer
+      v8::internal::EmbeddedVector<char, 256> buffer;
+      dasm.InstructionDecode(buffer,
+                             reinterpret_cast<byte*>(sim_->get_pc()));
+      PrintF("  0x%08x  %s\n", sim_->get_pc(), buffer.start());
+      last_pc = sim_->get_pc();
+    }
+    char* line = ReadLine("sim> ");
+    if (line == NULL) {
+      break;
+    } else {
+      // Use sscanf to parse the individual parts of the command line. At the
+      // moment no command expects more than two parameters.
+      int args = SScanF(line,
+                        "%" XSTR(COMMAND_SIZE) "s "
+                        "%" XSTR(ARG_SIZE) "s "
+                        "%" XSTR(ARG_SIZE) "s",
+                        cmd, arg1, arg2);
+      if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+        sim_->InstructionDecode(reinterpret_cast<Instr*>(sim_->get_pc()));
+      } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+        // Execute the one instruction we broke at with breakpoints disabled.
+        sim_->InstructionDecode(reinterpret_cast<Instr*>(sim_->get_pc()));
+        // Leave the debugger shell.
+        done = true;
+      } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+        if (args == 2) {
+          int32_t value;
+          if (strcmp(arg1, "all") == 0) {
+            for (int i = 0; i < kNumRegisters; i++) {
+              value = GetRegisterValue(i);
+              PrintF("%3s: 0x%08x %10d\n", Registers::Name(i), value, value);
+            }
+          } else {
+            if (GetValue(arg1, &value)) {
+              PrintF("%s: 0x%08x %d \n", arg1, value, value);
+            } else {
+              PrintF("%s unrecognized\n", arg1);
+            }
+          }
+        } else {
+          PrintF("print <register>\n");
+        }
+      } else if ((strcmp(cmd, "po") == 0)
+                 || (strcmp(cmd, "printobject") == 0)) {
+        if (args == 2) {
+          int32_t value;
+          if (GetValue(arg1, &value)) {
+            Object* obj = reinterpret_cast<Object*>(value);
+            PrintF("%s: \n", arg1);
+#ifdef DEBUG
+            obj->PrintLn();
+#else
+            obj->ShortPrint();
+            PrintF("\n");
+#endif
+          } else {
+            PrintF("%s unrecognized\n", arg1);
+          }
+        } else {
+          PrintF("printobject <value>\n");
+        }
+      } else if (strcmp(cmd, "disasm") == 0) {
+        disasm::NameConverter converter;
+        disasm::Disassembler dasm(converter);
+        // use a reasonably large buffer
+        v8::internal::EmbeddedVector<char, 256> buffer;
+
+        byte* cur = NULL;
+        byte* end = NULL;
+
+        if (args == 1) {
+          cur = reinterpret_cast<byte*>(sim_->get_pc());
+          end = cur + (10 * Instr::kInstrSize);
+        } else if (args == 2) {
+          int32_t value;
+          if (GetValue(arg1, &value)) {
+            cur = reinterpret_cast<byte*>(value);
+            // no length parameter passed, assume 10 instructions
+            end = cur + (10 * Instr::kInstrSize);
+          }
+        } else {
+          int32_t value1;
+          int32_t value2;
+          if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
+            cur = reinterpret_cast<byte*>(value1);
+            end = cur + (value2 * Instr::kInstrSize);
+          }
+        }
+
+        while (cur < end) {
+          dasm.InstructionDecode(buffer, cur);
+          PrintF("  0x%08x  %s\n", cur, buffer.start());
+          cur += Instr::kInstrSize;
+        }
+      } else if (strcmp(cmd, "gdb") == 0) {
+        PrintF("relinquishing control to gdb\n");
+        v8::internal::OS::DebugBreak();
+        PrintF("regaining control from gdb\n");
+      } else if (strcmp(cmd, "break") == 0) {
+        if (args == 2) {
+          int32_t value;
+          if (GetValue(arg1, &value)) {
+            if (!SetBreakpoint(reinterpret_cast<Instr*>(value))) {
+              PrintF("setting breakpoint failed\n");
+            }
+          } else {
+            PrintF("%s unrecognized\n", arg1);
+          }
+        } else {
+          PrintF("break <address>\n");
+        }
+      } else if (strcmp(cmd, "del") == 0) {
+        if (!DeleteBreakpoint(NULL)) {
+          PrintF("deleting breakpoint failed\n");
+        }
+      } else if (strcmp(cmd, "flags") == 0) {
+        PrintF("N flag: %d; ", sim_->n_flag_);
+        PrintF("Z flag: %d; ", sim_->z_flag_);
+        PrintF("C flag: %d; ", sim_->c_flag_);
+        PrintF("V flag: %d\n", sim_->v_flag_);
+      } else if (strcmp(cmd, "unstop") == 0) {
+        intptr_t stop_pc = sim_->get_pc() - Instr::kInstrSize;
+        Instr* stop_instr = reinterpret_cast<Instr*>(stop_pc);
+        if (stop_instr->ConditionField() == special_condition) {
+          stop_instr->SetInstructionBits(kNopInstr);
+        } else {
+          PrintF("Not at debugger stop.");
+        }
+      } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+        PrintF("cont\n");
+        PrintF("  continue execution (alias 'c')\n");
+        PrintF("stepi\n");
+        PrintF("  step one instruction (alias 'si')\n");
+        PrintF("print <register>\n");
+        PrintF("  print register content (alias 'p')\n");
+        PrintF("  use register name 'all' to print all registers\n");
+        PrintF("printobject <register>\n");
+        PrintF("  print an object from a register (alias 'po')\n");
+        PrintF("flags\n");
+        PrintF("  print flags\n");
+        PrintF("disasm [<instructions>]\n");
+        PrintF("disasm [[<address>] <instructions>]\n");
+        PrintF("  disassemble code, default is 10 instructions from pc\n");
+        PrintF("gdb\n");
+        PrintF("  enter gdb\n");
+        PrintF("break <address>\n");
+        PrintF("  set a break point on the address\n");
+        PrintF("del\n");
+        PrintF("  delete the breakpoint\n");
+        PrintF("unstop\n");
+        PrintF("  ignore the stop instruction at the current location");
+        PrintF(" from now on\n");
+      } else {
+        PrintF("Unknown command: %s\n", cmd);
+      }
+    }
+    DeleteArray(line);
+  }
+
+  // Add all the breakpoints back to stop execution and enter the debugger
+  // shell when hit.
+  RedoBreakpoints();
+
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+
+#undef STR
+#undef XSTR
+}
+
+
+// Create one simulator per thread and keep it in thread local storage.
+static v8::internal::Thread::LocalStorageKey simulator_key;
+
+
+bool Simulator::initialized_ = false;
+
+
+void Simulator::Initialize() {
+  if (initialized_) return;
+  simulator_key = v8::internal::Thread::CreateThreadLocalKey();
+  initialized_ = true;
+  ::v8::internal::ExternalReference::set_redirector(&RedirectExternalReference);
+}
+
+
+Simulator::Simulator() {
+  Initialize();
+  // Setup simulator support first. Some of this information is needed to
+  // setup the architecture state.
+  size_t stack_size = 1 * 1024 * 1024;  // allocate 1MB for stack
+  stack_ = reinterpret_cast<char*>(malloc(stack_size));
+  pc_modified_ = false;
+  icount_ = 0;
+  break_pc_ = NULL;
+  break_instr_ = 0;
+
+  // Setup architecture state.
+  // All registers are initialized to zero to start with.
+  for (int i = 0; i < num_registers; i++) {
+    registers_[i] = 0;
+  }
+  n_flag_ = false;
+  z_flag_ = false;
+  c_flag_ = false;
+  v_flag_ = false;
+
+  // The sp is initialized to point to the bottom (high address) of the
+  // allocated stack area. To be safe in potential stack underflows we leave
+  // some buffer below.
+  registers_[sp] = reinterpret_cast<int32_t>(stack_) + stack_size - 64;
+  // The lr and pc are initialized to a known bad value that will cause an
+  // access violation if the simulator ever tries to execute it.
+  registers_[pc] = bad_lr;
+  registers_[lr] = bad_lr;
+  InitializeCoverage();
+}
+
+
+// When the generated code calls an external reference we need to catch that in
+// the simulator.  The external reference will be a function compiled for the
+// host architecture.  We need to call that function instead of trying to
+// execute it with the simulator.  We do that by redirecting the external
+// reference to a swi (software-interrupt) instruction that is handled by
+// the simulator.  We write the original destination of the jump just at a known
+// offset from the swi instruction so the simulator knows what to call.
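+// For instance, a generated call to a host routine is emitted as a jump to
+// &redirection->swi_instruction_ rather than to the routine itself; when the
+// simulator executes that swi (call_rt_redirected), FromSwiInstruction()
+// recovers the owning Redirection object and the host function is then
+// invoked directly on the host architecture.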
+class Redirection {
+ public:
+  Redirection(void* external_function, bool fp_return)
+      : external_function_(external_function),
+        swi_instruction_((AL << 28) | (0xf << 24) | call_rt_redirected),
+        fp_return_(fp_return),
+        next_(list_) {
+    list_ = this;
+  }
+
+  void* address_of_swi_instruction() {
+    return reinterpret_cast<void*>(&swi_instruction_);
+  }
+
+  void* external_function() { return external_function_; }
+  bool fp_return() { return fp_return_; }
+
+  static Redirection* Get(void* external_function, bool fp_return) {
+    Redirection* current;
+    for (current = list_; current != NULL; current = current->next_) {
+      if (current->external_function_ == external_function) return current;
+    }
+    return new Redirection(external_function, fp_return);
+  }
+
+  static Redirection* FromSwiInstruction(Instr* swi_instruction) {
+    char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
+    char* addr_of_redirection =
+        addr_of_swi - OFFSET_OF(Redirection, swi_instruction_);
+    return reinterpret_cast<Redirection*>(addr_of_redirection);
+  }
+
+ private:
+  void* external_function_;
+  uint32_t swi_instruction_;
+  bool fp_return_;
+  Redirection* next_;
+  static Redirection* list_;
+};
+
+
+Redirection* Redirection::list_ = NULL;
+
+
+void* Simulator::RedirectExternalReference(void* external_function,
+                                           bool fp_return) {
+  Redirection* redirection = Redirection::Get(external_function, fp_return);
+  return redirection->address_of_swi_instruction();
+}
+
+
+// Get the active Simulator for the current thread.
+Simulator* Simulator::current() {
+  Initialize();
+  Simulator* sim = reinterpret_cast<Simulator*>(
+      v8::internal::Thread::GetThreadLocal(simulator_key));
+  if (sim == NULL) {
+    // TODO(146): delete the simulator object when a thread goes away.
+    sim = new Simulator();
+    v8::internal::Thread::SetThreadLocal(simulator_key, sim);
+  }
+  return sim;
+}
+
+
+// Sets the register in the architecture state. It will also deal with updating
+// Simulator internal state for special registers such as PC.
+void Simulator::set_register(int reg, int32_t value) {
+  ASSERT((reg >= 0) && (reg < num_registers));
+  if (reg == pc) {
+    pc_modified_ = true;
+  }
+  registers_[reg] = value;
+}
+
+
+// Get the register from the architecture state. This function does handle
+// the special case of accessing the PC register.
+int32_t Simulator::get_register(int reg) const {
+  ASSERT((reg >= 0) && (reg < num_registers));
+  return registers_[reg] + ((reg == pc) ? Instr::kPCReadOffset : 0);
+}
+
+
+// Raw access to the PC register.
+void Simulator::set_pc(int32_t value) {
+  pc_modified_ = true;
+  registers_[pc] = value;
+}
+
+
+// Raw access to the PC register without the special adjustment when reading.
+int32_t Simulator::get_pc() const {
+  return registers_[pc];
+}
+
+
+// For use in calls that take two double values, constructed from r0, r1, r2
+// and r3.
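+// The first double x is reassembled from the raw bits of r0 and r1, and the
+// second double y from r2 and r3, matching the register-pair convention
+// assumed by the callers of this helper.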
+void Simulator::GetFpArgs(double* x, double* y) {
+  // We use a char buffer to get around the strict-aliasing rules which
+  // otherwise allow the compiler to optimize away the copy.
+  char buffer[2 * sizeof(registers_[0])];
+  // Registers 0 and 1 -> x.
+  memcpy(buffer, registers_, sizeof(buffer));
+  memcpy(x, buffer, sizeof(buffer));
+  // Registers 2 and 3 -> y.
+  memcpy(buffer, registers_ + 2, sizeof(buffer));
+  memcpy(y, buffer, sizeof(buffer));
+}
+
+
+void Simulator::SetFpResult(const double& result) {
+  char buffer[2 * sizeof(registers_[0])];
+  memcpy(buffer, &result, sizeof(buffer));
+  // result -> registers 0 and 1.
+  memcpy(registers_, buffer, sizeof(buffer));
+}
+
+
+void Simulator::TrashCallerSaveRegisters() {
+  // We don't trash the registers with the return value.
+  registers_[2] = 0x50Bad4U;
+  registers_[3] = 0x50Bad4U;
+  registers_[12] = 0x50Bad4U;
+}
+
+
+// The ARM cannot do unaligned reads and writes.  On some ARM platforms an
+// interrupt is caused.  On others it does a funky rotation thing.  For now we
+// simply disallow unaligned reads, but at some point we may want to move to
+// emulating the rotate behaviour.  Note that simulator runs have the runtime
+// system running directly on the host system and only generated code is
+// executed in the simulator.  Since the host is typically IA32 we will not
+// get the correct ARM-like behaviour on unaligned accesses.
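+// For example, ReadW(0x1003, instr) below takes the unaligned branch and
+// stops with UNIMPLEMENTED(), while ReadW(0x1004, instr) performs a plain
+// aligned host load.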
+
+int Simulator::ReadW(int32_t addr, Instr* instr) {
+  if ((addr & 3) == 0) {
+    intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+    return *ptr;
+  }
+  PrintF("Unaligned read at 0x%08x\n", addr);
+  UNIMPLEMENTED();
+  return 0;
+}
+
+
+void Simulator::WriteW(int32_t addr, int value, Instr* instr) {
+  if ((addr & 3) == 0) {
+    intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+    *ptr = value;
+    return;
+  }
+  PrintF("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
+  UNIMPLEMENTED();
+}
+
+
+uint16_t Simulator::ReadHU(int32_t addr, Instr* instr) {
+  if ((addr & 1) == 0) {
+    uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+    return *ptr;
+  }
+  PrintF("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr, instr);
+  UNIMPLEMENTED();
+  return 0;
+}
+
+
+int16_t Simulator::ReadH(int32_t addr, Instr* instr) {
+  if ((addr & 1) == 0) {
+    int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+    return *ptr;
+  }
+  PrintF("Unaligned signed halfword read at 0x%08x\n", addr);
+  UNIMPLEMENTED();
+  return 0;
+}
+
+
+void Simulator::WriteH(int32_t addr, uint16_t value, Instr* instr) {
+  if ((addr & 1) == 0) {
+    uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+    *ptr = value;
+    return;
+  }
+  PrintF("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr, instr);
+  UNIMPLEMENTED();
+}
+
+
+void Simulator::WriteH(int32_t addr, int16_t value, Instr* instr) {
+  if ((addr & 1) == 0) {
+    int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+    *ptr = value;
+    return;
+  }
+  PrintF("Unaligned halfword write at 0x%08x, pc=%p\n", addr, instr);
+  UNIMPLEMENTED();
+}
+
+
+uint8_t Simulator::ReadBU(int32_t addr) {
+  uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+  return *ptr;
+}
+
+
+int8_t Simulator::ReadB(int32_t addr) {
+  int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+  return *ptr;
+}
+
+
+void Simulator::WriteB(int32_t addr, uint8_t value) {
+  uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+  *ptr = value;
+}
+
+
+void Simulator::WriteB(int32_t addr, int8_t value) {
+  int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+  *ptr = value;
+}
+
+
+// Returns the limit of the stack area to enable checking for stack overflows.
+uintptr_t Simulator::StackLimit() const {
+  // Leave a safety margin of 256 bytes to prevent overrunning the stack when
+  // pushing values.
+  return reinterpret_cast<uintptr_t>(stack_) + 256;
+}
+
+
+// Unsupported instructions use Format to print an error and stop execution.
+void Simulator::Format(Instr* instr, const char* format) {
+  PrintF("Simulator found unsupported instruction:\n 0x%08x: %s\n",
+         instr, format);
+  UNIMPLEMENTED();
+}
+
+
+// Checks if the current instruction should be executed based on its
+// condition bits.
+bool Simulator::ConditionallyExecute(Instr* instr) {
+  switch (instr->ConditionField()) {
+    case EQ: return z_flag_;
+    case NE: return !z_flag_;
+    case CS: return c_flag_;
+    case CC: return !c_flag_;
+    case MI: return n_flag_;
+    case PL: return !n_flag_;
+    case VS: return v_flag_;
+    case VC: return !v_flag_;
+    case HI: return c_flag_ && !z_flag_;
+    case LS: return !c_flag_ || z_flag_;
+    case GE: return n_flag_ == v_flag_;
+    case LT: return n_flag_ != v_flag_;
+    case GT: return !z_flag_ && (n_flag_ == v_flag_);
+    case LE: return z_flag_ || (n_flag_ != v_flag_);
+    case AL: return true;
+    default: UNREACHABLE();
+  }
+  return false;
+}
+
+
+// Calculate and set the Negative and Zero flags.
+void Simulator::SetNZFlags(int32_t val) {
+  n_flag_ = (val < 0);
+  z_flag_ = (val == 0);
+}
+
+
+// Set the Carry flag.
+void Simulator::SetCFlag(bool val) {
+  c_flag_ = val;
+}
+
+
+// Set the oVerflow flag.
+void Simulator::SetVFlag(bool val) {
+  v_flag_ = val;
+}
+
+
+// Calculate C flag value for additions.
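+// For example, 0xffffffff + 0x00000001 wraps to zero and produces a carry,
+// whereas 0x7fffffff + 0x00000001 does not.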
+bool Simulator::CarryFrom(int32_t left, int32_t right) {
+  uint32_t uleft = static_cast<uint32_t>(left);
+  uint32_t uright = static_cast<uint32_t>(right);
+  uint32_t urest  = 0xffffffffU - uleft;
+
+  return (uright > urest);
+}
+
+
+// Calculate C flag value for subtractions.
+bool Simulator::BorrowFrom(int32_t left, int32_t right) {
+  uint32_t uleft = static_cast<uint32_t>(left);
+  uint32_t uright = static_cast<uint32_t>(right);
+
+  return (uright > uleft);
+}
+
+
+// Calculate V flag value for additions and subtractions.
+bool Simulator::OverflowFrom(int32_t alu_out,
+                             int32_t left, int32_t right, bool addition) {
+  bool overflow;
+  if (addition) {
+               // operands have the same sign
+    overflow = ((left >= 0 && right >= 0) || (left < 0 && right < 0))
+               // and operands and result have different sign
+               && ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
+  } else {
+               // operands have different signs
+    overflow = ((left < 0 && right >= 0) || (left >= 0 && right < 0))
+               // and first operand and result have different signs
+               && ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
+  }
+  return overflow;
+}
+
+
+// Addressing Mode 1 - Data-processing operands:
+// Get the value based on the shifter_operand with register.
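+// For instance, a shifter operand of "r1, lsl #3" yields r1 << 3 with the
+// carry out taken from bit 29 of the original r1 value (the last bit shifted
+// out), as computed in the LSL-by-immediate case below.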
+int32_t Simulator::GetShiftRm(Instr* instr, bool* carry_out) {
+  Shift shift = instr->ShiftField();
+  int shift_amount = instr->ShiftAmountField();
+  int32_t result = get_register(instr->RmField());
+  if (instr->Bit(4) == 0) {
+    // by immediate
+    if ((shift == ROR) && (shift_amount == 0)) {
+      UNIMPLEMENTED();
+      return result;
+    } else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
+      shift_amount = 32;
+    }
+    switch (shift) {
+      case ASR: {
+        if (shift_amount == 0) {
+          if (result < 0) {
+            result = 0xffffffff;
+            *carry_out = true;
+          } else {
+            result = 0;
+            *carry_out = false;
+          }
+        } else {
+          result >>= (shift_amount - 1);
+          *carry_out = (result & 1) == 1;
+          result >>= 1;
+        }
+        break;
+      }
+
+      case LSL: {
+        if (shift_amount == 0) {
+          *carry_out = c_flag_;
+        } else {
+          result <<= (shift_amount - 1);
+          *carry_out = (result < 0);
+          result <<= 1;
+        }
+        break;
+      }
+
+      case LSR: {
+        if (shift_amount == 0) {
+          result = 0;
+          *carry_out = c_flag_;
+        } else {
+          uint32_t uresult = static_cast<uint32_t>(result);
+          uresult >>= (shift_amount - 1);
+          *carry_out = (uresult & 1) == 1;
+          uresult >>= 1;
+          result = static_cast<int32_t>(uresult);
+        }
+        break;
+      }
+
+      case ROR: {
+        UNIMPLEMENTED();
+        break;
+      }
+
+      default: {
+        UNREACHABLE();
+        break;
+      }
+    }
+  } else {
+    // by register
+    int rs = instr->RsField();
+    shift_amount = get_register(rs) & 0xff;
+    switch (shift) {
+      case ASR: {
+        if (shift_amount == 0) {
+          *carry_out = c_flag_;
+        } else if (shift_amount < 32) {
+          result >>= (shift_amount - 1);
+          *carry_out = (result & 1) == 1;
+          result >>= 1;
+        } else {
+          ASSERT(shift_amount >= 32);
+          if (result < 0) {
+            *carry_out = true;
+            result = 0xffffffff;
+          } else {
+            *carry_out = false;
+            result = 0;
+          }
+        }
+        break;
+      }
+
+      case LSL: {
+        if (shift_amount == 0) {
+          *carry_out = c_flag_;
+        } else if (shift_amount < 32) {
+          result <<= (shift_amount - 1);
+          *carry_out = (result < 0);
+          result <<= 1;
+        } else if (shift_amount == 32) {
+          *carry_out = (result & 1) == 1;
+          result = 0;
+        } else {
+          ASSERT(shift_amount > 32);
+          *carry_out = false;
+          result = 0;
+        }
+        break;
+      }
+
+      case LSR: {
+        if (shift_amount == 0) {
+          *carry_out = c_flag_;
+        } else if (shift_amount < 32) {
+          uint32_t uresult = static_cast<uint32_t>(result);
+          uresult >>= (shift_amount - 1);
+          *carry_out = (uresult & 1) == 1;
+          uresult >>= 1;
+          result = static_cast<int32_t>(uresult);
+        } else if (shift_amount == 32) {
+          *carry_out = (result < 0);
+          result = 0;
+        } else {
+          *carry_out = false;
+          result = 0;
+        }
+        break;
+      }
+
+      case ROR: {
+        UNIMPLEMENTED();
+        break;
+      }
+
+      default: {
+        UNREACHABLE();
+        break;
+      }
+    }
+  }
+  return result;
+}
+
+
+// Addressing Mode 1 - Data-processing operands:
+// Get the value based on the shifter_operand with immediate.
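+// Worked example: immed8 = 0x3f with a rotate field of 1 (rotate = 2) gives
+// ROR(0x3f, 2) = 0xc000000f, and the carry out is taken from bit 31 of the
+// rotated value since rotate != 0.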
+int32_t Simulator::GetImm(Instr* instr, bool* carry_out) {
+  int rotate = instr->RotateField() * 2;
+  int immed8 = instr->Immed8Field();
+  int imm = (immed8 >> rotate) | (immed8 << (32 - rotate));
+  *carry_out = (rotate == 0) ? c_flag_ : (imm < 0);
+  return imm;
+}
+
+
+static int count_bits(int bit_vector) {
+  int count = 0;
+  while (bit_vector != 0) {
+    if ((bit_vector & 1) != 0) {
+      count++;
+    }
+    bit_vector >>= 1;
+  }
+  return count;
+}
+
+
+// Addressing Mode 4 - Load and Store Multiple
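+// For example, "stmdb sp!, {r4, r5, lr}" takes the "db" (PU == 2) path below:
+// the three registers are stored at sp-12, sp-8 and sp-4 (lowest-numbered
+// register at the lowest address) and sp is written back to sp-12.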
+void Simulator::HandleRList(Instr* instr, bool load) {
+  int rn = instr->RnField();
+  int32_t rn_val = get_register(rn);
+  int rlist = instr->RlistField();
+  int num_regs = count_bits(rlist);
+
+  intptr_t start_address = 0;
+  intptr_t end_address = 0;
+  switch (instr->PUField()) {
+    case 0: {
+      // Print("da");
+      UNIMPLEMENTED();
+      break;
+    }
+    case 1: {
+      // Print("ia");
+      start_address = rn_val;
+      end_address = rn_val + (num_regs * 4) - 4;
+      rn_val = rn_val + (num_regs * 4);
+      break;
+    }
+    case 2: {
+      // Print("db");
+      start_address = rn_val - (num_regs * 4);
+      end_address = rn_val - 4;
+      rn_val = start_address;
+      break;
+    }
+    case 3: {
+      // Print("ib");
+      UNIMPLEMENTED();
+      break;
+    }
+    default: {
+      UNREACHABLE();
+      break;
+    }
+  }
+  if (instr->HasW()) {
+    set_register(rn, rn_val);
+  }
+  intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
+  int reg = 0;
+  while (rlist != 0) {
+    if ((rlist & 1) != 0) {
+      if (load) {
+        set_register(reg, *address);
+      } else {
+        *address = get_register(reg);
+      }
+      address += 1;
+    }
+    reg++;
+    rlist >>= 1;
+  }
+  ASSERT(end_address == ((intptr_t)address) - 4);
+}
+
+
+// Calls into the V8 runtime are based on this very simple interface.
+// Note: To be able to return two values from some calls the code in runtime.cc
+// uses the ObjectPair which is essentially two 32-bit values stuffed into a
+// 64-bit value. With the code below we assume that all runtime calls return
+// 64 bits of result. If they don't, the r1 result register contains a bogus
+// value, which is fine because it is caller-saved.
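+// The two words of a 64-bit result come back in the low and high halves and
+// are unpacked into r0 and r1 respectively in SoftwareInterrupt() below.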
+typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
+                                        int32_t arg1,
+                                        int32_t arg2,
+                                        int32_t arg3);
+typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
+                                         int32_t arg1,
+                                         int32_t arg2,
+                                         int32_t arg3);
+
+
+// Software interrupt instructions are used by the simulator to call into the
+// C-based V8 runtime.
+void Simulator::SoftwareInterrupt(Instr* instr) {
+  int swi = instr->SwiField();
+  switch (swi) {
+    case call_rt_redirected: {
+      Redirection* redirection = Redirection::FromSwiInstruction(instr);
+      int32_t arg0 = get_register(r0);
+      int32_t arg1 = get_register(r1);
+      int32_t arg2 = get_register(r2);
+      int32_t arg3 = get_register(r3);
+      // This is dodgy but it works because the C entry stubs are never moved.
+      // See comment in codegen-arm.cc and bug 1242173.
+      int32_t saved_lr = get_register(lr);
+      if (redirection->fp_return()) {
+        intptr_t external =
+            reinterpret_cast<intptr_t>(redirection->external_function());
+        SimulatorRuntimeFPCall target =
+            reinterpret_cast<SimulatorRuntimeFPCall>(external);
+        if (::v8::internal::FLAG_trace_sim) {
+          double x, y;
+          GetFpArgs(&x, &y);
+          PrintF("Call to host function at %p with args %f, %f\n",
+                 FUNCTION_ADDR(target), x, y);
+        }
+        double result = target(arg0, arg1, arg2, arg3);
+        SetFpResult(result);
+      } else {
+        intptr_t external =
+            reinterpret_cast<int32_t>(redirection->external_function());
+        SimulatorRuntimeCall target =
+            reinterpret_cast<SimulatorRuntimeCall>(external);
+        if (::v8::internal::FLAG_trace_sim) {
+          PrintF(
+              "Call to host function at %p with args %08x, %08x, %08x, %08x\n",
+              FUNCTION_ADDR(target),
+              arg0,
+              arg1,
+              arg2,
+              arg3);
+        }
+        int64_t result = target(arg0, arg1, arg2, arg3);
+        int32_t lo_res = static_cast<int32_t>(result);
+        int32_t hi_res = static_cast<int32_t>(result >> 32);
+        if (::v8::internal::FLAG_trace_sim) {
+          PrintF("Returned %08x\n", lo_res);
+        }
+        set_register(r0, lo_res);
+        set_register(r1, hi_res);
+      }
+      set_register(lr, saved_lr);
+      set_pc(get_register(lr));
+      break;
+    }
+    case break_point: {
+      Debugger dbg(this);
+      dbg.Debug();
+      break;
+    }
+    default: {
+      UNREACHABLE();
+      break;
+    }
+  }
+}
+
+
+// Handle execution based on instruction types.
+
+// Instruction types 0 and 1 are both rolled into one function because they
+// only differ in the handling of the shifter_operand.
+void Simulator::DecodeType01(Instr* instr) {
+  int type = instr->TypeField();
+  if ((type == 0) && instr->IsSpecialType0()) {
+    // multiply instruction or extra loads and stores
+    if (instr->Bits(7, 4) == 9) {
+      if (instr->Bit(24) == 0) {
+        // Raw field decoding here. Multiply instructions have their Rd in
+        // funny places.
+        int rn = instr->RnField();
+        int rm = instr->RmField();
+        int rs = instr->RsField();
+        int32_t rs_val = get_register(rs);
+        int32_t rm_val = get_register(rm);
+        if (instr->Bit(23) == 0) {
+          if (instr->Bit(21) == 0) {
+            // The MUL instruction description (A 4.1.33) refers to Rd as being
+            // the destination for the operation, but it confusingly uses the
+            // Rn field to encode it.
+            // Format(instr, "mul'cond's 'rn, 'rm, 'rs");
+            int rd = rn;  // Remap the rn field to the Rd register.
+            int32_t alu_out = rm_val * rs_val;
+            set_register(rd, alu_out);
+            if (instr->HasS()) {
+              SetNZFlags(alu_out);
+            }
+          } else {
+            // The MLA instruction description (A 4.1.28) refers to the order
+            // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
+            // Rn field to encode the Rd register and the Rd field to encode
+            // the Rn register.
+            Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd");
+          }
+        } else {
+          // The signed/long multiply instructions use the terms RdHi and RdLo
+          // when referring to the target registers. They are mapped to the Rn
+          // and Rd fields as follows:
+          // RdLo == Rd
+          // RdHi == Rn (This is confusingly stored in variable rd here
+          //             because the mul instruction from above uses the
+          //             Rn field to encode the Rd register. Good luck figuring
+          //             this out without reading the ARM instruction manual
+          //             at a very detailed level.)
+          // Format(instr, "'um'al'cond's 'rd, 'rn, 'rs, 'rm");
+          int rd_hi = rn;  // Remap the rn field to the RdHi register.
+          int rd_lo = instr->RdField();
+          int32_t hi_res = 0;
+          int32_t lo_res = 0;
+          if (instr->Bit(22) == 1) {
+            int64_t left_op  = static_cast<int32_t>(rm_val);
+            int64_t right_op = static_cast<int32_t>(rs_val);
+            uint64_t result = left_op * right_op;
+            hi_res = static_cast<int32_t>(result >> 32);
+            lo_res = static_cast<int32_t>(result & 0xffffffff);
+          } else {
+            // unsigned multiply
+            uint64_t left_op  = static_cast<uint32_t>(rm_val);
+            uint64_t right_op = static_cast<uint32_t>(rs_val);
+            uint64_t result = left_op * right_op;
+            hi_res = static_cast<int32_t>(result >> 32);
+            lo_res = static_cast<int32_t>(result & 0xffffffff);
+          }
+          set_register(rd_lo, lo_res);
+          set_register(rd_hi, hi_res);
+          if (instr->HasS()) {
+            UNIMPLEMENTED();
+          }
+        }
+      } else {
+        UNIMPLEMENTED();  // not used by V8
+      }
+    } else {
+      // extra load/store instructions
+      int rd = instr->RdField();
+      int rn = instr->RnField();
+      int32_t rn_val = get_register(rn);
+      int32_t addr = 0;
+      if (instr->Bit(22) == 0) {
+        int rm = instr->RmField();
+        int32_t rm_val = get_register(rm);
+        switch (instr->PUField()) {
+          case 0: {
+            // Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
+            ASSERT(!instr->HasW());
+            addr = rn_val;
+            rn_val -= rm_val;
+            set_register(rn, rn_val);
+            break;
+          }
+          case 1: {
+            // Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
+            ASSERT(!instr->HasW());
+            addr = rn_val;
+            rn_val += rm_val;
+            set_register(rn, rn_val);
+            break;
+          }
+          case 2: {
+            // Format(instr, "'memop'cond'sign'h 'rd, ['rn, -'rm]'w");
+            rn_val -= rm_val;
+            addr = rn_val;
+            if (instr->HasW()) {
+              set_register(rn, rn_val);
+            }
+            break;
+          }
+          case 3: {
+            // Format(instr, "'memop'cond'sign'h 'rd, ['rn, +'rm]'w");
+            rn_val += rm_val;
+            addr = rn_val;
+            if (instr->HasW()) {
+              set_register(rn, rn_val);
+            }
+            break;
+          }
+          default: {
+            // The PU field is a 2-bit field.
+            UNREACHABLE();
+            break;
+          }
+        }
+      } else {
+        int32_t imm_val = (instr->ImmedHField() << 4) | instr->ImmedLField();
+        switch (instr->PUField()) {
+          case 0: {
+            // Format(instr, "'memop'cond'sign'h 'rd, ['rn], #-'off8");
+            ASSERT(!instr->HasW());
+            addr = rn_val;
+            rn_val -= imm_val;
+            set_register(rn, rn_val);
+            break;
+          }
+          case 1: {
+            // Format(instr, "'memop'cond'sign'h 'rd, ['rn], #+'off8");
+            ASSERT(!instr->HasW());
+            addr = rn_val;
+            rn_val += imm_val;
+            set_register(rn, rn_val);
+            break;
+          }
+          case 2: {
+            // Format(instr, "'memop'cond'sign'h 'rd, ['rn, #-'off8]'w");
+            rn_val -= imm_val;
+            addr = rn_val;
+            if (instr->HasW()) {
+              set_register(rn, rn_val);
+            }
+            break;
+          }
+          case 3: {
+            // Format(instr, "'memop'cond'sign'h 'rd, ['rn, #+'off8]'w");
+            rn_val += imm_val;
+            addr = rn_val;
+            if (instr->HasW()) {
+              set_register(rn, rn_val);
+            }
+            break;
+          }
+          default: {
+            // The PU field is a 2-bit field.
+            UNREACHABLE();
+            break;
+          }
+        }
+      }
+      if (instr->HasH()) {
+        if (instr->HasSign()) {
+          if (instr->HasL()) {
+            int16_t val = ReadH(addr, instr);
+            set_register(rd, val);
+          } else {
+            int16_t val = get_register(rd);
+            WriteH(addr, val, instr);
+          }
+        } else {
+          if (instr->HasL()) {
+            uint16_t val = ReadHU(addr, instr);
+            set_register(rd, val);
+          } else {
+            uint16_t val = get_register(rd);
+            WriteH(addr, val, instr);
+          }
+        }
+      } else {
+        // signed byte loads
+        ASSERT(instr->HasSign());
+        ASSERT(instr->HasL());
+        int8_t val = ReadB(addr);
+        set_register(rd, val);
+      }
+      return;
+    }
+  } else {
+    int rd = instr->RdField();
+    int rn = instr->RnField();
+    int32_t rn_val = get_register(rn);
+    int32_t shifter_operand = 0;
+    bool shifter_carry_out = false;
+    if (type == 0) {
+      shifter_operand = GetShiftRm(instr, &shifter_carry_out);
+    } else {
+      ASSERT(instr->TypeField() == 1);
+      shifter_operand = GetImm(instr, &shifter_carry_out);
+    }
+    int32_t alu_out;
+
+    switch (instr->OpcodeField()) {
+      case AND: {
+        // Format(instr, "and'cond's 'rd, 'rn, 'shift_rm");
+        // Format(instr, "and'cond's 'rd, 'rn, 'imm");
+        alu_out = rn_val & shifter_operand;
+        set_register(rd, alu_out);
+        if (instr->HasS()) {
+          SetNZFlags(alu_out);
+          SetCFlag(shifter_carry_out);
+        }
+        break;
+      }
+
+      case EOR: {
+        // Format(instr, "eor'cond's 'rd, 'rn, 'shift_rm");
+        // Format(instr, "eor'cond's 'rd, 'rn, 'imm");
+        alu_out = rn_val ^ shifter_operand;
+        set_register(rd, alu_out);
+        if (instr->HasS()) {
+          SetNZFlags(alu_out);
+          SetCFlag(shifter_carry_out);
+        }
+        break;
+      }
+
+      case SUB: {
+        // Format(instr, "sub'cond's 'rd, 'rn, 'shift_rm");
+        // Format(instr, "sub'cond's 'rd, 'rn, 'imm");
+        alu_out = rn_val - shifter_operand;
+        set_register(rd, alu_out);
+        if (instr->HasS()) {
+          SetNZFlags(alu_out);
+          SetCFlag(!BorrowFrom(rn_val, shifter_operand));
+          SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false));
+        }
+        break;
+      }
+
+      case RSB: {
+        // Format(instr, "rsb'cond's 'rd, 'rn, 'shift_rm");
+        // Format(instr, "rsb'cond's 'rd, 'rn, 'imm");
+        alu_out = shifter_operand - rn_val;
+        set_register(rd, alu_out);
+        if (instr->HasS()) {
+          SetNZFlags(alu_out);
+          SetCFlag(!BorrowFrom(shifter_operand, rn_val));
+          SetVFlag(OverflowFrom(alu_out, shifter_operand, rn_val, false));
+        }
+        break;
+      }
+
+      case ADD: {
+        // Format(instr, "add'cond's 'rd, 'rn, 'shift_rm");
+        // Format(instr, "add'cond's 'rd, 'rn, 'imm");
+        alu_out = rn_val + shifter_operand;
+        set_register(rd, alu_out);
+        if (instr->HasS()) {
+          SetNZFlags(alu_out);
+          SetCFlag(CarryFrom(rn_val, shifter_operand));
+          SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true));
+        }
+        break;
+      }
+
+      case ADC: {
+        Format(instr, "adc'cond's 'rd, 'rn, 'shift_rm");
+        Format(instr, "adc'cond's 'rd, 'rn, 'imm");
+        break;
+      }
+
+      case SBC: {
+        Format(instr, "sbc'cond's 'rd, 'rn, 'shift_rm");
+        Format(instr, "sbc'cond's 'rd, 'rn, 'imm");
+        break;
+      }
+
+      case RSC: {
+        Format(instr, "rsc'cond's 'rd, 'rn, 'shift_rm");
+        Format(instr, "rsc'cond's 'rd, 'rn, 'imm");
+        break;
+      }
+
+      case TST: {
+        if (instr->HasS()) {
+          // Format(instr, "tst'cond 'rn, 'shift_rm");
+          // Format(instr, "tst'cond 'rn, 'imm");
+          alu_out = rn_val & shifter_operand;
+          SetNZFlags(alu_out);
+          SetCFlag(shifter_carry_out);
+        } else {
+          UNIMPLEMENTED();
+        }
+        break;
+      }
+
+      case TEQ: {
+        if (instr->HasS()) {
+          // Format(instr, "teq'cond 'rn, 'shift_rm");
+          // Format(instr, "teq'cond 'rn, 'imm");
+          alu_out = rn_val ^ shifter_operand;
+          SetNZFlags(alu_out);
+          SetCFlag(shifter_carry_out);
+        } else {
+          ASSERT(type == 0);
+          int rm = instr->RmField();
+          switch (instr->Bits(7, 4)) {
+            case BX:
+              set_pc(get_register(rm));
+              break;
+            case BLX: {
+              uint32_t old_pc = get_pc();
+              set_pc(get_register(rm));
+              set_register(lr, old_pc + Instr::kInstrSize);
+              break;
+            }
+            default:
+              UNIMPLEMENTED();
+          }
+        }
+        break;
+      }
+
+      case CMP: {
+        if (instr->HasS()) {
+          // Format(instr, "cmp'cond 'rn, 'shift_rm");
+          // Format(instr, "cmp'cond 'rn, 'imm");
+          alu_out = rn_val - shifter_operand;
+          SetNZFlags(alu_out);
+          SetCFlag(!BorrowFrom(rn_val, shifter_operand));
+          SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false));
+        } else {
+          UNIMPLEMENTED();
+        }
+        break;
+      }
+
+      case CMN: {
+        if (instr->HasS()) {
+          // Format(instr, "cmn'cond 'rn, 'shift_rm");
+          // Format(instr, "cmn'cond 'rn, 'imm");
+          alu_out = rn_val + shifter_operand;
+          SetNZFlags(alu_out);
+          // CMN performs an addition, so the C flag is the carry out of it.
+          SetCFlag(CarryFrom(rn_val, shifter_operand));
+          SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true));
+        } else {
+          ASSERT(type == 0);
+          int rm = instr->RmField();
+          int rd = instr->RdField();
+          switch (instr->Bits(7, 4)) {
+            case CLZ: {
+              uint32_t bits = get_register(rm);
+              int leading_zeros = 0;
+              if (bits == 0) {
+                leading_zeros = 32;
+              } else {
+                while ((bits & 0x80000000u) == 0) {
+                  bits <<= 1;
+                  leading_zeros++;
+                }
+              }
+              set_register(rd, leading_zeros);
+              break;
+            }
+            default:
+              UNIMPLEMENTED();
+          }
+        }
+        break;
+      }
+
+      case ORR: {
+        // Format(instr, "orr'cond's 'rd, 'rn, 'shift_rm");
+        // Format(instr, "orr'cond's 'rd, 'rn, 'imm");
+        alu_out = rn_val | shifter_operand;
+        set_register(rd, alu_out);
+        if (instr->HasS()) {
+          SetNZFlags(alu_out);
+          SetCFlag(shifter_carry_out);
+        }
+        break;
+      }
+
+      case MOV: {
+        // Format(instr, "mov'cond's 'rd, 'shift_rm");
+        // Format(instr, "mov'cond's 'rd, 'imm");
+        alu_out = shifter_operand;
+        set_register(rd, alu_out);
+        if (instr->HasS()) {
+          SetNZFlags(alu_out);
+          SetCFlag(shifter_carry_out);
+        }
+        break;
+      }
+
+      case BIC: {
+        // Format(instr, "bic'cond's 'rd, 'rn, 'shift_rm");
+        // Format(instr, "bic'cond's 'rd, 'rn, 'imm");
+        alu_out = rn_val & ~shifter_operand;
+        set_register(rd, alu_out);
+        if (instr->HasS()) {
+          SetNZFlags(alu_out);
+          SetCFlag(shifter_carry_out);
+        }
+        break;
+      }
+
+      case MVN: {
+        // Format(instr, "mvn'cond's 'rd, 'shift_rm");
+        // Format(instr, "mvn'cond's 'rd, 'imm");
+        alu_out = ~shifter_operand;
+        set_register(rd, alu_out);
+        if (instr->HasS()) {
+          SetNZFlags(alu_out);
+          SetCFlag(shifter_carry_out);
+        }
+        break;
+      }
+
+      default: {
+        UNREACHABLE();
+        break;
+      }
+    }
+  }
+}
+
+
+void Simulator::DecodeType2(Instr* instr) {
+  int rd = instr->RdField();
+  int rn = instr->RnField();
+  int32_t rn_val = get_register(rn);
+  int32_t im_val = instr->Offset12Field();
+  int32_t addr = 0;
+  switch (instr->PUField()) {
+    case 0: {
+      // Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
+      ASSERT(!instr->HasW());
+      addr = rn_val;
+      rn_val -= im_val;
+      set_register(rn, rn_val);
+      break;
+    }
+    case 1: {
+      // Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
+      ASSERT(!instr->HasW());
+      addr = rn_val;
+      rn_val += im_val;
+      set_register(rn, rn_val);
+      break;
+    }
+    case 2: {
+      // Format(instr, "'memop'cond'b 'rd, ['rn, #-'off12]'w");
+      rn_val -= im_val;
+      addr = rn_val;
+      if (instr->HasW()) {
+        set_register(rn, rn_val);
+      }
+      break;
+    }
+    case 3: {
+      // Format(instr, "'memop'cond'b 'rd, ['rn, #+'off12]'w");
+      rn_val += im_val;
+      addr = rn_val;
+      if (instr->HasW()) {
+        set_register(rn, rn_val);
+      }
+      break;
+    }
+    default: {
+      UNREACHABLE();
+      break;
+    }
+  }
+  if (instr->HasB()) {
+    if (instr->HasL()) {
+      byte val = ReadBU(addr);
+      set_register(rd, val);
+    } else {
+      byte val = get_register(rd);
+      WriteB(addr, val);
+    }
+  } else {
+    if (instr->HasL()) {
+      set_register(rd, ReadW(addr, instr));
+    } else {
+      WriteW(addr, get_register(rd), instr);
+    }
+  }
+}
+
+
+void Simulator::DecodeType3(Instr* instr) {
+  ASSERT(instr->Bit(4) == 0);
+  int rd = instr->RdField();
+  int rn = instr->RnField();
+  int32_t rn_val = get_register(rn);
+  bool shifter_carry_out = false;
+  int32_t shifter_operand = GetShiftRm(instr, &shifter_carry_out);
+  int32_t addr = 0;
+  switch (instr->PUField()) {
+    case 0: {
+      ASSERT(!instr->HasW());
+      Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
+      break;
+    }
+    case 1: {
+      ASSERT(!instr->HasW());
+      Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
+      break;
+    }
+    case 2: {
+      // Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
+      addr = rn_val - shifter_operand;
+      if (instr->HasW()) {
+        set_register(rn, addr);
+      }
+      break;
+    }
+    case 3: {
+      // Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
+      addr = rn_val + shifter_operand;
+      if (instr->HasW()) {
+        set_register(rn, addr);
+      }
+      break;
+    }
+    default: {
+      UNREACHABLE();
+      break;
+    }
+  }
+  if (instr->HasB()) {
+    if (instr->HasL()) {
+      uint8_t byte = ReadB(addr);
+      set_register(rd, byte);
+    } else {
+      UNIMPLEMENTED();
+    }
+  } else {
+    if (instr->HasL()) {
+      set_register(rd, ReadW(addr, instr));
+    } else {
+      WriteW(addr, get_register(rd), instr);
+    }
+  }
+}
+
+
+void Simulator::DecodeType4(Instr* instr) {
+  ASSERT(instr->Bit(22) == 0);  // only allowed to be set in privileged mode
+  if (instr->HasL()) {
+    // Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
+    HandleRList(instr, true);
+  } else {
+    // Format(instr, "stm'cond'pu 'rn'w, 'rlist");
+    HandleRList(instr, false);
+  }
+}
+
+
+void Simulator::DecodeType5(Instr* instr) {
+  // Format(instr, "b'l'cond 'target");
+  int off = (instr->SImmed24Field() << 2);
+  intptr_t pc_address = get_pc();
+  if (instr->HasLink()) {
+    set_register(lr, pc_address + Instr::kInstrSize);
+  }
+  int pc_reg = get_register(pc);
+  set_pc(pc_reg + off);
+}
+
+
+void Simulator::DecodeType6(Instr* instr) {
+  UNIMPLEMENTED();
+}
+
+
+void Simulator::DecodeType7(Instr* instr) {
+  if (instr->Bit(24) == 1) {
+    // Format(instr, "swi 'swi");
+    SoftwareInterrupt(instr);
+  } else {
+    UNIMPLEMENTED();
+  }
+}
+
+
+void Simulator::DecodeUnconditional(Instr* instr) {
+  if (instr->Bits(7, 4) == 0x0B && instr->Bits(27, 25) == 0 && instr->HasL()) {
+    // Load halfword instruction, either register or immediate offset.
+    int rd = instr->RdField();
+    int rn = instr->RnField();
+    int32_t rn_val = get_register(rn);
+    int32_t addr = 0;
+    int32_t offset;
+    if (instr->Bit(22) == 0) {
+      // Register offset.
+      int rm = instr->RmField();
+      offset = get_register(rm);
+    } else {
+      // Immediate offset
+      offset = instr->Bits(3, 0) + (instr->Bits(11, 8) << 4);
+    }
+    switch (instr->PUField()) {
+      case 0: {
+        // Post index, negative.
+        ASSERT(!instr->HasW());
+        addr = rn_val;
+        rn_val -= offset;
+        set_register(rn, rn_val);
+        break;
+      }
+      case 1: {
+        // Post index, positive.
+        ASSERT(!instr->HasW());
+        addr = rn_val;
+        rn_val += offset;
+        set_register(rn, rn_val);
+        break;
+      }
+      case 2: {
+        // Pre index or offset, negative.
+        rn_val -= offset;
+        addr = rn_val;
+        if (instr->HasW()) {
+          set_register(rn, rn_val);
+        }
+        break;
+      }
+      case 3: {
+        // Pre index or offset, positive.
+        rn_val += offset;
+        addr = rn_val;
+        if (instr->HasW()) {
+          set_register(rn, rn_val);
+        }
+        break;
+      }
+      default: {
+        // The PU field is a 2-bit field.
+        UNREACHABLE();
+        break;
+      }
+    }
+    // Not sign extending, so load as unsigned.
+    uint16_t halfword = ReadH(addr, instr);
+    set_register(rd, halfword);
+  } else {
+    Debugger dbg(this);
+    dbg.Stop(instr);
+  }
+}
+
+
+// Executes the current instruction.
+void Simulator::InstructionDecode(Instr* instr) {
+  pc_modified_ = false;
+  if (::v8::internal::FLAG_trace_sim) {
+    disasm::NameConverter converter;
+    disasm::Disassembler dasm(converter);
+    // use a reasonably large buffer
+    v8::internal::EmbeddedVector<char, 256> buffer;
+    dasm.InstructionDecode(buffer,
+                           reinterpret_cast<byte*>(instr));
+    PrintF("  0x%08x  %s\n", instr, buffer.start());
+  }
+  if (instr->ConditionField() == special_condition) {
+    DecodeUnconditional(instr);
+  } else if (ConditionallyExecute(instr)) {
+    switch (instr->TypeField()) {
+      case 0:
+      case 1: {
+        DecodeType01(instr);
+        break;
+      }
+      case 2: {
+        DecodeType2(instr);
+        break;
+      }
+      case 3: {
+        DecodeType3(instr);
+        break;
+      }
+      case 4: {
+        DecodeType4(instr);
+        break;
+      }
+      case 5: {
+        DecodeType5(instr);
+        break;
+      }
+      case 6: {
+        DecodeType6(instr);
+        break;
+      }
+      case 7: {
+        DecodeType7(instr);
+        break;
+      }
+      default: {
+        UNIMPLEMENTED();
+        break;
+      }
+    }
+  }
+  if (!pc_modified_) {
+    set_register(pc, reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
+  }
+}
+
+
+// Executes the simulated program until the PC reaches the end_sim_pc marker.
+void Simulator::Execute() {
+  // Get the PC to simulate. Cannot use the accessor here as we need the
+  // raw PC value and not the one used as input to arithmetic instructions.
+  int program_counter = get_pc();
+
+  if (::v8::internal::FLAG_stop_sim_at == 0) {
+    // Fast version of the dispatch loop without checking whether the simulator
+    // should be stopping at a particular executed instruction.
+    while (program_counter != end_sim_pc) {
+      Instr* instr = reinterpret_cast<Instr*>(program_counter);
+      icount_++;
+      InstructionDecode(instr);
+      program_counter = get_pc();
+    }
+  } else {
+    // FLAG_stop_sim_at is set to a non-default value. Stop in the debugger
+    // when we reach the specified instruction count.
+    while (program_counter != end_sim_pc) {
+      Instr* instr = reinterpret_cast<Instr*>(program_counter);
+      icount_++;
+      if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
+        Debugger dbg(this);
+        dbg.Debug();
+      } else {
+        InstructionDecode(instr);
+      }
+      program_counter = get_pc();
+    }
+  }
+}
+
+
+int32_t Simulator::Call(byte* entry, int argument_count, ...) {
+  va_list parameters;
+  va_start(parameters, argument_count);
+  // Set up the arguments.
+
+  // First four arguments passed in registers.
+  ASSERT(argument_count >= 4);
+  set_register(r0, va_arg(parameters, int32_t));
+  set_register(r1, va_arg(parameters, int32_t));
+  set_register(r2, va_arg(parameters, int32_t));
+  set_register(r3, va_arg(parameters, int32_t));
+
+  // Remaining arguments passed on stack.
+  int original_stack = get_register(sp);
+  // Compute position of stack on entry to generated code.
+  int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t));
+  if (OS::ActivationFrameAlignment() != 0) {
+    entry_stack &= -OS::ActivationFrameAlignment();
+  }
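+  // Note: masking with the negated alignment rounds entry_stack down to a
+  // multiple of the alignment (assuming it is a power of two), so the
+  // simulated sp is aligned the same way a native activation frame would be.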
+  // Store remaining arguments on stack, from low to high memory.
+  intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+  for (int i = 4; i < argument_count; i++) {
+    stack_argument[i - 4] = va_arg(parameters, int32_t);
+  }
+  va_end(parameters);
+  set_register(sp, entry_stack);
+
+  // Prepare to execute the code at the entry address.
+  set_register(pc, reinterpret_cast<int32_t>(entry));
+  // Put down a marker for the end of simulation. The simulator will stop
+  // simulating when the PC reaches this value. By saving the "end simulation"
+  // value into the LR, the simulation stops when returning to this call point.
+  set_register(lr, end_sim_pc);
+
+  // Remember the values of callee-saved registers.
+  // The code below assumes that r9 is not used as sb (static base) in
+  // simulator code and therefore is regarded as a callee-saved register.
+  int32_t r4_val = get_register(r4);
+  int32_t r5_val = get_register(r5);
+  int32_t r6_val = get_register(r6);
+  int32_t r7_val = get_register(r7);
+  int32_t r8_val = get_register(r8);
+  int32_t r9_val = get_register(r9);
+  int32_t r10_val = get_register(r10);
+  int32_t r11_val = get_register(r11);
+
+  // Set up the callee-saved registers with a known value so that we can check
+  // that they are preserved properly across JS execution.
+  int32_t callee_saved_value = icount_;
+  set_register(r4, callee_saved_value);
+  set_register(r5, callee_saved_value);
+  set_register(r6, callee_saved_value);
+  set_register(r7, callee_saved_value);
+  set_register(r8, callee_saved_value);
+  set_register(r9, callee_saved_value);
+  set_register(r10, callee_saved_value);
+  set_register(r11, callee_saved_value);
+
+  // Start the simulation
+  Execute();
+
+  // Check that the callee-saved registers have been preserved.
+  CHECK_EQ(callee_saved_value, get_register(r4));
+  CHECK_EQ(callee_saved_value, get_register(r5));
+  CHECK_EQ(callee_saved_value, get_register(r6));
+  CHECK_EQ(callee_saved_value, get_register(r7));
+  CHECK_EQ(callee_saved_value, get_register(r8));
+  CHECK_EQ(callee_saved_value, get_register(r9));
+  CHECK_EQ(callee_saved_value, get_register(r10));
+  CHECK_EQ(callee_saved_value, get_register(r11));
+
+  // Restore callee-saved registers with the original value.
+  set_register(r4, r4_val);
+  set_register(r5, r5_val);
+  set_register(r6, r6_val);
+  set_register(r7, r7_val);
+  set_register(r8, r8_val);
+  set_register(r9, r9_val);
+  set_register(r10, r10_val);
+  set_register(r11, r11_val);
+
+  // Pop stack passed arguments.
+  CHECK_EQ(entry_stack, get_register(sp));
+  set_register(sp, original_stack);
+
+  int32_t result = get_register(r0);
+  return result;
+}
+
+} }  // namespace assembler::arm
+
+#endif  // !defined(__arm__)
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
new file mode 100644
index 0000000..ff6bbf4
--- /dev/null
+++ b/src/arm/simulator-arm.h
@@ -0,0 +1,237 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Declares a Simulator for ARM instructions if we are not generating a native
+// ARM binary. This Simulator allows us to run and debug ARM code generation on
+// regular desktop machines.
+// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
+// which will either start execution in the Simulator or forward to the real
+// entry point on an ARM hardware platform.
+
+#ifndef V8_ARM_SIMULATOR_ARM_H_
+#define V8_ARM_SIMULATOR_ARM_H_
+
+#include "allocation.h"
+
+#if defined(__arm__)
+
+// When running without a simulator we call the entry directly.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+  (entry(p0, p1, p2, p3, p4))
+
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on ARM uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+    return c_limit;
+  }
+};
+
+
+// Call the generated regexp code directly. The entry function pointer should
+// expect seven int/pointer sized arguments and return an int.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+  entry(p0, p1, p2, p3, p4, p5, p6)
+
+#else  // defined(__arm__)
+
+// When running with the simulator transition into simulated execution at this
+// point.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+  reinterpret_cast<Object*>( \
+      assembler::arm::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \
+                                                 p0, p1, p2, p3, p4))
+
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+  assembler::arm::Simulator::current()->Call( \
+    FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
+
+#include "constants-arm.h"
+
+
+namespace assembler {
+namespace arm {
+
+class Simulator {
+ public:
+  friend class Debugger;
+
+  enum Register {
+    no_reg = -1,
+    r0 = 0, r1, r2, r3, r4, r5, r6, r7,
+    r8, r9, r10, r11, r12, r13, r14, r15,
+    num_registers,
+    sp = 13,
+    lr = 14,
+    pc = 15
+  };
+
+  Simulator();
+  ~Simulator();
+
+  // The currently executing Simulator instance. Potentially there can be one
+  // for each native thread.
+  static Simulator* current();
+
+  // Accessors for register state. Reading the pc value adheres to the ARM
+  // architecture specification and is off by 8 from the currently executing
+  // instruction.
+  void set_register(int reg, int32_t value);
+  int32_t get_register(int reg) const;
+
+  // Special case of set_register and get_register to access the raw PC value.
+  void set_pc(int32_t value);
+  int32_t get_pc() const;
+
+  // Accessor to the internal simulator stack area.
+  uintptr_t StackLimit() const;
+
+  // Executes ARM instructions until the PC reaches end_sim_pc.
+  void Execute();
+
+  // Call on program start.
+  static void Initialize();
+
+  // V8 generally calls into generated JS code with 5 parameters and into
+  // generated RegExp code with 7 parameters. This is a convenience function,
+  // which sets up the simulator state and grabs the result on return.
+  int32_t Call(byte* entry, int argument_count, ...);
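+  // As an illustration only (a sketch, not code from this change): a call
+  // routed through the CALL_GENERATED_CODE macro ends up as something like
+  //   Simulator::current()->Call(FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4);
+  // where the first four arguments are passed in r0-r3 and any remaining
+  // arguments are stored on the simulated stack, mirroring the ARM calling
+  // convention used by the generated code.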
+
+ private:
+  enum special_values {
+    // Known bad pc value to ensure that the simulator does not execute
+    // without being properly set up.
+    bad_lr = -1,
+    // A pc value used to signal the simulator to stop execution.  Generally
+    // the lr is set to this value on transition from native C code to
+    // simulated execution, so that the simulator can "return" to the native
+    // C code.
+    end_sim_pc = -2
+  };
+
+  // Unsupported instructions use Format to print an error and stop execution.
+  void Format(Instr* instr, const char* format);
+
+  // Checks if the current instruction should be executed based on its
+  // condition bits.
+  bool ConditionallyExecute(Instr* instr);
+
+  // Helper functions to set the conditional flags in the architecture state.
+  void SetNZFlags(int32_t val);
+  void SetCFlag(bool val);
+  void SetVFlag(bool val);
+  bool CarryFrom(int32_t left, int32_t right);
+  bool BorrowFrom(int32_t left, int32_t right);
+  bool OverflowFrom(int32_t alu_out,
+                    int32_t left,
+                    int32_t right,
+                    bool addition);
+
+  // Helper functions to decode common "addressing" modes
+  int32_t GetShiftRm(Instr* instr, bool* carry_out);
+  int32_t GetImm(Instr* instr, bool* carry_out);
+  void HandleRList(Instr* instr, bool load);
+  void SoftwareInterrupt(Instr* instr);
+
+  // Read and write memory.
+  inline uint8_t ReadBU(int32_t addr);
+  inline int8_t ReadB(int32_t addr);
+  inline void WriteB(int32_t addr, uint8_t value);
+  inline void WriteB(int32_t addr, int8_t value);
+
+  inline uint16_t ReadHU(int32_t addr, Instr* instr);
+  inline int16_t ReadH(int32_t addr, Instr* instr);
+  // Note: Overloaded on the sign of the value.
+  inline void WriteH(int32_t addr, uint16_t value, Instr* instr);
+  inline void WriteH(int32_t addr, int16_t value, Instr* instr);
+
+  inline int ReadW(int32_t addr, Instr* instr);
+  inline void WriteW(int32_t addr, int value, Instr* instr);
+
+  // Execution is handled based on the instruction type.
+  void DecodeType01(Instr* instr);  // both type 0 and type 1 rolled into one
+  void DecodeType2(Instr* instr);
+  void DecodeType3(Instr* instr);
+  void DecodeType4(Instr* instr);
+  void DecodeType5(Instr* instr);
+  void DecodeType6(Instr* instr);
+  void DecodeType7(Instr* instr);
+  void DecodeUnconditional(Instr* instr);
+
+  // Executes one instruction.
+  void InstructionDecode(Instr* instr);
+
+  // Runtime call support.
+  static void* RedirectExternalReference(void* external_function,
+                                         bool fp_return);
+
+  // For use in calls that take two double values, constructed from r0, r1, r2
+  // and r3.
+  void GetFpArgs(double* x, double* y);
+  void SetFpResult(const double& result);
+  void TrashCallerSaveRegisters();
+
+  // architecture state
+  int32_t registers_[16];
+  bool n_flag_;
+  bool z_flag_;
+  bool c_flag_;
+  bool v_flag_;
+
+  // simulator support
+  char* stack_;
+  bool pc_modified_;
+  int icount_;
+  static bool initialized_;
+
+  // registered breakpoints
+  Instr* break_pc_;
+  instr_t break_instr_;
+};
+
+} }  // namespace assembler::arm
+
+
+// The simulator has its own stack. Thus it has a different stack limit from
+// the C-based native code.  Setting the c_limit to indicate a very small
+// stack will not cause stack overflow errors, since the simulator ignores
+// the input.
+// This is unlikely to be an issue in practice, though it might cause testing
+// trouble down the line.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+    return assembler::arm::Simulator::current()->StackLimit();
+  }
+};
+
+
+#endif  // defined(__arm__)
+
+#endif  // V8_ARM_SIMULATOR_ARM_H_
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
new file mode 100644
index 0000000..8282655
--- /dev/null
+++ b/src/arm/stub-cache-arm.cc
@@ -0,0 +1,1490 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "ic-inl.h"
+#include "codegen-inl.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(MacroAssembler* masm,
+                       Code::Flags flags,
+                       StubCache::Table table,
+                       Register name,
+                       Register offset) {
+  ExternalReference key_offset(SCTableReference::keyReference(table));
+  ExternalReference value_offset(SCTableReference::valueReference(table));
+
+  Label miss;
+
+  // Save the offset on the stack.
+  __ push(offset);
+
+  // Check that the key in the entry matches the name.
+  __ mov(ip, Operand(key_offset));
+  __ ldr(ip, MemOperand(ip, offset, LSL, 1));
+  __ cmp(name, Operand(ip));
+  __ b(ne, &miss);
+
+  // Get the code entry from the cache.
+  __ mov(ip, Operand(value_offset));
+  __ ldr(offset, MemOperand(ip, offset, LSL, 1));
+
+  // Check that the flags match what we're looking for.
+  __ ldr(offset, FieldMemOperand(offset, Code::kFlagsOffset));
+  __ and_(offset, offset, Operand(~Code::kFlagsNotUsedInLookup));
+  __ cmp(offset, Operand(flags));
+  __ b(ne, &miss);
+
+  // Restore offset and re-load code entry from cache.
+  __ pop(offset);
+  __ mov(ip, Operand(value_offset));
+  __ ldr(offset, MemOperand(ip, offset, LSL, 1));
+
+  // Jump to the first instruction in the code stub.
+  __ add(offset, offset, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(offset);
+
+  // Miss: Restore offset and fall through.
+  __ bind(&miss);
+  __ pop(offset);
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm,
+                              Code::Flags flags,
+                              Register receiver,
+                              Register name,
+                              Register scratch,
+                              Register extra) {
+  Label miss;
+
+  // Make sure that code is valid. The shifting code relies on the
+  // entry size being 8.
+  ASSERT(sizeof(Entry) == 8);
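+  // The hash computed in this function is shifted left by kHeapObjectTagSize
+  // (two bits, so a multiple of 4), and ProbeTable's extra "LSL 1" turns it
+  // into a byte offset of entry_index * 8, which only addresses whole entries
+  // when sizeof(Entry) is 8.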
+
+  // Make sure the flags do not name a specific type.
+  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+  // Make sure that there are no register conflicts.
+  ASSERT(!scratch.is(receiver));
+  ASSERT(!scratch.is(name));
+
+  // Check that the receiver isn't a smi.
+  __ tst(receiver, Operand(kSmiTagMask));
+  __ b(eq, &miss);
+
+  // Get the map of the receiver and compute the hash.
+  __ ldr(scratch, FieldMemOperand(name, String::kLengthOffset));
+  __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ add(scratch, scratch, Operand(ip));
+  __ eor(scratch, scratch, Operand(flags));
+  __ and_(scratch,
+          scratch,
+          Operand((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+
+  // Probe the primary table.
+  ProbeTable(masm, flags, kPrimary, name, scratch);
+
+  // Primary miss: Compute hash for secondary probe.
+  __ sub(scratch, scratch, Operand(name));
+  __ add(scratch, scratch, Operand(flags));
+  __ and_(scratch,
+          scratch,
+          Operand((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+
+  // Probe the secondary table.
+  ProbeTable(masm, flags, kSecondary, name, scratch);
+
+  // Cache miss: Fall-through and let caller handle the miss by
+  // entering the runtime system.
+  __ bind(&miss);
+}
+
+
+void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
+                                                       int index,
+                                                       Register prototype) {
+  // Load the global or builtins object from the current context.
+  __ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  // Load the global context from the global or builtins object.
+  __ ldr(prototype,
+         FieldMemOperand(prototype, GlobalObject::kGlobalContextOffset));
+  // Load the function from the global context.
+  __ ldr(prototype, MemOperand(prototype, Context::SlotOffset(index)));
+  // Load the initial map.  The global functions all have initial maps.
+  __ ldr(prototype,
+         FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
+  // Load the prototype from the initial map.
+  __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+// Load a fast property out of a holder object (src). In-object properties
+// are loaded directly, otherwise the property is loaded from the properties
+// fixed array.
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+                                            Register dst, Register src,
+                                            JSObject* holder, int index) {
+  // Adjust for the number of properties stored in the holder.
+  index -= holder->map()->inobject_properties();
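+  // A negative adjusted index denotes an in-object property. It is counted
+  // backwards from the end of the object, so adding index * kPointerSize to
+  // the instance size below yields the right in-object slot. Non-negative
+  // indices address the separate properties array instead.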
+  if (index < 0) {
+    // Get the property straight out of the holder.
+    int offset = holder->map()->instance_size() + (index * kPointerSize);
+    __ ldr(dst, FieldMemOperand(src, offset));
+  } else {
+    // Calculate the offset into the properties array.
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    __ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
+    __ ldr(dst, FieldMemOperand(dst, offset));
+  }
+}
+
+
+void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
+                                           Register receiver,
+                                           Register scratch,
+                                           Label* miss_label) {
+  // Check that the receiver isn't a smi.
+  __ tst(receiver, Operand(kSmiTagMask));
+  __ b(eq, miss_label);
+
+  // Check that the object is a JS array.
+  __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
+  __ b(ne, miss_label);
+
+  // Load length directly from the JS array.
+  __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ Ret();
+}
+
+
+// Generate code to check if an object is a string.  If the object is
+// a string, the map's instance type is left in the scratch1 register.
+static void GenerateStringCheck(MacroAssembler* masm,
+                                Register receiver,
+                                Register scratch1,
+                                Register scratch2,
+                                Label* smi,
+                                Label* non_string_object) {
+  // Check that the receiver isn't a smi.
+  __ tst(receiver, Operand(kSmiTagMask));
+  __ b(eq, smi);
+
+  // Check that the object is a string.
+  __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+  __ and_(scratch2, scratch1, Operand(kIsNotStringMask));
+  // The cast is to resolve the overload for the argument of 0x0.
+  __ cmp(scratch2, Operand(static_cast<int32_t>(kStringTag)));
+  __ b(ne, non_string_object);
+}
+
+
+// Generate code to load the length from a string object and return the length.
+// If the receiver object is not a string or a wrapped string object the
+// execution continues at the miss label. The register containing the
+// receiver is potentially clobbered.
+void StubCompiler::GenerateLoadStringLength2(MacroAssembler* masm,
+                                             Register receiver,
+                                             Register scratch1,
+                                             Register scratch2,
+                                             Label* miss) {
+  Label check_string, check_wrapper;
+
+  __ bind(&check_string);
+  // Check if the object is a string leaving the instance type in the
+  // scratch1 register.
+  GenerateStringCheck(masm, receiver, scratch1, scratch2,
+                      miss, &check_wrapper);
+
+  // Load length directly from the string.
+  __ and_(scratch1, scratch1, Operand(kStringSizeMask));
+  __ add(scratch1, scratch1, Operand(String::kHashShift));
+  __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
+  __ mov(r0, Operand(r0, LSR, scratch1));
+  __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+  __ Ret();
+
+  // Check if the object is a JSValue wrapper.
+  __ bind(&check_wrapper);
+  __ cmp(scratch1, Operand(JS_VALUE_TYPE));
+  __ b(ne, miss);
+
+  // Unwrap the value in place and check if the wrapped value is a string.
+  __ ldr(receiver, FieldMemOperand(receiver, JSValue::kValueOffset));
+  __ b(&check_string);
+}
+
+
+void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
+                                                 Register receiver,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Label* miss_label) {
+  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+  __ mov(r0, scratch1);
+  __ Ret();
+}
+
+
+// Generate StoreField code; the value is passed in the r0 register.
+// After executing generated code, the receiver_reg and name_reg
+// may be clobbered.
+void StubCompiler::GenerateStoreField(MacroAssembler* masm,
+                                      Builtins::Name storage_extend,
+                                      JSObject* object,
+                                      int index,
+                                      Map* transition,
+                                      Register receiver_reg,
+                                      Register name_reg,
+                                      Register scratch,
+                                      Label* miss_label) {
+  // r0 : value
+  Label exit;
+
+  // Check that the receiver isn't a smi.
+  __ tst(receiver_reg, Operand(kSmiTagMask));
+  __ b(eq, miss_label);
+
+  // Check that the map of the receiver hasn't changed.
+  __ ldr(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+  __ cmp(scratch, Operand(Handle<Map>(object->map())));
+  __ b(ne, miss_label);
+
+  // Perform global security token check if needed.
+  if (object->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
+  }
+
+  // Stub never generated for non-global objects that require access
+  // checks.
+  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+  // Perform map transition for the receiver if necessary.
+  if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+    // The properties must be extended before we can store the value.
+    // We jump to a runtime call that extends the properties array.
+    __ mov(r2, Operand(Handle<Map>(transition)));
+    // Note that if we implement keyed store for ARM we need to call
+    // Builtins::KeyedStoreIC_ExtendStorage here instead.
+    Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_ExtendStorage));
+    __ Jump(ic, RelocInfo::CODE_TARGET);
+    return;
+  }
+
+  if (transition != NULL) {
+    // Update the map of the object; no write barrier updating is
+    // needed because the map is never in new space.
+    __ mov(ip, Operand(Handle<Map>(transition)));
+    __ str(ip, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+  }
+
+  // Adjust for the number of properties stored in the object. Even in the
+  // face of a transition we can use the old map here because the size of the
+  // object and the number of in-object properties are not going to change.
+  index -= object->map()->inobject_properties();
+
+  if (index < 0) {
+    // Set the property straight into the object.
+    int offset = object->map()->instance_size() + (index * kPointerSize);
+    __ str(r0, FieldMemOperand(receiver_reg, offset));
+
+    // Skip updating write barrier if storing a smi.
+    __ tst(r0, Operand(kSmiTagMask));
+    __ b(eq, &exit);
+
+    // Update the write barrier for the array address.
+    // Pass the value being stored in the now unused name_reg.
+    __ mov(name_reg, Operand(offset));
+    __ RecordWrite(receiver_reg, name_reg, scratch);
+  } else {
+    // Write to the properties array.
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    // Get the properties array
+    __ ldr(scratch, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+    __ str(r0, FieldMemOperand(scratch, offset));
+
+    // Skip updating write barrier if storing a smi.
+    __ tst(r0, Operand(kSmiTagMask));
+    __ b(eq, &exit);
+
+    // Update the write barrier for the array address.
+    // Ok to clobber receiver_reg and name_reg, since we return.
+    __ mov(name_reg, Operand(offset));
+    __ RecordWrite(scratch, name_reg, receiver_reg);
+  }
+
+  // Return the value (register r0).
+  __ bind(&exit);
+  __ Ret();
+}
+
+
+void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
+  ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
+  Code* code = NULL;
+  if (kind == Code::LOAD_IC) {
+    code = Builtins::builtin(Builtins::LoadIC_Miss);
+  } else {
+    code = Builtins::builtin(Builtins::KeyedLoadIC_Miss);
+  }
+
+  Handle<Code> ic(code);
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Register StubCompiler::CheckPrototypes(JSObject* object,
+                                       Register object_reg,
+                                       JSObject* holder,
+                                       Register holder_reg,
+                                       Register scratch,
+                                       String* name,
+                                       Label* miss) {
+  // Check that the maps haven't changed.
+  Register result =
+      masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch, miss);
+
+  // If we've skipped any global objects, it's not enough to verify
+  // that their maps haven't changed.
+  while (object != holder) {
+    if (object->IsGlobalObject()) {
+      GlobalObject* global = GlobalObject::cast(object);
+      Object* probe = global->EnsurePropertyCell(name);
+      if (probe->IsFailure()) {
+        set_failure(Failure::cast(probe));
+        return result;
+      }
+      JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+      ASSERT(cell->value()->IsTheHole());
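+      // The ASSERT above guarantees the cell holds the hole at compile time,
+      // i.e. the global object has no own property with this name. The code
+      // below re-checks this at run time and misses if such a property has
+      // been added since.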
+      __ mov(scratch, Operand(Handle<Object>(cell)));
+      __ ldr(scratch,
+             FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+      __ cmp(scratch, ip);
+      __ b(ne, miss);
+    }
+    object = JSObject::cast(object->GetPrototype());
+  }
+
+  // Return the register containing the holder.
+  return result;
+}
+
+
+void StubCompiler::GenerateLoadField(JSObject* object,
+                                     JSObject* holder,
+                                     Register receiver,
+                                     Register scratch1,
+                                     Register scratch2,
+                                     int index,
+                                     String* name,
+                                     Label* miss) {
+  // Check that the receiver isn't a smi.
+  __ tst(receiver, Operand(kSmiTagMask));
+  __ b(eq, miss);
+
+  // Check that the maps haven't changed.
+  Register reg =
+      CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss);
+  GenerateFastPropertyLoad(masm(), r0, reg, holder, index);
+  __ Ret();
+}
+
+
+void StubCompiler::GenerateLoadConstant(JSObject* object,
+                                        JSObject* holder,
+                                        Register receiver,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Object* value,
+                                        String* name,
+                                        Label* miss) {
+  // Check that the receiver isn't a smi.
+  __ tst(receiver, Operand(kSmiTagMask));
+  __ b(eq, miss);
+
+  // Check that the maps haven't changed.
+  Register reg =
+      CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss);
+
+  // Return the constant value.
+  __ mov(r0, Operand(Handle<Object>(value)));
+  __ Ret();
+}
+
+
+void StubCompiler::GenerateLoadCallback(JSObject* object,
+                                        JSObject* holder,
+                                        Register receiver,
+                                        Register name_reg,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        AccessorInfo* callback,
+                                        String* name,
+                                        Label* miss) {
+  // Check that the receiver isn't a smi.
+  __ tst(receiver, Operand(kSmiTagMask));
+  __ b(eq, miss);
+
+  // Check that the maps haven't changed.
+  Register reg =
+      CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss);
+
+  // Push the arguments on the JS stack of the caller.
+  __ push(receiver);  // receiver
+  __ push(reg);  // holder
+  __ mov(ip, Operand(Handle<AccessorInfo>(callback)));  // callback data
+  __ push(ip);
+  __ ldr(reg, FieldMemOperand(ip, AccessorInfo::kDataOffset));
+  __ push(reg);
+  __ push(name_reg);  // name
+
+  // Do tail-call to the runtime system.
+  ExternalReference load_callback_property =
+      ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+  __ TailCallRuntime(load_callback_property, 5, 1);
+}
+
+
+void StubCompiler::GenerateLoadInterceptor(JSObject* object,
+                                           JSObject* holder,
+                                           LookupResult* lookup,
+                                           Register receiver,
+                                           Register name_reg,
+                                           Register scratch1,
+                                           Register scratch2,
+                                           String* name,
+                                           Label* miss) {
+  // Check that the receiver isn't a smi.
+  __ tst(receiver, Operand(kSmiTagMask));
+  __ b(eq, miss);
+
+  // Check that the maps haven't changed.
+  Register reg =
+      CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss);
+
+  // Push the arguments on the JS stack of the caller.
+  __ push(receiver);  // receiver
+  __ push(reg);  // holder
+  __ push(name_reg);  // name
+
+  InterceptorInfo* interceptor = holder->GetNamedInterceptor();
+  ASSERT(!Heap::InNewSpace(interceptor));
+  __ mov(scratch1, Operand(Handle<Object>(interceptor)));
+  __ push(scratch1);
+  __ ldr(scratch2, FieldMemOperand(scratch1, InterceptorInfo::kDataOffset));
+  __ push(scratch2);
+
+  // Do tail-call to the runtime system.
+  ExternalReference load_ic_property =
+      ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
+  __ TailCallRuntime(load_ic_property, 5, 1);
+}
+
+
+Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
+  // ----------- S t a t e -------------
+  //  -- r1: function
+  //  -- lr: return address
+  // -----------------------------------
+
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Preserve the function.
+  __ push(r1);
+
+  // Push the function on the stack as the argument to the runtime function.
+  __ push(r1);
+  __ CallRuntime(Runtime::kLazyCompile, 1);
+
+  // Calculate the entry point.
+  __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  // Restore saved function.
+  __ pop(r1);
+
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
+
+  // Do a tail-call of the compiled function.
+  __ Jump(r2);
+
+  return GetCodeWithFlags(flags, "LazyCompileStub");
+}
+
+
+Object* CallStubCompiler::CompileCallField(Object* object,
+                                           JSObject* holder,
+                                           int index,
+                                           String* name) {
+  // ----------- S t a t e -------------
+  //  -- lr: return address
+  // -----------------------------------
+  Label miss;
+
+  const int argc = arguments().immediate();
+
+  // Get the receiver of the function from the stack into r0.
+  __ ldr(r0, MemOperand(sp, argc * kPointerSize));
+  // Check that the receiver isn't a smi.
+  __ tst(r0, Operand(kSmiTagMask));
+  __ b(eq, &miss);
+
+  // Do the right check and compute the holder register.
+  Register reg =
+      CheckPrototypes(JSObject::cast(object), r0, holder, r3, r2, name, &miss);
+  GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
+
+  // Check that the function really is a function.
+  __ tst(r1, Operand(kSmiTagMask));
+  __ b(eq, &miss);
+  // Get the map.
+  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+  __ b(ne, &miss);
+
+  // Patch the receiver on the stack with the global proxy if
+  // necessary.
+  if (object->IsGlobalObject()) {
+    __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
+    __ str(r3, MemOperand(sp, argc * kPointerSize));
+  }
+
+  // Invoke the function.
+  __ InvokeFunction(r1, arguments(), JUMP_FUNCTION);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(FIELD, name);
+}
+
+
+Object* CallStubCompiler::CompileCallConstant(Object* object,
+                                              JSObject* holder,
+                                              JSFunction* function,
+                                              String* name,
+                                              CheckType check) {
+  // ----------- S t a t e -------------
+  //  -- lr: return address
+  // -----------------------------------
+  Label miss;
+
+  // Get the receiver from the stack
+  const int argc = arguments().immediate();
+  __ ldr(r1, MemOperand(sp, argc * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  if (check != NUMBER_CHECK) {
+    __ tst(r1, Operand(kSmiTagMask));
+    __ b(eq, &miss);
+  }
+
+  // Make sure that it's okay not to patch the on-stack receiver
+  // unless we're doing a receiver map check.
+  ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
+
+  switch (check) {
+    case RECEIVER_MAP_CHECK:
+      // Check that the maps haven't changed.
+      CheckPrototypes(JSObject::cast(object), r1, holder, r3, r2, name, &miss);
+
+      // Patch the receiver on the stack with the global proxy if
+      // necessary.
+      if (object->IsGlobalObject()) {
+        __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
+        __ str(r3, MemOperand(sp, argc * kPointerSize));
+      }
+      break;
+
+    case STRING_CHECK:
+      // Check that the object is a string or a symbol.
+      __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
+      __ b(hs, &miss);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateLoadGlobalFunctionPrototype(masm(),
+                                          Context::STRING_FUNCTION_INDEX,
+                                          r2);
+      CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
+                      r1, name, &miss);
+      break;
+
+    case NUMBER_CHECK: {
+      Label fast;
+      // Check that the object is a smi or a heap number.
+      __ tst(r1, Operand(kSmiTagMask));
+      __ b(eq, &fast);
+      __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
+      __ b(ne, &miss);
+      __ bind(&fast);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateLoadGlobalFunctionPrototype(masm(),
+                                          Context::NUMBER_FUNCTION_INDEX,
+                                          r2);
+      CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
+                      r1, name, &miss);
+      break;
+    }
+
+    case BOOLEAN_CHECK: {
+      Label fast;
+      // Check that the object is a boolean.
+      __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+      __ cmp(r1, ip);
+      __ b(eq, &fast);
+      __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+      __ cmp(r1, ip);
+      __ b(ne, &miss);
+      __ bind(&fast);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateLoadGlobalFunctionPrototype(masm(),
+                                          Context::BOOLEAN_FUNCTION_INDEX,
+                                          r2);
+      CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
+                      r1, name, &miss);
+      break;
+    }
+
+    case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
+      CheckPrototypes(JSObject::cast(object), r1, holder, r3, r2, name, &miss);
+      // Make sure object->HasFastElements().
+      // Get the elements array of the object.
+      __ ldr(r3, FieldMemOperand(r1, JSObject::kElementsOffset));
+      // Check that the object is in fast mode (not dictionary).
+      __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+      __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+      __ cmp(r2, ip);
+      __ b(ne, &miss);
+      break;
+
+    default:
+      UNREACHABLE();
+  }
+
+  // Get the function and set up the context.
+  __ mov(r1, Operand(Handle<JSFunction>(function)));
+  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
+  // Jump to the cached code (tail call).
+  ASSERT(function->is_compiled());
+  Handle<Code> code(function->code());
+  ParameterCount expected(function->shared()->formal_parameter_count());
+  __ InvokeCode(code, expected, arguments(),
+                RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  String* function_name = NULL;
+  if (function->shared()->name()->IsString()) {
+    function_name = String::cast(function->shared()->name());
+  }
+  return GetCode(CONSTANT_FUNCTION, function_name);
+}
+
+
+Object* CallStubCompiler::CompileCallInterceptor(Object* object,
+                                                 JSObject* holder,
+                                                 String* name) {
+  // ----------- S t a t e -------------
+  //  -- lr: return address
+  // -----------------------------------
+  Label miss;
+
+  // TODO(1224669): Implement.
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR, name);
+}
+
+
+Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
+                                            GlobalObject* holder,
+                                            JSGlobalPropertyCell* cell,
+                                            JSFunction* function,
+                                            String* name) {
+  // ----------- S t a t e -------------
+  //  -- lr: return address
+  // -----------------------------------
+  Label miss;
+
+  // Get the number of arguments.
+  const int argc = arguments().immediate();
+
+  // Get the receiver from the stack.
+  __ ldr(r0, MemOperand(sp, argc * kPointerSize));
+
+  // If the object is the holder then we know that it's a global
+  // object which can only happen for contextual calls. In this case,
+  // the receiver cannot be a smi.
+  if (object != holder) {
+    __ tst(r0, Operand(kSmiTagMask));
+    __ b(eq, &miss);
+  }
+
+  // Check that the maps haven't changed.
+  CheckPrototypes(object, r0, holder, r3, r2, name, &miss);
+
+  // Get the value from the cell.
+  __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+  __ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
+
+  // Check that the cell contains the same function.
+  __ cmp(r1, Operand(Handle<JSFunction>(function)));
+  __ b(ne, &miss);
+
+  // Patch the receiver on the stack with the global proxy if
+  // necessary.
+  if (object->IsGlobalObject()) {
+    __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
+    __ str(r3, MemOperand(sp, argc * kPointerSize));
+  }
+
+  // Set up the context (the function is already in r1).
+  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
+  // Jump to the cached code (tail call).
+  __ IncrementCounter(&Counters::call_global_inline, 1, r2, r3);
+  ASSERT(function->is_compiled());
+  Handle<Code> code(function->code());
+  ParameterCount expected(function->shared()->formal_parameter_count());
+  __ InvokeCode(code, expected, arguments(),
+                RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  __ IncrementCounter(&Counters::call_global_inline_miss, 1, r1, r3);
+  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, name);
+}
+
+
+Object* StoreStubCompiler::CompileStoreField(JSObject* object,
+                                             int index,
+                                             Map* transition,
+                                             String* name) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r2    : name
+  //  -- lr    : return address
+  //  -- [sp]  : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Get the receiver from the stack.
+  __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
+
+  // The name register might be clobbered.
+  GenerateStoreField(masm(),
+                     Builtins::StoreIC_ExtendStorage,
+                     object,
+                     index,
+                     transition,
+                     r3, r2, r1,
+                     &miss);
+  __ bind(&miss);
+  __ mov(r2, Operand(Handle<String>(name)));  // restore name
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+}
+
+
+Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
+                                                AccessorInfo* callback,
+                                                String* name) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r2    : name
+  //  -- lr    : return address
+  //  -- [sp]  : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Get the object from the stack.
+  __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
+
+  // Check that the object isn't a smi.
+  __ tst(r3, Operand(kSmiTagMask));
+  __ b(eq, &miss);
+
+  // Check that the map of the object hasn't changed.
+  __ ldr(r1, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ cmp(r1, Operand(Handle<Map>(object->map())));
+  __ b(ne, &miss);
+
+  // Perform global security token check if needed.
+  if (object->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(r3, r1, &miss);
+  }
+
+  // Stub never generated for non-global objects that require access
+  // checks.
+  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+  __ ldr(ip, MemOperand(sp));  // receiver
+  __ push(ip);
+  __ mov(ip, Operand(Handle<AccessorInfo>(callback)));  // callback info
+  __ push(ip);
+  __ push(r2);  // name
+  __ push(r0);  // value
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_callback_property =
+      ExternalReference(IC_Utility(IC::kStoreCallbackProperty));
+  __ TailCallRuntime(store_callback_property, 4, 1);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  __ mov(r2, Operand(Handle<String>(name)));  // restore name
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
+}
+
+
+Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
+                                                   String* name) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r2    : name
+  //  -- lr    : return address
+  //  -- [sp]  : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Get the object from the stack.
+  __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
+
+  // Check that the object isn't a smi.
+  __ tst(r3, Operand(kSmiTagMask));
+  __ b(eq, &miss);
+
+  // Check that the map of the object hasn't changed.
+  __ ldr(r1, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ cmp(r1, Operand(Handle<Map>(receiver->map())));
+  __ b(ne, &miss);
+
+  // Perform global security token check if needed.
+  if (receiver->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(r3, r1, &miss);
+  }
+
+  // Stub never generated for non-global objects that require access
+  // checks.
+  ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
+
+  __ ldr(ip, MemOperand(sp));  // receiver
+  __ push(ip);
+  __ push(r2);  // name
+  __ push(r0);  // value
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_ic_property =
+      ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
+  __ TailCallRuntime(store_ic_property, 3, 1);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  __ mov(r2, Operand(Handle<String>(name)));  // restore name
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR, name);
+}
+
+
+Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
+                                              JSGlobalPropertyCell* cell,
+                                              String* name) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r2    : name
+  //  -- lr    : return address
+  //  -- [sp]  : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Check that the map of the global has not changed.
+  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
+  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+  __ cmp(r3, Operand(Handle<Map>(object->map())));
+  __ b(ne, &miss);
+
+  // Store the value in the cell.
+  __ mov(r2, Operand(Handle<JSGlobalPropertyCell>(cell)));
+  __ str(r0, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+
+  __ IncrementCounter(&Counters::named_store_global_inline, 1, r1, r3);
+  __ Ret();
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  __ IncrementCounter(&Counters::named_store_global_inline_miss, 1, r1, r3);
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, name);
+}
+
+
+Object* LoadStubCompiler::CompileLoadField(JSObject* object,
+                                           JSObject* holder,
+                                           int index,
+                                           String* name) {
+  // ----------- S t a t e -------------
+  //  -- r2    : name
+  //  -- lr    : return address
+  //  -- [sp]  : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ ldr(r0, MemOperand(sp, 0));
+
+  GenerateLoadField(object, holder, r0, r3, r1, index, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(FIELD, name);
+}
+
+
+Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
+                                              JSObject* holder,
+                                              AccessorInfo* callback,
+                                              String* name) {
+  // ----------- S t a t e -------------
+  //  -- r2    : name
+  //  -- lr    : return address
+  //  -- [sp]  : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ ldr(r0, MemOperand(sp, 0));
+  GenerateLoadCallback(object, holder, r0, r2, r3, r1, callback, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
+}
+
+
+Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
+                                              JSObject* holder,
+                                              Object* value,
+                                              String* name) {
+  // ----------- S t a t e -------------
+  //  -- r2    : name
+  //  -- lr    : return address
+  //  -- [sp] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ ldr(r0, MemOperand(sp, 0));
+
+  GenerateLoadConstant(object, holder, r0, r3, r1, value, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CONSTANT_FUNCTION, name);
+}
+
+
+Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
+                                                 JSObject* holder,
+                                                 String* name) {
+  // ----------- S t a t e -------------
+  //  -- r2    : name
+  //  -- lr    : return address
+  //  -- [sp]  : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ ldr(r0, MemOperand(sp, 0));
+
+  LookupResult lookup;
+  holder->LocalLookupRealNamedProperty(name, &lookup);
+  GenerateLoadInterceptor(object,
+                          holder,
+                          &lookup,
+                          r0,
+                          r2,
+                          r3,
+                          r1,
+                          name,
+                          &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR, name);
+}
+
+
+Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
+                                            GlobalObject* holder,
+                                            JSGlobalPropertyCell* cell,
+                                            String* name,
+                                            bool is_dont_delete) {
+  // ----------- S t a t e -------------
+  //  -- r2    : name
+  //  -- lr    : return address
+  //  -- [sp]  : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Get the receiver from the stack.
+  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
+
+  // If the object is the holder then we know that it's a global
+  // object which can only happen for contextual calls. In this case,
+  // the receiver cannot be a smi.
+  if (object != holder) {
+    __ tst(r1, Operand(kSmiTagMask));
+    __ b(eq, &miss);
+  }
+
+  // Check that the map of the global has not changed.
+  CheckPrototypes(object, r1, holder, r3, r0, name, &miss);
+
+  // Get the value from the cell.
+  __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+  __ ldr(r0, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
+
+  // Check for deleted property if property can actually be deleted.
+  if (!is_dont_delete) {
+    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+    __ cmp(r0, ip);
+    __ b(eq, &miss);
+  }
+
+  __ IncrementCounter(&Counters::named_load_global_inline, 1, r1, r3);
+  __ Ret();
+
+  __ bind(&miss);
+  __ IncrementCounter(&Counters::named_load_global_inline_miss, 1, r1, r3);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(NORMAL, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
+                                                JSObject* receiver,
+                                                JSObject* holder,
+                                                int index) {
+  // ----------- S t a t e -------------
+  //  -- lr    : return address
+  //  -- sp[0] : key
+  //  -- sp[4] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ ldr(r2, MemOperand(sp, 0));
+  __ ldr(r0, MemOperand(sp, kPointerSize));
+
+  __ cmp(r2, Operand(Handle<String>(name)));
+  __ b(ne, &miss);
+
+  GenerateLoadField(receiver, holder, r0, r3, r1, index, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  return GetCode(FIELD, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
+                                                   JSObject* receiver,
+                                                   JSObject* holder,
+                                                   AccessorInfo* callback) {
+  // ----------- S t a t e -------------
+  //  -- lr    : return address
+  //  -- sp[0] : key
+  //  -- sp[4] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ ldr(r2, MemOperand(sp, 0));
+  __ ldr(r0, MemOperand(sp, kPointerSize));
+
+  __ cmp(r2, Operand(Handle<String>(name)));
+  __ b(ne, &miss);
+
+  GenerateLoadCallback(receiver, holder, r0, r2, r3, r1, callback, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  return GetCode(CALLBACKS, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
+                                                   JSObject* receiver,
+                                                   JSObject* holder,
+                                                   Object* value) {
+  // ----------- S t a t e -------------
+  //  -- lr    : return address
+  //  -- sp[0] : key
+  //  -- sp[4] : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Check that the key is the cached one.
+  __ ldr(r2, MemOperand(sp, 0));
+  __ ldr(r0, MemOperand(sp, kPointerSize));
+
+  __ cmp(r2, Operand(Handle<String>(name)));
+  __ b(ne, &miss);
+
+  GenerateLoadConstant(receiver, holder, r0, r3, r1, value, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CONSTANT_FUNCTION, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+                                                      JSObject* holder,
+                                                      String* name) {
+  // ----------- S t a t e -------------
+  //  -- lr    : return address
+  //  -- sp[0] : key
+  //  -- sp[4] : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Check that the key is the cached one.
+  __ ldr(r2, MemOperand(sp, 0));
+  __ ldr(r0, MemOperand(sp, kPointerSize));
+
+  __ cmp(r2, Operand(Handle<String>(name)));
+  __ b(ne, &miss);
+
+  LookupResult lookup;
+  holder->LocalLookupRealNamedProperty(name, &lookup);
+  GenerateLoadInterceptor(receiver,
+                          holder,
+                          &lookup,
+                          r0,
+                          r2,
+                          r3,
+                          r1,
+                          name,
+                          &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  return GetCode(INTERCEPTOR, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+  // ----------- S t a t e -------------
+  //  -- lr    : return address
+  //  -- sp[0] : key
+  //  -- sp[4] : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Check that the key is the cached one.
+  __ ldr(r2, MemOperand(sp, 0));
+  __ ldr(r0, MemOperand(sp, kPointerSize));
+
+  __ cmp(r2, Operand(Handle<String>(name)));
+  __ b(ne, &miss);
+
+  GenerateLoadArrayLength(masm(), r0, r3, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  return GetCode(CALLBACKS, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+  // ----------- S t a t e -------------
+  //  -- lr    : return address
+  //  -- sp[0] : key
+  //  -- sp[4] : receiver
+  // -----------------------------------
+  Label miss;
+  __ IncrementCounter(&Counters::keyed_load_string_length, 1, r1, r3);
+
+  __ ldr(r2, MemOperand(sp));
+  __ ldr(r0, MemOperand(sp, kPointerSize));  // receiver
+
+  __ cmp(r2, Operand(Handle<String>(name)));
+  __ b(ne, &miss);
+
+  GenerateLoadStringLength2(masm(), r0, r1, r3, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_string_length, 1, r1, r3);
+
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  return GetCode(CALLBACKS, name);
+}
+
+
+// TODO(1224671): implement the fast case.
+Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+  // ----------- S t a t e -------------
+  //  -- lr    : return address
+  //  -- sp[0] : key
+  //  -- sp[4] : receiver
+  // -----------------------------------
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  return GetCode(CALLBACKS, name);
+}
+
+
+Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+                                                  int index,
+                                                  Map* transition,
+                                                  String* name) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r2    : name
+  //  -- lr    : return address
+  //  -- [sp]  : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ IncrementCounter(&Counters::keyed_store_field, 1, r1, r3);
+
+  // Check that the name has not changed.
+  __ cmp(r2, Operand(Handle<String>(name)));
+  __ b(ne, &miss);
+
+  // Load receiver from the stack.
+  __ ldr(r3, MemOperand(sp));
+  // r1 is used as a scratch register; r3 and r2 might be clobbered.
+  GenerateStoreField(masm(),
+                     Builtins::StoreIC_ExtendStorage,
+                     object,
+                     index,
+                     transition,
+                     r3, r2, r1,
+                     &miss);
+  __ bind(&miss);
+
+  __ DecrementCounter(&Counters::keyed_store_field, 1, r1, r3);
+  __ mov(r2, Operand(Handle<String>(name)));  // restore name register.
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+}
+
+
+Object* ConstructStubCompiler::CompileConstructStub(
+    SharedFunctionInfo* shared) {
+  // ----------- S t a t e -------------
+  //  -- r0    : argc
+  //  -- r1    : constructor
+  //  -- lr    : return address
+  //  -- [sp]  : last argument
+  // -----------------------------------
+  Label generic_stub_call;
+
+  // Use r7 for holding undefined which is used in several places below.
+  __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Check to see whether there are any break points in the function code.  If
+  // there are, jump to the generic constructor stub, which calls the actual
+  // code for the function and thereby hits the break points.
+  __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset));
+  __ cmp(r2, r7);
+  __ b(ne, &generic_stub_call);
+#endif
+
+  // Load the initial map and verify that it is in fact a map.
+  // r1: constructor function
+  // r7: undefined
+  __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+  __ tst(r2, Operand(kSmiTagMask));
+  __ b(eq, &generic_stub_call);
+  __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+  __ b(ne, &generic_stub_call);
+
+#ifdef DEBUG
+  // Cannot construct functions this way.
+  // r0: argc
+  // r1: constructor function
+  // r2: initial map
+  // r7: undefined
+  __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
+  __ Check(ne, "Function constructed by construct stub.");
+#endif
+
+  // Now allocate the JSObject in new space.
+  // r0: argc
+  // r1: constructor function
+  // r2: initial map
+  // r7: undefined
+  __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
+  __ AllocateInNewSpace(r3,
+                        r4,
+                        r5,
+                        r6,
+                        &generic_stub_call,
+                        NO_ALLOCATION_FLAGS);
+
+  // Allocated the JSObject, now initialize the fields. Map is set to initial
+  // map and properties and elements are set to empty fixed array.
+  // r0: argc
+  // r1: constructor function
+  // r2: initial map
+  // r3: object size (in words)
+  // r4: JSObject (not tagged)
+  // r7: undefined
+  __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+  __ mov(r5, r4);
+  ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+  __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
+  ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+  __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+  ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+  __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+
+  // Calculate the location of the first argument. The stack contains only the
+  // argc arguments.
+  __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
+
+  // Fill all the in-object properties with undefined.
+  // r0: argc
+  // r1: first argument
+  // r3: object size (in words)
+  // r4: JSObject (not tagged)
+  // r5: First in-object property of JSObject (not tagged)
+  // r7: undefined
+  // Fill the initialized properties with a constant value or a passed argument
+  // depending on the this.x = ...; assignment in the function.
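+  // Illustrative example: for a constructor like
+  //   function Point(x) { this.x = x; this.y = 0; }
+  // the loop below stores the passed argument (or undefined if it was not
+  // passed) for x and the constant 0 for y.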
+  for (int i = 0; i < shared->this_property_assignments_count(); i++) {
+    if (shared->IsThisPropertyAssignmentArgument(i)) {
+      Label not_passed, next;
+      // Check if the argument assigned to the property is actually passed.
+      int arg_number = shared->GetThisPropertyAssignmentArgument(i);
+      __ cmp(r0, Operand(arg_number));
+      __ b(le, &not_passed);
+      // Argument passed - find it on the stack.
+      __ ldr(r2, MemOperand(r1, (arg_number + 1) * -kPointerSize));
+      __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
+      __ b(&next);
+      __ bind(&not_passed);
+      // Set the property to undefined.
+      __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
+      __ bind(&next);
+    } else {
+      // Set the property to the constant value.
+      Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
+      __ mov(r2, Operand(constant));
+      __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
+    }
+  }
+
+  // Fill the unused in-object property fields with undefined.
+  for (int i = shared->this_property_assignments_count();
+       i < shared->CalculateInObjectProperties();
+       i++) {
+    __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
+  }
+
+  // r0: argc
+  // r4: JSObject (not tagged)
+  // Move argc to r1 and the JSObject to return to r0 and tag it.
+  __ mov(r1, r0);
+  __ mov(r0, r4);
+  __ orr(r0, r0, Operand(kHeapObjectTag));
+
+  // r0: JSObject
+  // r1: argc
+  // Remove caller arguments and receiver from the stack and return.
+  __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2));
+  __ add(sp, sp, Operand(kPointerSize));
+  __ IncrementCounter(&Counters::constructed_objects, 1, r1, r2);
+  __ IncrementCounter(&Counters::constructed_objects_stub, 1, r1, r2);
+  __ Jump(lr);
+
+  // Jump to the generic stub in case the specialized code cannot handle the
+  // construction.
+  __ bind(&generic_stub_call);
+  Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+  Handle<Code> generic_construct_stub(code);
+  __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode();
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
new file mode 100644
index 0000000..2d5b140
--- /dev/null
+++ b/src/arm/virtual-frame-arm.cc
@@ -0,0 +1,397 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "register-allocator-inl.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// VirtualFrame implementation.
+
+#define __ ACCESS_MASM(masm())
+
+
+// On entry to a function, the virtual frame already contains the
+// receiver and the parameters.  All initial frame elements are in
+// memory.
+VirtualFrame::VirtualFrame()
+    : elements_(parameter_count() + local_count() + kPreallocatedElements),
+      stack_pointer_(parameter_count()) {  // 0-based index of TOS.
+  for (int i = 0; i <= stack_pointer_; i++) {
+    elements_.Add(FrameElement::MemoryElement());
+  }
+  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+    register_locations_[i] = kIllegalIndex;
+  }
+}
+
+
+void VirtualFrame::SyncElementBelowStackPointer(int index) {
+  UNREACHABLE();
+}
+
+
+void VirtualFrame::SyncElementByPushing(int index) {
+  UNREACHABLE();
+}
+
+
+void VirtualFrame::SyncRange(int begin, int end) {
+  // All elements are in memory on ARM (ie, synced).
+#ifdef DEBUG
+  for (int i = begin; i <= end; i++) {
+    ASSERT(elements_[i].is_synced());
+  }
+#endif
+}
+
+
+void VirtualFrame::MergeTo(VirtualFrame* expected) {
+  // ARM frames are currently always in memory.
+  ASSERT(Equals(expected));
+}
+
+
+void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
+  UNREACHABLE();
+}
+
+
+void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
+  UNREACHABLE();
+}
+
+
+void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
+  UNREACHABLE();
+}
+
+
+void VirtualFrame::Enter() {
+  Comment cmnt(masm(), "[ Enter JS frame");
+
+#ifdef DEBUG
+  // Verify that r1 contains a JS function.  The following code relies
+  // on r2 being available for use.
+  if (FLAG_debug_code) {
+    Label map_check, done;
+    __ tst(r1, Operand(kSmiTagMask));
+    __ b(ne, &map_check);
+    __ stop("VirtualFrame::Enter - r1 is not a function (smi check).");
+    __ bind(&map_check);
+    __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+    __ b(eq, &done);
+    __ stop("VirtualFrame::Enter - r1 is not a function (map check).");
+    __ bind(&done);
+  }
+#endif  // DEBUG
+
+  // We are about to push four values to the frame.
+  Adjust(4);
+  __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+  // Adjust FP to point to saved FP.
+  __ add(fp, sp, Operand(2 * kPointerSize));
+  cgen()->allocator()->Unuse(r1);
+  cgen()->allocator()->Unuse(lr);
+}
+
+
+void VirtualFrame::Exit() {
+  Comment cmnt(masm(), "[ Exit JS frame");
+  // Record the location of the JS exit code for patching when setting
+  // break points.
+  __ RecordJSReturn();
+
+  // Drop the execution stack down to the frame pointer and restore the caller
+  // frame pointer and return address.
+  __ mov(sp, fp);
+  __ ldm(ia_w, sp, fp.bit() | lr.bit());
+}
+
+
+void VirtualFrame::AllocateStackSlots() {
+  int count = local_count();
+  if (count > 0) {
+    Comment cmnt(masm(), "[ Allocate space for locals");
+    Adjust(count);
+    // Initialize stack slots with 'undefined' value.
+    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  }
+  if (FLAG_check_stack) {
+    __ LoadRoot(r2, Heap::kStackLimitRootIndex);
+  }
+  for (int i = 0; i < count; i++) {
+    __ push(ip);
+  }
+  if (FLAG_check_stack) {
+    // Put the lr setup instruction in the delay slot.  The kInstrSize is added
+    // to the implicit 8 byte offset that always applies to operations with pc
+    // and gives a return address 12 bytes down.
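+    // As a result, lr ends up pointing at the instruction following the
+    // conditional mov below, so the stack check stub returns straight into
+    // the function body.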
+    masm()->add(lr, pc, Operand(Assembler::kInstrSize));
+    masm()->cmp(sp, Operand(r2));
+    StackCheckStub stub;
+    // Call the stub if lower.
+    masm()->mov(pc,
+                Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
+                        RelocInfo::CODE_TARGET),
+                LeaveCC,
+                lo);
+  }
+}
+
+
+void VirtualFrame::SaveContextRegister() {
+  UNIMPLEMENTED();
+}
+
+
+void VirtualFrame::RestoreContextRegister() {
+  UNIMPLEMENTED();
+}
+
+
+void VirtualFrame::PushReceiverSlotAddress() {
+  UNIMPLEMENTED();
+}
+
+
+int VirtualFrame::InvalidateFrameSlotAt(int index) {
+  UNIMPLEMENTED();
+  return kIllegalIndex;
+}
+
+
+void VirtualFrame::TakeFrameSlotAt(int index) {
+  UNIMPLEMENTED();
+}
+
+
+void VirtualFrame::StoreToFrameSlotAt(int index) {
+  UNIMPLEMENTED();
+}
+
+
+void VirtualFrame::PushTryHandler(HandlerType type) {
+  // Grow the expression stack by handler size less one (the return
+  // address in lr is already counted by a call instruction).
+  Adjust(kHandlerSize - 1);
+  __ PushTryHandler(IN_JAVASCRIPT, type);
+}
+
+
+void VirtualFrame::RawCallStub(CodeStub* stub) {
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ CallStub(stub);
+}
+
+
+void VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
+  PrepareForCall(0, 0);
+  arg->Unuse();
+  RawCallStub(stub);
+}
+
+
+void VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
+  PrepareForCall(0, 0);
+  arg0->Unuse();
+  arg1->Unuse();
+  RawCallStub(stub);
+}
+
+
+void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
+  PrepareForCall(arg_count, arg_count);
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ CallRuntime(f, arg_count);
+}
+
+
+void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
+  PrepareForCall(arg_count, arg_count);
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ CallRuntime(id, arg_count);
+}
+
+
+void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
+                                 InvokeJSFlags flags,
+                                 Result* arg_count_register,
+                                 int arg_count) {
+  ASSERT(arg_count_register->reg().is(r0));
+  PrepareForCall(arg_count, arg_count);
+  arg_count_register->Unuse();
+  __ InvokeBuiltin(id, flags);
+}
+
+
+void VirtualFrame::RawCallCodeObject(Handle<Code> code,
+                                       RelocInfo::Mode rmode) {
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ Call(code, rmode);
+}
+
+
+void VirtualFrame::CallCodeObject(Handle<Code> code,
+                                  RelocInfo::Mode rmode,
+                                  int dropped_args) {
+  int spilled_args = 0;
+  switch (code->kind()) {
+    case Code::CALL_IC:
+      spilled_args = dropped_args + 1;
+      break;
+    case Code::FUNCTION:
+      spilled_args = dropped_args + 1;
+      break;
+    case Code::KEYED_LOAD_IC:
+      ASSERT(dropped_args == 0);
+      spilled_args = 2;
+      break;
+    default:
+      // The other types of code objects are called with values
+      // in specific registers, and are handled in functions with
+      // a different signature.
+      UNREACHABLE();
+      break;
+  }
+  PrepareForCall(spilled_args, dropped_args);
+  RawCallCodeObject(code, rmode);
+}
+
+
+void VirtualFrame::CallCodeObject(Handle<Code> code,
+                                  RelocInfo::Mode rmode,
+                                  Result* arg,
+                                  int dropped_args) {
+  int spilled_args = 0;
+  switch (code->kind()) {
+    case Code::LOAD_IC:
+      ASSERT(arg->reg().is(r2));
+      ASSERT(dropped_args == 0);
+      spilled_args = 1;
+      break;
+    case Code::KEYED_STORE_IC:
+      ASSERT(arg->reg().is(r0));
+      ASSERT(dropped_args == 0);
+      spilled_args = 2;
+      break;
+    default:
+      // No other types of code objects are called with values
+      // in exactly one register.
+      UNREACHABLE();
+      break;
+  }
+  PrepareForCall(spilled_args, dropped_args);
+  arg->Unuse();
+  RawCallCodeObject(code, rmode);
+}
+
+
+void VirtualFrame::CallCodeObject(Handle<Code> code,
+                                  RelocInfo::Mode rmode,
+                                  Result* arg0,
+                                  Result* arg1,
+                                  int dropped_args) {
+  int spilled_args = 1;
+  switch (code->kind()) {
+    case Code::STORE_IC:
+      ASSERT(arg0->reg().is(r0));
+      ASSERT(arg1->reg().is(r2));
+      ASSERT(dropped_args == 0);
+      spilled_args = 1;
+      break;
+    case Code::BUILTIN:
+      ASSERT(*code == Builtins::builtin(Builtins::JSConstructCall));
+      ASSERT(arg0->reg().is(r0));
+      ASSERT(arg1->reg().is(r1));
+      spilled_args = dropped_args + 1;
+      break;
+    default:
+      // No other types of code objects are called with values
+      // in exactly two registers.
+      UNREACHABLE();
+      break;
+  }
+  PrepareForCall(spilled_args, dropped_args);
+  arg0->Unuse();
+  arg1->Unuse();
+  RawCallCodeObject(code, rmode);
+}
+
+
+void VirtualFrame::Drop(int count) {
+  ASSERT(count >= 0);
+  ASSERT(height() >= count);
+  int num_virtual_elements = (element_count() - 1) - stack_pointer_;
+
+  // Emit code to lower the stack pointer if necessary.
+  if (num_virtual_elements < count) {
+    int num_dropped = count - num_virtual_elements;
+    stack_pointer_ -= num_dropped;
+    __ add(sp, sp, Operand(num_dropped * kPointerSize));
+  }
+
+  // Discard elements from the virtual frame and free any registers.
+  for (int i = 0; i < count; i++) {
+    FrameElement dropped = elements_.RemoveLast();
+    if (dropped.is_register()) {
+      Unuse(dropped.reg());
+    }
+  }
+}
+
+
+Result VirtualFrame::Pop() {
+  UNIMPLEMENTED();
+  return Result();
+}
+
+
+void VirtualFrame::EmitPop(Register reg) {
+  ASSERT(stack_pointer_ == element_count() - 1);
+  stack_pointer_--;
+  elements_.RemoveLast();
+  __ pop(reg);
+}
+
+
+void VirtualFrame::EmitPush(Register reg) {
+  ASSERT(stack_pointer_ == element_count() - 1);
+  elements_.Add(FrameElement::MemoryElement());
+  stack_pointer_++;
+  __ push(reg);
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h
new file mode 100644
index 0000000..457478d
--- /dev/null
+++ b/src/arm/virtual-frame-arm.h
@@ -0,0 +1,525 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_VIRTUAL_FRAME_ARM_H_
+#define V8_ARM_VIRTUAL_FRAME_ARM_H_
+
+#include "register-allocator.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// Virtual frames
+//
+// The virtual frame is an abstraction of the physical stack frame.  It
+// encapsulates the parameters, frame-allocated locals, and the expression
+// stack.  It supports push/pop operations on the expression stack, as well
+// as random access to the expression stack elements, locals, and
+// parameters.
+
+class VirtualFrame : public ZoneObject {
+ public:
+  // A utility class to introduce a scope where the virtual frame is
+  // expected to remain spilled.  The constructor spills the code
+  // generator's current frame, but no attempt is made to require it
+  // to stay spilled.  It is intended as documentation while the code
+  // generator is being transformed.
+  class SpilledScope BASE_EMBEDDED {
+   public:
+    SpilledScope() {}
+  };
+
+  // An illegal index into the virtual frame.
+  static const int kIllegalIndex = -1;
+
+  // Construct an initial virtual frame on entry to a JS function.
+  VirtualFrame();
+
+  // Construct a virtual frame as a clone of an existing one.
+  explicit VirtualFrame(VirtualFrame* original);
+
+  CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+  MacroAssembler* masm() { return cgen()->masm(); }
+
+  // Create a duplicate of an existing valid frame element.
+  FrameElement CopyElementAt(int index);
+
+  // The number of elements on the virtual frame.
+  int element_count() { return elements_.length(); }
+
+  // The height of the virtual expression stack.
+  int height() {
+    return element_count() - expression_base_index();
+  }
+
+  int register_location(int num) {
+    ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+    return register_locations_[num];
+  }
+
+  int register_location(Register reg) {
+    return register_locations_[RegisterAllocator::ToNumber(reg)];
+  }
+
+  void set_register_location(Register reg, int index) {
+    register_locations_[RegisterAllocator::ToNumber(reg)] = index;
+  }
+
+  bool is_used(int num) {
+    ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+    return register_locations_[num] != kIllegalIndex;
+  }
+
+  bool is_used(Register reg) {
+    return register_locations_[RegisterAllocator::ToNumber(reg)]
+        != kIllegalIndex;
+  }
+
+  // Add extra in-memory elements to the top of the frame to match an actual
+  // frame (eg, the frame after an exception handler is pushed).  No code is
+  // emitted.
+  void Adjust(int count);
+
+  // Forget elements from the top of the frame to match an actual frame (eg,
+  // the frame after a runtime call).  No code is emitted.
+  void Forget(int count) {
+    ASSERT(count >= 0);
+    ASSERT(stack_pointer_ == element_count() - 1);
+    stack_pointer_ -= count;
+    // On ARM, all elements are in memory, so there is no extra bookkeeping
+    // (registers, copies, etc.) beyond dropping the elements.
+    elements_.Rewind(stack_pointer_ + 1);
+  }
+
+  // Forget count elements from the top of the frame and adjust the stack
+  // pointer downward.  This is used, for example, before merging frames at
+  // break, continue, and return targets.
+  void ForgetElements(int count);
+
+  // Spill all values from the frame to memory.
+  void SpillAll();
+
+  // Spill all occurrences of a specific register from the frame.
+  void Spill(Register reg) {
+    if (is_used(reg)) SpillElementAt(register_location(reg));
+  }
+
+  // Spill all occurrences of an arbitrary register if possible.  Return the
+  // register spilled or no_reg if it was not possible to free any register
+  // (ie, they all have frame-external references).
+  Register SpillAnyRegister();
+
+  // Prepare this virtual frame for merging to an expected frame by
+  // performing some state changes that do not require generating
+  // code.  It is guaranteed that no code will be generated.
+  void PrepareMergeTo(VirtualFrame* expected);
+
+  // Make this virtual frame have a state identical to an expected virtual
+  // frame.  As a side effect, code may be emitted to make this frame match
+  // the expected one.
+  void MergeTo(VirtualFrame* expected);
+
+  // Detach a frame from its code generator, perhaps temporarily.  This
+  // tells the register allocator that it is free to use frame-internal
+  // registers.  Used when the code generator's frame is switched from this
+  // one to NULL by an unconditional jump.
+  void DetachFromCodeGenerator() {
+    RegisterAllocator* cgen_allocator = cgen()->allocator();
+    for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+      if (is_used(i)) cgen_allocator->Unuse(i);
+    }
+  }
+
+  // (Re)attach a frame to its code generator.  This informs the register
+  // allocator that the frame-internal register references are active again.
+  // Used when a code generator's frame is switched from NULL to this one by
+  // binding a label.
+  void AttachToCodeGenerator() {
+    RegisterAllocator* cgen_allocator = cgen()->allocator();
+    for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+      if (is_used(i)) cgen_allocator->Use(i);
+    }
+  }
+
+  // Emit code for the physical JS entry and exit frame sequences.  After
+  // calling Enter, the virtual frame is ready for use; and after calling
+  // Exit it should not be used.  Note that Enter does not allocate space in
+  // the physical frame for storing frame-allocated locals.
+  void Enter();
+  void Exit();
+
+  // Prepare for returning from the frame by spilling locals and
+  // dropping all non-locals elements in the virtual frame.  This
+  // avoids generating unnecessary merge code when jumping to the
+  // shared return site.  Emits code for spills.
+  void PrepareForReturn();
+
+  // Allocate and initialize the frame-allocated locals.
+  void AllocateStackSlots();
+
+  // The current top of the expression stack as an assembly operand.
+  MemOperand Top() { return MemOperand(sp, 0); }
+
+  // An element of the expression stack as an assembly operand.
+  MemOperand ElementAt(int index) {
+    return MemOperand(sp, index * kPointerSize);
+  }
+
+  // Random-access store to a frame-top relative frame element.  The result
+  // becomes owned by the frame and is invalidated.
+  void SetElementAt(int index, Result* value);
+
+  // Set a frame element to a constant.  The index is frame-top relative.
+  void SetElementAt(int index, Handle<Object> value) {
+    Result temp(value);
+    SetElementAt(index, &temp);
+  }
+
+  void PushElementAt(int index) {
+    PushFrameSlotAt(element_count() - index - 1);
+  }
+
+  // A frame-allocated local as an assembly operand.
+  MemOperand LocalAt(int index) {
+    ASSERT(0 <= index);
+    ASSERT(index < local_count());
+    return MemOperand(fp, kLocal0Offset - index * kPointerSize);
+  }
+
+  // Push a copy of the value of a local frame slot on top of the frame.
+  void PushLocalAt(int index) {
+    PushFrameSlotAt(local0_index() + index);
+  }
+
+  // Push the value of a local frame slot on top of the frame and invalidate
+  // the local slot.  The slot should be written to before trying to read
+  // from it again.
+  void TakeLocalAt(int index) {
+    TakeFrameSlotAt(local0_index() + index);
+  }
+
+  // Store the top value on the virtual frame into a local frame slot.  The
+  // value is left in place on top of the frame.
+  void StoreToLocalAt(int index) {
+    StoreToFrameSlotAt(local0_index() + index);
+  }
+
+  // Push the address of the receiver slot on the frame.
+  void PushReceiverSlotAddress();
+
+  // The function frame slot.
+  MemOperand Function() { return MemOperand(fp, kFunctionOffset); }
+
+  // Push the function on top of the frame.
+  void PushFunction() { PushFrameSlotAt(function_index()); }
+
+  // The context frame slot.
+  MemOperand Context() { return MemOperand(fp, kContextOffset); }
+
+  // Save the value of the context register (cp) to the context frame slot.
+  void SaveContextRegister();
+
+  // Restore the context register (cp) from the value of the context frame
+  // slot.
+  void RestoreContextRegister();
+
+  // A parameter as an assembly operand.
+  MemOperand ParameterAt(int index) {
+    // Index -1 corresponds to the receiver.
+    ASSERT(-1 <= index);  // -1 is the receiver.
+    ASSERT(index <= parameter_count());
+    return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize);
+  }
+
+  // Push a copy of the value of a parameter frame slot on top of the frame.
+  void PushParameterAt(int index) {
+    PushFrameSlotAt(param0_index() + index);
+  }
+
+  // Push the value of a parameter frame slot on top of the frame and
+  // invalidate the parameter slot.  The slot should be written to before
+  // trying to read from it again.
+  void TakeParameterAt(int index) {
+    TakeFrameSlotAt(param0_index() + index);
+  }
+
+  // Store the top value on the virtual frame into a parameter frame slot.
+  // The value is left in place on top of the frame.
+  void StoreToParameterAt(int index) {
+    StoreToFrameSlotAt(param0_index() + index);
+  }
+
+  // The receiver frame slot.
+  MemOperand Receiver() { return ParameterAt(-1); }
+
+  // Push a try-catch or try-finally handler on top of the virtual frame.
+  void PushTryHandler(HandlerType type);
+
+  // Call stub given the number of arguments it expects on (and
+  // removes from) the stack.
+  void CallStub(CodeStub* stub, int arg_count) {
+    PrepareForCall(arg_count, arg_count);
+    RawCallStub(stub);
+  }
+
+  // Call stub that expects its argument in r0.  The argument is given
+  // as a result which must be the register r0.
+  void CallStub(CodeStub* stub, Result* arg);
+
+  // Call stub that expects its arguments in r1 and r0.  The arguments
+  // are given as results which must be the appropriate registers.
+  void CallStub(CodeStub* stub, Result* arg0, Result* arg1);
+
+  // Call runtime given the number of arguments expected on (and
+  // removed from) the stack.
+  void CallRuntime(Runtime::Function* f, int arg_count);
+  void CallRuntime(Runtime::FunctionId id, int arg_count);
+
+  // Invoke builtin given the number of arguments it expects on (and
+  // removes from) the stack.
+  void InvokeBuiltin(Builtins::JavaScript id,
+                     InvokeJSFlags flag,
+                     Result* arg_count_register,
+                     int arg_count);
+
+  // Call into an IC stub given the number of arguments it removes
+  // from the stack.  Register arguments are passed as results and
+  // consumed by the call.
+  void CallCodeObject(Handle<Code> ic,
+                      RelocInfo::Mode rmode,
+                      int dropped_args);
+  void CallCodeObject(Handle<Code> ic,
+                      RelocInfo::Mode rmode,
+                      Result* arg,
+                      int dropped_args);
+  void CallCodeObject(Handle<Code> ic,
+                      RelocInfo::Mode rmode,
+                      Result* arg0,
+                      Result* arg1,
+                      int dropped_args);
+
+  // Drop a number of elements from the top of the expression stack.  May
+  // emit code to affect the physical frame.  Does not clobber any registers
+  // excepting possibly the stack pointer.
+  void Drop(int count);
+
+  // Drop one element.
+  void Drop() { Drop(1); }
+
+  // Duplicate the top element of the frame.
+  void Dup() { PushFrameSlotAt(element_count() - 1); }
+
+  // Pop an element from the top of the expression stack.  Returns a
+  // Result, which may be a constant or a register.
+  Result Pop();
+
+  // Pop and save an element from the top of the expression stack and
+  // emit a corresponding pop instruction.
+  void EmitPop(Register reg);
+
+  // Push an element on top of the expression stack and emit a
+  // corresponding push instruction.
+  void EmitPush(Register reg);
+
+  // Push an element on the virtual frame.
+  void Push(Register reg);
+  void Push(Handle<Object> value);
+  void Push(Smi* value) { Push(Handle<Object>(value)); }
+
+  // Pushing a result invalidates it (its contents become owned by the frame).
+  void Push(Result* result) {
+    if (result->is_register()) {
+      Push(result->reg());
+    } else {
+      ASSERT(result->is_constant());
+      Push(result->handle());
+    }
+    result->Unuse();
+  }
+
+  // Nip removes zero or more elements from immediately below the top
+  // of the frame, leaving the previous top-of-frame value on top of
+  // the frame.  Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
+  void Nip(int num_dropped);
+
+ private:
+  static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
+  static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
+  static const int kContextOffset = StandardFrameConstants::kContextOffset;
+
+  static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
+  static const int kPreallocatedElements = 5 + 8;  // 8 expression stack slots.
+
+  ZoneList<FrameElement> elements_;
+
+  // The index of the element that is at the processor's stack pointer
+  // (the sp register).
+  int stack_pointer_;
+
+  // The index of the register frame element using each register, or
+  // kIllegalIndex if a register is not on the frame.
+  int register_locations_[RegisterAllocator::kNumRegisters];
+
+  // The number of frame-allocated locals and parameters respectively.
+  int parameter_count() { return cgen()->scope()->num_parameters(); }
+  int local_count() { return cgen()->scope()->num_stack_slots(); }
+
+  // The index of the element that is at the processor's frame pointer
+  // (the fp register).  The parameters, receiver, function, and context
+  // are below the frame pointer.
+  int frame_pointer() { return parameter_count() + 3; }
+
+  // The index of the first parameter.  The receiver lies below the first
+  // parameter.
+  int param0_index() { return 1; }
+
+  // The index of the context slot in the frame.  It is immediately
+  // below the frame pointer.
+  int context_index() { return frame_pointer() - 1; }
+
+  // The index of the function slot in the frame.  It is below the frame
+  // pointer and context slot.
+  int function_index() { return frame_pointer() - 2; }
+
+  // The index of the first local.  Between the frame pointer and the
+  // locals lies the return address.
+  int local0_index() { return frame_pointer() + 2; }
+
+  // The index of the base of the expression stack.
+  int expression_base_index() { return local0_index() + local_count(); }
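+
+  // For example, a function with two parameters and one local has element
+  // indices: receiver 0, parameters 1 and 2, function 3, context 4, frame
+  // pointer 5, return address 6, the local at 7, and the expression stack
+  // starting at index 8.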
+
+  // Convert a frame index into a frame pointer relative offset into the
+  // actual stack.
+  int fp_relative(int index) {
+    ASSERT(index < element_count());
+    ASSERT(frame_pointer() < element_count());  // FP is on the frame.
+    return (frame_pointer() - index) * kPointerSize;
+  }
+
+  // Record an occurrence of a register in the virtual frame.  This has the
+  // effect of incrementing the register's external reference count and
+  // of updating the index of the register's location in the frame.
+  void Use(Register reg, int index) {
+    ASSERT(!is_used(reg));
+    set_register_location(reg, index);
+    cgen()->allocator()->Use(reg);
+  }
+
+  // Record that a register reference has been dropped from the frame.  This
+  // decrements the register's external reference count and invalidates the
+  // index of the register's location in the frame.
+  void Unuse(Register reg) {
+    ASSERT(is_used(reg));
+    set_register_location(reg, kIllegalIndex);
+    cgen()->allocator()->Unuse(reg);
+  }
+
+  // Spill the element at a particular index---write it to memory if
+  // necessary, free any associated register, and forget its value if
+  // constant.
+  void SpillElementAt(int index);
+
+  // Sync the element at a particular index.  If it is a register or
+  // constant that disagrees with the value on the stack, write it to memory.
+  // Keep the element type as register or constant, and clear the dirty bit.
+  void SyncElementAt(int index);
+
+  // Sync the range of elements in [begin, end] with memory.
+  void SyncRange(int begin, int end);
+
+  // Sync a single unsynced element that lies beneath or at the stack pointer.
+  void SyncElementBelowStackPointer(int index);
+
+  // Sync a single unsynced element that lies just above the stack pointer.
+  void SyncElementByPushing(int index);
+
+  // Push a copy of a frame slot (typically a local or parameter) on top of
+  // the frame.
+  void PushFrameSlotAt(int index);
+
+  // Push the value of a frame slot (typically a local or parameter) on
+  // top of the frame and invalidate the slot.
+  void TakeFrameSlotAt(int index);
+
+  // Store the value on top of the frame to a frame slot (typically a local
+  // or parameter).
+  void StoreToFrameSlotAt(int index);
+
+  // Spill all elements in registers. Spill the top spilled_args elements
+  // on the frame.  Sync all other frame elements.
+  // Then drop dropped_args elements from the virtual frame, to match
+  // the effect of an upcoming call that will drop them from the stack.
+  void PrepareForCall(int spilled_args, int dropped_args);
+
+  // Move frame elements currently in registers or constants, that
+  // should be in memory in the expected frame, to memory.
+  void MergeMoveRegistersToMemory(VirtualFrame* expected);
+
+  // Make the register-to-register moves necessary to
+  // merge this frame with the expected frame.
+  // Register to memory moves must already have been made,
+  // and memory to register moves must follow this call.
+  // This is because some new memory-to-register moves are
+  // created in order to break cycles of register moves.
+  // Used in the implementation of MergeTo().
+  void MergeMoveRegistersToRegisters(VirtualFrame* expected);
+
+  // Make the memory-to-register and constant-to-register moves
+  // needed to make this frame equal the expected frame.
+  // Called after all register-to-memory and register-to-register
+  // moves have been made.  After this function returns, the frames
+  // should be equal.
+  void MergeMoveMemoryToRegisters(VirtualFrame* expected);
+
+  // Invalidates a frame slot (puts an invalid frame element in it).
+  // Copies on the frame are correctly handled, and if this slot was
+  // the backing store of copies, the index of the new backing store
+  // is returned.  Otherwise, returns kIllegalIndex.
+  // Register counts are correctly updated.
+  int InvalidateFrameSlotAt(int index);
+
+  // Call a code stub that has already been prepared for calling (via
+  // PrepareForCall).
+  void RawCallStub(CodeStub* stub);
+
+  // Calls a code object which has already been prepared for calling
+  // (via PrepareForCall).
+  void RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
+
+  bool Equals(VirtualFrame* other);
+
+  // Classes that need raw access to the elements_ array.
+  friend class DeferredCode;
+  friend class JumpTarget;
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_ARM_VIRTUAL_FRAME_ARM_H_
diff --git a/src/array.js b/src/array.js
new file mode 100644
index 0000000..f8e63d0
--- /dev/null
+++ b/src/array.js
@@ -0,0 +1,1120 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file relies on the fact that the following declarations have been made
+// in runtime.js:
+// const $Array = global.Array;
+
+// -------------------------------------------------------------------
+
+// Global list of arrays visited during toString, toLocaleString and
+// join invocations.
+var visited_arrays = new $Array();
+
+
+// Gets a sorted array of array keys.  Useful for operations on sparse
+// arrays.  Dupes have not been removed.
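+//
+// The intervals argument mixes plain keys with encoded ranges: a negative
+// entry -1 - j marks the start of a range at index j whose length is the
+// following entry.  For example [-1, 3, 10] would be read as the indices
+// 0, 1 and 2 plus the single index 10.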
+function GetSortedArrayKeys(array, intervals) {
+  var length = intervals.length;
+  var keys = [];
+  for (var k = 0; k < length; k++) {
+    var key = intervals[k];
+    if (key < 0) {
+      var j = -1 - key;
+      var limit = j + intervals[++k];
+      for (; j < limit; j++) {
+        var e = array[j];
+        if (!IS_UNDEFINED(e) || j in array) {
+          keys.push(j);
+        }
+      }
+    } else {
+      // The case where key is undefined also ends here.
+      if (!IS_UNDEFINED(key)) {
+        var e = array[key];
+        if (!IS_UNDEFINED(e) || key in array) {
+          keys.push(key);
+        }
+      }
+    }
+  }
+  keys.sort(function(a, b) { return a - b; });
+  return keys;
+}
+
+
+// Optimized for sparse arrays if separator is ''.
+function SparseJoin(array, len, convert) {
+  var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
+  var builder = new StringBuilder();
+  var last_key = -1;
+  var keys_length = keys.length;
+  for (var i = 0; i < keys_length; i++) {
+    var key = keys[i];
+    if (key != last_key) {
+      var e = array[key];
+      builder.add(convert(e));
+      last_key = key;
+    }
+  }
+  return builder.generate();
+}
+
+
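+// Heuristic: the sparse code paths are only worthwhile for genuine arrays
+// of more than 1000 elements that appear to be less than about 25%
+// populated (or whose length is not a smi).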
+function UseSparseVariant(object, length, is_array) {
+  return is_array &&
+      length > 1000 &&
+      (!%_IsSmi(length) ||
+       %EstimateNumberOfElements(object) < (length >> 2));
+}
+
+
+function Join(array, length, separator, convert) {
+  if (length == 0) return '';
+
+  var is_array = IS_ARRAY(array);
+
+  if (is_array) {
+    // If the array is cyclic, return the empty string for already
+    // visited arrays.
+    if (!%PushIfAbsent(visited_arrays, array)) return '';
+  }
+
+  // Attempt to convert the elements.
+  try {
+    if (UseSparseVariant(array, length, is_array) && separator === '') {
+      return SparseJoin(array, length, convert);
+    }
+
+    // Fast case for one-element arrays.
+    if (length == 1) {
+      var e = array[0];
+      if (!IS_UNDEFINED(e) || (0 in array)) {
+        return convert(e);
+      }
+    }
+
+    var builder = new StringBuilder();
+
+    for (var i = 0; i < length; i++) {
+      var e = array[i];
+      if (i != 0) builder.add(separator);
+      if (!IS_UNDEFINED(e) || (i in array)) {
+        builder.add(convert(e));
+      }
+    }
+    return builder.generate();
+  } finally {
+    // Make sure to pop the visited array no matter what happens.
+    if (is_array) visited_arrays.pop();
+  }
+}
+
+
+function ConvertToString(e) {
+  if (e == null) return '';
+  else return ToString(e);
+}
+
+
+function ConvertToLocaleString(e) {
+  if (e == null) return '';
+  else {
+    // e_obj's toLocaleString might be overwritten; check that it is a
+    // function and call ToString if it is not.  See issue 877615.
+    var e_obj = ToObject(e);
+    if (IS_FUNCTION(e_obj.toLocaleString))
+      return e_obj.toLocaleString();
+    else
+      return ToString(e);
+  }
+}
+
+
+// This is part of the optimized splice implementation, using special array
+// operations to handle sparse arrays in a sensible fashion.
+function SmartSlice(array, start_i, del_count, len, deleted_elements) {
+  // Move deleted elements to a new array (the return value from splice).
+  // The intervals array can contain both plain keys and intervals; see the
+  // comment above GetSortedArrayKeys.
+  var intervals = %GetArrayKeys(array, start_i + del_count);
+  var length = intervals.length;
+  for (var k = 0; k < length; k++) {
+    var key = intervals[k];
+    if (key < 0) {
+      var j = -1 - key;
+      var interval_limit = j + intervals[++k];
+      if (j < start_i) {
+        j = start_i;
+      }
+      for (; j < interval_limit; j++) {
+        // ECMA-262 15.4.4.12 line 10.  The spec could also be
+        // interpreted such that %HasLocalProperty would be the
+        // appropriate test.  We follow KJS in consulting the
+        // prototype.
+        var current = array[j];
+        if (!IS_UNDEFINED(current) || j in array) {
+          deleted_elements[j - start_i] = current;
+        }
+      }
+    } else {
+      if (!IS_UNDEFINED(key)) {
+        if (key >= start_i) {
+          // ECMA-262 15.4.4.12 line 10.  The spec could also be
+          // interpreted such that %HasLocalProperty would be the
+          // appropriate test.  We follow KJS in consulting the
+          // prototype.
+          var current = array[key];
+          if (!IS_UNDEFINED(current) || key in array) {
+            deleted_elements[key - start_i] = current;
+          }
+        }
+      }
+    }
+  }
+}
+
+
+// This is part of the optimized splice implementation, using special array
+// operations to handle sparse arrays in a sensible fashion.
+function SmartMove(array, start_i, del_count, len, num_additional_args) {
+  // Move data to new array.
+  var new_array = new $Array(len - del_count + num_additional_args);
+  var intervals = %GetArrayKeys(array, len);
+  var length = intervals.length;
+  for (var k = 0; k < length; k++) {
+    var key = intervals[k];
+    if (key < 0) {
+      var j = -1 - key;
+      var interval_limit = j + intervals[++k];
+      while (j < start_i && j < interval_limit) {
+        // The spec could also be interpreted such that
+        // %HasLocalProperty would be the appropriate test.  We follow
+        // KJS in consulting the prototype.
+        var current = array[j];
+        if (!IS_UNDEFINED(current) || j in array) {
+          new_array[j] = current;
+        }
+        j++;
+      }
+      j = start_i + del_count;
+      while (j < interval_limit) {
+        // ECMA-262 15.4.4.12 lines 24 and 41.  The spec could also be
+        // interpreted such that %HasLocalProperty would be the
+        // appropriate test.  We follow KJS in consulting the
+        // prototype.
+        var current = array[j];
+        if (!IS_UNDEFINED(current) || j in array) {
+          new_array[j - del_count + num_additional_args] = current;
+        }
+        j++;
+      }
+    } else {
+      if (!IS_UNDEFINED(key)) {
+        if (key < start_i) {
+          // The spec could also be interpreted such that
+          // %HasLocalProperty would be the appropriate test.  We follow
+          // KJS in consulting the prototype.
+          var current = array[key];
+          if (!IS_UNDEFINED(current) || key in array) {
+            new_array[key] = current;
+          }
+        } else if (key >= start_i + del_count) {
+          // ECMA-262 15.4.4.12 lines 24 and 41.  The spec could also
+          // be interpreted such that %HasLocalProperty would be the
+          // appropriate test.  We follow KJS in consulting the
+          // prototype.
+          var current = array[key];
+          if (!IS_UNDEFINED(current) || key in array) {
+            new_array[key - del_count + num_additional_args] = current;
+          }
+        }
+      }
+    }
+  }
+  // Move the contents of new_array into this array.
+  %MoveArrayContents(new_array, array);
+}
+
+
+// This is part of the old simple-minded splice.  We are using it either
+// because the receiver is not an array (so we have no choice) or because we
+// know we are not deleting or moving a lot of elements.
+function SimpleSlice(array, start_i, del_count, len, deleted_elements) {
+  for (var i = 0; i < del_count; i++) {
+    var index = start_i + i;
+    // The spec could also be interpreted such that %HasLocalProperty
+    // would be the appropriate test.  We follow KJS in consulting the
+    // prototype.
+    var current = array[index];
+    if (!IS_UNDEFINED(current) || index in array)
+      deleted_elements[i] = current;
+  }
+}
+
+
+function SimpleMove(array, start_i, del_count, len, num_additional_args) {
+  if (num_additional_args !== del_count) {
+    // Move the existing elements after the elements to be deleted
+    // to the right position in the resulting array.
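+    // Copy from the back when the gap grows and from the front when it
+    // shrinks, so elements are not overwritten before they are moved.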
+    if (num_additional_args > del_count) {
+      for (var i = len - del_count; i > start_i; i--) {
+        var from_index = i + del_count - 1;
+        var to_index = i + num_additional_args - 1;
+        // The spec could also be interpreted such that
+        // %HasLocalProperty would be the appropriate test.  We follow
+        // KJS in consulting the prototype.
+        var current = array[from_index];
+        if (!IS_UNDEFINED(current) || from_index in array) {
+          array[to_index] = current;
+        } else {
+          delete array[to_index];
+        }
+      }
+    } else {
+      for (var i = start_i; i < len - del_count; i++) {
+        var from_index = i + del_count;
+        var to_index = i + num_additional_args;
+        // The spec could also be interpreted such that
+        // %HasLocalProperty would be the appropriate test.  We follow
+        // KJS in consulting the prototype.
+        var current = array[from_index];
+        if (!IS_UNDEFINED(current) || from_index in array) {
+          array[to_index] = current;
+        } else {
+          delete array[to_index];
+        }
+      }
+      for (var i = len; i > len - del_count + num_additional_args; i--) {
+        delete array[i - 1];
+      }
+    }
+  }
+}
+
+
+// -------------------------------------------------------------------
+
+
+function ArrayToString() {
+  if (!IS_ARRAY(this)) {
+    throw new $TypeError('Array.prototype.toString is not generic');
+  }
+  return Join(this, this.length, ',', ConvertToString);
+}
+
+
+function ArrayToLocaleString() {
+  if (!IS_ARRAY(this)) {
+    throw new $TypeError('Array.prototype.toString is not generic');
+  }
+  return Join(this, this.length, ',', ConvertToLocaleString);
+}
+
+
+function ArrayJoin(separator) {
+  if (IS_UNDEFINED(separator)) separator = ',';
+  else separator = ToString(separator);
+  return Join(this, ToUint32(this.length), separator, ConvertToString);
+}
+
+
+// Removes the last element from the array and returns it. See
+// ECMA-262, section 15.4.4.6.
+function ArrayPop() {
+  var n = ToUint32(this.length);
+  if (n == 0) {
+    this.length = n;
+    return;
+  }
+  n--;
+  var value = this[n];
+  this.length = n;
+  delete this[n];
+  return value;
+}
+
+
+// Appends the arguments to the end of the array and returns the new
+// length of the array. See ECMA-262, section 15.4.4.7.
+function ArrayPush() {
+  var n = ToUint32(this.length);
+  var m = %_ArgumentsLength();
+  for (var i = 0; i < m; i++) {
+    this[i+n] = %_Arguments(i);
+  }
+  this.length = n + m;
+  return this.length;
+}
+
+
+function ArrayConcat(arg1) {  // length == 1
+  // TODO: can we just use arguments?
+  var arg_count = %_ArgumentsLength();
+  var arrays = new $Array(1 + arg_count);
+  arrays[0] = this;
+  for (var i = 0; i < arg_count; i++) {
+    arrays[i + 1] = %_Arguments(i);
+  }
+
+  return %ArrayConcat(arrays);
+}
+
+
+// For implementing reverse() on large, sparse arrays.
+function SparseReverse(array, len) {
+  var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
+  var high_counter = keys.length - 1;
+  var low_counter = 0;
+  while (low_counter <= high_counter) {
+    var i = keys[low_counter];
+    var j = keys[high_counter];
+
+    var j_complement = len - j - 1;
+    var low, high;
+
+    if (j_complement <= i) {
+      high = j;
+      while (keys[--high_counter] == j);
+      low = j_complement;
+    }
+    if (j_complement >= i) {
+      low = i;
+      while (keys[++low_counter] == i);
+      high = len - i - 1;
+    }
+
+    var current_i = array[low];
+    if (!IS_UNDEFINED(current_i) || low in array) {
+      var current_j = array[high];
+      if (!IS_UNDEFINED(current_j) || high in array) {
+        array[low] = current_j;
+        array[high] = current_i;
+      } else {
+        array[high] = current_i;
+        delete array[low];
+      }
+    } else {
+      var current_j = array[high];
+      if (!IS_UNDEFINED(current_j) || high in array) {
+        array[low] = current_j;
+        delete array[high];
+      }
+    }
+  }
+}
+
+
+function ArrayReverse() {
+  var j = ToUint32(this.length) - 1;
+
+  if (UseSparseVariant(this, j, IS_ARRAY(this))) {
+    SparseReverse(this, j+1);
+    return this;
+  }
+
+  for (var i = 0; i < j; i++, j--) {
+    var current_i = this[i];
+    if (!IS_UNDEFINED(current_i) || i in this) {
+      var current_j = this[j];
+      if (!IS_UNDEFINED(current_j) || j in this) {
+        this[i] = current_j;
+        this[j] = current_i;
+      } else {
+        this[j] = current_i;
+        delete this[i];
+      }
+    } else {
+      var current_j = this[j];
+      if (!IS_UNDEFINED(current_j) || j in this) {
+        this[i] = current_j;
+        delete this[j];
+      }
+    }
+  }
+  return this;
+}
+
+
+function ArrayShift() {
+  var len = ToUint32(this.length);
+
+  if (len === 0) {
+    this.length = 0;
+    return;
+  }
+
+  var first = this[0];
+
+  if (IS_ARRAY(this))
+    SmartMove(this, 0, 1, len, 0);
+  else
+    SimpleMove(this, 0, 1, len, 0);
+
+  this.length = len - 1;
+
+  return first;
+}
+
+
+function ArrayUnshift(arg1) {  // length == 1
+  var len = ToUint32(this.length);
+  var num_arguments = %_ArgumentsLength();
+
+  if (IS_ARRAY(this))
+    SmartMove(this, 0, 0, len, num_arguments);
+  else
+    SimpleMove(this, 0, 0, len, num_arguments);
+
+  for (var i = 0; i < num_arguments; i++) {
+    this[i] = %_Arguments(i);
+  }
+
+  this.length = len + num_arguments;
+
+  return len + num_arguments;
+}
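+
+// For example, given a = [3, 4]: a.unshift(1, 2) returns the new length 4 and
+// leaves a as [1, 2, 3, 4]; a.shift() then returns 1 and a becomes [2, 3, 4].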
+
+
+function ArraySlice(start, end) {
+  var len = ToUint32(this.length);
+  var start_i = TO_INTEGER(start);
+  var end_i = len;
+
+  if (end !== void 0) end_i = TO_INTEGER(end);
+
+  if (start_i < 0) {
+    start_i += len;
+    if (start_i < 0) start_i = 0;
+  } else {
+    if (start_i > len) start_i = len;
+  }
+
+  if (end_i < 0) {
+    end_i += len;
+    if (end_i < 0) end_i = 0;
+  } else {
+    if (end_i > len) end_i = len;
+  }
+
+  var result = [];
+
+  if (end_i < start_i) return result;
+
+  if (IS_ARRAY(this)) {
+    SmartSlice(this, start_i, end_i - start_i, len, result);
+  } else {
+    SimpleSlice(this, start_i, end_i - start_i, len, result);
+  }
+
+  result.length = end_i - start_i;
+
+  return result;
+}
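+
+// A few worked examples of the index clamping above:
+//   [1, 2, 3, 4, 5].slice(-2)     // start_i = 5 - 2 = 3     => [4, 5]
+//   [1, 2, 3, 4, 5].slice(1, -1)  // end_i   = 5 - 1 = 4     => [2, 3, 4]
+//   [1, 2, 3].slice(5)            // start_i clamped to len  => []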
+
+
+function ArraySplice(start, delete_count) {
+  var num_arguments = %_ArgumentsLength();
+
+  // SpiderMonkey and KJS return undefined in the case where no
+  // arguments are given instead of using the implicit undefined
+  // arguments.  This does not follow ECMA-262, but we do the same for
+  // compatibility.
+  if (num_arguments == 0) return;
+
+  var len = ToUint32(this.length);
+  var start_i = TO_INTEGER(start);
+
+  if (start_i < 0) {
+    start_i += len;
+    if (start_i < 0) start_i = 0;
+  } else {
+    if (start_i > len) start_i = len;
+  }
+
+  // SpiderMonkey and KJS treat the case where no delete count is
+  // given differently from when an undefined delete count is given.
+  // This does not follow ECMA-262, but we do the same for
+  // compatibility.
+  var del_count = 0;
+  if (num_arguments > 1) {
+    del_count = TO_INTEGER(delete_count);
+    if (del_count < 0) del_count = 0;
+    if (del_count > len - start_i) del_count = len - start_i;
+  } else {
+    del_count = len - start_i;
+  }
+
+  var deleted_elements = [];
+  deleted_elements.length = del_count;
+
+  // Number of elements to add.
+  var num_additional_args = 0;
+  if (num_arguments > 2) {
+    num_additional_args = num_arguments - 2;
+  }
+
+  var use_simple_splice = true;
+
+  if (IS_ARRAY(this) && num_additional_args !== del_count) {
+    // If we are only deleting/moving a few things near the end of the
+    // array then the simple version is going to be faster, because it
+    // doesn't touch most of the array.
+    var estimated_non_hole_elements = %EstimateNumberOfElements(this);
+    if (len > 20 && (estimated_non_hole_elements >> 2) < (len - start_i)) {
+      use_simple_splice = false;
+    }
+  }
+
+  if (use_simple_splice) {
+    SimpleSlice(this, start_i, del_count, len, deleted_elements);
+    SimpleMove(this, start_i, del_count, len, num_additional_args);
+  } else {
+    SmartSlice(this, start_i, del_count, len, deleted_elements);
+    SmartMove(this, start_i, del_count, len, num_additional_args);
+  }
+
+  // Insert the arguments into the resulting array in
+  // place of the deleted elements.
+  var i = start_i;
+  var arguments_index = 2;
+  var arguments_length = %_ArgumentsLength();
+  while (arguments_index < arguments_length) {
+    this[i++] = %_Arguments(arguments_index++);
+  }
+  this.length = len - del_count + num_additional_args;
+
+  // Return the deleted elements.
+  return deleted_elements;
+}
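+
+// Worked examples of the compatibility cases described above:
+//   [1, 2, 3].splice()              // returns undefined, array unchanged
+//   [1, 2, 3, 4].splice(1)          // no delete count: removes [2, 3, 4]
+//   [1, 2, 3, 4].splice(1, void 0)  // explicit undefined: deletes nothing, []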
+
+
+function ArraySort(comparefn) {
+  // In-place QuickSort algorithm.
+  // For short (length <= 22) arrays, insertion sort is used for efficiency.
+
+  var custom_compare = IS_FUNCTION(comparefn);
+
+  function Compare(x,y) {
+    // Assume the comparefn, if any, is a consistent comparison function.
+    // If it isn't, we are allowed arbitrary behavior by ECMA 15.4.4.11.
+    if (x === y) return 0;
+    if (custom_compare) {
+      // Don't call directly to avoid exposing the builtin's global object.
+      return comparefn.call(null, x, y);
+    }
+    if (%_IsSmi(x) && %_IsSmi(y)) {
+      return %SmiLexicographicCompare(x, y);
+    }
+    x = ToString(x);
+    y = ToString(y);
+    if (x == y) return 0;
+    else return x < y ? -1 : 1;
+  };
+
+  function InsertionSort(a, from, to) {
+    for (var i = from + 1; i < to; i++) {
+      var element = a[i];
+      // Pre-convert the element to a string for comparison if we know
+      // it will happen on each compare anyway.
+      var key =
+          (custom_compare || %_IsSmi(element)) ? element : ToString(element);
+      // place element in a[from..i[
+      // binary search
+      var min = from;
+      var max = i;
+      // The search interval is a[min..max[
+      while (min < max) {
+        var mid = min + ((max - min) >> 1);
+        var order = Compare(a[mid], key);
+        if (order == 0) {
+          min = max = mid;
+          break;
+        }
+        if (order < 0) {
+          min = mid + 1;
+        } else {
+          max = mid;
+        }
+      }
+      // place element at position min==max.
+      for (var j = i; j > min; j--) {
+        a[j] = a[j - 1];
+      }
+      a[min] = element;
+    }
+  }
+
+  function QuickSort(a, from, to) {
+    // Insertion sort is faster for short arrays.
+    if (to - from <= 22) {
+      InsertionSort(a, from, to);
+      return;
+    }
+    var pivot_index = $floor($random() * (to - from)) + from;
+    var pivot = a[pivot_index];
+    // Pre-convert the element to a string for comparison if we know
+    // it will happen on each compare anyway.
+    var pivot_key =
+      (custom_compare || %_IsSmi(pivot)) ? pivot : ToString(pivot);
+    // Issue 95: Keep the pivot element out of the comparisons to avoid
+    // infinite recursion if comparefn(pivot, pivot) != 0.
+    a[pivot_index] = a[from];
+    a[from] = pivot;
+    var low_end = from;   // Upper bound of the elements lower than pivot.
+    var high_start = to;  // Lower bound of the elements greater than pivot.
+    // From low_end to i are elements equal to pivot.
+    // From i to high_start are elements that haven't been compared yet.
+    for (var i = from + 1; i < high_start; ) {
+      var element = a[i];
+      var order = Compare(element, pivot_key);
+      if (order < 0) {
+        a[i] = a[low_end];
+        a[low_end] = element;
+        i++;
+        low_end++;
+      } else if (order > 0) {
+        high_start--;
+        a[i] = a[high_start];
+        a[high_start] = element;
+      } else {  // order == 0
+        i++;
+      }
+    }
+    QuickSort(a, from, low_end);
+    QuickSort(a, high_start, to);
+  }
+
+  var length;
+
+  // Copies elements in the range 0..length from obj's prototype chain
+  // to obj itself, if obj has holes. Returns one more than the maximal index
+  // of a prototype property.
+  function CopyFromPrototype(obj, length) {
+    var max = 0;
+    for (var proto = obj.__proto__; proto; proto = proto.__proto__) {
+      var indices = %GetArrayKeys(proto, length);
+      if (indices.length > 0) {
+        if (indices[0] == -1) {
+          // It's an interval.
+          var proto_length = indices[1];
+          for (var i = 0; i < proto_length; i++) {
+            if (!obj.hasOwnProperty(i) && proto.hasOwnProperty(i)) {
+              obj[i] = proto[i];
+              if (i >= max) { max = i + 1; }
+            }
+          }
+        } else {
+          for (var i = 0; i < indices.length; i++) {
+            var index = indices[i];
+            if (!IS_UNDEFINED(index) &&
+                !obj.hasOwnProperty(index) && proto.hasOwnProperty(index)) {
+              obj[index] = proto[index];
+              if (index >= max) { max = index + 1; }
+            }
+          }
+        }
+      }
+    }
+    return max;
+  }
+
+  // Set a value of "undefined" on all indices in the range from..to
+  // where a prototype of obj has an element. I.e., shadow all prototype
+  // elements in that range.
+  function ShadowPrototypeElements(obj, from, to) {
+    for (var proto = obj.__proto__; proto; proto = proto.__proto__) {
+      var indices = %GetArrayKeys(proto, to);
+      if (indices.length > 0) {
+        if (indices[0] == -1) {
+          // It's an interval.
+          var proto_length = indices[1];
+          for (var i = from; i < proto_length; i++) {
+            if (proto.hasOwnProperty(i)) {
+              obj[i] = void 0;
+            }
+          }
+        } else {
+          for (var i = 0; i < indices.length; i++) {
+            var index = indices[i];
+            if (!IS_UNDEFINED(index) && from <= index &&
+                proto.hasOwnProperty(index)) {
+              obj[index] = void 0;
+            }
+          }
+        }
+      }
+    }
+  }
+
+  function SafeRemoveArrayHoles(obj) {
+    // Copy defined elements from the end to fill in all holes and undefineds
+    // in the beginning of the array.  Write undefineds and holes at the end
+    // after loop is finished.
+    var first_undefined = 0;
+    var last_defined = length - 1;
+    var num_holes = 0;
+    while (first_undefined < last_defined) {
+      // Find first undefined element.
+      while (first_undefined < last_defined &&
+             !IS_UNDEFINED(obj[first_undefined])) {
+        first_undefined++;
+      }
+      // Maintain the invariant num_holes = the number of holes in the original
+      // array with indices <= first_undefined or > last_defined.
+      if (!obj.hasOwnProperty(first_undefined)) {
+        num_holes++;
+      }
+
+      // Find last defined element.
+      while (first_undefined < last_defined &&
+             IS_UNDEFINED(obj[last_defined])) {
+        if (!obj.hasOwnProperty(last_defined)) {
+          num_holes++;
+        }
+        last_defined--;
+      }
+      if (first_undefined < last_defined) {
+        // Fill in hole or undefined.
+        obj[first_undefined] = obj[last_defined];
+        obj[last_defined] = void 0;
+      }
+    }
+    // If there were any undefineds in the entire array, first_undefined
+    // points to one past the last defined element.  Make this true if
+    // there were no undefineds, as well, so that first_undefined == number
+    // of defined elements.
+    if (!IS_UNDEFINED(obj[first_undefined])) first_undefined++;
+    // Fill in the undefineds and the holes.  There may be a hole where
+    // an undefined should be and vice versa.
+    var i;
+    for (i = first_undefined; i < length - num_holes; i++) {
+      obj[i] = void 0;
+    }
+    for (i = length - num_holes; i < length; i++) {
+      // For compatibility with WebKit, do not expose elements in the prototype.
+      if (i in obj.__proto__) {
+        obj[i] = void 0;
+      } else {
+        delete obj[i];
+      }
+    }
+
+    // Return the number of defined elements.
+    return first_undefined;
+  }
+
+  length = ToUint32(this.length);
+  if (length < 2) return this;
+
+  var is_array = IS_ARRAY(this);
+  var max_prototype_element;
+  if (!is_array) {
+    // For compatibility with JSC, we also sort elements inherited from
+    // the prototype chain on non-Array objects.
+    // We do this by copying them to this object and sorting only
+    // local elements. This is not very efficient, but sorting with
+    // inherited elements happens very, very rarely, if at all.
+    // The specification allows "implementation dependent" behavior
+    // if an element on the prototype chain has an element that
+    // might interact with sorting.
+    max_prototype_element = CopyFromPrototype(this, length);
+  }
+
+  var num_non_undefined = %RemoveArrayHoles(this, length);
+  if (num_non_undefined == -1) {
+    // There were indexed accessors in the array.  Move array holes and
+    // undefineds to the end using a Javascript function that is safe
+    // in the presence of accessors.
+    num_non_undefined = SafeRemoveArrayHoles(this);
+  }
+
+  QuickSort(this, 0, num_non_undefined);
+
+  if (!is_array && (num_non_undefined + 1 < max_prototype_element)) {
+    // For compatibility with JSC, we shadow any elements in the prototype
+    // chain that has become exposed by sort moving a hole to its position.
+    ShadowPrototypeElements(this, num_non_undefined, max_prototype_element);
+  }
+
+  return this;
+}
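+
+// Note that without a comparefn the Compare above falls back to a string
+// (lexicographic) comparison, so [10, 9, 1].sort() yields [1, 10, 9], while
+// [10, 9, 1].sort(function(a, b) { return a - b; }) yields [1, 9, 10].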
+
+
+// The following functions cannot be made efficient on sparse arrays while
+// preserving the semantics, since the calls to the receiver function can add
+// or delete elements from the array.
+function ArrayFilter(f, receiver) {
+  if (!IS_FUNCTION(f)) {
+    throw MakeTypeError('called_non_callable', [ f ]);
+  }
+  // Pull out the length so that modifications to the length in the
+  // loop will not affect the looping.
+  var length = this.length;
+  var result = [];
+  var result_length = 0;
+  for (var i = 0; i < length; i++) {
+    var current = this[i];
+    if (!IS_UNDEFINED(current) || i in this) {
+      if (f.call(receiver, current, i, this)) result[result_length++] = current;
+    }
+  }
+  return result;
+}
+
+
+function ArrayForEach(f, receiver) {
+  if (!IS_FUNCTION(f)) {
+    throw MakeTypeError('called_non_callable', [ f ]);
+  }
+  // Pull out the length so that modifications to the length in the
+  // loop will not affect the looping.
+  var length = this.length;
+  for (var i = 0; i < length; i++) {
+    var current = this[i];
+    if (!IS_UNDEFINED(current) || i in this) {
+      f.call(receiver, current, i, this);
+    }
+  }
+}
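+
+// For example, [1, , 3].forEach(f) invokes f only for indices 0 and 2: the
+// check above skips holes, while an element explicitly set to undefined is
+// still visited because 'i in this' holds for it.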
+
+
+// Executes the function once for each element present in the
+// array until it finds one where callback returns true.
+function ArraySome(f, receiver) {
+  if (!IS_FUNCTION(f)) {
+    throw MakeTypeError('called_non_callable', [ f ]);
+  }
+  // Pull out the length so that modifications to the length in the
+  // loop will not affect the looping.
+  var length = this.length;
+  for (var i = 0; i < length; i++) {
+    var current = this[i];
+    if (!IS_UNDEFINED(current) || i in this) {
+      if (f.call(receiver, current, i, this)) return true;
+    }
+  }
+  return false;
+}
+
+
+function ArrayEvery(f, receiver) {
+  if (!IS_FUNCTION(f)) {
+    throw MakeTypeError('called_non_callable', [ f ]);
+  }
+  // Pull out the length so that modifications to the length in the
+  // loop will not affect the looping.
+  var length = this.length;
+  for (var i = 0; i < length; i++) {
+    var current = this[i];
+    if (!IS_UNDEFINED(current) || i in this) {
+      if (!f.call(receiver, current, i, this)) return false;
+    }
+  }
+
+  return true;
+}
+
+
+function ArrayMap(f, receiver) {
+  if (!IS_FUNCTION(f)) {
+    throw MakeTypeError('called_non_callable', [ f ]);
+  }
+  // Pull out the length so that modifications to the length in the
+  // loop will not affect the looping.
+  var length = this.length;
+  var result = new $Array(length);
+  for (var i = 0; i < length; i++) {
+    var current = this[i];
+    if (!IS_UNDEFINED(current) || i in this) {
+      result[i] = f.call(receiver, current, i, this);
+    }
+  }
+  return result;
+}
+
+
+function ArrayIndexOf(element, index) {
+  var length = this.length;
+  if (index == null) {
+    index = 0;
+  } else {
+    index = TO_INTEGER(index);
+    // If index is negative, index from the end of the array.
+    if (index < 0) index = length + index;
+    // If index is still negative, search the entire array.
+    if (index < 0) index = 0;
+  }
+  // Lookup through the array.
+  for (var i = index; i < length; i++) {
+    var current = this[i];
+    if (!IS_UNDEFINED(current) || i in this) {
+      if (current === element) return i;
+    }
+  }
+  return -1;
+}
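+
+// For example, [1, 2, 3, 2].indexOf(2) is 1 and [1, 2, 3, 2].indexOf(2, -2)
+// is 3 (the negative start index counts from the end); the comparison uses
+// ===, so [NaN].indexOf(NaN) is -1.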
+
+
+function ArrayLastIndexOf(element, index) {
+  var length = this.length;
+  if (index == null) {
+    index = length - 1;
+  } else {
+    index = TO_INTEGER(index);
+    // If index is negative, index from end of the array.
+    if (index < 0) index = length + index;
+    // If index is still negative, do not search the array.
+    if (index < 0) index = -1;
+    else if (index >= length) index = length - 1;
+  }
+  // Lookup through the array.
+  for (var i = index; i >= 0; i--) {
+    var current = this[i];
+    if (!IS_UNDEFINED(current) || i in this) {
+      if (current === element) return i;
+    }
+  }
+  return -1;
+}
+
+
+function ArrayReduce(callback, current) {
+  if (!IS_FUNCTION(callback)) {
+    throw MakeTypeError('called_non_callable', [callback]);
+  }
+  // Pull out the length so that modifications to the length in the
+  // loop will not affect the looping.
+  var length = this.length;
+  var i = 0;
+
+  find_initial: if (%_ArgumentsLength() < 2) {
+    for (; i < length; i++) {
+      current = this[i];
+      if (!IS_UNDEFINED(current) || i in this) {
+        i++;
+        break find_initial;
+      }
+    }
+    throw MakeTypeError('reduce_no_initial', []);
+  }
+
+  for (; i < length; i++) {
+    var element = this[i];
+    if (!IS_UNDEFINED(element) || i in this) {
+      current = callback.call(null, current, element, i, this);
+    }
+  }
+  return current;
+}
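+
+// For example, [1, 2, 3, 4].reduce(function(a, x) { return a + x; }) is 10
+// (the first element seeds the accumulator), the same call with an initial
+// value of 100 is 110, and [].reduce(f) throws a TypeError because no initial
+// value can be found.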
+
+function ArrayReduceRight(callback, current) {
+  if (!IS_FUNCTION(callback)) {
+    throw MakeTypeError('called_non_callable', [callback]);
+  }
+  var i = this.length - 1;
+
+  find_initial: if (%_ArgumentsLength() < 2) {
+    for (; i >= 0; i--) {
+      current = this[i];
+      if (!IS_UNDEFINED(current) || i in this) {
+        i--;
+        break find_initial;
+      }
+    }
+    throw MakeTypeError('reduce_no_initial', []);
+  }
+
+  for (; i >= 0; i--) {
+    var element = this[i];
+    if (!IS_UNDEFINED(element) || i in this) {
+      current = callback.call(null, current, element, i, this);
+    }
+  }
+  return current;
+}
+
+
+// -------------------------------------------------------------------
+
+
+function UpdateFunctionLengths(lengths) {
+  for (var key in lengths) {
+    %FunctionSetLength(this[key], lengths[key]);
+  }
+}
+
+
+// -------------------------------------------------------------------
+function SetupArray() {
+  // Setup non-enumerable constructor property on the Array.prototype
+  // object.
+  %SetProperty($Array.prototype, "constructor", $Array, DONT_ENUM);
+
+  // Setup non-enumerable functions of the Array.prototype object and
+  // set their names.
+  InstallFunctionsOnHiddenPrototype($Array.prototype, DONT_ENUM, $Array(
+    "toString", ArrayToString,
+    "toLocaleString", ArrayToLocaleString,
+    "join", ArrayJoin,
+    "pop", ArrayPop,
+    "push", ArrayPush,
+    "concat", ArrayConcat,
+    "reverse", ArrayReverse,
+    "shift", ArrayShift,
+    "unshift", ArrayUnshift,
+    "slice", ArraySlice,
+    "splice", ArraySplice,
+    "sort", ArraySort,
+    "filter", ArrayFilter,
+    "forEach", ArrayForEach,
+    "some", ArraySome,
+    "every", ArrayEvery,
+    "map", ArrayMap,
+    "indexOf", ArrayIndexOf,
+    "lastIndexOf", ArrayLastIndexOf,
+    "reduce", ArrayReduce,
+    "reduceRight", ArrayReduceRight));
+
+  // Manipulate the length of some of the functions to meet
+  // expectations set by ECMA-262 or Mozilla.
+  UpdateFunctionLengths({
+    ArrayFilter: 1,
+    ArrayForEach: 1,
+    ArraySome: 1,
+    ArrayEvery: 1,
+    ArrayMap: 1,
+    ArrayIndexOf: 1,
+    ArrayLastIndexOf: 1,
+    ArrayPush: 1,
+    ArrayReduce: 1,
+    ArrayReduceRight: 1
+  });
+}
+
+
+SetupArray();
diff --git a/src/assembler.cc b/src/assembler.cc
new file mode 100644
index 0000000..d81b4b0
--- /dev/null
+++ b/src/assembler.cc
@@ -0,0 +1,722 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+
+#include "v8.h"
+
+#include "arguments.h"
+#include "execution.h"
+#include "ic-inl.h"
+#include "factory.h"
+#include "runtime.h"
+#include "serialize.h"
+#include "stub-cache.h"
+#include "regexp-stack.h"
+#include "ast.h"
+#include "regexp-macro-assembler.h"
+// Include native regexp-macro-assembler.
+#ifdef V8_NATIVE_REGEXP
+#if V8_TARGET_ARCH_IA32
+#include "ia32/regexp-macro-assembler-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/regexp-macro-assembler-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/regexp-macro-assembler-arm.h"
+#else  // Unknown architecture.
+#error "Unknown architecture."
+#endif  // Target architecture.
+#endif  // V8_NATIVE_REGEXP
+
+namespace v8 {
+namespace internal {
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Label
+
+int Label::pos() const {
+  if (pos_ < 0) return -pos_ - 1;
+  if (pos_ > 0) return  pos_ - 1;
+  UNREACHABLE();
+  return 0;
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfoWriter and RelocIterator
+//
+// Encoding
+//
+// The most common modes are given single-byte encodings.  Also, it is
+// easy to identify the type of reloc info and skip unwanted modes in
+// an iteration.
+//
+// The encoding relies on the fact that there are fewer than 14
+// different relocation modes.
+//
+// embedded_object:    [6 bits pc delta] 00
+//
+// code_target:        [6 bits pc delta] 01
+//
+// position:           [6 bits pc delta] 10,
+//                     [7 bits signed data delta] 0
+//
+// statement_position: [6 bits pc delta] 10,
+//                     [7 bits signed data delta] 1
+//
+// any nondata mode:   00 [4 bits rmode] 11,  // rmode: 0..13 only
+//                     00 [6 bits pc delta]
+//
+// pc-jump:            00 1111 11,
+//                     00 [6 bits pc delta]
+//
+// pc-jump:            01 1111 11,
+// (variable length)   7 - 26 bit pc delta, written in chunks of 7
+//                     bits, the lowest 7 bits written first.
+//
+// data-jump + pos:    00 1110 11,
+//                     signed intptr_t, lowest byte written first
+//
+// data-jump + st.pos: 01 1110 11,
+//                     signed intptr_t, lowest byte written first
+//
+// data-jump + comm.:  10 1110 11,
+//                     signed intptr_t, lowest byte written first
+//
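+// For example, a CODE_TARGET record whose pc is 3 bytes past the previously
+// written record is encoded as the single byte
+// (3 << kTagBits) | kCodeTargetTag, i.e. binary 000011 01; pc deltas that do
+// not fit in the 6 available bits are preceded by a variable-length pc-jump
+// as described above.
+//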
+const int kMaxRelocModes = 14;
+
+const int kTagBits = 2;
+const int kTagMask = (1 << kTagBits) - 1;
+const int kExtraTagBits = 4;
+const int kPositionTypeTagBits = 1;
+const int kSmallDataBits = kBitsPerByte - kPositionTypeTagBits;
+
+const int kEmbeddedObjectTag = 0;
+const int kCodeTargetTag = 1;
+const int kPositionTag = 2;
+const int kDefaultTag = 3;
+
+const int kPCJumpTag = (1 << kExtraTagBits) - 1;
+
+const int kSmallPCDeltaBits = kBitsPerByte - kTagBits;
+const int kSmallPCDeltaMask = (1 << kSmallPCDeltaBits) - 1;
+
+const int kVariableLengthPCJumpTopTag = 1;
+const int kChunkBits = 7;
+const int kChunkMask = (1 << kChunkBits) - 1;
+const int kLastChunkTagBits = 1;
+const int kLastChunkTagMask = 1;
+const int kLastChunkTag = 1;
+
+
+const int kDataJumpTag = kPCJumpTag - 1;
+
+const int kNonstatementPositionTag = 0;
+const int kStatementPositionTag = 1;
+const int kCommentTag = 2;
+
+
+uint32_t RelocInfoWriter::WriteVariableLengthPCJump(uint32_t pc_delta) {
+  // Return if the pc_delta can fit in kSmallPCDeltaBits bits.
+  // Otherwise write a variable length PC jump for the bits that do
+  // not fit in the kSmallPCDeltaBits bits.
+  if (is_uintn(pc_delta, kSmallPCDeltaBits)) return pc_delta;
+  WriteExtraTag(kPCJumpTag, kVariableLengthPCJumpTopTag);
+  uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits;
+  ASSERT(pc_jump > 0);
+  // Write kChunkBits size chunks of the pc_jump.
+  for (; pc_jump > 0; pc_jump = pc_jump >> kChunkBits) {
+    byte b = pc_jump & kChunkMask;
+    *--pos_ = b << kLastChunkTagBits;
+  }
+  // Tag the last chunk so it can be identified.
+  *pos_ = *pos_ | kLastChunkTag;
+  // Return the remaining kSmallPCDeltaBits of the pc_delta.
+  return pc_delta & kSmallPCDeltaMask;
+}
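+
+// A worked example of the encoding above: for a pc delta of 0x1234, which
+// does not fit in kSmallPCDeltaBits (6) bits, the function emits the pc-jump
+// tag followed by pc_jump = 0x1234 >> 6 = 0x48 as a single 7-bit chunk,
+// (0x48 << kLastChunkTagBits) | kLastChunkTag = 0x91, and returns the low
+// six bits (0x1234 & kSmallPCDeltaMask = 0x34) for the caller to emit with
+// its own tag.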
+
+
+void RelocInfoWriter::WriteTaggedPC(uint32_t pc_delta, int tag) {
+  // Write a byte of tagged pc-delta, possibly preceded by var. length pc-jump.
+  pc_delta = WriteVariableLengthPCJump(pc_delta);
+  *--pos_ = pc_delta << kTagBits | tag;
+}
+
+
+void RelocInfoWriter::WriteTaggedData(intptr_t data_delta, int tag) {
+  *--pos_ = data_delta << kPositionTypeTagBits | tag;
+}
+
+
+void RelocInfoWriter::WriteExtraTag(int extra_tag, int top_tag) {
+  *--pos_ = top_tag << (kTagBits + kExtraTagBits) |
+            extra_tag << kTagBits |
+            kDefaultTag;
+}
+
+
+void RelocInfoWriter::WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag) {
+  // Write two-byte tagged pc-delta, possibly preceded by var. length pc-jump.
+  pc_delta = WriteVariableLengthPCJump(pc_delta);
+  WriteExtraTag(extra_tag, 0);
+  *--pos_ = pc_delta;
+}
+
+
+void RelocInfoWriter::WriteExtraTaggedData(intptr_t data_delta, int top_tag) {
+  WriteExtraTag(kDataJumpTag, top_tag);
+  for (int i = 0; i < kIntptrSize; i++) {
+    *--pos_ = data_delta;
+    // Signed right shift is arithmetic shift.  Tested in test-utils.cc.
+    data_delta = data_delta >> kBitsPerByte;
+  }
+}
+
+
+void RelocInfoWriter::Write(const RelocInfo* rinfo) {
+#ifdef DEBUG
+  byte* begin_pos = pos_;
+#endif
+  Counters::reloc_info_count.Increment();
+  ASSERT(rinfo->pc() - last_pc_ >= 0);
+  ASSERT(RelocInfo::NUMBER_OF_MODES < kMaxRelocModes);
+  // Use unsigned delta-encoding for pc.
+  uint32_t pc_delta = rinfo->pc() - last_pc_;
+  RelocInfo::Mode rmode = rinfo->rmode();
+
+  // The two most common modes are given small tags, and usually fit in a byte.
+  if (rmode == RelocInfo::EMBEDDED_OBJECT) {
+    WriteTaggedPC(pc_delta, kEmbeddedObjectTag);
+  } else if (rmode == RelocInfo::CODE_TARGET) {
+    WriteTaggedPC(pc_delta, kCodeTargetTag);
+  } else if (RelocInfo::IsPosition(rmode)) {
+    // Use signed delta-encoding for data.
+    intptr_t data_delta = rinfo->data() - last_data_;
+    int pos_type_tag = rmode == RelocInfo::POSITION ? kNonstatementPositionTag
+                                                    : kStatementPositionTag;
+    // Check if data is small enough to fit in a tagged byte.
+    // We cannot use is_intn because data_delta is not an int32_t.
+    if (data_delta >= -(1 << (kSmallDataBits-1)) &&
+        data_delta < 1 << (kSmallDataBits-1)) {
+      WriteTaggedPC(pc_delta, kPositionTag);
+      WriteTaggedData(data_delta, pos_type_tag);
+      last_data_ = rinfo->data();
+    } else {
+      // Otherwise, use costly encoding.
+      WriteExtraTaggedPC(pc_delta, kPCJumpTag);
+      WriteExtraTaggedData(data_delta, pos_type_tag);
+      last_data_ = rinfo->data();
+    }
+  } else if (RelocInfo::IsComment(rmode)) {
+    // Comments are normally not generated, so we use the costly encoding.
+    WriteExtraTaggedPC(pc_delta, kPCJumpTag);
+    WriteExtraTaggedData(rinfo->data() - last_data_, kCommentTag);
+    last_data_ = rinfo->data();
+  } else {
+    // For all other modes we simply use the mode as the extra tag.
+    // None of these modes need a data component.
+    ASSERT(rmode < kPCJumpTag && rmode < kDataJumpTag);
+    WriteExtraTaggedPC(pc_delta, rmode);
+  }
+  last_pc_ = rinfo->pc();
+#ifdef DEBUG
+  ASSERT(begin_pos - pos_ <= kMaxSize);
+#endif
+}
+
+
+inline int RelocIterator::AdvanceGetTag() {
+  return *--pos_ & kTagMask;
+}
+
+
+inline int RelocIterator::GetExtraTag() {
+  return (*pos_ >> kTagBits) & ((1 << kExtraTagBits) - 1);
+}
+
+
+inline int RelocIterator::GetTopTag() {
+  return *pos_ >> (kTagBits + kExtraTagBits);
+}
+
+
+inline void RelocIterator::ReadTaggedPC() {
+  rinfo_.pc_ += *pos_ >> kTagBits;
+}
+
+
+inline void RelocIterator::AdvanceReadPC() {
+  rinfo_.pc_ += *--pos_;
+}
+
+
+void RelocIterator::AdvanceReadData() {
+  intptr_t x = 0;
+  for (int i = 0; i < kIntptrSize; i++) {
+    x |= static_cast<intptr_t>(*--pos_) << i * kBitsPerByte;
+  }
+  rinfo_.data_ += x;
+}
+
+
+void RelocIterator::AdvanceReadVariableLengthPCJump() {
+  // Read the 32-kSmallPCDeltaBits most significant bits of the
+  // pc jump in kChunkBits bit chunks and shift them into place.
+  // Stop when the last chunk is encountered.
+  uint32_t pc_jump = 0;
+  for (int i = 0; i < kIntSize; i++) {
+    byte pc_jump_part = *--pos_;
+    pc_jump |= (pc_jump_part >> kLastChunkTagBits) << i * kChunkBits;
+    if ((pc_jump_part & kLastChunkTagMask) == 1) break;
+  }
+  // The least significant kSmallPCDeltaBits bits will be added
+  // later.
+  rinfo_.pc_ += pc_jump << kSmallPCDeltaBits;
+}
+
+
+inline int RelocIterator::GetPositionTypeTag() {
+  return *pos_ & ((1 << kPositionTypeTagBits) - 1);
+}
+
+
+inline void RelocIterator::ReadTaggedData() {
+  int8_t signed_b = *pos_;
+  // Signed right shift is arithmetic shift.  Tested in test-utils.cc.
+  rinfo_.data_ += signed_b >> kPositionTypeTagBits;
+}
+
+
+inline RelocInfo::Mode RelocIterator::DebugInfoModeFromTag(int tag) {
+  if (tag == kStatementPositionTag) {
+    return RelocInfo::STATEMENT_POSITION;
+  } else if (tag == kNonstatementPositionTag) {
+    return RelocInfo::POSITION;
+  } else {
+    ASSERT(tag == kCommentTag);
+    return RelocInfo::COMMENT;
+  }
+}
+
+
+void RelocIterator::next() {
+  ASSERT(!done());
+  // Basically, do the opposite of RelocInfoWriter::Write.
+  // Reading of data is as far as possible avoided for unwanted modes,
+  // but we must always update the pc.
+  //
+  // We exit this loop by returning when we find a mode we want.
+  while (pos_ > end_) {
+    int tag = AdvanceGetTag();
+    if (tag == kEmbeddedObjectTag) {
+      ReadTaggedPC();
+      if (SetMode(RelocInfo::EMBEDDED_OBJECT)) return;
+    } else if (tag == kCodeTargetTag) {
+      ReadTaggedPC();
+      if (SetMode(RelocInfo::CODE_TARGET)) return;
+    } else if (tag == kPositionTag) {
+      ReadTaggedPC();
+      Advance();
+      // Check if we want source positions.
+      if (mode_mask_ & RelocInfo::kPositionMask) {
+        // Check if we want this type of source position.
+        if (SetMode(DebugInfoModeFromTag(GetPositionTypeTag()))) {
+          // Finally read the data before returning.
+          ReadTaggedData();
+          return;
+        }
+      }
+    } else {
+      ASSERT(tag == kDefaultTag);
+      int extra_tag = GetExtraTag();
+      if (extra_tag == kPCJumpTag) {
+        int top_tag = GetTopTag();
+        if (top_tag == kVariableLengthPCJumpTopTag) {
+          AdvanceReadVariableLengthPCJump();
+        } else {
+          AdvanceReadPC();
+        }
+      } else if (extra_tag == kDataJumpTag) {
+        // Check if we want debug modes (the only ones with data).
+        if (mode_mask_ & RelocInfo::kDebugMask) {
+          int top_tag = GetTopTag();
+          AdvanceReadData();
+          if (SetMode(DebugInfoModeFromTag(top_tag))) return;
+        } else {
+          // Otherwise, just skip over the data.
+          Advance(kIntptrSize);
+        }
+      } else {
+        AdvanceReadPC();
+        if (SetMode(static_cast<RelocInfo::Mode>(extra_tag))) return;
+      }
+    }
+  }
+  done_ = true;
+}
+
+
+RelocIterator::RelocIterator(Code* code, int mode_mask) {
+  rinfo_.pc_ = code->instruction_start();
+  rinfo_.data_ = 0;
+  // relocation info is read backwards
+  pos_ = code->relocation_start() + code->relocation_size();
+  end_ = code->relocation_start();
+  done_ = false;
+  mode_mask_ = mode_mask;
+  if (mode_mask_ == 0) pos_ = end_;
+  next();
+}
+
+
+RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
+  rinfo_.pc_ = desc.buffer;
+  rinfo_.data_ = 0;
+  // relocation info is read backwards
+  pos_ = desc.buffer + desc.buffer_size;
+  end_ = pos_ - desc.reloc_size;
+  done_ = false;
+  mode_mask_ = mode_mask;
+  if (mode_mask_ == 0) pos_ = end_;
+  next();
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+
+#ifdef ENABLE_DISASSEMBLER
+const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
+  switch (rmode) {
+    case RelocInfo::NONE:
+      return "no reloc";
+    case RelocInfo::EMBEDDED_OBJECT:
+      return "embedded object";
+    case RelocInfo::EMBEDDED_STRING:
+      return "embedded string";
+    case RelocInfo::CONSTRUCT_CALL:
+      return "code target (js construct call)";
+    case RelocInfo::CODE_TARGET_CONTEXT:
+      return "code target (context)";
+    case RelocInfo::CODE_TARGET:
+      return "code target";
+    case RelocInfo::RUNTIME_ENTRY:
+      return "runtime entry";
+    case RelocInfo::JS_RETURN:
+      return "js return";
+    case RelocInfo::COMMENT:
+      return "comment";
+    case RelocInfo::POSITION:
+      return "position";
+    case RelocInfo::STATEMENT_POSITION:
+      return "statement position";
+    case RelocInfo::EXTERNAL_REFERENCE:
+      return "external reference";
+    case RelocInfo::INTERNAL_REFERENCE:
+      return "internal reference";
+    case RelocInfo::NUMBER_OF_MODES:
+      UNREACHABLE();
+      return "number_of_modes";
+  }
+  return "unknown relocation type";
+}
+
+
+void RelocInfo::Print() {
+  PrintF("%p  %s", pc_, RelocModeName(rmode_));
+  if (IsComment(rmode_)) {
+    PrintF("  (%s)", reinterpret_cast<char*>(data_));
+  } else if (rmode_ == EMBEDDED_OBJECT) {
+    PrintF("  (");
+    target_object()->ShortPrint();
+    PrintF(")");
+  } else if (rmode_ == EXTERNAL_REFERENCE) {
+    ExternalReferenceEncoder ref_encoder;
+    PrintF(" (%s)  (%p)",
+           ref_encoder.NameOfAddress(*target_reference_address()),
+           *target_reference_address());
+  } else if (IsCodeTarget(rmode_)) {
+    Code* code = Code::GetCodeFromTargetAddress(target_address());
+    PrintF(" (%s)  (%p)", Code::Kind2String(code->kind()), target_address());
+  } else if (IsPosition(rmode_)) {
+    PrintF("  (%d)", data());
+  }
+
+  PrintF("\n");
+}
+#endif  // ENABLE_DISASSEMBLER
+
+
+#ifdef DEBUG
+void RelocInfo::Verify() {
+  switch (rmode_) {
+    case EMBEDDED_OBJECT:
+      Object::VerifyPointer(target_object());
+      break;
+    case CONSTRUCT_CALL:
+    case CODE_TARGET_CONTEXT:
+    case CODE_TARGET: {
+      // convert inline target address to code object
+      Address addr = target_address();
+      ASSERT(addr != NULL);
+      // Check that we can find the right code object.
+      Code* code = Code::GetCodeFromTargetAddress(addr);
+      Object* found = Heap::FindCodeObject(addr);
+      ASSERT(found->IsCode());
+      ASSERT(code->address() == HeapObject::cast(found)->address());
+      break;
+    }
+    case RelocInfo::EMBEDDED_STRING:
+    case RUNTIME_ENTRY:
+    case JS_RETURN:
+    case COMMENT:
+    case POSITION:
+    case STATEMENT_POSITION:
+    case EXTERNAL_REFERENCE:
+    case INTERNAL_REFERENCE:
+    case NONE:
+      break;
+    case NUMBER_OF_MODES:
+      UNREACHABLE();
+      break;
+  }
+}
+#endif  // DEBUG
+
+
+// -----------------------------------------------------------------------------
+// Implementation of ExternalReference
+
+ExternalReference::ExternalReference(Builtins::CFunctionId id)
+  : address_(Redirect(Builtins::c_function_address(id))) {}
+
+
+ExternalReference::ExternalReference(Builtins::Name name)
+  : address_(Builtins::builtin_address(name)) {}
+
+
+ExternalReference::ExternalReference(Runtime::FunctionId id)
+  : address_(Redirect(Runtime::FunctionForId(id)->entry)) {}
+
+
+ExternalReference::ExternalReference(Runtime::Function* f)
+  : address_(Redirect(f->entry)) {}
+
+
+ExternalReference::ExternalReference(const IC_Utility& ic_utility)
+  : address_(Redirect(ic_utility.address())) {}
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ExternalReference::ExternalReference(const Debug_Address& debug_address)
+  : address_(debug_address.address()) {}
+#endif
+
+ExternalReference::ExternalReference(StatsCounter* counter)
+  : address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {}
+
+
+ExternalReference::ExternalReference(Top::AddressId id)
+  : address_(Top::get_address_from_id(id)) {}
+
+
+ExternalReference::ExternalReference(const SCTableReference& table_ref)
+  : address_(table_ref.address()) {}
+
+
+ExternalReference ExternalReference::perform_gc_function() {
+  return ExternalReference(Redirect(FUNCTION_ADDR(Runtime::PerformGC)));
+}
+
+
+ExternalReference ExternalReference::builtin_passed_function() {
+  return ExternalReference(&Builtins::builtin_passed_function);
+}
+
+
+ExternalReference ExternalReference::random_positive_smi_function() {
+  return ExternalReference(Redirect(FUNCTION_ADDR(V8::RandomPositiveSmi)));
+}
+
+
+ExternalReference ExternalReference::the_hole_value_location() {
+  return ExternalReference(Factory::the_hole_value().location());
+}
+
+
+ExternalReference ExternalReference::roots_address() {
+  return ExternalReference(Heap::roots_address());
+}
+
+
+ExternalReference ExternalReference::address_of_stack_guard_limit() {
+  return ExternalReference(StackGuard::address_of_jslimit());
+}
+
+
+ExternalReference ExternalReference::address_of_regexp_stack_limit() {
+  return ExternalReference(RegExpStack::limit_address());
+}
+
+
+ExternalReference ExternalReference::new_space_start() {
+  return ExternalReference(Heap::NewSpaceStart());
+}
+
+
+ExternalReference ExternalReference::new_space_allocation_top_address() {
+  return ExternalReference(Heap::NewSpaceAllocationTopAddress());
+}
+
+
+ExternalReference ExternalReference::heap_always_allocate_scope_depth() {
+  return ExternalReference(Heap::always_allocate_scope_depth_address());
+}
+
+
+ExternalReference ExternalReference::new_space_allocation_limit_address() {
+  return ExternalReference(Heap::NewSpaceAllocationLimitAddress());
+}
+
+#ifdef V8_NATIVE_REGEXP
+
+ExternalReference ExternalReference::re_check_stack_guard_state() {
+  Address function;
+#ifdef V8_TARGET_ARCH_X64
+  function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState);
+#elif V8_TARGET_ARCH_IA32
+  function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
+#elif V8_TARGET_ARCH_ARM
+  function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
+#else
+  UNREACHABLE("Unexpected architecture");
+#endif
+  return ExternalReference(Redirect(function));
+}
+
+ExternalReference ExternalReference::re_grow_stack() {
+  return ExternalReference(
+      Redirect(FUNCTION_ADDR(NativeRegExpMacroAssembler::GrowStack)));
+}
+
+ExternalReference ExternalReference::re_case_insensitive_compare_uc16() {
+  return ExternalReference(Redirect(
+      FUNCTION_ADDR(NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)));
+}
+
+#endif
+
+
+static double add_two_doubles(double x, double y) {
+  return x + y;
+}
+
+
+static double sub_two_doubles(double x, double y) {
+  return x - y;
+}
+
+
+static double mul_two_doubles(double x, double y) {
+  return x * y;
+}
+
+
+static double div_two_doubles(double x, double y) {
+  return x / y;
+}
+
+
+static double mod_two_doubles(double x, double y) {
+  return fmod(x, y);
+}
+
+
+static int native_compare_doubles(double x, double y) {
+  if (x == y) return 0;
+  return x < y ? 1 : -1;
+}
+
+
+ExternalReference ExternalReference::double_fp_operation(
+    Token::Value operation) {
+  typedef double BinaryFPOperation(double x, double y);
+  BinaryFPOperation* function = NULL;
+  switch (operation) {
+    case Token::ADD:
+      function = &add_two_doubles;
+      break;
+    case Token::SUB:
+      function = &sub_two_doubles;
+      break;
+    case Token::MUL:
+      function = &mul_two_doubles;
+      break;
+    case Token::DIV:
+      function = &div_two_doubles;
+      break;
+    case Token::MOD:
+      function = &mod_two_doubles;
+      break;
+    default:
+      UNREACHABLE();
+  }
+  // Passing true as 2nd parameter indicates that they return an fp value.
+  return ExternalReference(Redirect(FUNCTION_ADDR(function), true));
+}
+
+
+ExternalReference ExternalReference::compare_doubles() {
+  return ExternalReference(Redirect(FUNCTION_ADDR(native_compare_doubles),
+                                    false));
+}
+
+
+ExternalReferenceRedirector* ExternalReference::redirector_ = NULL;
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ExternalReference ExternalReference::debug_break() {
+  return ExternalReference(Redirect(FUNCTION_ADDR(Debug::Break)));
+}
+
+
+ExternalReference ExternalReference::debug_step_in_fp_address() {
+  return ExternalReference(Debug::step_in_fp_addr());
+}
+#endif
+
+} }  // namespace v8::internal
diff --git a/src/assembler.h b/src/assembler.h
new file mode 100644
index 0000000..827389a
--- /dev/null
+++ b/src/assembler.h
@@ -0,0 +1,500 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+
+#ifndef V8_ASSEMBLER_H_
+#define V8_ASSEMBLER_H_
+
+#include "runtime.h"
+#include "top.h"
+#include "zone-inl.h"
+#include "token.h"
+
+namespace v8 {
+namespace internal {
+
+
+// -----------------------------------------------------------------------------
+// Labels represent pc locations; they are typically jump or call targets.
+// After declaration, a label can be freely used to denote a known or (yet)
+// unknown pc location. Assembler::bind() is used to bind a label to the
+// current pc. A label can be bound only once.
+
+class Label BASE_EMBEDDED {
+ public:
+  INLINE(Label())                 { Unuse(); }
+  INLINE(~Label())                { ASSERT(!is_linked()); }
+
+  INLINE(void Unuse())            { pos_ = 0; }
+
+  INLINE(bool is_bound()  const)  { return pos_ <  0; }
+  INLINE(bool is_unused() const)  { return pos_ == 0; }
+  INLINE(bool is_linked() const)  { return pos_ >  0; }
+
+  // Returns the position of bound or linked labels. Cannot be used
+  // for unused labels.
+  int pos() const;
+
+ private:
+  // pos_ encodes both the binding state (via its sign)
+  // and the binding position (via its value) of a label.
+  //
+  // pos_ <  0  bound label, pos() returns the jump target position
+  // pos_ == 0  unused label
+  // pos_ >  0  linked label, pos() returns the last reference position
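+  //
+  // For example, bind_to(5) stores pos_ = -6 (a bound label) and link_to(5)
+  // stores pos_ = 6 (a linked label); in both cases pos() recovers 5.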
+  int pos_;
+
+  void bind_to(int pos)  {
+    pos_ = -pos - 1;
+    ASSERT(is_bound());
+  }
+  void link_to(int pos)  {
+    pos_ =  pos + 1;
+    ASSERT(is_linked());
+  }
+
+  friend class Assembler;
+  friend class RegexpAssembler;
+  friend class Displacement;
+  friend class ShadowTarget;
+  friend class RegExpMacroAssemblerIrregexp;
+};
+
+
+// -----------------------------------------------------------------------------
+// Relocation information
+
+
+// Relocation information consists of the address (pc) of the datum
+// to which the relocation information applies, the relocation mode
+// (rmode), and an optional data field. The relocation mode may be
+// "descriptive" and not indicate a need for relocation, but simply
+// describe a property of the datum. Such rmodes are useful for GC
+// and nice disassembly output.
+
+class RelocInfo BASE_EMBEDDED {
+ public:
+  // The constant kNoPosition is used with the collecting of source positions
+  // in the relocation information. Two types of source positions are collected
+  // "position" (RelocMode position) and "statement position" (RelocMode
+  // statement_position). The "position" is collected at places in the source
+  // code that are of interest when making stack traces, to pinpoint the source
+  // location of a stack frame as closely as possible. The "statement position"
+  // is collected at the beginning of each statement, and is used to indicate
+  // possible break locations. kNoPosition is used to indicate an
+  // invalid/uninitialized position value.
+  static const int kNoPosition = -1;
+
+  enum Mode {
+    // Please note the order is important (see IsCodeTarget, IsGCRelocMode).
+    CONSTRUCT_CALL,  // code target that is a call to a JavaScript constructor.
+    CODE_TARGET_CONTEXT,  // code target used for contextual loads.
+    CODE_TARGET,         // code target which is not any of the above.
+    EMBEDDED_OBJECT,
+    EMBEDDED_STRING,
+
+    // Everything after runtime_entry (inclusive) is not GC'ed.
+    RUNTIME_ENTRY,
+    JS_RETURN,  // Marks start of the ExitJSFrame code.
+    COMMENT,
+    POSITION,  // See comment for kNoPosition above.
+    STATEMENT_POSITION,  // See comment for kNoPosition above.
+    EXTERNAL_REFERENCE,  // The address of an external C++ function.
+    INTERNAL_REFERENCE,  // An address inside the same function.
+
+    // add more as needed
+    // Pseudo-types
+    NUMBER_OF_MODES,  // must be no greater than 14 - see RelocInfoWriter
+    NONE,  // never recorded
+    LAST_CODE_ENUM = CODE_TARGET,
+    LAST_GCED_ENUM = EMBEDDED_STRING
+  };
+
+
+  RelocInfo() {}
+  RelocInfo(byte* pc, Mode rmode, intptr_t data)
+      : pc_(pc), rmode_(rmode), data_(data) {
+  }
+
+  static inline bool IsConstructCall(Mode mode) {
+    return mode == CONSTRUCT_CALL;
+  }
+  static inline bool IsCodeTarget(Mode mode) {
+    return mode <= LAST_CODE_ENUM;
+  }
+  // Is the relocation mode affected by GC?
+  static inline bool IsGCRelocMode(Mode mode) {
+    return mode <= LAST_GCED_ENUM;
+  }
+  static inline bool IsJSReturn(Mode mode) {
+    return mode == JS_RETURN;
+  }
+  static inline bool IsComment(Mode mode) {
+    return mode == COMMENT;
+  }
+  static inline bool IsPosition(Mode mode) {
+    return mode == POSITION || mode == STATEMENT_POSITION;
+  }
+  static inline bool IsStatementPosition(Mode mode) {
+    return mode == STATEMENT_POSITION;
+  }
+  static inline bool IsExternalReference(Mode mode) {
+    return mode == EXTERNAL_REFERENCE;
+  }
+  static inline bool IsInternalReference(Mode mode) {
+    return mode == INTERNAL_REFERENCE;
+  }
+  static inline int ModeMask(Mode mode) { return 1 << mode; }
+
+  // Accessors
+  byte* pc() const  { return pc_; }
+  void set_pc(byte* pc) { pc_ = pc; }
+  Mode rmode() const {  return rmode_; }
+  intptr_t data() const  { return data_; }
+
+  // Apply a relocation by delta bytes
+  INLINE(void apply(intptr_t delta));
+
+  // Read/modify the code target in the branch/call instruction
+  // this relocation applies to;
+  // can only be called if IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
+  INLINE(Address target_address());
+  INLINE(void set_target_address(Address target));
+  INLINE(Object* target_object());
+  INLINE(Object** target_object_address());
+  INLINE(void set_target_object(Object* target));
+
+  // Read the address of the word containing the target_address. Can only
+  // be called if IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY.
+  INLINE(Address target_address_address());
+
+  // Read/modify the reference in the instruction this relocation
+  // applies to; can only be called if rmode_ is external_reference
+  INLINE(Address* target_reference_address());
+
+  // Read/modify the address of a call instruction. This is used to relocate
+  // the break points where straight-line code is patched with a call
+  // instruction.
+  INLINE(Address call_address());
+  INLINE(void set_call_address(Address target));
+  INLINE(Object* call_object());
+  INLINE(Object** call_object_address());
+  INLINE(void set_call_object(Object* target));
+
+  // Patch the code with some other code.
+  void PatchCode(byte* instructions, int instruction_count);
+
+  // Patch the code with a call.
+  void PatchCodeWithCall(Address target, int guard_bytes);
+  // Check whether the current instruction is currently a call
+  // sequence (whether naturally or a return sequence overwritten
+  // to enter the debugger).
+  INLINE(bool IsCallInstruction());
+
+#ifdef ENABLE_DISASSEMBLER
+  // Printing
+  static const char* RelocModeName(Mode rmode);
+  void Print();
+#endif  // ENABLE_DISASSEMBLER
+#ifdef DEBUG
+  // Debugging
+  void Verify();
+#endif
+
+  static const int kCodeTargetMask = (1 << (LAST_CODE_ENUM + 1)) - 1;
+  static const int kPositionMask = 1 << POSITION | 1 << STATEMENT_POSITION;
+  static const int kDebugMask = kPositionMask | 1 << COMMENT;
+  static const int kApplyMask;  // Modes affected by apply. Depends on arch.
+
+ private:
+  // On ARM, note that pc_ is the address of the constant pool entry
+  // to be relocated and not the address of the instruction
+  // referencing the constant pool entry (except when rmode_ ==
+  // comment).
+  byte* pc_;
+  Mode rmode_;
+  intptr_t data_;
+  friend class RelocIterator;
+};
+
+
+// RelocInfoWriter serializes a stream of relocation info. It writes towards
+// lower addresses.
+class RelocInfoWriter BASE_EMBEDDED {
+ public:
+  RelocInfoWriter() : pos_(NULL), last_pc_(NULL), last_data_(0) {}
+  RelocInfoWriter(byte* pos, byte* pc) : pos_(pos), last_pc_(pc),
+                                         last_data_(0) {}
+
+  byte* pos() const { return pos_; }
+  byte* last_pc() const { return last_pc_; }
+
+  void Write(const RelocInfo* rinfo);
+
+  // Update the state of the stream after reloc info buffer
+  // and/or code is moved while the stream is active.
+  void Reposition(byte* pos, byte* pc) {
+    pos_ = pos;
+    last_pc_ = pc;
+  }
+
+  // Max size (bytes) of a written RelocInfo. Longest encoding is
+  // ExtraTag, VariableLengthPCJump, ExtraTag, pc_delta, ExtraTag, data_delta.
+  // On ia32 and arm this is 1 + 4 + 1 + 1 + 1 + 4 = 12.
+  // On x64 this is 1 + 4 + 1 + 1 + 1 + 8 == 16;
+  // Here we use the maximum of the two.
+  static const int kMaxSize = 16;
+
+ private:
+  inline uint32_t WriteVariableLengthPCJump(uint32_t pc_delta);
+  inline void WriteTaggedPC(uint32_t pc_delta, int tag);
+  inline void WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag);
+  inline void WriteExtraTaggedData(intptr_t data_delta, int top_tag);
+  inline void WriteTaggedData(intptr_t data_delta, int tag);
+  inline void WriteExtraTag(int extra_tag, int top_tag);
+
+  byte* pos_;
+  byte* last_pc_;
+  intptr_t last_data_;
+  DISALLOW_COPY_AND_ASSIGN(RelocInfoWriter);
+};
+
+
+// A RelocIterator iterates over relocation information.
+// Typical use:
+//
+//   for (RelocIterator it(code); !it.done(); it.next()) {
+//     // do something with it.rinfo() here
+//   }
+//
+// A mask can be specified to skip unwanted modes.
+class RelocIterator: public Malloced {
+ public:
+  // Create a new iterator positioned at
+  // the beginning of the reloc info.
+  // Relocation information with mode k is included in the
+  // iteration iff bit k of mode_mask is set.
+  explicit RelocIterator(Code* code, int mode_mask = -1);
+  explicit RelocIterator(const CodeDesc& desc, int mode_mask = -1);
+
+  // Iteration
+  bool done() const  { return done_; }
+  void next();
+
+  // Return pointer valid until next next().
+  RelocInfo* rinfo() {
+    ASSERT(!done());
+    return &rinfo_;
+  }
+
+ private:
+  // Advance* moves the position before/after reading.
+  // *Read* reads from current byte(s) into rinfo_.
+  // *Get* just reads and returns info on current byte.
+  void Advance(int bytes = 1) { pos_ -= bytes; }
+  int AdvanceGetTag();
+  int GetExtraTag();
+  int GetTopTag();
+  void ReadTaggedPC();
+  void AdvanceReadPC();
+  void AdvanceReadData();
+  void AdvanceReadVariableLengthPCJump();
+  int GetPositionTypeTag();
+  void ReadTaggedData();
+
+  static RelocInfo::Mode DebugInfoModeFromTag(int tag);
+
+  // If the given mode is wanted, set it in rinfo_ and return true.
+  // Else return false. Used for efficiently skipping unwanted modes.
+  bool SetMode(RelocInfo::Mode mode) {
+    return (mode_mask_ & 1 << mode) ? (rinfo_.rmode_ = mode, true) : false;
+  }
+
+  byte* pos_;
+  byte* end_;
+  RelocInfo rinfo_;
+  bool done_;
+  int mode_mask_;
+  DISALLOW_COPY_AND_ASSIGN(RelocIterator);
+};
+
+
+// -----------------------------------------------------------------------------
+// External function
+
+// -----------------------------------------------------------------------------
+class IC_Utility;
+class SCTableReference;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+class Debug_Address;
+#endif
+
+
+typedef void* ExternalReferenceRedirector(void* original, bool fp_return);
+
+
+// An ExternalReference represents a C++ address used in the generated
+// code. All references to C++ functions and variables must be encapsulated in
+// an ExternalReference instance. This is done in order to track the origin of
+// all external references in the code so that they can be bound to the correct
+// addresses when deserializing a heap.
+class ExternalReference BASE_EMBEDDED {
+ public:
+  explicit ExternalReference(Builtins::CFunctionId id);
+
+  explicit ExternalReference(Builtins::Name name);
+
+  explicit ExternalReference(Runtime::FunctionId id);
+
+  explicit ExternalReference(Runtime::Function* f);
+
+  explicit ExternalReference(const IC_Utility& ic_utility);
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  explicit ExternalReference(const Debug_Address& debug_address);
+#endif
+
+  explicit ExternalReference(StatsCounter* counter);
+
+  explicit ExternalReference(Top::AddressId id);
+
+  explicit ExternalReference(const SCTableReference& table_ref);
+
+  // One-of-a-kind references. These references are not part of a general
+  // pattern. This means that they have to be added to the
+  // ExternalReferenceTable in serialize.cc manually.
+
+  static ExternalReference perform_gc_function();
+  static ExternalReference builtin_passed_function();
+  static ExternalReference random_positive_smi_function();
+
+  // Static variable Factory::the_hole_value.location()
+  static ExternalReference the_hole_value_location();
+
+  // Static variable Heap::roots_address()
+  static ExternalReference roots_address();
+
+  // Static variable StackGuard::address_of_jslimit()
+  static ExternalReference address_of_stack_guard_limit();
+
+  // Static variable RegExpStack::limit_address()
+  static ExternalReference address_of_regexp_stack_limit();
+
+  // Static variable Heap::NewSpaceStart()
+  static ExternalReference new_space_start();
+  static ExternalReference heap_always_allocate_scope_depth();
+
+  // Used for fast allocation in generated code.
+  static ExternalReference new_space_allocation_top_address();
+  static ExternalReference new_space_allocation_limit_address();
+
+  static ExternalReference double_fp_operation(Token::Value operation);
+  static ExternalReference compare_doubles();
+
+  Address address() const { return reinterpret_cast<Address>(address_); }
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Function Debug::Break()
+  static ExternalReference debug_break();
+
+  // Used to check if single stepping is enabled in generated code.
+  static ExternalReference debug_step_in_fp_address();
+#endif
+
+#ifdef V8_NATIVE_REGEXP
+  // C functions called from RegExp generated code.
+
+  // Function NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()
+  static ExternalReference re_case_insensitive_compare_uc16();
+
+  // Function RegExpMacroAssembler*::CheckStackGuardState()
+  static ExternalReference re_check_stack_guard_state();
+
+  // Function NativeRegExpMacroAssembler::GrowStack()
+  static ExternalReference re_grow_stack();
+#endif
+
+  // This lets you register a function that rewrites all external references.
+  // Used by the ARM simulator to catch calls to external references.
+  static void set_redirector(ExternalReferenceRedirector* redirector) {
+    ASSERT(redirector_ == NULL);  // We can't stack them.
+    redirector_ = redirector;
+  }
+
+ private:
+  explicit ExternalReference(void* address)
+      : address_(address) {}
+
+  static ExternalReferenceRedirector* redirector_;
+
+  static void* Redirect(void* address, bool fp_return = false) {
+    if (redirector_ == NULL) return address;
+    return (*redirector_)(address, fp_return);
+  }
+
+  static void* Redirect(Address address_arg, bool fp_return = false) {
+    void* address = reinterpret_cast<void*>(address_arg);
+    return redirector_ == NULL ? address : (*redirector_)(address, fp_return);
+  }
+
+  void* address_;
+};
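+
+// Sketch of how the redirection hook above can be used (hypothetical
+// redirector for illustration; the real one is installed by the ARM
+// simulator):
+//
+//   static void* MyRedirector(void* original, bool fp_return) {
+//     // A real redirector returns a trampoline; passing the address
+//     // through unchanged is the trivial case.
+//     return original;
+//   }
+//   ...
+//   ExternalReference::set_redirector(MyRedirector);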
+
+
+// -----------------------------------------------------------------------------
+// Utility functions
+
+static inline bool is_intn(int x, int n)  {
+  return -(1 << (n-1)) <= x && x < (1 << (n-1));
+}
+
+static inline bool is_int24(int x)  { return is_intn(x, 24); }
+static inline bool is_int8(int x)  { return is_intn(x, 8); }
+
+static inline bool is_uintn(int x, int n) {
+  return (x & -(1 << n)) == 0;
+}
+
+static inline bool is_uint2(int x)  { return is_uintn(x, 2); }
+static inline bool is_uint3(int x)  { return is_uintn(x, 3); }
+static inline bool is_uint4(int x)  { return is_uintn(x, 4); }
+static inline bool is_uint5(int x)  { return is_uintn(x, 5); }
+static inline bool is_uint6(int x)  { return is_uintn(x, 6); }
+static inline bool is_uint8(int x)  { return is_uintn(x, 8); }
+static inline bool is_uint12(int x)  { return is_uintn(x, 12); }
+static inline bool is_uint16(int x)  { return is_uintn(x, 16); }
+static inline bool is_uint24(int x)  { return is_uintn(x, 24); }
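+
+// Examples for the helpers above: is_int8(-128) and is_int8(127) are true
+// but is_int8(128) is false (the signed n-bit range is [-2^(n-1), 2^(n-1)-1]);
+// is_uint8(255) is true and is_uint8(256) is false (the unsigned range is
+// [0, 2^n - 1]).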
+
+} }  // namespace v8::internal
+
+#endif  // V8_ASSEMBLER_H_
diff --git a/src/ast.cc b/src/ast.cc
new file mode 100644
index 0000000..692bec0
--- /dev/null
+++ b/src/ast.cc
@@ -0,0 +1,511 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "ast.h"
+#include "scopes.h"
+#include "string-stream.h"
+
+namespace v8 {
+namespace internal {
+
+
+VariableProxySentinel VariableProxySentinel::this_proxy_(true);
+VariableProxySentinel VariableProxySentinel::identifier_proxy_(false);
+ValidLeftHandSideSentinel ValidLeftHandSideSentinel::instance_;
+Property Property::this_property_(VariableProxySentinel::this_proxy(), NULL, 0);
+Call Call::sentinel_(NULL, NULL, 0);
+
+
+// ----------------------------------------------------------------------------
+// All the Accept member functions for each syntax tree node type.
+
+#define DECL_ACCEPT(type)                \
+  void type::Accept(AstVisitor* v) {        \
+    if (v->CheckStackOverflow()) return; \
+    v->Visit##type(this);                \
+  }
+AST_NODE_LIST(DECL_ACCEPT)
+#undef DECL_ACCEPT
+
+
+// ----------------------------------------------------------------------------
+// Implementation of other node functionality.
+
+VariableProxy::VariableProxy(Handle<String> name,
+                             bool is_this,
+                             bool inside_with)
+  : name_(name),
+    var_(NULL),
+    is_this_(is_this),
+    inside_with_(inside_with) {
+  // names must be canonicalized for fast equality checks
+  ASSERT(name->IsSymbol());
+  // at least one access, otherwise no need for a VariableProxy
+  var_uses_.RecordRead(1);
+}
+
+
+VariableProxy::VariableProxy(bool is_this)
+  : is_this_(is_this) {
+}
+
+
+void VariableProxy::BindTo(Variable* var) {
+  ASSERT(var_ == NULL);  // must be bound only once
+  ASSERT(var != NULL);  // must bind
+  ASSERT((is_this() && var->is_this()) || name_.is_identical_to(var->name()));
+  // Ideally CONST-ness should match. However, this is very hard to achieve
+  // because we don't know the exact semantics of conflicting (const and
+  // non-const) multiple variable declarations, const vars introduced via
+  // eval() etc.  Const-ness and variable declarations are a complete mess
+  // in JS. Sigh...
+  var_ = var;
+  var->var_uses()->RecordUses(&var_uses_);
+  var->obj_uses()->RecordUses(&obj_uses_);
+}
+
+
+#ifdef DEBUG
+
+const char* LoopStatement::OperatorString() const {
+  switch (type()) {
+    case DO_LOOP: return "DO";
+    case FOR_LOOP: return "FOR";
+    case WHILE_LOOP: return "WHILE";
+  }
+  return NULL;
+}
+
+#endif  // DEBUG
+
+
+Token::Value Assignment::binary_op() const {
+  switch (op_) {
+    case Token::ASSIGN_BIT_OR: return Token::BIT_OR;
+    case Token::ASSIGN_BIT_XOR: return Token::BIT_XOR;
+    case Token::ASSIGN_BIT_AND: return Token::BIT_AND;
+    case Token::ASSIGN_SHL: return Token::SHL;
+    case Token::ASSIGN_SAR: return Token::SAR;
+    case Token::ASSIGN_SHR: return Token::SHR;
+    case Token::ASSIGN_ADD: return Token::ADD;
+    case Token::ASSIGN_SUB: return Token::SUB;
+    case Token::ASSIGN_MUL: return Token::MUL;
+    case Token::ASSIGN_DIV: return Token::DIV;
+    case Token::ASSIGN_MOD: return Token::MOD;
+    default: UNREACHABLE();
+  }
+  return Token::ILLEGAL;
+}
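+// Example for binary_op() above: the compound assignment "x += y" carries
+// op_ == Token::ASSIGN_ADD, so binary_op() yields Token::ADD and the code
+// generator can treat the assignment as "x = x + y".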
+
+
+bool FunctionLiteral::AllowsLazyCompilation() {
+  return scope()->AllowsLazyCompilation();
+}
+
+
+ObjectLiteral::Property::Property(Literal* key, Expression* value) {
+  key_ = key;
+  value_ = value;
+  Object* k = *key->handle();
+  if (k->IsSymbol() && Heap::Proto_symbol()->Equals(String::cast(k))) {
+    kind_ = PROTOTYPE;
+  } else if (value_->AsMaterializedLiteral() != NULL) {
+    kind_ = MATERIALIZED_LITERAL;
+  } else if (value_->AsLiteral() != NULL) {
+    kind_ = CONSTANT;
+  } else {
+    kind_ = COMPUTED;
+  }
+}
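+// Examples of the classification above: in {__proto__: p} the property is
+// PROTOTYPE; in {a: [1, 2]} the value is itself a materialized literal, so
+// MATERIALIZED_LITERAL; in {a: 1} the value is a constant, so CONSTANT; and
+// in {a: f()} the value must be computed at runtime, so COMPUTED.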
+
+
+ObjectLiteral::Property::Property(bool is_getter, FunctionLiteral* value) {
+  key_ = new Literal(value->name());
+  value_ = value;
+  kind_ = is_getter ? GETTER : SETTER;
+}
+
+
+bool ObjectLiteral::IsValidJSON() {
+  int length = properties()->length();
+  for (int i = 0; i < length; i++) {
+    Property* prop = properties()->at(i);
+    if (!prop->value()->IsValidJSON())
+      return false;
+  }
+  return true;
+}
+
+
+bool ArrayLiteral::IsValidJSON() {
+  int length = values()->length();
+  for (int i = 0; i < length; i++) {
+    if (!values()->at(i)->IsValidJSON())
+      return false;
+  }
+  return true;
+}
+
+
+void TargetCollector::AddTarget(BreakTarget* target) {
+  // Add the label to the collector, but discard duplicates.
+  int length = targets_->length();
+  for (int i = 0; i < length; i++) {
+    if (targets_->at(i) == target) return;
+  }
+  targets_->Add(target);
+}
+
+
+// ----------------------------------------------------------------------------
+// Implementation of AstVisitor
+
+
+void AstVisitor::VisitStatements(ZoneList<Statement*>* statements) {
+  for (int i = 0; i < statements->length(); i++) {
+    Visit(statements->at(i));
+  }
+}
+
+
+void AstVisitor::VisitExpressions(ZoneList<Expression*>* expressions) {
+  for (int i = 0; i < expressions->length(); i++) {
+    // The variable statement visiting code may pass NULL expressions
+    // to this code. Maybe this should be handled by introducing an
+    // undefined expression or literal? Revisit this code if this
+    // changes.
+    Expression* expression = expressions->at(i);
+    if (expression != NULL) Visit(expression);
+  }
+}
+
+
+// ----------------------------------------------------------------------------
+// Regular expressions
+
+#define MAKE_ACCEPT(Name)                                            \
+  void* RegExp##Name::Accept(RegExpVisitor* visitor, void* data) {   \
+    return visitor->Visit##Name(this, data);                         \
+  }
+FOR_EACH_REG_EXP_TREE_TYPE(MAKE_ACCEPT)
+#undef MAKE_ACCEPT
+
+#define MAKE_TYPE_CASE(Name)                                         \
+  RegExp##Name* RegExpTree::As##Name() {                             \
+    return NULL;                                                     \
+  }                                                                  \
+  bool RegExpTree::Is##Name() { return false; }
+FOR_EACH_REG_EXP_TREE_TYPE(MAKE_TYPE_CASE)
+#undef MAKE_TYPE_CASE
+
+#define MAKE_TYPE_CASE(Name)                                        \
+  RegExp##Name* RegExp##Name::As##Name() {                          \
+    return this;                                                    \
+  }                                                                 \
+  bool RegExp##Name::Is##Name() { return true; }
+FOR_EACH_REG_EXP_TREE_TYPE(MAKE_TYPE_CASE)
+#undef MAKE_TYPE_CASE
+
+RegExpEmpty RegExpEmpty::kInstance;
+
+
+static Interval ListCaptureRegisters(ZoneList<RegExpTree*>* children) {
+  Interval result = Interval::Empty();
+  for (int i = 0; i < children->length(); i++)
+    result = result.Union(children->at(i)->CaptureRegisters());
+  return result;
+}
+
+
+Interval RegExpAlternative::CaptureRegisters() {
+  return ListCaptureRegisters(nodes());
+}
+
+
+Interval RegExpDisjunction::CaptureRegisters() {
+  return ListCaptureRegisters(alternatives());
+}
+
+
+Interval RegExpLookahead::CaptureRegisters() {
+  return body()->CaptureRegisters();
+}
+
+
+Interval RegExpCapture::CaptureRegisters() {
+  Interval self(StartRegister(index()), EndRegister(index()));
+  return self.Union(body()->CaptureRegisters());
+}
+
+
+Interval RegExpQuantifier::CaptureRegisters() {
+  return body()->CaptureRegisters();
+}
+
+
+bool RegExpAssertion::IsAnchored() {
+  return type() == RegExpAssertion::START_OF_INPUT;
+}
+
+
+bool RegExpAlternative::IsAnchored() {
+  ZoneList<RegExpTree*>* nodes = this->nodes();
+  for (int i = 0; i < nodes->length(); i++) {
+    RegExpTree* node = nodes->at(i);
+    if (node->IsAnchored()) { return true; }
+    if (node->max_match() > 0) { return false; }
+  }
+  return false;
+}
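+// Example: /^ab/ is anchored because the START_OF_INPUT assertion is seen
+// before any node that can consume input, whereas /ab/ contains no such
+// assertion and is not anchored.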
+
+
+bool RegExpDisjunction::IsAnchored() {
+  ZoneList<RegExpTree*>* alternatives = this->alternatives();
+  for (int i = 0; i < alternatives->length(); i++) {
+    if (!alternatives->at(i)->IsAnchored())
+      return false;
+  }
+  return true;
+}
+
+
+bool RegExpLookahead::IsAnchored() {
+  return is_positive() && body()->IsAnchored();
+}
+
+
+bool RegExpCapture::IsAnchored() {
+  return body()->IsAnchored();
+}
+
+
+// Convert regular expression trees to a simple sexp representation.
+// This representation should be different from the input grammar
+// in as many cases as possible, to make it more difficult for incorrect
+// parses to look like correct ones, which is likely if the input and
+// output formats are alike.
+class RegExpUnparser: public RegExpVisitor {
+ public:
+  RegExpUnparser();
+  void VisitCharacterRange(CharacterRange that);
+  SmartPointer<const char> ToString() { return stream_.ToCString(); }
+#define MAKE_CASE(Name) virtual void* Visit##Name(RegExp##Name*, void* data);
+  FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE)
+#undef MAKE_CASE
+ private:
+  StringStream* stream() { return &stream_; }
+  HeapStringAllocator alloc_;
+  StringStream stream_;
+};
+
+
+RegExpUnparser::RegExpUnparser() : stream_(&alloc_) {
+}
+
+
+void* RegExpUnparser::VisitDisjunction(RegExpDisjunction* that, void* data) {
+  stream()->Add("(|");
+  for (int i = 0; i <  that->alternatives()->length(); i++) {
+    stream()->Add(" ");
+    that->alternatives()->at(i)->Accept(this, data);
+  }
+  stream()->Add(")");
+  return NULL;
+}
+
+
+void* RegExpUnparser::VisitAlternative(RegExpAlternative* that, void* data) {
+  stream()->Add("(:");
+  for (int i = 0; i <  that->nodes()->length(); i++) {
+    stream()->Add(" ");
+    that->nodes()->at(i)->Accept(this, data);
+  }
+  stream()->Add(")");
+  return NULL;
+}
+
+
+void RegExpUnparser::VisitCharacterRange(CharacterRange that) {
+  stream()->Add("%k", that.from());
+  if (!that.IsSingleton()) {
+    stream()->Add("-%k", that.to());
+  }
+}
+
+
+void* RegExpUnparser::VisitCharacterClass(RegExpCharacterClass* that,
+                                          void* data) {
+  if (that->is_negated())
+    stream()->Add("^");
+  stream()->Add("[");
+  for (int i = 0; i < that->ranges()->length(); i++) {
+    if (i > 0) stream()->Add(" ");
+    VisitCharacterRange(that->ranges()->at(i));
+  }
+  stream()->Add("]");
+  return NULL;
+}
+
+
+void* RegExpUnparser::VisitAssertion(RegExpAssertion* that, void* data) {
+  switch (that->type()) {
+    case RegExpAssertion::START_OF_INPUT:
+      stream()->Add("@^i");
+      break;
+    case RegExpAssertion::END_OF_INPUT:
+      stream()->Add("@$i");
+      break;
+    case RegExpAssertion::START_OF_LINE:
+      stream()->Add("@^l");
+      break;
+    case RegExpAssertion::END_OF_LINE:
+      stream()->Add("@$l");
+      break;
+    case RegExpAssertion::BOUNDARY:
+      stream()->Add("@b");
+      break;
+    case RegExpAssertion::NON_BOUNDARY:
+      stream()->Add("@B");
+      break;
+  }
+  return NULL;
+}
+
+
+void* RegExpUnparser::VisitAtom(RegExpAtom* that, void* data) {
+  stream()->Add("'");
+  Vector<const uc16> chardata = that->data();
+  for (int i = 0; i < chardata.length(); i++) {
+    stream()->Add("%k", chardata[i]);
+  }
+  stream()->Add("'");
+  return NULL;
+}
+
+
+void* RegExpUnparser::VisitText(RegExpText* that, void* data) {
+  if (that->elements()->length() == 1) {
+    that->elements()->at(0).data.u_atom->Accept(this, data);
+  } else {
+    stream()->Add("(!");
+    for (int i = 0; i < that->elements()->length(); i++) {
+      stream()->Add(" ");
+      that->elements()->at(i).data.u_atom->Accept(this, data);
+    }
+    stream()->Add(")");
+  }
+  return NULL;
+}
+
+
+void* RegExpUnparser::VisitQuantifier(RegExpQuantifier* that, void* data) {
+  stream()->Add("(# %i ", that->min());
+  if (that->max() == RegExpTree::kInfinity) {
+    stream()->Add("- ");
+  } else {
+    stream()->Add("%i ", that->max());
+  }
+  stream()->Add(that->is_greedy() ? "g " : "n ");
+  that->body()->Accept(this, data);
+  stream()->Add(")");
+  return NULL;
+}
+
+
+void* RegExpUnparser::VisitCapture(RegExpCapture* that, void* data) {
+  stream()->Add("(^ ");
+  that->body()->Accept(this, data);
+  stream()->Add(")");
+  return NULL;
+}
+
+
+void* RegExpUnparser::VisitLookahead(RegExpLookahead* that, void* data) {
+  stream()->Add("(-> ");
+  stream()->Add(that->is_positive() ? "+ " : "- ");
+  that->body()->Accept(this, data);
+  stream()->Add(")");
+  return NULL;
+}
+
+
+void* RegExpUnparser::VisitBackReference(RegExpBackReference* that,
+                                         void* data) {
+  stream()->Add("(<- %i)", that->index());
+  return NULL;
+}
+
+
+void* RegExpUnparser::VisitEmpty(RegExpEmpty* that, void* data) {
+  stream()->Put('%');
+  return NULL;
+}
+
+
+SmartPointer<const char> RegExpTree::ToString() {
+  RegExpUnparser unparser;
+  Accept(&unparser, NULL);
+  return unparser.ToString();
+}
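+// For illustration, given the visitors above, a disjunction of the atoms "ab"
+// and "c" unparses roughly as (| 'ab' 'c'), and /(a)+/ as (# 1 - g (^ 'a'));
+// the operators |, :, #, ^ intentionally differ from regexp syntax so that
+// misparses stand out.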
+
+
+RegExpDisjunction::RegExpDisjunction(ZoneList<RegExpTree*>* alternatives)
+    : alternatives_(alternatives) {
+  ASSERT(alternatives->length() > 1);
+  RegExpTree* first_alternative = alternatives->at(0);
+  min_match_ = first_alternative->min_match();
+  max_match_ = first_alternative->max_match();
+  for (int i = 1; i < alternatives->length(); i++) {
+    RegExpTree* alternative = alternatives->at(i);
+    min_match_ = Min(min_match_, alternative->min_match());
+    max_match_ = Max(max_match_, alternative->max_match());
+  }
+}
+
+
+RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
+    : nodes_(nodes) {
+  ASSERT(nodes->length() > 1);
+  min_match_ = 0;
+  max_match_ = 0;
+  for (int i = 0; i < nodes->length(); i++) {
+    RegExpTree* node = nodes->at(i);
+    min_match_ += node->min_match();
+    int node_max_match = node->max_match();
+    if (kInfinity - max_match_ < node_max_match) {
+      max_match_ = kInfinity;
+    } else {
+      max_match_ += node->max_match();
+    }
+  }
+}
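+// Worked example for the two constructors above: in /ab|c/ the alternative
+// "ab" has min_match == max_match == 2 and "c" has 1, so the disjunction ends
+// up with min_match == 1 and max_match == 2; an alternative whose node maxima
+// would sum past kInfinity saturates at kInfinity instead of overflowing.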
+
+
+} }  // namespace v8::internal
diff --git a/src/ast.h b/src/ast.h
new file mode 100644
index 0000000..6a1cdf5
--- /dev/null
+++ b/src/ast.h
@@ -0,0 +1,1724 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_AST_H_
+#define V8_AST_H_
+
+#include "execution.h"
+#include "factory.h"
+#include "runtime.h"
+#include "token.h"
+#include "variables.h"
+#include "macro-assembler.h"
+#include "jsregexp.h"
+#include "jump-target.h"
+
+namespace v8 {
+namespace internal {
+
+// The abstract syntax tree is an intermediate, light-weight
+// representation of the parsed JavaScript code suitable for
+// compilation to native code.
+
+// Nodes are allocated in a separate zone, which allows faster
+// allocation and constant-time deallocation of the entire syntax
+// tree.
+
+
+// ----------------------------------------------------------------------------
+// Nodes of the abstract syntax tree. Only concrete classes are
+// enumerated here.
+
+#define STATEMENT_NODE_LIST(V)                  \
+  V(Block)                                      \
+  V(ExpressionStatement)                        \
+  V(EmptyStatement)                             \
+  V(IfStatement)                                \
+  V(ContinueStatement)                          \
+  V(BreakStatement)                             \
+  V(ReturnStatement)                            \
+  V(WithEnterStatement)                         \
+  V(WithExitStatement)                          \
+  V(SwitchStatement)                            \
+  V(LoopStatement)                              \
+  V(ForInStatement)                             \
+  V(TryCatch)                                   \
+  V(TryFinally)                                 \
+  V(DebuggerStatement)
+
+#define EXPRESSION_NODE_LIST(V)                 \
+  V(FunctionLiteral)                            \
+  V(FunctionBoilerplateLiteral)                 \
+  V(Conditional)                                \
+  V(Slot)                                       \
+  V(VariableProxy)                              \
+  V(Literal)                                    \
+  V(RegExpLiteral)                              \
+  V(ObjectLiteral)                              \
+  V(ArrayLiteral)                               \
+  V(CatchExtensionObject)                       \
+  V(Assignment)                                 \
+  V(Throw)                                      \
+  V(Property)                                   \
+  V(Call)                                       \
+  V(CallNew)                                    \
+  V(CallRuntime)                                \
+  V(UnaryOperation)                             \
+  V(CountOperation)                             \
+  V(BinaryOperation)                            \
+  V(CompareOperation)                           \
+  V(ThisFunction)
+
+#define AST_NODE_LIST(V)                        \
+  V(Declaration)                                \
+  STATEMENT_NODE_LIST(V)                        \
+  EXPRESSION_NODE_LIST(V)
+
+// Forward declarations
+class TargetCollector;
+class MaterializedLiteral;
+
+#define DEF_FORWARD_DECLARATION(type) class type;
+AST_NODE_LIST(DEF_FORWARD_DECLARATION)
+#undef DEF_FORWARD_DECLARATION
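+// For illustration, the expansion above is equivalent to writing
+// "class Declaration; class Block; ... class ThisFunction;" by hand, one
+// forward declaration per node type in AST_NODE_LIST.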
+
+
+// Typedef only introduced to avoid unreadable code.
+// Please do appreciate the required space in "> >".
+typedef ZoneList<Handle<String> > ZoneStringList;
+typedef ZoneList<Handle<Object> > ZoneObjectList;
+
+
+class AstNode: public ZoneObject {
+ public:
+  virtual ~AstNode() { }
+  virtual void Accept(AstVisitor* v) = 0;
+
+  // Type testing & conversion.
+  virtual Statement* AsStatement() { return NULL; }
+  virtual ExpressionStatement* AsExpressionStatement() { return NULL; }
+  virtual EmptyStatement* AsEmptyStatement() { return NULL; }
+  virtual Expression* AsExpression() { return NULL; }
+  virtual Literal* AsLiteral() { return NULL; }
+  virtual Slot* AsSlot() { return NULL; }
+  virtual VariableProxy* AsVariableProxy() { return NULL; }
+  virtual Property* AsProperty() { return NULL; }
+  virtual Call* AsCall() { return NULL; }
+  virtual TargetCollector* AsTargetCollector() { return NULL; }
+  virtual BreakableStatement* AsBreakableStatement() { return NULL; }
+  virtual IterationStatement* AsIterationStatement() { return NULL; }
+  virtual UnaryOperation* AsUnaryOperation() { return NULL; }
+  virtual BinaryOperation* AsBinaryOperation() { return NULL; }
+  virtual Assignment* AsAssignment() { return NULL; }
+  virtual FunctionLiteral* AsFunctionLiteral() { return NULL; }
+  virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }
+  virtual ObjectLiteral* AsObjectLiteral() { return NULL; }
+  virtual ArrayLiteral* AsArrayLiteral() { return NULL; }
+};
+
+
+class Statement: public AstNode {
+ public:
+  Statement() : statement_pos_(RelocInfo::kNoPosition) {}
+
+  virtual Statement* AsStatement()  { return this; }
+  virtual ReturnStatement* AsReturnStatement() { return NULL; }
+
+  bool IsEmpty() { return AsEmptyStatement() != NULL; }
+
+  void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
+  int statement_pos() const { return statement_pos_; }
+
+ private:
+  int statement_pos_;
+};
+
+
+class Expression: public AstNode {
+ public:
+  virtual Expression* AsExpression()  { return this; }
+
+  virtual bool IsValidJSON() { return false; }
+  virtual bool IsValidLeftHandSide() { return false; }
+
+  // Mark the expression as being compiled as an expression
+  // statement. This is used to transform postfix increments to
+  // (faster) prefix increments.
+  virtual void MarkAsStatement() { /* do nothing */ }
+
+  // Static type information for this expression.
+  SmiAnalysis* type() { return &type_; }
+
+ private:
+  SmiAnalysis type_;
+};
+
+
+/**
+ * A sentinel used during pre parsing that represents some expression
+ * that is a valid left hand side without having to actually build
+ * the expression.
+ */
+class ValidLeftHandSideSentinel: public Expression {
+ public:
+  virtual bool IsValidLeftHandSide() { return true; }
+  virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
+  static ValidLeftHandSideSentinel* instance() { return &instance_; }
+ private:
+  static ValidLeftHandSideSentinel instance_;
+};
+
+
+class BreakableStatement: public Statement {
+ public:
+  enum Type {
+    TARGET_FOR_ANONYMOUS,
+    TARGET_FOR_NAMED_ONLY
+  };
+
+  // The labels associated with this statement. May be NULL;
+  // if it is != NULL, guaranteed to contain at least one entry.
+  ZoneStringList* labels() const { return labels_; }
+
+  // Type testing & conversion.
+  virtual BreakableStatement* AsBreakableStatement() { return this; }
+
+  // Code generation
+  BreakTarget* break_target() { return &break_target_; }
+
+  // Testers.
+  bool is_target_for_anonymous() const { return type_ == TARGET_FOR_ANONYMOUS; }
+
+ protected:
+  BreakableStatement(ZoneStringList* labels, Type type)
+      : labels_(labels), type_(type) {
+    ASSERT(labels == NULL || labels->length() > 0);
+  }
+
+ private:
+  ZoneStringList* labels_;
+  Type type_;
+  BreakTarget break_target_;
+};
+
+
+class Block: public BreakableStatement {
+ public:
+  Block(ZoneStringList* labels, int capacity, bool is_initializer_block)
+      : BreakableStatement(labels, TARGET_FOR_NAMED_ONLY),
+        statements_(capacity),
+        is_initializer_block_(is_initializer_block) { }
+
+  virtual void Accept(AstVisitor* v);
+
+  void AddStatement(Statement* statement) { statements_.Add(statement); }
+
+  ZoneList<Statement*>* statements() { return &statements_; }
+  bool is_initializer_block() const  { return is_initializer_block_; }
+
+ private:
+  ZoneList<Statement*> statements_;
+  bool is_initializer_block_;
+};
+
+
+class Declaration: public AstNode {
+ public:
+  Declaration(VariableProxy* proxy, Variable::Mode mode, FunctionLiteral* fun)
+      : proxy_(proxy),
+        mode_(mode),
+        fun_(fun) {
+    ASSERT(mode == Variable::VAR || mode == Variable::CONST);
+    // At the moment there are no "const functions" in JavaScript...
+    ASSERT(fun == NULL || mode == Variable::VAR);
+  }
+
+  virtual void Accept(AstVisitor* v);
+
+  VariableProxy* proxy() const  { return proxy_; }
+  Variable::Mode mode() const  { return mode_; }
+  FunctionLiteral* fun() const  { return fun_; }  // may be NULL
+
+ private:
+  VariableProxy* proxy_;
+  Variable::Mode mode_;
+  FunctionLiteral* fun_;
+};
+
+
+class IterationStatement: public BreakableStatement {
+ public:
+  // Type testing & conversion.
+  virtual IterationStatement* AsIterationStatement() { return this; }
+
+  Statement* body() const { return body_; }
+
+  // Code generation
+  BreakTarget* continue_target()  { return &continue_target_; }
+
+ protected:
+  explicit IterationStatement(ZoneStringList* labels)
+      : BreakableStatement(labels, TARGET_FOR_ANONYMOUS), body_(NULL) { }
+
+  void Initialize(Statement* body) {
+    body_ = body;
+  }
+
+ private:
+  Statement* body_;
+  BreakTarget continue_target_;
+};
+
+
+class LoopStatement: public IterationStatement {
+ public:
+  enum Type { DO_LOOP, FOR_LOOP, WHILE_LOOP };
+
+  LoopStatement(ZoneStringList* labels, Type type)
+      : IterationStatement(labels),
+        type_(type),
+        init_(NULL),
+        cond_(NULL),
+        next_(NULL),
+        may_have_function_literal_(true) {
+  }
+
+  void Initialize(Statement* init,
+                  Expression* cond,
+                  Statement* next,
+                  Statement* body) {
+    ASSERT(init == NULL || type_ == FOR_LOOP);
+    ASSERT(next == NULL || type_ == FOR_LOOP);
+    IterationStatement::Initialize(body);
+    init_ = init;
+    cond_ = cond;
+    next_ = next;
+  }
+
+  virtual void Accept(AstVisitor* v);
+
+  Type type() const  { return type_; }
+  Statement* init() const  { return init_; }
+  Expression* cond() const  { return cond_; }
+  Statement* next() const  { return next_; }
+  bool may_have_function_literal() const {
+    return may_have_function_literal_;
+  }
+
+#ifdef DEBUG
+  const char* OperatorString() const;
+#endif
+
+ private:
+  Type type_;
+  Statement* init_;
+  Expression* cond_;
+  Statement* next_;
+  // True if there is a function literal subexpression in the condition.
+  bool may_have_function_literal_;
+
+  friend class AstOptimizer;
+};
+
+
+class ForInStatement: public IterationStatement {
+ public:
+  explicit ForInStatement(ZoneStringList* labels)
+      : IterationStatement(labels), each_(NULL), enumerable_(NULL) { }
+
+  void Initialize(Expression* each, Expression* enumerable, Statement* body) {
+    IterationStatement::Initialize(body);
+    each_ = each;
+    enumerable_ = enumerable;
+  }
+
+  virtual void Accept(AstVisitor* v);
+
+  Expression* each() const { return each_; }
+  Expression* enumerable() const { return enumerable_; }
+
+ private:
+  Expression* each_;
+  Expression* enumerable_;
+};
+
+
+class ExpressionStatement: public Statement {
+ public:
+  explicit ExpressionStatement(Expression* expression)
+      : expression_(expression) { }
+
+  virtual void Accept(AstVisitor* v);
+
+  // Type testing & conversion.
+  virtual ExpressionStatement* AsExpressionStatement() { return this; }
+
+  void set_expression(Expression* e) { expression_ = e; }
+  Expression* expression() { return expression_; }
+
+ private:
+  Expression* expression_;
+};
+
+
+class ContinueStatement: public Statement {
+ public:
+  explicit ContinueStatement(IterationStatement* target)
+      : target_(target) { }
+
+  virtual void Accept(AstVisitor* v);
+
+  IterationStatement* target() const  { return target_; }
+
+ private:
+  IterationStatement* target_;
+};
+
+
+class BreakStatement: public Statement {
+ public:
+  explicit BreakStatement(BreakableStatement* target)
+      : target_(target) { }
+
+  virtual void Accept(AstVisitor* v);
+
+  BreakableStatement* target() const  { return target_; }
+
+ private:
+  BreakableStatement* target_;
+};
+
+
+class ReturnStatement: public Statement {
+ public:
+  explicit ReturnStatement(Expression* expression)
+      : expression_(expression) { }
+
+  virtual void Accept(AstVisitor* v);
+
+  // Type testing & conversion.
+  virtual ReturnStatement* AsReturnStatement() { return this; }
+
+  Expression* expression() { return expression_; }
+
+ private:
+  Expression* expression_;
+};
+
+
+class WithEnterStatement: public Statement {
+ public:
+  explicit WithEnterStatement(Expression* expression, bool is_catch_block)
+      : expression_(expression), is_catch_block_(is_catch_block) { }
+
+  virtual void Accept(AstVisitor* v);
+
+  Expression* expression() const  { return expression_; }
+
+  bool is_catch_block() const { return is_catch_block_; }
+
+ private:
+  Expression* expression_;
+  bool is_catch_block_;
+};
+
+
+class WithExitStatement: public Statement {
+ public:
+  WithExitStatement() { }
+
+  virtual void Accept(AstVisitor* v);
+};
+
+
+class CaseClause: public ZoneObject {
+ public:
+  CaseClause(Expression* label, ZoneList<Statement*>* statements)
+      : label_(label), statements_(statements) { }
+
+  bool is_default() const  { return label_ == NULL; }
+  Expression* label() const  {
+    CHECK(!is_default());
+    return label_;
+  }
+  JumpTarget* body_target() { return &body_target_; }
+  ZoneList<Statement*>* statements() const  { return statements_; }
+
+ private:
+  Expression* label_;
+  JumpTarget body_target_;
+  ZoneList<Statement*>* statements_;
+};
+
+
+class SwitchStatement: public BreakableStatement {
+ public:
+  explicit SwitchStatement(ZoneStringList* labels)
+      : BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
+        tag_(NULL), cases_(NULL) { }
+
+  void Initialize(Expression* tag, ZoneList<CaseClause*>* cases) {
+    tag_ = tag;
+    cases_ = cases;
+  }
+
+  virtual void Accept(AstVisitor* v);
+
+  Expression* tag() const  { return tag_; }
+  ZoneList<CaseClause*>* cases() const  { return cases_; }
+
+ private:
+  Expression* tag_;
+  ZoneList<CaseClause*>* cases_;
+};
+
+
+// If-statements always have non-null references to their then- and
+// else-parts. When parsing if-statements with no explicit else-part,
+// the parser implicitly creates an empty statement. Use the
+// HasThenStatement() and HasElseStatement() functions to check if a
+// given if-statement has a then- or an else-part containing code.
+class IfStatement: public Statement {
+ public:
+  IfStatement(Expression* condition,
+              Statement* then_statement,
+              Statement* else_statement)
+      : condition_(condition),
+        then_statement_(then_statement),
+        else_statement_(else_statement) { }
+
+  virtual void Accept(AstVisitor* v);
+
+  bool HasThenStatement() const { return !then_statement()->IsEmpty(); }
+  bool HasElseStatement() const { return !else_statement()->IsEmpty(); }
+
+  Expression* condition() const { return condition_; }
+  Statement* then_statement() const { return then_statement_; }
+  Statement* else_statement() const { return else_statement_; }
+
+ private:
+  Expression* condition_;
+  Statement* then_statement_;
+  Statement* else_statement_;
+};
+
+
+// NOTE: TargetCollectors are represented as nodes to fit in the target
+// stack in the compiler; this should probably be reworked.
+class TargetCollector: public AstNode {
+ public:
+  explicit TargetCollector(ZoneList<BreakTarget*>* targets)
+      : targets_(targets) {
+  }
+
+  // Adds a jump target to the collector. The collector stores a pointer not
+  // a copy of the target to make binding work, so make sure not to pass in
+  // references to something on the stack.
+  void AddTarget(BreakTarget* target);
+
+  // Virtual behaviour. TargetCollectors are never part of the AST.
+  virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
+  virtual TargetCollector* AsTargetCollector() { return this; }
+
+  ZoneList<BreakTarget*>* targets() { return targets_; }
+
+ private:
+  ZoneList<BreakTarget*>* targets_;
+};
+
+
+class TryStatement: public Statement {
+ public:
+  explicit TryStatement(Block* try_block)
+      : try_block_(try_block), escaping_targets_(NULL) { }
+
+  void set_escaping_targets(ZoneList<BreakTarget*>* targets) {
+    escaping_targets_ = targets;
+  }
+
+  Block* try_block() const { return try_block_; }
+  ZoneList<BreakTarget*>* escaping_targets() const { return escaping_targets_; }
+
+ private:
+  Block* try_block_;
+  ZoneList<BreakTarget*>* escaping_targets_;
+};
+
+
+class TryCatch: public TryStatement {
+ public:
+  TryCatch(Block* try_block, Expression* catch_var, Block* catch_block)
+      : TryStatement(try_block),
+        catch_var_(catch_var),
+        catch_block_(catch_block) {
+    ASSERT(catch_var->AsVariableProxy() != NULL);
+  }
+
+  virtual void Accept(AstVisitor* v);
+
+  Expression* catch_var() const  { return catch_var_; }
+  Block* catch_block() const  { return catch_block_; }
+
+ private:
+  Expression* catch_var_;
+  Block* catch_block_;
+};
+
+
+class TryFinally: public TryStatement {
+ public:
+  TryFinally(Block* try_block, Block* finally_block)
+      : TryStatement(try_block),
+        finally_block_(finally_block) { }
+
+  virtual void Accept(AstVisitor* v);
+
+  Block* finally_block() const { return finally_block_; }
+
+ private:
+  Block* finally_block_;
+};
+
+
+class DebuggerStatement: public Statement {
+ public:
+  virtual void Accept(AstVisitor* v);
+};
+
+
+class EmptyStatement: public Statement {
+ public:
+  virtual void Accept(AstVisitor* v);
+
+  // Type testing & conversion.
+  virtual EmptyStatement* AsEmptyStatement() { return this; }
+};
+
+
+class Literal: public Expression {
+ public:
+  explicit Literal(Handle<Object> handle) : handle_(handle) { }
+
+  virtual void Accept(AstVisitor* v);
+
+  // Type testing & conversion.
+  virtual Literal* AsLiteral() { return this; }
+
+  // Check if this literal is identical to the other literal.
+  bool IsIdenticalTo(const Literal* other) const {
+    return handle_.is_identical_to(other->handle_);
+  }
+
+  virtual bool IsValidJSON() { return true; }
+
+  // Identity testers.
+  bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); }
+  bool IsTrue() const { return handle_.is_identical_to(Factory::true_value()); }
+  bool IsFalse() const {
+    return handle_.is_identical_to(Factory::false_value());
+  }
+
+  Handle<Object> handle() const { return handle_; }
+
+ private:
+  Handle<Object> handle_;
+};
+
+
+// Base class for literals that needs space in the corresponding JSFunction.
+class MaterializedLiteral: public Expression {
+ public:
+  explicit MaterializedLiteral(int literal_index, bool is_simple, int depth)
+      : literal_index_(literal_index), is_simple_(is_simple), depth_(depth) {}
+
+  virtual MaterializedLiteral* AsMaterializedLiteral() { return this; }
+
+  int literal_index() { return literal_index_; }
+
+  // A materialized literal is simple if the values consist of only
+  // constants and simple object and array literals.
+  bool is_simple() const { return is_simple_; }
+
+  virtual bool IsValidJSON() { return true; }
+
+  int depth() const { return depth_; }
+
+ private:
+  int literal_index_;
+  bool is_simple_;
+  int depth_;
+};
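+// Illustrative examples: {a: 1, b: {c: 2}} consists only of constants and
+// nested literals, so it is simple, and its depth is 2 because of the nested
+// object literal; {a: f()} contains a computed value and is not simple.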
+
+
+// An object literal has a boilerplate object that is used
+// for minimizing the work when constructing it at runtime.
+class ObjectLiteral: public MaterializedLiteral {
+ public:
+  // Property is used for passing information
+  // about an object literal's properties from the parser
+  // to the code generator.
+  class Property: public ZoneObject {
+   public:
+
+    enum Kind {
+      CONSTANT,              // Property with constant value (compile time).
+      COMPUTED,              // Property with computed value (execution time).
+      MATERIALIZED_LITERAL,  // Property value is a materialized literal.
+      GETTER, SETTER,        // Property is an accessor function.
+      PROTOTYPE              // Property is __proto__.
+    };
+
+    Property(Literal* key, Expression* value);
+    Property(bool is_getter, FunctionLiteral* value);
+
+    Literal* key() { return key_; }
+    Expression* value() { return value_; }
+    Kind kind() { return kind_; }
+
+   private:
+    Literal* key_;
+    Expression* value_;
+    Kind kind_;
+  };
+
+  ObjectLiteral(Handle<FixedArray> constant_properties,
+                ZoneList<Property*>* properties,
+                int literal_index,
+                bool is_simple,
+                int depth)
+      : MaterializedLiteral(literal_index, is_simple, depth),
+        constant_properties_(constant_properties),
+        properties_(properties) {}
+
+  virtual ObjectLiteral* AsObjectLiteral() { return this; }
+  virtual void Accept(AstVisitor* v);
+  virtual bool IsValidJSON();
+
+  Handle<FixedArray> constant_properties() const {
+    return constant_properties_;
+  }
+  ZoneList<Property*>* properties() const { return properties_; }
+
+ private:
+  Handle<FixedArray> constant_properties_;
+  ZoneList<Property*>* properties_;
+};
+
+
+// Node for capturing a regexp literal.
+class RegExpLiteral: public MaterializedLiteral {
+ public:
+  RegExpLiteral(Handle<String> pattern,
+                Handle<String> flags,
+                int literal_index)
+      : MaterializedLiteral(literal_index, false, 1),
+        pattern_(pattern),
+        flags_(flags) {}
+
+  virtual void Accept(AstVisitor* v);
+
+  Handle<String> pattern() const { return pattern_; }
+  Handle<String> flags() const { return flags_; }
+
+ private:
+  Handle<String> pattern_;
+  Handle<String> flags_;
+};
+
+
+// An array literal has a literals object that is used
+// for minimizing the work when constructing it at runtime.
+class ArrayLiteral: public MaterializedLiteral {
+ public:
+  ArrayLiteral(Handle<FixedArray> literals,
+               ZoneList<Expression*>* values,
+               int literal_index,
+               bool is_simple,
+               int depth)
+      : MaterializedLiteral(literal_index, is_simple, depth),
+        literals_(literals),
+        values_(values) {}
+
+  virtual void Accept(AstVisitor* v);
+  virtual ArrayLiteral* AsArrayLiteral() { return this; }
+  virtual bool IsValidJSON();
+
+  Handle<FixedArray> literals() const { return literals_; }
+  ZoneList<Expression*>* values() const { return values_; }
+
+ private:
+  Handle<FixedArray> literals_;
+  ZoneList<Expression*>* values_;
+};
+
+
+// Node for constructing a context extension object for a catch block.
+// The catch context extension object has one property, the catch
+// variable, which should be DontDelete.
+class CatchExtensionObject: public Expression {
+ public:
+  CatchExtensionObject(Literal* key, VariableProxy* value)
+      : key_(key), value_(value) {
+  }
+
+  virtual void Accept(AstVisitor* v);
+
+  Literal* key() const { return key_; }
+  VariableProxy* value() const { return value_; }
+
+ private:
+  Literal* key_;
+  VariableProxy* value_;
+};
+
+
+class VariableProxy: public Expression {
+ public:
+  virtual void Accept(AstVisitor* v);
+
+  // Type testing & conversion
+  virtual Property* AsProperty() {
+    return var_ == NULL ? NULL : var_->AsProperty();
+  }
+  virtual VariableProxy* AsVariableProxy()  { return this; }
+
+  Variable* AsVariable() {
+    return this == NULL || var_ == NULL ? NULL : var_->AsVariable();
+  }
+
+  virtual bool IsValidLeftHandSide() {
+    return var_ == NULL ? true : var_->IsValidLeftHandSide();
+  }
+
+  bool IsVariable(Handle<String> n) {
+    return !is_this() && name().is_identical_to(n);
+  }
+
+  bool IsArguments() {
+    Variable* variable = AsVariable();
+    return (variable == NULL) ? false : variable->is_arguments();
+  }
+
+  Handle<String> name() const  { return name_; }
+  Variable* var() const  { return var_; }
+  UseCount* var_uses()  { return &var_uses_; }
+  UseCount* obj_uses()  { return &obj_uses_; }
+  bool is_this() const  { return is_this_; }
+  bool inside_with() const  { return inside_with_; }
+
+  // Bind this proxy to the variable var.
+  void BindTo(Variable* var);
+
+ protected:
+  Handle<String> name_;
+  Variable* var_;  // resolved variable, or NULL
+  bool is_this_;
+  bool inside_with_;
+
+  // VariableProxy usage info.
+  UseCount var_uses_;  // uses of the variable value
+  UseCount obj_uses_;  // uses of the object the variable points to
+
+  VariableProxy(Handle<String> name, bool is_this, bool inside_with);
+  explicit VariableProxy(bool is_this);
+
+  friend class Scope;
+};
+
+
+class VariableProxySentinel: public VariableProxy {
+ public:
+  virtual bool IsValidLeftHandSide() { return !is_this(); }
+  static VariableProxySentinel* this_proxy() { return &this_proxy_; }
+  static VariableProxySentinel* identifier_proxy() {
+    return &identifier_proxy_;
+  }
+
+ private:
+  explicit VariableProxySentinel(bool is_this) : VariableProxy(is_this) { }
+  static VariableProxySentinel this_proxy_;
+  static VariableProxySentinel identifier_proxy_;
+};
+
+
+class Slot: public Expression {
+ public:
+  enum Type {
+    // A slot in the parameter section on the stack. index() is
+    // the parameter index, counting left-to-right, starting at 0.
+    PARAMETER,
+
+    // A slot in the local section on the stack. index() is
+    // the variable index in the stack frame, starting at 0.
+    LOCAL,
+
+    // An indexed slot in a heap context. index() is the
+    // variable index in the context object on the heap,
+    // starting at 0. var()->scope() is the corresponding
+    // scope.
+    CONTEXT,
+
+    // A named slot in a heap context. var()->name() is the
+    // variable name in the context object on the heap,
+    // with lookup starting at the current context. index()
+    // is invalid.
+    LOOKUP,
+
+    // A property in the global object. var()->name() is
+    // the property name.
+    GLOBAL
+  };
+
+  Slot(Variable* var, Type type, int index)
+      : var_(var), type_(type), index_(index) {
+    ASSERT(var != NULL);
+  }
+
+  virtual void Accept(AstVisitor* v);
+
+  // Type testing & conversion
+  virtual Slot* AsSlot() { return this; }
+
+  // Accessors
+  Variable* var() const { return var_; }
+  Type type() const { return type_; }
+  int index() const { return index_; }
+  bool is_arguments() const { return var_->is_arguments(); }
+
+ private:
+  Variable* var_;
+  Type type_;
+  int index_;
+};
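+// Illustrative mapping of the slot types above: in
+//   function f(x) { var y; return x + y; }
+// x typically gets a PARAMETER slot (index 0) and y a LOCAL slot; a variable
+// captured by a nested closure gets a CONTEXT slot, names that must be looked
+// up dynamically (e.g. inside 'with') use LOOKUP, and top-level variables
+// become GLOBAL properties of the global object.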
+
+
+class Property: public Expression {
+ public:
+  // Synthetic properties are property lookups introduced by the system,
+  // to objects that aren't visible to the user. Function calls to synthetic
+  // properties should use the global object as receiver, not the base object
+  // of the resolved Reference.
+  enum Type { NORMAL, SYNTHETIC };
+  Property(Expression* obj, Expression* key, int pos, Type type = NORMAL)
+      : obj_(obj), key_(key), pos_(pos), type_(type) { }
+
+  virtual void Accept(AstVisitor* v);
+
+  // Type testing & conversion
+  virtual Property* AsProperty() { return this; }
+
+  virtual bool IsValidLeftHandSide() { return true; }
+
+  Expression* obj() const { return obj_; }
+  Expression* key() const { return key_; }
+  int position() const { return pos_; }
+  bool is_synthetic() const { return type_ == SYNTHETIC; }
+
+  // Returns the singleton property access on 'this'.  Used
+  // during preparsing.
+  static Property* this_property() { return &this_property_; }
+
+ private:
+  Expression* obj_;
+  Expression* key_;
+  int pos_;
+  Type type_;
+
+  // Dummy property used during preparsing.
+  static Property this_property_;
+};
+
+
+class Call: public Expression {
+ public:
+  Call(Expression* expression, ZoneList<Expression*>* arguments, int pos)
+      : expression_(expression), arguments_(arguments), pos_(pos) { }
+
+  virtual void Accept(AstVisitor* v);
+
+  // Type testing and conversion.
+  virtual Call* AsCall() { return this; }
+
+  Expression* expression() const { return expression_; }
+  ZoneList<Expression*>* arguments() const { return arguments_; }
+  int position() { return pos_; }
+
+  static Call* sentinel() { return &sentinel_; }
+
+ private:
+  Expression* expression_;
+  ZoneList<Expression*>* arguments_;
+  int pos_;
+
+  static Call sentinel_;
+};
+
+
+class CallNew: public Expression {
+ public:
+  CallNew(Expression* expression, ZoneList<Expression*>* arguments, int pos)
+      : expression_(expression), arguments_(arguments), pos_(pos) { }
+
+  virtual void Accept(AstVisitor* v);
+
+  Expression* expression() const { return expression_; }
+  ZoneList<Expression*>* arguments() const { return arguments_; }
+  int position() { return pos_; }
+
+ private:
+  Expression* expression_;
+  ZoneList<Expression*>* arguments_;
+  int pos_;
+};
+
+
+// The CallRuntime class does not represent any official JavaScript
+// language construct. Instead it is used to call a C or JS function
+// with a set of arguments. This is used from the builtins that are
+// implemented in JavaScript (see "v8natives.js").
+class CallRuntime: public Expression {
+ public:
+  CallRuntime(Handle<String> name,
+              Runtime::Function* function,
+              ZoneList<Expression*>* arguments)
+      : name_(name), function_(function), arguments_(arguments) { }
+
+  virtual void Accept(AstVisitor* v);
+
+  Handle<String> name() const { return name_; }
+  Runtime::Function* function() const { return function_; }
+  ZoneList<Expression*>* arguments() const { return arguments_; }
+
+ private:
+  Handle<String> name_;
+  Runtime::Function* function_;
+  ZoneList<Expression*>* arguments_;
+};
+
+
+class UnaryOperation: public Expression {
+ public:
+  UnaryOperation(Token::Value op, Expression* expression)
+      : op_(op), expression_(expression) {
+    ASSERT(Token::IsUnaryOp(op));
+  }
+
+  virtual void Accept(AstVisitor* v);
+
+  // Type testing & conversion
+  virtual UnaryOperation* AsUnaryOperation() { return this; }
+
+  Token::Value op() const { return op_; }
+  Expression* expression() const { return expression_; }
+
+ private:
+  Token::Value op_;
+  Expression* expression_;
+};
+
+
+class BinaryOperation: public Expression {
+ public:
+  BinaryOperation(Token::Value op, Expression* left, Expression* right)
+      : op_(op), left_(left), right_(right) {
+    ASSERT(Token::IsBinaryOp(op));
+  }
+
+  virtual void Accept(AstVisitor* v);
+
+  // Type testing & conversion
+  virtual BinaryOperation* AsBinaryOperation() { return this; }
+
+  // True iff the result can be safely overwritten (to avoid allocation).
+  // False for operations that can return one of their operands.
+  bool ResultOverwriteAllowed() {
+    switch (op_) {
+      case Token::COMMA:
+      case Token::OR:
+      case Token::AND:
+        return false;
+      case Token::BIT_OR:
+      case Token::BIT_XOR:
+      case Token::BIT_AND:
+      case Token::SHL:
+      case Token::SAR:
+      case Token::SHR:
+      case Token::ADD:
+      case Token::SUB:
+      case Token::MUL:
+      case Token::DIV:
+      case Token::MOD:
+        return true;
+      default:
+        UNREACHABLE();
+    }
+    return false;
+  }
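+  // Example: in "a || b" the result may be one of the operands themselves
+  // (e.g. the object bound to a), so it must not be clobbered in place,
+  // whereas "a + b" always produces a fresh value that generated code may
+  // reuse as scratch space.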
+
+  Token::Value op() const { return op_; }
+  Expression* left() const { return left_; }
+  Expression* right() const { return right_; }
+
+ private:
+  Token::Value op_;
+  Expression* left_;
+  Expression* right_;
+};
+
+
+class CountOperation: public Expression {
+ public:
+  CountOperation(bool is_prefix, Token::Value op, Expression* expression)
+      : is_prefix_(is_prefix), op_(op), expression_(expression) {
+    ASSERT(Token::IsCountOp(op));
+  }
+
+  virtual void Accept(AstVisitor* v);
+
+  bool is_prefix() const { return is_prefix_; }
+  bool is_postfix() const { return !is_prefix_; }
+  Token::Value op() const { return op_; }
+  Expression* expression() const { return expression_; }
+
+  virtual void MarkAsStatement() { is_prefix_ = true; }
+
+ private:
+  bool is_prefix_;
+  Token::Value op_;
+  Expression* expression_;
+};
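+// Example of the rewrite enabled by MarkAsStatement() above: in "i++;" the
+// value of the expression is never used, so the postfix operation can be
+// treated as the cheaper prefix form "++i;".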
+
+
+class CompareOperation: public Expression {
+ public:
+  CompareOperation(Token::Value op, Expression* left, Expression* right)
+      : op_(op), left_(left), right_(right) {
+    ASSERT(Token::IsCompareOp(op));
+  }
+
+  virtual void Accept(AstVisitor* v);
+
+  Token::Value op() const { return op_; }
+  Expression* left() const { return left_; }
+  Expression* right() const { return right_; }
+
+ private:
+  Token::Value op_;
+  Expression* left_;
+  Expression* right_;
+};
+
+
+class Conditional: public Expression {
+ public:
+  Conditional(Expression* condition,
+              Expression* then_expression,
+              Expression* else_expression)
+      : condition_(condition),
+        then_expression_(then_expression),
+        else_expression_(else_expression) { }
+
+  virtual void Accept(AstVisitor* v);
+
+  Expression* condition() const { return condition_; }
+  Expression* then_expression() const { return then_expression_; }
+  Expression* else_expression() const { return else_expression_; }
+
+ private:
+  Expression* condition_;
+  Expression* then_expression_;
+  Expression* else_expression_;
+};
+
+
+class Assignment: public Expression {
+ public:
+  Assignment(Token::Value op, Expression* target, Expression* value, int pos)
+      : op_(op), target_(target), value_(value), pos_(pos),
+        block_start_(false), block_end_(false) {
+    ASSERT(Token::IsAssignmentOp(op));
+  }
+
+  virtual void Accept(AstVisitor* v);
+  virtual Assignment* AsAssignment() { return this; }
+
+  Token::Value binary_op() const;
+
+  Token::Value op() const { return op_; }
+  Expression* target() const { return target_; }
+  Expression* value() const { return value_; }
+  int position() { return pos_; }
+
+  // An initialization block is a series of statements of the form
+  // x.y.z.a = ...; x.y.z.b = ...; etc. The parser marks the beginning and
+  // ending of these blocks to allow for optimizations of initialization
+  // blocks.
+  bool starts_initialization_block() { return block_start_; }
+  bool ends_initialization_block() { return block_end_; }
+  void mark_block_start() { block_start_ = true; }
+  void mark_block_end() { block_end_ = true; }
+
+ private:
+  Token::Value op_;
+  Expression* target_;
+  Expression* value_;
+  int pos_;
+  bool block_start_;
+  bool block_end_;
+};
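+// Example of an initialization block: for
+//   x.y.z.a = 1; x.y.z.b = 2; x.y.z.c = 3;
+// the parser calls mark_block_start() on the first assignment and
+// mark_block_end() on the last, so the code generator can optimize the whole
+// run as a unit.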
+
+
+class Throw: public Expression {
+ public:
+  Throw(Expression* exception, int pos)
+      : exception_(exception), pos_(pos) {}
+
+  virtual void Accept(AstVisitor* v);
+  Expression* exception() const { return exception_; }
+  int position() const { return pos_; }
+
+ private:
+  Expression* exception_;
+  int pos_;
+};
+
+
+class FunctionLiteral: public Expression {
+ public:
+  FunctionLiteral(Handle<String> name,
+                  Scope* scope,
+                  ZoneList<Statement*>* body,
+                  int materialized_literal_count,
+                  bool contains_array_literal,
+                  int expected_property_count,
+                  bool has_only_this_property_assignments,
+                  bool has_only_simple_this_property_assignments,
+                  Handle<FixedArray> this_property_assignments,
+                  int num_parameters,
+                  int start_position,
+                  int end_position,
+                  bool is_expression)
+      : name_(name),
+        scope_(scope),
+        body_(body),
+        materialized_literal_count_(materialized_literal_count),
+        contains_array_literal_(contains_array_literal),
+        expected_property_count_(expected_property_count),
+        has_only_this_property_assignments_(has_only_this_property_assignments),
+        has_only_simple_this_property_assignments_(
+            has_only_simple_this_property_assignments),
+        this_property_assignments_(this_property_assignments),
+        num_parameters_(num_parameters),
+        start_position_(start_position),
+        end_position_(end_position),
+        is_expression_(is_expression),
+        loop_nesting_(0),
+        function_token_position_(RelocInfo::kNoPosition),
+        inferred_name_(Heap::empty_string()) {
+#ifdef DEBUG
+    already_compiled_ = false;
+#endif
+  }
+
+  virtual void Accept(AstVisitor* v);
+
+  // Type testing & conversion
+  virtual FunctionLiteral* AsFunctionLiteral()  { return this; }
+
+  Handle<String> name() const  { return name_; }
+  Scope* scope() const  { return scope_; }
+  ZoneList<Statement*>* body() const  { return body_; }
+  void set_function_token_position(int pos) { function_token_position_ = pos; }
+  int function_token_position() const { return function_token_position_; }
+  int start_position() const { return start_position_; }
+  int end_position() const { return end_position_; }
+  bool is_expression() const { return is_expression_; }
+
+  int materialized_literal_count() { return materialized_literal_count_; }
+  bool contains_array_literal() { return contains_array_literal_; }
+  int expected_property_count() { return expected_property_count_; }
+  bool has_only_this_property_assignments() {
+    return has_only_this_property_assignments_;
+  }
+  bool has_only_simple_this_property_assignments() {
+    return has_only_simple_this_property_assignments_;
+  }
+  Handle<FixedArray> this_property_assignments() {
+    return this_property_assignments_;
+  }
+  int num_parameters() { return num_parameters_; }
+
+  bool AllowsLazyCompilation();
+
+  int loop_nesting() const { return loop_nesting_; }
+  void set_loop_nesting(int nesting) { loop_nesting_ = nesting; }
+
+  Handle<String> inferred_name() const  { return inferred_name_; }
+  void set_inferred_name(Handle<String> inferred_name) {
+    inferred_name_ = inferred_name;
+  }
+
+#ifdef DEBUG
+  void mark_as_compiled() {
+    ASSERT(!already_compiled_);
+    already_compiled_ = true;
+  }
+#endif
+
+ private:
+  Handle<String> name_;
+  Scope* scope_;
+  ZoneList<Statement*>* body_;
+  int materialized_literal_count_;
+  bool contains_array_literal_;
+  int expected_property_count_;
+  bool has_only_this_property_assignments_;
+  bool has_only_simple_this_property_assignments_;
+  Handle<FixedArray> this_property_assignments_;
+  int num_parameters_;
+  int start_position_;
+  int end_position_;
+  bool is_expression_;
+  int loop_nesting_;
+  int function_token_position_;
+  Handle<String> inferred_name_;
+#ifdef DEBUG
+  bool already_compiled_;
+#endif
+};
+
+
+class FunctionBoilerplateLiteral: public Expression {
+ public:
+  explicit FunctionBoilerplateLiteral(Handle<JSFunction> boilerplate)
+      : boilerplate_(boilerplate) {
+    ASSERT(boilerplate->IsBoilerplate());
+  }
+
+  Handle<JSFunction> boilerplate() const { return boilerplate_; }
+
+  virtual void Accept(AstVisitor* v);
+
+ private:
+  Handle<JSFunction> boilerplate_;
+};
+
+
+class ThisFunction: public Expression {
+ public:
+  virtual void Accept(AstVisitor* v);
+};
+
+
+// ----------------------------------------------------------------------------
+// Regular expressions
+
+
+class RegExpVisitor BASE_EMBEDDED {
+ public:
+  virtual ~RegExpVisitor() { }
+#define MAKE_CASE(Name)                                              \
+  virtual void* Visit##Name(RegExp##Name*, void* data) = 0;
+  FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE)
+#undef MAKE_CASE
+};
+
+
+class RegExpTree: public ZoneObject {
+ public:
+  static const int kInfinity = kMaxInt;
+  virtual ~RegExpTree() { }
+  virtual void* Accept(RegExpVisitor* visitor, void* data) = 0;
+  virtual RegExpNode* ToNode(RegExpCompiler* compiler,
+                             RegExpNode* on_success) = 0;
+  virtual bool IsTextElement() { return false; }
+  virtual bool IsAnchored() { return false; }
+  virtual int min_match() = 0;
+  virtual int max_match() = 0;
+  // Returns the interval of registers used for captures within this
+  // expression.
+  virtual Interval CaptureRegisters() { return Interval::Empty(); }
+  virtual void AppendToText(RegExpText* text);
+  SmartPointer<const char> ToString();
+#define MAKE_ASTYPE(Name)                                                  \
+  virtual RegExp##Name* As##Name();                                        \
+  virtual bool Is##Name();
+  FOR_EACH_REG_EXP_TREE_TYPE(MAKE_ASTYPE)
+#undef MAKE_ASTYPE
+};
+
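+// For illustration (derived from the subclasses below): a three-character
+// atom such as "foo" reports min_match() == max_match() == 3, while a
+// quantifier like /a{2,5}/ over a one-character body reports
+// min_match() == 2 and max_match() == 5.
+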
+
+class RegExpDisjunction: public RegExpTree {
+ public:
+  explicit RegExpDisjunction(ZoneList<RegExpTree*>* alternatives);
+  virtual void* Accept(RegExpVisitor* visitor, void* data);
+  virtual RegExpNode* ToNode(RegExpCompiler* compiler,
+                             RegExpNode* on_success);
+  virtual RegExpDisjunction* AsDisjunction();
+  virtual Interval CaptureRegisters();
+  virtual bool IsDisjunction();
+  virtual bool IsAnchored();
+  virtual int min_match() { return min_match_; }
+  virtual int max_match() { return max_match_; }
+  ZoneList<RegExpTree*>* alternatives() { return alternatives_; }
+ private:
+  ZoneList<RegExpTree*>* alternatives_;
+  int min_match_;
+  int max_match_;
+};
+
+
+class RegExpAlternative: public RegExpTree {
+ public:
+  explicit RegExpAlternative(ZoneList<RegExpTree*>* nodes);
+  virtual void* Accept(RegExpVisitor* visitor, void* data);
+  virtual RegExpNode* ToNode(RegExpCompiler* compiler,
+                             RegExpNode* on_success);
+  virtual RegExpAlternative* AsAlternative();
+  virtual Interval CaptureRegisters();
+  virtual bool IsAlternative();
+  virtual bool IsAnchored();
+  virtual int min_match() { return min_match_; }
+  virtual int max_match() { return max_match_; }
+  ZoneList<RegExpTree*>* nodes() { return nodes_; }
+ private:
+  ZoneList<RegExpTree*>* nodes_;
+  int min_match_;
+  int max_match_;
+};
+
+
+class RegExpAssertion: public RegExpTree {
+ public:
+  enum Type {
+    START_OF_LINE,
+    START_OF_INPUT,
+    END_OF_LINE,
+    END_OF_INPUT,
+    BOUNDARY,
+    NON_BOUNDARY
+  };
+  explicit RegExpAssertion(Type type) : type_(type) { }
+  virtual void* Accept(RegExpVisitor* visitor, void* data);
+  virtual RegExpNode* ToNode(RegExpCompiler* compiler,
+                             RegExpNode* on_success);
+  virtual RegExpAssertion* AsAssertion();
+  virtual bool IsAssertion();
+  virtual bool IsAnchored();
+  virtual int min_match() { return 0; }
+  virtual int max_match() { return 0; }
+  Type type() { return type_; }
+ private:
+  Type type_;
+};
+
+
+class CharacterSet BASE_EMBEDDED {
+ public:
+  explicit CharacterSet(uc16 standard_set_type)
+      : ranges_(NULL),
+        standard_set_type_(standard_set_type) {}
+  explicit CharacterSet(ZoneList<CharacterRange>* ranges)
+      : ranges_(ranges),
+        standard_set_type_(0) {}
+  ZoneList<CharacterRange>* ranges();
+  uc16 standard_set_type() { return standard_set_type_; }
+  void set_standard_set_type(uc16 special_set_type) {
+    standard_set_type_ = special_set_type;
+  }
+  bool is_standard() { return standard_set_type_ != 0; }
+ private:
+  ZoneList<CharacterRange>* ranges_;
+  // If non-zero, the value represents a standard set (e.g., all whitespace
+  // characters) without having to expand the ranges.
+  uc16 standard_set_type_;
+};
+
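+// Example of the two representations above: CharacterSet('s') stands for the
+// standard whitespace set (see the standard_type() values documented in
+// RegExpCharacterClass below) without materializing any ranges, whereas
+// CharacterSet(some_ranges) holds an explicit list of CharacterRange values
+// and reports is_standard() == false.
+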
+
+class RegExpCharacterClass: public RegExpTree {
+ public:
+  RegExpCharacterClass(ZoneList<CharacterRange>* ranges, bool is_negated)
+      : set_(ranges),
+        is_negated_(is_negated) { }
+  explicit RegExpCharacterClass(uc16 type)
+      : set_(type),
+        is_negated_(false) { }
+  virtual void* Accept(RegExpVisitor* visitor, void* data);
+  virtual RegExpNode* ToNode(RegExpCompiler* compiler,
+                             RegExpNode* on_success);
+  virtual RegExpCharacterClass* AsCharacterClass();
+  virtual bool IsCharacterClass();
+  virtual bool IsTextElement() { return true; }
+  virtual int min_match() { return 1; }
+  virtual int max_match() { return 1; }
+  virtual void AppendToText(RegExpText* text);
+  CharacterSet character_set() { return set_; }
+  // TODO(lrn): Remove need for complex version of is_standard that
+  // recognizes a mangled standard set and just do { return set_.is_special(); }
+  bool is_standard();
+  // Returns a value representing the standard character set if is_standard()
+  // returns true.
+  // Currently used values are:
+  // s : unicode whitespace
+  // S : unicode non-whitespace
+  // w : ASCII word character (digit, letter, underscore)
+  // W : non-ASCII word character
+  // d : ASCII digit
+  // D : non-ASCII digit
+  // . : non-unicode non-newline
+  // * : All characters
+  uc16 standard_type() { return set_.standard_set_type(); }
+  ZoneList<CharacterRange>* ranges() { return set_.ranges(); }
+  bool is_negated() { return is_negated_; }
+ private:
+  CharacterSet set_;
+  bool is_negated_;
+};
+
+
+class RegExpAtom: public RegExpTree {
+ public:
+  explicit RegExpAtom(Vector<const uc16> data) : data_(data) { }
+  virtual void* Accept(RegExpVisitor* visitor, void* data);
+  virtual RegExpNode* ToNode(RegExpCompiler* compiler,
+                             RegExpNode* on_success);
+  virtual RegExpAtom* AsAtom();
+  virtual bool IsAtom();
+  virtual bool IsTextElement() { return true; }
+  virtual int min_match() { return data_.length(); }
+  virtual int max_match() { return data_.length(); }
+  virtual void AppendToText(RegExpText* text);
+  Vector<const uc16> data() { return data_; }
+  int length() { return data_.length(); }
+ private:
+  Vector<const uc16> data_;
+};
+
+
+class RegExpText: public RegExpTree {
+ public:
+  RegExpText() : elements_(2), length_(0) {}
+  virtual void* Accept(RegExpVisitor* visitor, void* data);
+  virtual RegExpNode* ToNode(RegExpCompiler* compiler,
+                             RegExpNode* on_success);
+  virtual RegExpText* AsText();
+  virtual bool IsText();
+  virtual bool IsTextElement() { return true; }
+  virtual int min_match() { return length_; }
+  virtual int max_match() { return length_; }
+  virtual void AppendToText(RegExpText* text);
+  void AddElement(TextElement elm) {
+    elements_.Add(elm);
+    length_ += elm.length();
+  }
+  ZoneList<TextElement>* elements() { return &elements_; }
+ private:
+  ZoneList<TextElement> elements_;
+  int length_;
+};
+
+
+class RegExpQuantifier: public RegExpTree {
+ public:
+  RegExpQuantifier(int min, int max, bool is_greedy, RegExpTree* body)
+      : min_(min),
+        max_(max),
+        is_greedy_(is_greedy),
+        body_(body),
+        min_match_(min * body->min_match()) {
+    if (max > 0 && body->max_match() > kInfinity / max) {
+      max_match_ = kInfinity;
+    } else {
+      max_match_ = max * body->max_match();
+    }
+  }
+  virtual void* Accept(RegExpVisitor* visitor, void* data);
+  virtual RegExpNode* ToNode(RegExpCompiler* compiler,
+                             RegExpNode* on_success);
+  static RegExpNode* ToNode(int min,
+                            int max,
+                            bool is_greedy,
+                            RegExpTree* body,
+                            RegExpCompiler* compiler,
+                            RegExpNode* on_success,
+                            bool not_at_start = false);
+  virtual RegExpQuantifier* AsQuantifier();
+  virtual Interval CaptureRegisters();
+  virtual bool IsQuantifier();
+  virtual int min_match() { return min_match_; }
+  virtual int max_match() { return max_match_; }
+  int min() { return min_; }
+  int max() { return max_; }
+  bool is_greedy() { return is_greedy_; }
+  RegExpTree* body() { return body_; }
+ private:
+  int min_;
+  int max_;
+  bool is_greedy_;
+  RegExpTree* body_;
+  int min_match_;
+  int max_match_;
+};
+
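+// Worked example of the max_match_ clamping above: for /(?:a*){3}/ the body's
+// max_match() is kInfinity, so 3 * kInfinity would overflow; the constructor
+// detects body->max_match() > kInfinity / max and clamps max_match_ to
+// kInfinity instead.
+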
+
+class RegExpCapture: public RegExpTree {
+ public:
+  explicit RegExpCapture(RegExpTree* body, int index)
+      : body_(body), index_(index) { }
+  virtual void* Accept(RegExpVisitor* visitor, void* data);
+  virtual RegExpNode* ToNode(RegExpCompiler* compiler,
+                             RegExpNode* on_success);
+  static RegExpNode* ToNode(RegExpTree* body,
+                            int index,
+                            RegExpCompiler* compiler,
+                            RegExpNode* on_success);
+  virtual RegExpCapture* AsCapture();
+  virtual bool IsAnchored();
+  virtual Interval CaptureRegisters();
+  virtual bool IsCapture();
+  virtual int min_match() { return body_->min_match(); }
+  virtual int max_match() { return body_->max_match(); }
+  RegExpTree* body() { return body_; }
+  int index() { return index_; }
+  static int StartRegister(int index) { return index * 2; }
+  static int EndRegister(int index) { return index * 2 + 1; }
+ private:
+  RegExpTree* body_;
+  int index_;
+};
+
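+// Register layout implied by StartRegister/EndRegister above: the capture
+// with index n uses registers 2n and 2n + 1, e.g. index 1 maps to registers
+// 2 and 3.
+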
+
+class RegExpLookahead: public RegExpTree {
+ public:
+  RegExpLookahead(RegExpTree* body,
+                  bool is_positive,
+                  int capture_count,
+                  int capture_from)
+      : body_(body),
+        is_positive_(is_positive),
+        capture_count_(capture_count),
+        capture_from_(capture_from) { }
+
+  virtual void* Accept(RegExpVisitor* visitor, void* data);
+  virtual RegExpNode* ToNode(RegExpCompiler* compiler,
+                             RegExpNode* on_success);
+  virtual RegExpLookahead* AsLookahead();
+  virtual Interval CaptureRegisters();
+  virtual bool IsLookahead();
+  virtual bool IsAnchored();
+  virtual int min_match() { return 0; }
+  virtual int max_match() { return 0; }
+  RegExpTree* body() { return body_; }
+  bool is_positive() { return is_positive_; }
+  int capture_count() { return capture_count_; }
+  int capture_from() { return capture_from_; }
+ private:
+  RegExpTree* body_;
+  bool is_positive_;
+  int capture_count_;
+  int capture_from_;
+};
+
+
+class RegExpBackReference: public RegExpTree {
+ public:
+  explicit RegExpBackReference(RegExpCapture* capture)
+      : capture_(capture) { }
+  virtual void* Accept(RegExpVisitor* visitor, void* data);
+  virtual RegExpNode* ToNode(RegExpCompiler* compiler,
+                             RegExpNode* on_success);
+  virtual RegExpBackReference* AsBackReference();
+  virtual bool IsBackReference();
+  virtual int min_match() { return 0; }
+  virtual int max_match() { return capture_->max_match(); }
+  int index() { return capture_->index(); }
+  RegExpCapture* capture() { return capture_; }
+ private:
+  RegExpCapture* capture_;
+};
+
+
+class RegExpEmpty: public RegExpTree {
+ public:
+  RegExpEmpty() { }
+  virtual void* Accept(RegExpVisitor* visitor, void* data);
+  virtual RegExpNode* ToNode(RegExpCompiler* compiler,
+                             RegExpNode* on_success);
+  virtual RegExpEmpty* AsEmpty();
+  virtual bool IsEmpty();
+  virtual int min_match() { return 0; }
+  virtual int max_match() { return 0; }
+  static RegExpEmpty* GetInstance() { return &kInstance; }
+ private:
+  static RegExpEmpty kInstance;
+};
+
+
+// ----------------------------------------------------------------------------
+// Basic visitor
+// - leaf node visitors are abstract.
+
+class AstVisitor BASE_EMBEDDED {
+ public:
+  AstVisitor() : stack_overflow_(false) { }
+  virtual ~AstVisitor() { }
+
+  // Dispatch
+  void Visit(AstNode* node) { node->Accept(this); }
+
+  // Iteration
+  virtual void VisitStatements(ZoneList<Statement*>* statements);
+  virtual void VisitExpressions(ZoneList<Expression*>* expressions);
+
+  // Stack overflow tracking support.
+  bool HasStackOverflow() const { return stack_overflow_; }
+  bool CheckStackOverflow() {
+    if (stack_overflow_) return true;
+    StackLimitCheck check;
+    if (!check.HasOverflowed()) return false;
+    return (stack_overflow_ = true);
+  }
+
+  // If a stack-overflow exception is encountered when visiting a
+  // node, calling SetStackOverflow will make sure that the visitor
+  // bails out without visiting more nodes.
+  void SetStackOverflow() { stack_overflow_ = true; }
+
+
+  // Individual nodes
+#define DEF_VISIT(type)                         \
+  virtual void Visit##type(type* node) = 0;
+  AST_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+ private:
+  bool stack_overflow_;
+};
+
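+// Minimal sketch (hypothetical, not part of this header) of a concrete
+// visitor: AST_NODE_LIST(DEF_VISIT) declares one pure virtual per node type,
+// so a subclass must override each of them, typically guarding against deep
+// recursion with CheckStackOverflow():
+//
+//   class ExamplePrinter: public AstVisitor {
+//    public:
+//     virtual void VisitAssignment(Assignment* node) {
+//       if (CheckStackOverflow()) return;
+//       Visit(node->target());
+//       Visit(node->value());
+//     }
+//     // ... one Visit##Type override per entry in AST_NODE_LIST ...
+//   };
+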
+
+} }  // namespace v8::internal
+
+#endif  // V8_AST_H_
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
new file mode 100644
index 0000000..e2d23ef
--- /dev/null
+++ b/src/bootstrapper.cc
@@ -0,0 +1,1615 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "accessors.h"
+#include "api.h"
+#include "bootstrapper.h"
+#include "compiler.h"
+#include "debug.h"
+#include "execution.h"
+#include "global-handles.h"
+#include "macro-assembler.h"
+#include "natives.h"
+
+namespace v8 {
+namespace internal {
+
+// A SourceCodeCache uses a FixedArray to store pairs of
+// (AsciiString*, JSFunction*), mapping names of native code files
+// (runtime.js, etc.) to precompiled functions. Instead of mapping
+// names to functions it might make sense to let the JS2C tool
+// generate an index for each native JS file.
+class SourceCodeCache BASE_EMBEDDED {
+ public:
+  explicit SourceCodeCache(Script::Type type): type_(type), cache_(NULL) { }
+
+  void Initialize(bool create_heap_objects) {
+    cache_ = create_heap_objects ? Heap::empty_fixed_array() : NULL;
+  }
+
+  void Iterate(ObjectVisitor* v) {
+    v->VisitPointer(bit_cast<Object**, FixedArray**>(&cache_));
+  }
+
+
+  bool Lookup(Vector<const char> name, Handle<JSFunction>* handle) {
+    for (int i = 0; i < cache_->length(); i+=2) {
+      SeqAsciiString* str = SeqAsciiString::cast(cache_->get(i));
+      if (str->IsEqualTo(name)) {
+        *handle = Handle<JSFunction>(JSFunction::cast(cache_->get(i + 1)));
+        return true;
+      }
+    }
+    return false;
+  }
+
+
+  void Add(Vector<const char> name, Handle<JSFunction> fun) {
+    ASSERT(fun->IsBoilerplate());
+    HandleScope scope;
+    int length = cache_->length();
+    Handle<FixedArray> new_array =
+        Factory::NewFixedArray(length + 2, TENURED);
+    cache_->CopyTo(0, *new_array, 0, cache_->length());
+    cache_ = *new_array;
+    Handle<String> str = Factory::NewStringFromAscii(name, TENURED);
+    cache_->set(length, *str);
+    cache_->set(length + 1, *fun);
+    Script::cast(fun->shared()->script())->set_type(Smi::FromInt(type_));
+  }
+
+ private:
+  Script::Type type_;
+  FixedArray* cache_;
+  DISALLOW_COPY_AND_ASSIGN(SourceCodeCache);
+};
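+
+// For example, after caching two native scripts the cache_ array would be
+// laid out as { "runtime.js", <JSFunction>, "math.js", <JSFunction> }, i.e.
+// names at even indices and their boilerplate functions at the following odd
+// indices.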
+
+static SourceCodeCache natives_cache(Script::TYPE_NATIVE);
+static SourceCodeCache extensions_cache(Script::TYPE_EXTENSION);
+
+
+Handle<String> Bootstrapper::NativesSourceLookup(int index) {
+  ASSERT(0 <= index && index < Natives::GetBuiltinsCount());
+  if (Heap::natives_source_cache()->get(index)->IsUndefined()) {
+    Handle<String> source_code =
+      Factory::NewStringFromAscii(Natives::GetScriptSource(index));
+    Heap::natives_source_cache()->set(index, *source_code);
+  }
+  Handle<Object> cached_source(Heap::natives_source_cache()->get(index));
+  return Handle<String>::cast(cached_source);
+}
+
+
+bool Bootstrapper::NativesCacheLookup(Vector<const char> name,
+                                      Handle<JSFunction>* handle) {
+  return natives_cache.Lookup(name, handle);
+}
+
+
+void Bootstrapper::NativesCacheAdd(Vector<const char> name,
+                                   Handle<JSFunction> fun) {
+  natives_cache.Add(name, fun);
+}
+
+
+void Bootstrapper::Initialize(bool create_heap_objects) {
+  natives_cache.Initialize(create_heap_objects);
+  extensions_cache.Initialize(create_heap_objects);
+}
+
+
+void Bootstrapper::TearDown() {
+  natives_cache.Initialize(false);  // Yes, symmetrical
+  extensions_cache.Initialize(false);
+}
+
+
+// Pending fixups are code positions that refer to builtin code
+// objects that were not available at the time the code was generated.
+// The pending list is processed whenever an environment has been
+// created.
+class PendingFixups : public AllStatic {
+ public:
+  static void Add(Code* code, MacroAssembler* masm);
+  static bool Process(Handle<JSBuiltinsObject> builtins);
+
+  static void Iterate(ObjectVisitor* v);
+
+ private:
+  static List<Object*> code_;
+  static List<const char*> name_;
+  static List<int> pc_;
+  static List<uint32_t> flags_;
+
+  static void Clear();
+};
+
+
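+// The four lists below are parallel: entry i of code_, name_, pc_ and flags_
+// together describes a single pending fixup (see PendingFixups::Add below).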
+List<Object*> PendingFixups::code_(0);
+List<const char*> PendingFixups::name_(0);
+List<int> PendingFixups::pc_(0);
+List<uint32_t> PendingFixups::flags_(0);
+
+
+void PendingFixups::Add(Code* code, MacroAssembler* masm) {
+  // Note this code is not only called during bootstrapping.
+  List<MacroAssembler::Unresolved>* unresolved = masm->unresolved();
+  int n = unresolved->length();
+  for (int i = 0; i < n; i++) {
+    const char* name = unresolved->at(i).name;
+    code_.Add(code);
+    name_.Add(name);
+    pc_.Add(unresolved->at(i).pc);
+    flags_.Add(unresolved->at(i).flags);
+    LOG(StringEvent("unresolved", name));
+  }
+}
+
+
+bool PendingFixups::Process(Handle<JSBuiltinsObject> builtins) {
+  HandleScope scope;
+  // NOTE: Extra fixups may be added to the list during the iteration
+  // due to lazy compilation of functions during the processing. Do not
+  // cache the result of getting the length of the code list.
+  for (int i = 0; i < code_.length(); i++) {
+    const char* name = name_[i];
+    uint32_t flags = flags_[i];
+    Handle<String> symbol = Factory::LookupAsciiSymbol(name);
+    Object* o = builtins->GetProperty(*symbol);
+#ifdef DEBUG
+    if (!o->IsJSFunction()) {
+      V8_Fatal(__FILE__, __LINE__, "Cannot resolve call to builtin %s", name);
+    }
+#endif
+    Handle<JSFunction> f = Handle<JSFunction>(JSFunction::cast(o));
+    // Make sure the number of parameters matches the formal parameter count.
+    int argc = Bootstrapper::FixupFlagsArgumentsCount::decode(flags);
+    USE(argc);
+    ASSERT(f->shared()->formal_parameter_count() == argc);
+    if (!f->is_compiled()) {
+      // Do lazy compilation and check for stack overflows.
+      if (!CompileLazy(f, CLEAR_EXCEPTION)) {
+        Clear();
+        return false;
+      }
+    }
+    Code* code = Code::cast(code_[i]);
+    Address pc = code->instruction_start() + pc_[i];
+    bool is_pc_relative = Bootstrapper::FixupFlagsIsPCRelative::decode(flags);
+    bool use_code_object = Bootstrapper::FixupFlagsUseCodeObject::decode(flags);
+
+    if (use_code_object) {
+      if (is_pc_relative) {
+        Assembler::set_target_address_at(
+            pc, reinterpret_cast<Address>(f->code()));
+      } else {
+        *reinterpret_cast<Object**>(pc) = f->code();
+      }
+    } else {
+      Assembler::set_target_address_at(pc, f->code()->instruction_start());
+    }
+
+    LOG(StringEvent("resolved", name));
+  }
+  Clear();
+
+  // TODO(1240818): We should probably try to avoid doing this for all
+  // the V8 builtin JS files. It should only happen after running
+  // runtime.js - just like there shouldn't be any fixups left after
+  // that.
+  for (int i = 0; i < Builtins::NumberOfJavaScriptBuiltins(); i++) {
+    Builtins::JavaScript id = static_cast<Builtins::JavaScript>(i);
+    Handle<String> name = Factory::LookupAsciiSymbol(Builtins::GetName(id));
+    JSFunction* function = JSFunction::cast(builtins->GetProperty(*name));
+    builtins->set_javascript_builtin(id, function);
+  }
+
+  return true;
+}
+
+
+void PendingFixups::Clear() {
+  code_.Clear();
+  name_.Clear();
+  pc_.Clear();
+  flags_.Clear();
+}
+
+
+void PendingFixups::Iterate(ObjectVisitor* v) {
+  if (!code_.is_empty()) {
+    v->VisitPointers(&code_[0], &code_[0] + code_.length());
+  }
+}
+
+
+class Genesis BASE_EMBEDDED {
+ public:
+  Genesis(Handle<Object> global_object,
+          v8::Handle<v8::ObjectTemplate> global_template,
+          v8::ExtensionConfiguration* extensions);
+  ~Genesis();
+
+  Handle<Context> result() { return result_; }
+
+  Genesis* previous() { return previous_; }
+  static Genesis* current() { return current_; }
+
+  // Support for thread preemption.
+  static int ArchiveSpacePerThread();
+  static char* ArchiveState(char* to);
+  static char* RestoreState(char* from);
+
+ private:
+  Handle<Context> global_context_;
+
+  // There may be more than one active genesis object: When GC is
+  // triggered during environment creation there may be weak handle
+  // processing callbacks which may create new environments.
+  Genesis* previous_;
+  static Genesis* current_;
+
+  Handle<Context> global_context() { return global_context_; }
+
+  void CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
+                   Handle<Object> global_object);
+  void InstallNativeFunctions();
+  bool InstallNatives();
+  bool InstallExtensions(v8::ExtensionConfiguration* extensions);
+  bool InstallExtension(const char* name);
+  bool InstallExtension(v8::RegisteredExtension* current);
+  bool InstallSpecialObjects();
+  bool ConfigureApiObject(Handle<JSObject> object,
+                          Handle<ObjectTemplateInfo> object_template);
+  bool ConfigureGlobalObjects(v8::Handle<v8::ObjectTemplate> global_template);
+
+  // Migrates all properties from the 'from' object to the 'to'
+  // object and overrides the prototype in 'to' with the one from
+  // 'from'.
+  void TransferObject(Handle<JSObject> from, Handle<JSObject> to);
+  void TransferNamedProperties(Handle<JSObject> from, Handle<JSObject> to);
+  void TransferIndexedProperties(Handle<JSObject> from, Handle<JSObject> to);
+
+  Handle<DescriptorArray> ComputeFunctionInstanceDescriptor(
+      bool make_prototype_read_only,
+      bool make_prototype_enumerable = false);
+  void MakeFunctionInstancePrototypeWritable();
+
+  void AddSpecialFunction(Handle<JSObject> prototype,
+                          const char* name,
+                          Handle<Code> code);
+
+  void BuildSpecialFunctionTable();
+
+  static bool CompileBuiltin(int index);
+  static bool CompileNative(Vector<const char> name, Handle<String> source);
+  static bool CompileScriptCached(Vector<const char> name,
+                                  Handle<String> source,
+                                  SourceCodeCache* cache,
+                                  v8::Extension* extension,
+                                  bool use_runtime_context);
+
+  Handle<Context> result_;
+};
+
+Genesis* Genesis::current_ = NULL;
+
+
+void Bootstrapper::Iterate(ObjectVisitor* v) {
+  natives_cache.Iterate(v);
+  extensions_cache.Iterate(v);
+  PendingFixups::Iterate(v);
+}
+
+
+// While setting up the environment, we collect code positions that
+// need to be patched before we can run any code in the environment.
+void Bootstrapper::AddFixup(Code* code, MacroAssembler* masm) {
+  PendingFixups::Add(code, masm);
+}
+
+
+bool Bootstrapper::IsActive() {
+  return Genesis::current() != NULL;
+}
+
+
+Handle<Context> Bootstrapper::CreateEnvironment(
+    Handle<Object> global_object,
+    v8::Handle<v8::ObjectTemplate> global_template,
+    v8::ExtensionConfiguration* extensions) {
+  Genesis genesis(global_object, global_template, extensions);
+  return genesis.result();
+}
+
+
+static void SetObjectPrototype(Handle<JSObject> object, Handle<Object> proto) {
+  // object.__proto__ = proto;
+  Handle<Map> old_to_map = Handle<Map>(object->map());
+  Handle<Map> new_to_map = Factory::CopyMapDropTransitions(old_to_map);
+  new_to_map->set_prototype(*proto);
+  object->set_map(*new_to_map);
+}
+
+
+void Bootstrapper::DetachGlobal(Handle<Context> env) {
+  JSGlobalProxy::cast(env->global_proxy())->set_context(*Factory::null_value());
+  SetObjectPrototype(Handle<JSObject>(env->global_proxy()),
+                     Factory::null_value());
+  env->set_global_proxy(env->global());
+  env->global()->set_global_receiver(env->global());
+}
+
+
+Genesis::~Genesis() {
+  ASSERT(current_ == this);
+  current_ = previous_;
+}
+
+
+static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
+                                          const char* name,
+                                          InstanceType type,
+                                          int instance_size,
+                                          Handle<JSObject> prototype,
+                                          Builtins::Name call,
+                                          bool is_ecma_native) {
+  Handle<String> symbol = Factory::LookupAsciiSymbol(name);
+  Handle<Code> call_code = Handle<Code>(Builtins::builtin(call));
+  Handle<JSFunction> function =
+    Factory::NewFunctionWithPrototype(symbol,
+                                      type,
+                                      instance_size,
+                                      prototype,
+                                      call_code,
+                                      is_ecma_native);
+  SetProperty(target, symbol, function, DONT_ENUM);
+  if (is_ecma_native) {
+    function->shared()->set_instance_class_name(*symbol);
+  }
+  return function;
+}
+
+
+Handle<DescriptorArray> Genesis::ComputeFunctionInstanceDescriptor(
+    bool make_prototype_read_only,
+    bool make_prototype_enumerable) {
+  Handle<DescriptorArray> result = Factory::empty_descriptor_array();
+
+  // Add prototype.
+  PropertyAttributes attributes = static_cast<PropertyAttributes>(
+      (make_prototype_enumerable ? 0 : DONT_ENUM)
+      | DONT_DELETE
+      | (make_prototype_read_only ? READ_ONLY : 0));
+  result =
+      Factory::CopyAppendProxyDescriptor(
+          result,
+          Factory::prototype_symbol(),
+          Factory::NewProxy(&Accessors::FunctionPrototype),
+          attributes);
+
+  attributes =
+      static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+  // Add length.
+  result =
+      Factory::CopyAppendProxyDescriptor(
+          result,
+          Factory::length_symbol(),
+          Factory::NewProxy(&Accessors::FunctionLength),
+          attributes);
+
+  // Add name.
+  result =
+      Factory::CopyAppendProxyDescriptor(
+          result,
+          Factory::name_symbol(),
+          Factory::NewProxy(&Accessors::FunctionName),
+          attributes);
+
+  // Add arguments.
+  result =
+      Factory::CopyAppendProxyDescriptor(
+          result,
+          Factory::arguments_symbol(),
+          Factory::NewProxy(&Accessors::FunctionArguments),
+          attributes);
+
+  // Add caller.
+  result =
+      Factory::CopyAppendProxyDescriptor(
+          result,
+          Factory::caller_symbol(),
+          Factory::NewProxy(&Accessors::FunctionCaller),
+          attributes);
+
+  return result;
+}
+
+
+void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
+                          Handle<Object> global_object) {
+  HandleScope scope;
+  // Allocate the global context FixedArray first and then patch the
+  // closure and extension object later (we need the empty function
+  // and the global object, but in order to create those, we need the
+  // global context).
+  global_context_ =
+      Handle<Context>::cast(
+          GlobalHandles::Create(*Factory::NewGlobalContext()));
+  Top::set_context(*global_context());
+
+  // Allocate the message listeners object.
+  v8::NeanderArray listeners;
+  global_context()->set_message_listeners(*listeners.value());
+
+  // Allocate the map for function instances.
+  Handle<Map> fm = Factory::NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
+  global_context()->set_function_instance_map(*fm);
+  // Please note that the prototype property for function instances must be
+  // writable.
+  Handle<DescriptorArray> function_map_descriptors =
+      ComputeFunctionInstanceDescriptor(false, false);
+  fm->set_instance_descriptors(*function_map_descriptors);
+
+  // Allocate the function map first and then patch the prototype later
+  fm = Factory::NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
+  global_context()->set_function_map(*fm);
+  function_map_descriptors = ComputeFunctionInstanceDescriptor(true);
+  fm->set_instance_descriptors(*function_map_descriptors);
+
+  Handle<String> object_name = Handle<String>(Heap::Object_symbol());
+
+  {  // --- O b j e c t ---
+    Handle<JSFunction> object_fun =
+        Factory::NewFunction(object_name, Factory::null_value());
+    Handle<Map> object_function_map =
+        Factory::NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+    object_fun->set_initial_map(*object_function_map);
+    object_function_map->set_constructor(*object_fun);
+
+    global_context()->set_object_function(*object_fun);
+
+    // Allocate a new prototype for the object function.
+    Handle<JSObject> prototype = Factory::NewJSObject(Top::object_function(),
+                                                      TENURED);
+
+    global_context()->set_initial_object_prototype(*prototype);
+    SetPrototype(object_fun, prototype);
+    object_function_map->
+      set_instance_descriptors(Heap::empty_descriptor_array());
+  }
+
+  // Allocate the empty function as the prototype for functions (ECMA-262,
+  // section 15.3.4).
+  Handle<String> symbol = Factory::LookupAsciiSymbol("Empty");
+  Handle<JSFunction> empty_function =
+      Factory::NewFunction(symbol, Factory::null_value());
+
+  {  // --- E m p t y ---
+    Handle<Code> code =
+        Handle<Code>(Builtins::builtin(Builtins::EmptyFunction));
+    empty_function->set_code(*code);
+    Handle<String> source = Factory::NewStringFromAscii(CStrVector("() {}"));
+    Handle<Script> script = Factory::NewScript(source);
+    script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
+    empty_function->shared()->set_script(*script);
+    empty_function->shared()->set_start_position(0);
+    empty_function->shared()->set_end_position(source->length());
+    empty_function->shared()->DontAdaptArguments();
+    global_context()->function_map()->set_prototype(*empty_function);
+    global_context()->function_instance_map()->set_prototype(*empty_function);
+
+    // Allocate the function map first and then patch the prototype later
+    Handle<Map> empty_fm = Factory::CopyMapDropDescriptors(fm);
+    empty_fm->set_instance_descriptors(*function_map_descriptors);
+    empty_fm->set_prototype(global_context()->object_function()->prototype());
+    empty_function->set_map(*empty_fm);
+  }
+
+  {  // --- G l o b a l ---
+    // Step 1: create a fresh inner JSGlobalObject
+    Handle<GlobalObject> object;
+    {
+      Handle<JSFunction> js_global_function;
+      Handle<ObjectTemplateInfo> js_global_template;
+      if (!global_template.IsEmpty()) {
+        // Get prototype template of the global_template
+        Handle<ObjectTemplateInfo> data =
+            v8::Utils::OpenHandle(*global_template);
+        Handle<FunctionTemplateInfo> global_constructor =
+            Handle<FunctionTemplateInfo>(
+                FunctionTemplateInfo::cast(data->constructor()));
+        Handle<Object> proto_template(global_constructor->prototype_template());
+        if (!proto_template->IsUndefined()) {
+          js_global_template =
+              Handle<ObjectTemplateInfo>::cast(proto_template);
+        }
+      }
+
+      if (js_global_template.is_null()) {
+        Handle<String> name = Handle<String>(Heap::empty_symbol());
+        Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
+        js_global_function =
+            Factory::NewFunction(name, JS_GLOBAL_OBJECT_TYPE,
+                                 JSGlobalObject::kSize, code, true);
+        // Change the constructor property of the prototype of the
+        // hidden global function to refer to the Object function.
+        Handle<JSObject> prototype =
+            Handle<JSObject>(
+                JSObject::cast(js_global_function->instance_prototype()));
+        SetProperty(prototype, Factory::constructor_symbol(),
+                    Top::object_function(), NONE);
+      } else {
+        Handle<FunctionTemplateInfo> js_global_constructor(
+            FunctionTemplateInfo::cast(js_global_template->constructor()));
+        js_global_function =
+            Factory::CreateApiFunction(js_global_constructor,
+                                       Factory::InnerGlobalObject);
+      }
+
+      js_global_function->initial_map()->set_is_hidden_prototype();
+      object = Factory::NewGlobalObject(js_global_function);
+    }
+
+    // Set the global context for the global object.
+    object->set_global_context(*global_context());
+
+    // Step 2: create or re-initialize the global proxy object.
+    Handle<JSGlobalProxy> global_proxy;
+    {
+      Handle<JSFunction> global_proxy_function;
+      if (global_template.IsEmpty()) {
+        Handle<String> name = Handle<String>(Heap::empty_symbol());
+        Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
+        global_proxy_function =
+            Factory::NewFunction(name, JS_GLOBAL_PROXY_TYPE,
+                                 JSGlobalProxy::kSize, code, true);
+      } else {
+        Handle<ObjectTemplateInfo> data =
+            v8::Utils::OpenHandle(*global_template);
+        Handle<FunctionTemplateInfo> global_constructor(
+                FunctionTemplateInfo::cast(data->constructor()));
+        global_proxy_function =
+            Factory::CreateApiFunction(global_constructor,
+                                       Factory::OuterGlobalObject);
+      }
+
+      Handle<String> global_name = Factory::LookupAsciiSymbol("global");
+      global_proxy_function->shared()->set_instance_class_name(*global_name);
+      global_proxy_function->initial_map()->set_is_access_check_needed(true);
+
+      // Set global_proxy.__proto__ to js_global after ConfigureGlobalObjects
+
+      if (global_object.location() != NULL) {
+        ASSERT(global_object->IsJSGlobalProxy());
+        global_proxy =
+            ReinitializeJSGlobalProxy(
+                global_proxy_function,
+                Handle<JSGlobalProxy>::cast(global_object));
+      } else {
+        global_proxy = Handle<JSGlobalProxy>::cast(
+            Factory::NewJSObject(global_proxy_function, TENURED));
+      }
+
+      // Security setup: Set the security token of the global object to
+      // the inner global. This makes the security check between two
+      // different contexts fail by default even in case of global
+      // object reinitialization.
+      object->set_global_receiver(*global_proxy);
+      global_proxy->set_context(*global_context());
+    }
+
+    {  // --- G l o b a l   C o n t e x t ---
+      // use the empty function as closure (no scope info)
+      global_context()->set_closure(*empty_function);
+      global_context()->set_fcontext(*global_context());
+      global_context()->set_previous(NULL);
+
+      // set extension and global object
+      global_context()->set_extension(*object);
+      global_context()->set_global(*object);
+      global_context()->set_global_proxy(*global_proxy);
+      // use inner global object as security token by default
+      global_context()->set_security_token(*object);
+    }
+
+    Handle<JSObject> global = Handle<JSObject>(global_context()->global());
+    SetProperty(global, object_name, Top::object_function(), DONT_ENUM);
+  }
+
+  Handle<JSObject> global = Handle<JSObject>(global_context()->global());
+
+  // Install global Function object
+  InstallFunction(global, "Function", JS_FUNCTION_TYPE, JSFunction::kSize,
+                  empty_function, Builtins::Illegal, true);  // ECMA native.
+
+  {  // --- A r r a y ---
+    Handle<JSFunction> array_function =
+        InstallFunction(global, "Array", JS_ARRAY_TYPE, JSArray::kSize,
+                        Top::initial_object_prototype(), Builtins::ArrayCode,
+                        true);
+    array_function->shared()->set_construct_stub(
+        Builtins::builtin(Builtins::ArrayConstructCode));
+    array_function->shared()->DontAdaptArguments();
+
+    // This seems a bit hackish, but we need to make sure Array.length
+    // is 1.
+    array_function->shared()->set_length(1);
+    Handle<DescriptorArray> array_descriptors =
+        Factory::CopyAppendProxyDescriptor(
+            Factory::empty_descriptor_array(),
+            Factory::length_symbol(),
+            Factory::NewProxy(&Accessors::ArrayLength),
+            static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE));
+
+    // Cache the fast JavaScript array map
+    global_context()->set_js_array_map(array_function->initial_map());
+    global_context()->js_array_map()->set_instance_descriptors(
+        *array_descriptors);
+    // array_function is used internally. JS code creating array objects
+    // should search for the 'Array' property on the global object and use
+    // that one as the constructor. The 'Array' property on a global object
+    // can be overwritten by JS code.
+    global_context()->set_array_function(*array_function);
+  }
+
+  {  // --- N u m b e r ---
+    Handle<JSFunction> number_fun =
+        InstallFunction(global, "Number", JS_VALUE_TYPE, JSValue::kSize,
+                        Top::initial_object_prototype(), Builtins::Illegal,
+                        true);
+    global_context()->set_number_function(*number_fun);
+  }
+
+  {  // --- B o o l e a n ---
+    Handle<JSFunction> boolean_fun =
+        InstallFunction(global, "Boolean", JS_VALUE_TYPE, JSValue::kSize,
+                        Top::initial_object_prototype(), Builtins::Illegal,
+                        true);
+    global_context()->set_boolean_function(*boolean_fun);
+  }
+
+  {  // --- S t r i n g ---
+    Handle<JSFunction> string_fun =
+        InstallFunction(global, "String", JS_VALUE_TYPE, JSValue::kSize,
+                        Top::initial_object_prototype(), Builtins::Illegal,
+                        true);
+    global_context()->set_string_function(*string_fun);
+    // Add 'length' property to strings.
+    Handle<DescriptorArray> string_descriptors =
+        Factory::CopyAppendProxyDescriptor(
+            Factory::empty_descriptor_array(),
+            Factory::length_symbol(),
+            Factory::NewProxy(&Accessors::StringLength),
+            static_cast<PropertyAttributes>(DONT_ENUM |
+                                            DONT_DELETE |
+                                            READ_ONLY));
+
+    Handle<Map> string_map =
+        Handle<Map>(global_context()->string_function()->initial_map());
+    string_map->set_instance_descriptors(*string_descriptors);
+  }
+
+  {  // --- D a t e ---
+    // Builtin functions for Date.prototype.
+    Handle<JSFunction> date_fun =
+        InstallFunction(global, "Date", JS_VALUE_TYPE, JSValue::kSize,
+                        Top::initial_object_prototype(), Builtins::Illegal,
+                        true);
+
+    global_context()->set_date_function(*date_fun);
+  }
+
+
+  {  // -- R e g E x p
+    // Builtin functions for RegExp.prototype.
+    Handle<JSFunction> regexp_fun =
+        InstallFunction(global, "RegExp", JS_REGEXP_TYPE, JSRegExp::kSize,
+                        Top::initial_object_prototype(), Builtins::Illegal,
+                        true);
+
+    global_context()->set_regexp_function(*regexp_fun);
+  }
+
+  {  // -- J S O N
+    Handle<String> name = Factory::NewStringFromAscii(CStrVector("JSON"));
+    Handle<JSFunction> cons = Factory::NewFunction(
+        name,
+        Factory::the_hole_value());
+    cons->SetInstancePrototype(global_context()->initial_object_prototype());
+    cons->SetInstanceClassName(*name);
+    Handle<JSObject> json_object = Factory::NewJSObject(cons, TENURED);
+    ASSERT(json_object->IsJSObject());
+    SetProperty(global, name, json_object, DONT_ENUM);
+    global_context()->set_json_object(*json_object);
+  }
+
+  {  // --- arguments_boilerplate_
+    // Make sure we can recognize argument objects at runtime.
+    // This is done by introducing an anonymous function with
+    // class_name equal to 'Arguments'.
+    Handle<String> symbol = Factory::LookupAsciiSymbol("Arguments");
+    Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
+    Handle<JSObject> prototype =
+        Handle<JSObject>(
+            JSObject::cast(global_context()->object_function()->prototype()));
+
+    Handle<JSFunction> function =
+        Factory::NewFunctionWithPrototype(symbol,
+                                          JS_OBJECT_TYPE,
+                                          JSObject::kHeaderSize,
+                                          prototype,
+                                          code,
+                                          false);
+    ASSERT(!function->has_initial_map());
+    function->shared()->set_instance_class_name(*symbol);
+    function->shared()->set_expected_nof_properties(2);
+    Handle<JSObject> result = Factory::NewJSObject(function);
+
+    global_context()->set_arguments_boilerplate(*result);
+    // Note: callee must be added as the first property and
+    //       length must be added as the second property.
+    SetProperty(result, Factory::callee_symbol(),
+                Factory::undefined_value(),
+                DONT_ENUM);
+    SetProperty(result, Factory::length_symbol(),
+                Factory::undefined_value(),
+                DONT_ENUM);
+
+#ifdef DEBUG
+    LookupResult lookup;
+    result->LocalLookup(Heap::callee_symbol(), &lookup);
+    ASSERT(lookup.IsValid() && (lookup.type() == FIELD));
+    ASSERT(lookup.GetFieldIndex() == Heap::arguments_callee_index);
+
+    result->LocalLookup(Heap::length_symbol(), &lookup);
+    ASSERT(lookup.IsValid() && (lookup.type() == FIELD));
+    ASSERT(lookup.GetFieldIndex() == Heap::arguments_length_index);
+
+    ASSERT(result->map()->inobject_properties() > Heap::arguments_callee_index);
+    ASSERT(result->map()->inobject_properties() > Heap::arguments_length_index);
+
+    // Check the state of the object.
+    ASSERT(result->HasFastProperties());
+    ASSERT(result->HasFastElements());
+#endif
+  }
+
+  {  // --- context extension
+    // Create a function for the context extension objects.
+    Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
+    Handle<JSFunction> context_extension_fun =
+        Factory::NewFunction(Factory::empty_symbol(),
+                             JS_CONTEXT_EXTENSION_OBJECT_TYPE,
+                             JSObject::kHeaderSize,
+                             code,
+                             true);
+
+    Handle<String> name = Factory::LookupAsciiSymbol("context_extension");
+    context_extension_fun->shared()->set_instance_class_name(*name);
+    global_context()->set_context_extension_function(*context_extension_fun);
+  }
+
+
+  {
+    // Setup the call-as-function delegate.
+    Handle<Code> code =
+        Handle<Code>(Builtins::builtin(Builtins::HandleApiCallAsFunction));
+    Handle<JSFunction> delegate =
+        Factory::NewFunction(Factory::empty_symbol(), JS_OBJECT_TYPE,
+                             JSObject::kHeaderSize, code, true);
+    global_context()->set_call_as_function_delegate(*delegate);
+    delegate->shared()->DontAdaptArguments();
+  }
+
+  {
+    // Setup the call-as-constructor delegate.
+    Handle<Code> code =
+        Handle<Code>(Builtins::builtin(Builtins::HandleApiCallAsConstructor));
+    Handle<JSFunction> delegate =
+        Factory::NewFunction(Factory::empty_symbol(), JS_OBJECT_TYPE,
+                             JSObject::kHeaderSize, code, true);
+    global_context()->set_call_as_constructor_delegate(*delegate);
+    delegate->shared()->DontAdaptArguments();
+  }
+
+  global_context()->set_special_function_table(Heap::empty_fixed_array());
+
+  // Initialize the out of memory slot.
+  global_context()->set_out_of_memory(Heap::false_value());
+
+  // Initialize the data slot.
+  global_context()->set_data(Heap::undefined_value());
+}
+
+
+bool Genesis::CompileBuiltin(int index) {
+  Vector<const char> name = Natives::GetScriptName(index);
+  Handle<String> source_code = Bootstrapper::NativesSourceLookup(index);
+  return CompileNative(name, source_code);
+}
+
+
+bool Genesis::CompileNative(Vector<const char> name, Handle<String> source) {
+  HandleScope scope;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  Debugger::set_compiling_natives(true);
+#endif
+  bool result =
+      CompileScriptCached(name, source, &natives_cache, NULL, true);
+  ASSERT(Top::has_pending_exception() != result);
+  if (!result) Top::clear_pending_exception();
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  Debugger::set_compiling_natives(false);
+#endif
+  return result;
+}
+
+
+bool Genesis::CompileScriptCached(Vector<const char> name,
+                                  Handle<String> source,
+                                  SourceCodeCache* cache,
+                                  v8::Extension* extension,
+                                  bool use_runtime_context) {
+  HandleScope scope;
+  Handle<JSFunction> boilerplate;
+
+  // If we can't find the function in the cache, we compile a new
+  // function and insert it into the cache.
+  if (!cache->Lookup(name, &boilerplate)) {
+    ASSERT(source->IsAsciiRepresentation());
+    Handle<String> script_name = Factory::NewStringFromUtf8(name);
+    boilerplate =
+        Compiler::Compile(source, script_name, 0, 0, extension, NULL);
+    if (boilerplate.is_null()) return false;
+    cache->Add(name, boilerplate);
+  }
+
+  // Setup the function context. Conceptually, we should clone the
+  // function before overwriting the context but since we're in a
+  // single-threaded environment it is not strictly necessary.
+  ASSERT(Top::context()->IsGlobalContext());
+  Handle<Context> context =
+      Handle<Context>(use_runtime_context
+                      ? Top::context()->runtime_context()
+                      : Top::context());
+  Handle<JSFunction> fun =
+      Factory::NewFunctionFromBoilerplate(boilerplate, context);
+
+  // Call the function using either the runtime object or the global
+  // object as the receiver. Provide no parameters.
+  Handle<Object> receiver =
+      Handle<Object>(use_runtime_context
+                     ? Top::context()->builtins()
+                     : Top::context()->global());
+  bool has_pending_exception;
+  Handle<Object> result =
+      Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
+  if (has_pending_exception) return false;
+  return PendingFixups::Process(
+      Handle<JSBuiltinsObject>(Top::context()->builtins()));
+}
+
+
+#define INSTALL_NATIVE(Type, name, var)                                  \
+  Handle<String> var##_name = Factory::LookupAsciiSymbol(name);          \
+  global_context()->set_##var(Type::cast(global_context()->              \
+                                           builtins()->                  \
+                                             GetProperty(*var##_name)));
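+
+// For reference, INSTALL_NATIVE(JSFunction, "CreateDate", create_date_fun)
+// below expands to roughly:
+//
+//   Handle<String> create_date_fun_name =
+//       Factory::LookupAsciiSymbol("CreateDate");
+//   global_context()->set_create_date_fun(JSFunction::cast(
+//       global_context()->builtins()->GetProperty(*create_date_fun_name)));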
+
+void Genesis::InstallNativeFunctions() {
+  HandleScope scope;
+  INSTALL_NATIVE(JSFunction, "CreateDate", create_date_fun);
+  INSTALL_NATIVE(JSFunction, "ToNumber", to_number_fun);
+  INSTALL_NATIVE(JSFunction, "ToString", to_string_fun);
+  INSTALL_NATIVE(JSFunction, "ToDetailString", to_detail_string_fun);
+  INSTALL_NATIVE(JSFunction, "ToObject", to_object_fun);
+  INSTALL_NATIVE(JSFunction, "ToInteger", to_integer_fun);
+  INSTALL_NATIVE(JSFunction, "ToUint32", to_uint32_fun);
+  INSTALL_NATIVE(JSFunction, "ToInt32", to_int32_fun);
+  INSTALL_NATIVE(JSFunction, "ToBoolean", to_boolean_fun);
+  INSTALL_NATIVE(JSFunction, "Instantiate", instantiate_fun);
+  INSTALL_NATIVE(JSFunction, "ConfigureTemplateInstance",
+                 configure_instance_fun);
+  INSTALL_NATIVE(JSFunction, "MakeMessage", make_message_fun);
+  INSTALL_NATIVE(JSFunction, "GetStackTraceLine", get_stack_trace_line_fun);
+  INSTALL_NATIVE(JSObject, "functionCache", function_cache);
+}
+
+#undef INSTALL_NATIVE
+
+
+bool Genesis::InstallNatives() {
+  HandleScope scope;
+
+  // Create a function for the builtins object. Allocate space for the
+  // JavaScript builtins, a reference to the builtins object
+  // (itself) and a reference to the global_context directly in the object.
+  Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
+  Handle<JSFunction> builtins_fun =
+      Factory::NewFunction(Factory::empty_symbol(), JS_BUILTINS_OBJECT_TYPE,
+                           JSBuiltinsObject::kSize, code, true);
+
+  Handle<String> name = Factory::LookupAsciiSymbol("builtins");
+  builtins_fun->shared()->set_instance_class_name(*name);
+
+  // Allocate the builtins object.
+  Handle<JSBuiltinsObject> builtins =
+      Handle<JSBuiltinsObject>::cast(Factory::NewGlobalObject(builtins_fun));
+  builtins->set_builtins(*builtins);
+  builtins->set_global_context(*global_context());
+  builtins->set_global_receiver(*builtins);
+
+  // Setup the 'global' properties of the builtins object. The
+  // 'global' property that refers to the global object is the only
+  // way to get from code running in the builtins context to the
+  // global object.
+  static const PropertyAttributes attributes =
+      static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
+  SetProperty(builtins, Factory::LookupAsciiSymbol("global"),
+              Handle<Object>(global_context()->global()), attributes);
+
+  // Setup the reference from the global object to the builtins object.
+  JSGlobalObject::cast(global_context()->global())->set_builtins(*builtins);
+
+  // Create a bridge function that has context in the global context.
+  Handle<JSFunction> bridge =
+      Factory::NewFunction(Factory::empty_symbol(), Factory::undefined_value());
+  ASSERT(bridge->context() == *Top::global_context());
+
+  // Allocate the builtins context.
+  Handle<Context> context =
+    Factory::NewFunctionContext(Context::MIN_CONTEXT_SLOTS, bridge);
+  context->set_global(*builtins);  // override builtins global object
+
+  global_context()->set_runtime_context(*context);
+
+  {  // -- S c r i p t
+    // Builtin functions for Script.
+    Handle<JSFunction> script_fun =
+        InstallFunction(builtins, "Script", JS_VALUE_TYPE, JSValue::kSize,
+                        Top::initial_object_prototype(), Builtins::Illegal,
+                        false);
+    Handle<JSObject> prototype =
+        Factory::NewJSObject(Top::object_function(), TENURED);
+    SetPrototype(script_fun, prototype);
+    global_context()->set_script_function(*script_fun);
+
+    // Add 'source' and 'data' property to scripts.
+    PropertyAttributes common_attributes =
+        static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+    Handle<Proxy> proxy_source = Factory::NewProxy(&Accessors::ScriptSource);
+    Handle<DescriptorArray> script_descriptors =
+        Factory::CopyAppendProxyDescriptor(
+            Factory::empty_descriptor_array(),
+            Factory::LookupAsciiSymbol("source"),
+            proxy_source,
+            common_attributes);
+    Handle<Proxy> proxy_name = Factory::NewProxy(&Accessors::ScriptName);
+    script_descriptors =
+        Factory::CopyAppendProxyDescriptor(
+            script_descriptors,
+            Factory::LookupAsciiSymbol("name"),
+            proxy_name,
+            common_attributes);
+    Handle<Proxy> proxy_id = Factory::NewProxy(&Accessors::ScriptId);
+    script_descriptors =
+        Factory::CopyAppendProxyDescriptor(
+            script_descriptors,
+            Factory::LookupAsciiSymbol("id"),
+            proxy_id,
+            common_attributes);
+    Handle<Proxy> proxy_line_offset =
+        Factory::NewProxy(&Accessors::ScriptLineOffset);
+    script_descriptors =
+        Factory::CopyAppendProxyDescriptor(
+            script_descriptors,
+            Factory::LookupAsciiSymbol("line_offset"),
+            proxy_line_offset,
+            common_attributes);
+    Handle<Proxy> proxy_column_offset =
+        Factory::NewProxy(&Accessors::ScriptColumnOffset);
+    script_descriptors =
+        Factory::CopyAppendProxyDescriptor(
+            script_descriptors,
+            Factory::LookupAsciiSymbol("column_offset"),
+            proxy_column_offset,
+            common_attributes);
+    Handle<Proxy> proxy_data = Factory::NewProxy(&Accessors::ScriptData);
+    script_descriptors =
+        Factory::CopyAppendProxyDescriptor(
+            script_descriptors,
+            Factory::LookupAsciiSymbol("data"),
+            proxy_data,
+            common_attributes);
+    Handle<Proxy> proxy_type = Factory::NewProxy(&Accessors::ScriptType);
+    script_descriptors =
+        Factory::CopyAppendProxyDescriptor(
+            script_descriptors,
+            Factory::LookupAsciiSymbol("type"),
+            proxy_type,
+            common_attributes);
+    Handle<Proxy> proxy_compilation_type =
+        Factory::NewProxy(&Accessors::ScriptCompilationType);
+    script_descriptors =
+        Factory::CopyAppendProxyDescriptor(
+            script_descriptors,
+            Factory::LookupAsciiSymbol("compilation_type"),
+            proxy_compilation_type,
+            common_attributes);
+    Handle<Proxy> proxy_line_ends =
+        Factory::NewProxy(&Accessors::ScriptLineEnds);
+    script_descriptors =
+        Factory::CopyAppendProxyDescriptor(
+            script_descriptors,
+            Factory::LookupAsciiSymbol("line_ends"),
+            proxy_line_ends,
+            common_attributes);
+    Handle<Proxy> proxy_context_data =
+        Factory::NewProxy(&Accessors::ScriptContextData);
+    script_descriptors =
+        Factory::CopyAppendProxyDescriptor(
+            script_descriptors,
+            Factory::LookupAsciiSymbol("context_data"),
+            proxy_context_data,
+            common_attributes);
+    Handle<Proxy> proxy_eval_from_function =
+        Factory::NewProxy(&Accessors::ScriptEvalFromFunction);
+    script_descriptors =
+        Factory::CopyAppendProxyDescriptor(
+            script_descriptors,
+            Factory::LookupAsciiSymbol("eval_from_function"),
+            proxy_eval_from_function,
+            common_attributes);
+    Handle<Proxy> proxy_eval_from_position =
+        Factory::NewProxy(&Accessors::ScriptEvalFromPosition);
+    script_descriptors =
+        Factory::CopyAppendProxyDescriptor(
+            script_descriptors,
+            Factory::LookupAsciiSymbol("eval_from_position"),
+            proxy_eval_from_position,
+            common_attributes);
+
+    Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
+    script_map->set_instance_descriptors(*script_descriptors);
+
+    // Allocate the empty script.
+    Handle<Script> script = Factory::NewScript(Factory::empty_string());
+    script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
+    global_context()->set_empty_script(*script);
+  }
+
+  if (FLAG_natives_file == NULL) {
+    // Without a natives file, install the default natives.
+    for (int i = Natives::GetDelayCount();
+         i < Natives::GetBuiltinsCount();
+         i++) {
+      if (!CompileBuiltin(i)) return false;
+    }
+
+    // Set up natives with lazy loading.
+    SetupLazy(Handle<JSFunction>(global_context()->date_function()),
+              Natives::GetIndex("date"),
+              Top::global_context(),
+              Handle<Context>(Top::context()->runtime_context()));
+    SetupLazy(Handle<JSFunction>(global_context()->regexp_function()),
+              Natives::GetIndex("regexp"),
+              Top::global_context(),
+              Handle<Context>(Top::context()->runtime_context()));
+    SetupLazy(Handle<JSObject>(global_context()->json_object()),
+              Natives::GetIndex("json"),
+              Top::global_context(),
+              Handle<Context>(Top::context()->runtime_context()));
+
+  } else if (strlen(FLAG_natives_file) != 0) {
+    // Otherwise install the natives from the natives file if the file
+    // exists and compiles.
+    bool exists;
+    Vector<const char> source = ReadFile(FLAG_natives_file, &exists);
+    Handle<String> source_string = Factory::NewStringFromAscii(source);
+    if (source.is_empty()) return false;
+    bool result = CompileNative(CStrVector(FLAG_natives_file), source_string);
+    if (!result) return false;
+
+  } else {
+    // Empty natives file name - do not install any natives.
+    PrintF("Warning: Running without installed natives!\n");
+    return true;
+  }
+
+  InstallNativeFunctions();
+
+  // Install Function.prototype.call and apply.
+  { Handle<String> key = Factory::function_class_symbol();
+    Handle<JSFunction> function =
+        Handle<JSFunction>::cast(GetProperty(Top::global(), key));
+    Handle<JSObject> proto =
+        Handle<JSObject>(JSObject::cast(function->instance_prototype()));
+
+    // Install the call and the apply functions.
+    Handle<JSFunction> call =
+        InstallFunction(proto, "call", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+                        Factory::NewJSObject(Top::object_function(), TENURED),
+                        Builtins::FunctionCall,
+                        false);
+    Handle<JSFunction> apply =
+        InstallFunction(proto, "apply", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+                        Factory::NewJSObject(Top::object_function(), TENURED),
+                        Builtins::FunctionApply,
+                        false);
+
+    // Make sure that Function.prototype.call appears to be compiled.
+    // The code will never be called, but inline caching for call will
+    // only work if it appears to be compiled.
+    call->shared()->DontAdaptArguments();
+    ASSERT(call->is_compiled());
+
+    // Set the expected parameters for apply to 2; required by builtin.
+    apply->shared()->set_formal_parameter_count(2);
+
+    // Set the lengths for the functions to satisfy ECMA-262.
+    call->shared()->set_length(1);
+    apply->shared()->set_length(2);
+  }
+
+#ifdef DEBUG
+  builtins->Verify();
+#endif
+  return true;
+}
+
+
+bool Genesis::InstallSpecialObjects() {
+  HandleScope scope;
+  Handle<JSGlobalObject> js_global(
+      JSGlobalObject::cast(global_context()->global()));
+  // Expose the natives in global if a name for it is specified.
+  if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
+    Handle<String> natives_string =
+        Factory::LookupAsciiSymbol(FLAG_expose_natives_as);
+    SetProperty(js_global, natives_string,
+                Handle<JSObject>(js_global->builtins()), DONT_ENUM);
+  }
+
+  Handle<Object> Error = GetProperty(js_global, "Error");
+  if (Error->IsJSObject()) {
+    Handle<String> name = Factory::LookupAsciiSymbol("stackTraceLimit");
+    SetProperty(Handle<JSObject>::cast(Error),
+                name,
+                Handle<Smi>(Smi::FromInt(FLAG_stack_trace_limit)),
+                NONE);
+  }
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Expose the debug global object in global if a name for it is specified.
+  if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) {
+    // If loading fails we just bail out without installing the
+    // debugger, but we leave the rest of the context intact.
+    if (!Debug::Load())
+      return true;
+    // Set the security token for the debugger context to the same as
+    // the shell global context to allow calling between these (otherwise
+    // exposing debug global object doesn't make much sense).
+    Debug::debug_context()->set_security_token(
+        global_context()->security_token());
+
+    Handle<String> debug_string =
+        Factory::LookupAsciiSymbol(FLAG_expose_debug_as);
+    SetProperty(js_global, debug_string,
+        Handle<Object>(Debug::debug_context()->global_proxy()), DONT_ENUM);
+  }
+#endif
+
+  return true;
+}
+
+
+bool Genesis::InstallExtensions(v8::ExtensionConfiguration* extensions) {
+  // Clear the coloring of the extension list.
+  v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension();
+  while (current != NULL) {
+    current->set_state(v8::UNVISITED);
+    current = current->next();
+  }
+  // Install auto extensions
+  current = v8::RegisteredExtension::first_extension();
+  while (current != NULL) {
+    if (current->extension()->auto_enable())
+      InstallExtension(current);
+    current = current->next();
+  }
+
+  if (FLAG_expose_gc) InstallExtension("v8/gc");
+
+  if (extensions == NULL) return true;
+  // Install required extensions
+  int count = v8::ImplementationUtilities::GetNameCount(extensions);
+  const char** names = v8::ImplementationUtilities::GetNames(extensions);
+  for (int i = 0; i < count; i++) {
+    if (!InstallExtension(names[i]))
+      return false;
+  }
+
+  return true;
+}
+
+
+// Installs a named extension.  This method is unoptimized and does
+// not scale well if we want to support a large number of extensions.
+bool Genesis::InstallExtension(const char* name) {
+  v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension();
+  // Loop until we find the relevant extension
+  while (current != NULL) {
+    if (strcmp(name, current->extension()->name()) == 0) break;
+    current = current->next();
+  }
+  // Didn't find the extension; fail.
+  if (current == NULL) {
+    v8::Utils::ReportApiFailure(
+        "v8::Context::New()", "Cannot find required extension");
+    return false;
+  }
+  return InstallExtension(current);
+}
+
+
+bool Genesis::InstallExtension(v8::RegisteredExtension* current) {
+  HandleScope scope;
+
+  if (current->state() == v8::INSTALLED) return true;
+  // The current node has already been visited so there must be a
+  // cycle in the dependency graph; fail.
+  if (current->state() == v8::VISITED) {
+    v8::Utils::ReportApiFailure(
+        "v8::Context::New()", "Circular extension dependency");
+    return false;
+  }
+  ASSERT(current->state() == v8::UNVISITED);
+  current->set_state(v8::VISITED);
+  v8::Extension* extension = current->extension();
+  // Install the extension's dependencies
+  for (int i = 0; i < extension->dependency_count(); i++) {
+    if (!InstallExtension(extension->dependencies()[i])) return false;
+  }
+  Vector<const char> source = CStrVector(extension->source());
+  Handle<String> source_code = Factory::NewStringFromAscii(source);
+  bool result = CompileScriptCached(CStrVector(extension->name()),
+                                    source_code,
+                                    &extensions_cache, extension,
+                                    false);
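+  // Compilation failure leaves a pending exception; success leaves none.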
+  ASSERT(Top::has_pending_exception() != result);
+  if (!result) {
+    Top::clear_pending_exception();
+    v8::Utils::ReportApiFailure(
+        "v8::Context::New()", "Error installing extension");
+  }
+  current->set_state(v8::INSTALLED);
+  return result;
+}
+
+
+bool Genesis::ConfigureGlobalObjects(
+    v8::Handle<v8::ObjectTemplate> global_proxy_template) {
+  Handle<JSObject> global_proxy(
+      JSObject::cast(global_context()->global_proxy()));
+  Handle<JSObject> js_global(JSObject::cast(global_context()->global()));
+
+  if (!global_proxy_template.IsEmpty()) {
+    // Configure the global proxy object.
+    Handle<ObjectTemplateInfo> proxy_data =
+        v8::Utils::OpenHandle(*global_proxy_template);
+    if (!ConfigureApiObject(global_proxy, proxy_data)) return false;
+
+    // Configure the inner global object.
+    Handle<FunctionTemplateInfo> proxy_constructor(
+        FunctionTemplateInfo::cast(proxy_data->constructor()));
+    if (!proxy_constructor->prototype_template()->IsUndefined()) {
+      Handle<ObjectTemplateInfo> inner_data(
+          ObjectTemplateInfo::cast(proxy_constructor->prototype_template()));
+      if (!ConfigureApiObject(js_global, inner_data)) return false;
+    }
+  }
+
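+  // Link the global proxy to the inner global object through its prototype.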
+  SetObjectPrototype(global_proxy, js_global);
+  return true;
+}
+
+
+bool Genesis::ConfigureApiObject(Handle<JSObject> object,
+    Handle<ObjectTemplateInfo> object_template) {
+  ASSERT(!object_template.is_null());
+  ASSERT(object->IsInstanceOf(
+      FunctionTemplateInfo::cast(object_template->constructor())));
+
+  bool pending_exception = false;
+  Handle<JSObject> obj =
+      Execution::InstantiateObject(object_template, &pending_exception);
+  if (pending_exception) {
+    ASSERT(Top::has_pending_exception());
+    Top::clear_pending_exception();
+    return false;
+  }
+  TransferObject(obj, object);
+  return true;
+}
+
+
+void Genesis::TransferNamedProperties(Handle<JSObject> from,
+                                      Handle<JSObject> to) {
+  if (from->HasFastProperties()) {
+    Handle<DescriptorArray> descs =
+        Handle<DescriptorArray>(from->map()->instance_descriptors());
+    for (int i = 0; i < descs->number_of_descriptors(); i++) {
+      PropertyDetails details = PropertyDetails(descs->GetDetails(i));
+      switch (details.type()) {
+        case FIELD: {
+          HandleScope inner;
+          Handle<String> key = Handle<String>(descs->GetKey(i));
+          int index = descs->GetFieldIndex(i);
+          Handle<Object> value = Handle<Object>(from->FastPropertyAt(index));
+          SetProperty(to, key, value, details.attributes());
+          break;
+        }
+        case CONSTANT_FUNCTION: {
+          HandleScope inner;
+          Handle<String> key = Handle<String>(descs->GetKey(i));
+          Handle<JSFunction> fun =
+              Handle<JSFunction>(descs->GetConstantFunction(i));
+          SetProperty(to, key, fun, details.attributes());
+          break;
+        }
+        case CALLBACKS: {
+          LookupResult result;
+          to->LocalLookup(descs->GetKey(i), &result);
+          // If the property is already there we skip it.
+          if (result.IsValid()) continue;
+          HandleScope inner;
+          Handle<DescriptorArray> inst_descs =
+              Handle<DescriptorArray>(to->map()->instance_descriptors());
+          Handle<String> key = Handle<String>(descs->GetKey(i));
+          Handle<Object> entry = Handle<Object>(descs->GetCallbacksObject(i));
+          inst_descs = Factory::CopyAppendProxyDescriptor(inst_descs,
+                                                          key,
+                                                          entry,
+                                                          details.attributes());
+          to->map()->set_instance_descriptors(*inst_descs);
+          break;
+        }
+        case MAP_TRANSITION:
+        case CONSTANT_TRANSITION:
+        case NULL_DESCRIPTOR:
+          // Ignore non-properties.
+          break;
+        case NORMAL:
+          // Does not occur since the from object has fast properties.
+        case INTERCEPTOR:
+          // No element in the instance descriptors has the interceptor type.
+          UNREACHABLE();
+          break;
+      }
+    }
+  } else {
+    Handle<StringDictionary> properties =
+        Handle<StringDictionary>(from->property_dictionary());
+    int capacity = properties->Capacity();
+    for (int i = 0; i < capacity; i++) {
+      Object* raw_key(properties->KeyAt(i));
+      if (properties->IsKey(raw_key)) {
+        ASSERT(raw_key->IsString());
+        // If the property is already there we skip it.
+        LookupResult result;
+        to->LocalLookup(String::cast(raw_key), &result);
+        if (result.IsValid()) continue;
+        // Set the property.
+        Handle<String> key = Handle<String>(String::cast(raw_key));
+        Handle<Object> value = Handle<Object>(properties->ValueAt(i));
+        if (value->IsJSGlobalPropertyCell()) {
+          value = Handle<Object>(JSGlobalPropertyCell::cast(*value)->value());
+        }
+        PropertyDetails details = properties->DetailsAt(i);
+        SetProperty(to, key, value, details.attributes());
+      }
+    }
+  }
+}
+
+
+void Genesis::TransferIndexedProperties(Handle<JSObject> from,
+                                        Handle<JSObject> to) {
+  // Cloning the elements array is sufficient.
+  Handle<FixedArray> from_elements =
+      Handle<FixedArray>(FixedArray::cast(from->elements()));
+  Handle<FixedArray> to_elements = Factory::CopyFixedArray(from_elements);
+  to->set_elements(*to_elements);
+}
+
+
+void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) {
+  HandleScope outer;
+
+  ASSERT(!from->IsJSArray());
+  ASSERT(!to->IsJSArray());
+
+  TransferNamedProperties(from, to);
+  TransferIndexedProperties(from, to);
+
+  // Transfer the prototype (new map is needed).
+  Handle<Map> old_to_map = Handle<Map>(to->map());
+  Handle<Map> new_to_map = Factory::CopyMapDropTransitions(old_to_map);
+  new_to_map->set_prototype(from->map()->prototype());
+  to->set_map(*new_to_map);
+}
+
+
+void Genesis::MakeFunctionInstancePrototypeWritable() {
+  // Make a new function map so all future functions
+  // will have settable and enumerable prototype properties.
+  HandleScope scope;
+
+  Handle<DescriptorArray> function_map_descriptors =
+      ComputeFunctionInstanceDescriptor(false);
+  Handle<Map> fm = Factory::CopyMapDropDescriptors(Top::function_map());
+  fm->set_instance_descriptors(*function_map_descriptors);
+  Top::context()->global_context()->set_function_map(*fm);
+}
+
+
+void Genesis::AddSpecialFunction(Handle<JSObject> prototype,
+                                 const char* name,
+                                 Handle<Code> code) {
+  Handle<String> key = Factory::LookupAsciiSymbol(name);
+  Handle<Object> value = Handle<Object>(prototype->GetProperty(*key));
+  if (value->IsJSFunction()) {
+    Handle<JSFunction> optimized = Factory::NewFunction(key,
+                                                        JS_OBJECT_TYPE,
+                                                        JSObject::kHeaderSize,
+                                                        code,
+                                                        false);
+    optimized->shared()->DontAdaptArguments();
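+    // The special function table is a flat array of (prototype, original
+    // function, optimized code) triples; append one more triple.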
+    int len = global_context()->special_function_table()->length();
+    Handle<FixedArray> new_array = Factory::NewFixedArray(len + 3);
+    for (int index = 0; index < len; index++) {
+      new_array->set(index,
+                     global_context()->special_function_table()->get(index));
+    }
+    new_array->set(len+0, *prototype);
+    new_array->set(len+1, *value);
+    new_array->set(len+2, *optimized);
+    global_context()->set_special_function_table(*new_array);
+  }
+}
+
+
+void Genesis::BuildSpecialFunctionTable() {
+  HandleScope scope;
+  Handle<JSObject> global = Handle<JSObject>(global_context()->global());
+  // Add special versions for Array.prototype.pop and push.
+  Handle<JSFunction> function =
+      Handle<JSFunction>(
+          JSFunction::cast(global->GetProperty(Heap::Array_symbol())));
+  Handle<JSObject> visible_prototype =
+      Handle<JSObject>(JSObject::cast(function->prototype()));
+  // Remember to put push and pop on the hidden prototype if it's there.
+  Handle<JSObject> push_and_pop_prototype;
+  Handle<Object> superproto(visible_prototype->GetPrototype());
+  if (superproto->IsJSObject() &&
+      JSObject::cast(*superproto)->map()->is_hidden_prototype()) {
+    push_and_pop_prototype = Handle<JSObject>::cast(superproto);
+  } else {
+    push_and_pop_prototype = visible_prototype;
+  }
+  AddSpecialFunction(push_and_pop_prototype, "pop",
+                     Handle<Code>(Builtins::builtin(Builtins::ArrayPop)));
+  AddSpecialFunction(push_and_pop_prototype, "push",
+                     Handle<Code>(Builtins::builtin(Builtins::ArrayPush)));
+}
+
+
+Genesis::Genesis(Handle<Object> global_object,
+                 v8::Handle<v8::ObjectTemplate> global_template,
+                 v8::ExtensionConfiguration* extensions) {
+  // Link this genesis object into the stacked genesis chain. This
+  // must be done before any early exits because the destructor
+  // will always do unlinking.
+  previous_ = current_;
+  current_  = this;
+  result_ = Handle<Context>::null();
+
+  // If V8 isn't running and cannot be initialized, just return.
+  if (!V8::IsRunning() && !V8::Initialize(NULL)) return;
+
+  // Before creating the roots we must save the context and restore it
+  // on all function exits.
+  HandleScope scope;
+  SaveContext context;
+
+  CreateRoots(global_template, global_object);
+
+  if (!InstallNatives()) return;
+
+  MakeFunctionInstancePrototypeWritable();
+  BuildSpecialFunctionTable();
+
+  if (!ConfigureGlobalObjects(global_template)) return;
+
+  if (!InstallExtensions(extensions)) return;
+
+  if (!InstallSpecialObjects()) return;
+
+  result_ = global_context_;
+}
+
+
+// Support for thread preemption.
+
+// Reserve space for statics needing saving and restoring.
+int Bootstrapper::ArchiveSpacePerThread() {
+  return Genesis::ArchiveSpacePerThread();
+}
+
+
+// Archive statics that are thread local.
+char* Bootstrapper::ArchiveState(char* to) {
+  return Genesis::ArchiveState(to);
+}
+
+
+// Restore statics that are thread local.
+char* Bootstrapper::RestoreState(char* from) {
+  return Genesis::RestoreState(from);
+}
+
+
+// Called when the top-level V8 mutex is destroyed.
+void Bootstrapper::FreeThreadResources() {
+  ASSERT(Genesis::current() == NULL);
+}
+
+
+// Reserve space for statics needing saving and restoring.
+int Genesis::ArchiveSpacePerThread() {
+  return sizeof(current_);
+}
+
+
+// Archive statics that are thread local.
+char* Genesis::ArchiveState(char* to) {
+  *reinterpret_cast<Genesis**>(to) = current_;
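+  // Clear the static so the preempted thread no longer appears to be
+  // bootstrapping.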
+  current_ = NULL;
+  return to + sizeof(current_);
+}
+
+
+// Restore statics that are thread local.
+char* Genesis::RestoreState(char* from) {
+  current_ = *reinterpret_cast<Genesis**>(from);
+  return from + sizeof(current_);
+}
+
+} }  // namespace v8::internal
diff --git a/src/bootstrapper.h b/src/bootstrapper.h
new file mode 100644
index 0000000..809cd41
--- /dev/null
+++ b/src/bootstrapper.h
@@ -0,0 +1,82 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_BOOTSTRAPPER_H_
+#define V8_BOOTSTRAPPER_H_
+
+namespace v8 {
+namespace internal {
+
+// The Bootstrapper is the public interface for creating a JavaScript global
+// context.
+class Bootstrapper : public AllStatic {
+ public:
+  // Requires: Heap::Setup has been called.
+  static void Initialize(bool create_heap_objects);
+  static void TearDown();
+
+  // Creates a JavaScript Global Context with initial object graph.
+  // The returned value is a global handle cast to V8Environment*.
+  static Handle<Context> CreateEnvironment(
+      Handle<Object> global_object,
+      v8::Handle<v8::ObjectTemplate> global_template,
+      v8::ExtensionConfiguration* extensions);
+
+  // Detach the environment from its outer global object.
+  static void DetachGlobal(Handle<Context> env);
+
+  // Traverses the pointers for memory management.
+  static void Iterate(ObjectVisitor* v);
+
+  // Accessors for the native scripts cache. Used in lazy loading.
+  static Handle<String> NativesSourceLookup(int index);
+  static bool NativesCacheLookup(Vector<const char> name,
+                                 Handle<JSFunction>* handle);
+  static void NativesCacheAdd(Vector<const char> name, Handle<JSFunction> fun);
+
+  // Append code that needs fixup at the end of bootstrapping.
+  static void AddFixup(Code* code, MacroAssembler* masm);
+
+  // Tells whether bootstrapping is active.
+  static bool IsActive();
+
+  // Encoding/decoding support for fixup flags.
+  class FixupFlagsIsPCRelative: public BitField<bool, 0, 1> {};
+  class FixupFlagsUseCodeObject: public BitField<bool, 1, 1> {};
+  class FixupFlagsArgumentsCount: public BitField<uint32_t, 2, 32-2> {};
+
+  // Support for thread preemption.
+  static int ArchiveSpacePerThread();
+  static char* ArchiveState(char* to);
+  static char* RestoreState(char* from);
+  static void FreeThreadResources();
+};
+
+}}  // namespace v8::internal
+
+#endif  // V8_BOOTSTRAPPER_H_
diff --git a/src/builtins.cc b/src/builtins.cc
new file mode 100644
index 0000000..afb5427
--- /dev/null
+++ b/src/builtins.cc
@@ -0,0 +1,764 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "api.h"
+#include "arguments.h"
+#include "bootstrapper.h"
+#include "builtins.h"
+#include "ic-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Support macros for defining builtins in C.
+// ----------------------------------------------------------------------------
+//
+// A builtin function is defined by writing:
+//
+//   BUILTIN(name) {
+//     ...
+//   }
+//   BUILTIN_END
+//
+// In the body of the builtin function, the variable 'receiver' is visible.
+// The arguments can be accessed through the Arguments object args.
+//
+//   args[0]: Receiver (also available as 'receiver')
+//   args[1]: First argument
+//     ...
+//   args[n]: Last argument
+//   args.length(): Number of arguments including the receiver.
+// ----------------------------------------------------------------------------
+
+
+// TODO(428): We should consider passing whether or not the
+// builtin was invoked as a constructor as part of the
+// arguments. Maybe we also want to pass the called function?
+#define BUILTIN(name)                                                   \
+  static Object* Builtin_##name(Arguments args) {      \
+    Handle<Object> receiver = args.at<Object>(0);
+
+
+#define BUILTIN_END                             \
+  return Heap::undefined_value();               \
+}
+
+
+static inline bool CalledAsConstructor() {
+#ifdef DEBUG
+  // Calculate the result using a full stack frame iterator and check
+  // that the state of the stack is as we assume it to be in the
+  // code below.
+  StackFrameIterator it;
+  ASSERT(it.frame()->is_exit());
+  it.Advance();
+  StackFrame* frame = it.frame();
+  bool reference_result = frame->is_construct();
+#endif
+  Address fp = Top::c_entry_fp(Top::GetCurrentThread());
+  // Because we know fp points to an exit frame we can use the relevant
+  // part of ExitFrame::ComputeCallerState directly.
+  const int kCallerOffset = ExitFrameConstants::kCallerFPOffset;
+  Address caller_fp = Memory::Address_at(fp + kCallerOffset);
+  // This inlines the part of StackFrame::ComputeType that grabs the
+  // type of the current frame.  Note that StackFrame::ComputeType
+  // has been specialized for each architecture so if any one of them
+  // changes this code has to be changed as well.
+  const int kMarkerOffset = StandardFrameConstants::kMarkerOffset;
+  const Smi* kConstructMarker = Smi::FromInt(StackFrame::CONSTRUCT);
+  Object* marker = Memory::Object_at(caller_fp + kMarkerOffset);
+  bool result = (marker == kConstructMarker);
+  ASSERT_EQ(result, reference_result);
+  return result;
+}
+
+// ----------------------------------------------------------------------------
+
+
+Handle<Code> Builtins::GetCode(JavaScript id, bool* resolved) {
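+  // Default to the Illegal builtin; it is replaced below if the
+  // JavaScript builtin can be looked up and compiled.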
+  Code* code = Builtins::builtin(Builtins::Illegal);
+  *resolved = false;
+
+  if (Top::context() != NULL) {
+    Object* object = Top::builtins()->javascript_builtin(id);
+    if (object->IsJSFunction()) {
+      Handle<JSFunction> function(JSFunction::cast(object));
+      // Make sure the number of parameters match the formal parameter count.
+      ASSERT(function->shared()->formal_parameter_count() ==
+             Builtins::GetArgumentsCount(id));
+      if (function->is_compiled() || CompileLazy(function, CLEAR_EXCEPTION)) {
+        code = function->code();
+        *resolved = true;
+      }
+    }
+  }
+
+  return Handle<Code>(code);
+}
+
+
+BUILTIN(Illegal) {
+  UNREACHABLE();
+}
+BUILTIN_END
+
+
+BUILTIN(EmptyFunction) {
+}
+BUILTIN_END
+
+
+BUILTIN(ArrayCodeGeneric) {
+  Counters::array_function_runtime.Increment();
+
+  JSArray* array;
+  if (CalledAsConstructor()) {
+    array = JSArray::cast(*receiver);
+  } else {
+    // Allocate the JS Array
+    JSFunction* constructor =
+        Top::context()->global_context()->array_function();
+    Object* obj = Heap::AllocateJSObject(constructor);
+    if (obj->IsFailure()) return obj;
+    array = JSArray::cast(obj);
+  }
+
+  // 'array' now contains the JSArray we should initialize.
+
+  // Optimize the case where there is one argument and the argument is a
+  // small smi.
+  if (args.length() == 2) {
+    Object* obj = args[1];
+    if (obj->IsSmi()) {
+      int len = Smi::cast(obj)->value();
+      if (len >= 0 && len < JSObject::kInitialMaxFastElementArray) {
+        Object* obj = Heap::AllocateFixedArrayWithHoles(len);
+        if (obj->IsFailure()) return obj;
+        array->SetContent(FixedArray::cast(obj));
+        return array;
+      }
+    }
+    // Take the argument as the length.
+    obj = array->Initialize(0);
+    if (obj->IsFailure()) return obj;
+    return array->SetElementsLength(args[1]);
+  }
+
+  // Optimize the case where there are no parameters passed.
+  if (args.length() == 1) {
+    return array->Initialize(JSArray::kPreallocatedArrayElements);
+  }
+
+  // Take the arguments as elements.
+  int number_of_elements = args.length() - 1;
+  Smi* len = Smi::FromInt(number_of_elements);
+  Object* obj = Heap::AllocateFixedArrayWithHoles(len->value());
+  if (obj->IsFailure()) return obj;
+  FixedArray* elms = FixedArray::cast(obj);
+  WriteBarrierMode mode = elms->GetWriteBarrierMode();
+  // Fill in the content
+  for (int index = 0; index < number_of_elements; index++) {
+    elms->set(index, args[index+1], mode);
+  }
+
+  // Set length and elements on the array.
+  array->set_elements(FixedArray::cast(obj));
+  array->set_length(len, SKIP_WRITE_BARRIER);
+
+  return array;
+}
+BUILTIN_END
+
+
+BUILTIN(ArrayPush) {
+  JSArray* array = JSArray::cast(*receiver);
+  ASSERT(array->HasFastElements());
+
+  // Make sure we have space for the elements.
+  int len = Smi::cast(array->length())->value();
+
+  // Set new length.
+  int new_length = len + args.length() - 1;
+  FixedArray* elms = FixedArray::cast(array->elements());
+
+  if (new_length <= elms->length()) {
+    // Backing storage has extra space for the provided values.
+    for (int index = 0; index < args.length() - 1; index++) {
+      elms->set(index + len, args[index+1]);
+    }
+  } else {
+    // New backing storage is needed.
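+    // Grow to about 1.5 times the required length plus a little slack
+    // so that repeated pushes are amortized.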
+    int capacity = new_length + (new_length >> 1) + 16;
+    Object* obj = Heap::AllocateFixedArrayWithHoles(capacity);
+    if (obj->IsFailure()) return obj;
+    FixedArray* new_elms = FixedArray::cast(obj);
+    WriteBarrierMode mode = new_elms->GetWriteBarrierMode();
+    // Fill out the new array with old elements.
+    for (int i = 0; i < len; i++) new_elms->set(i, elms->get(i), mode);
+    // Add the provided values.
+    for (int index = 0; index < args.length() - 1; index++) {
+      new_elms->set(index + len, args[index+1], mode);
+    }
+    // Set the new backing storage.
+    array->set_elements(new_elms);
+  }
+  // Set the length.
+  array->set_length(Smi::FromInt(new_length), SKIP_WRITE_BARRIER);
+  return array->length();
+}
+BUILTIN_END
+
+
+BUILTIN(ArrayPop) {
+  JSArray* array = JSArray::cast(*receiver);
+  ASSERT(array->HasFastElements());
+  Object* undefined = Heap::undefined_value();
+
+  int len = Smi::cast(array->length())->value();
+  if (len == 0) return undefined;
+
+  // Get top element
+  FixedArray* elms = FixedArray::cast(array->elements());
+  Object* top = elms->get(len - 1);
+
+  // Set the length.
+  array->set_length(Smi::FromInt(len - 1), SKIP_WRITE_BARRIER);
+
+  if (!top->IsTheHole()) {
+    // Delete the top element.
+    elms->set_the_hole(len - 1);
+    return top;
+  }
+
+  // Remember to check the prototype chain.
+  JSFunction* array_function =
+      Top::context()->global_context()->array_function();
+  JSObject* prototype = JSObject::cast(array_function->prototype());
+  top = prototype->GetElement(len - 1);
+
+  return top;
+}
+BUILTIN_END
+
+
+// -----------------------------------------------------------------------------
+//
+
+
+// Returns the holder JSObject if the function can legally be called
+// with this receiver.  Returns Heap::null_value() if the call is
+// illegal.  Any arguments that don't fit the expected type are
+// overwritten with undefined.  Arguments that do fit the expected
+// type are overwritten with the object in the prototype chain that
+// actually has that type.
+static inline Object* TypeCheck(int argc,
+                                Object** argv,
+                                FunctionTemplateInfo* info) {
+  Object* recv = argv[0];
+  Object* sig_obj = info->signature();
+  if (sig_obj->IsUndefined()) return recv;
+  SignatureInfo* sig = SignatureInfo::cast(sig_obj);
+  // If necessary, check the receiver
+  Object* recv_type = sig->receiver();
+
+  Object* holder = recv;
+  if (!recv_type->IsUndefined()) {
+    for (; holder != Heap::null_value(); holder = holder->GetPrototype()) {
+      if (holder->IsInstanceOf(FunctionTemplateInfo::cast(recv_type))) {
+        break;
+      }
+    }
+    if (holder == Heap::null_value()) return holder;
+  }
+  Object* args_obj = sig->args();
+  // If there is no argument signature we're done
+  if (args_obj->IsUndefined()) return holder;
+  FixedArray* args = FixedArray::cast(args_obj);
+  int length = args->length();
+  if (argc <= length) length = argc - 1;
+  for (int i = 0; i < length; i++) {
+    Object* argtype = args->get(i);
+    if (argtype->IsUndefined()) continue;
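+    // Arguments are stored at decreasing addresses below the receiver,
+    // hence the negative index.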
+    Object** arg = &argv[-1 - i];
+    Object* current = *arg;
+    for (; current != Heap::null_value(); current = current->GetPrototype()) {
+      if (current->IsInstanceOf(FunctionTemplateInfo::cast(argtype))) {
+        *arg = current;
+        break;
+      }
+    }
+    if (current == Heap::null_value()) *arg = Heap::undefined_value();
+  }
+  return holder;
+}
+
+
+BUILTIN(HandleApiCall) {
+  HandleScope scope;
+  bool is_construct = CalledAsConstructor();
+
+  // TODO(428): Remove use of static variable, handle API callbacks directly.
+  Handle<JSFunction> function =
+      Handle<JSFunction>(JSFunction::cast(Builtins::builtin_passed_function));
+
+  if (is_construct) {
+    Handle<FunctionTemplateInfo> desc =
+        Handle<FunctionTemplateInfo>(
+            FunctionTemplateInfo::cast(function->shared()->function_data()));
+    bool pending_exception = false;
+    Factory::ConfigureInstance(desc, Handle<JSObject>::cast(receiver),
+                               &pending_exception);
+    ASSERT(Top::has_pending_exception() == pending_exception);
+    if (pending_exception) return Failure::Exception();
+  }
+
+  FunctionTemplateInfo* fun_data =
+      FunctionTemplateInfo::cast(function->shared()->function_data());
+  Object* raw_holder = TypeCheck(args.length(), &args[0], fun_data);
+
+  if (raw_holder->IsNull()) {
+    // This function cannot be called with the given receiver.  Abort!
+    Handle<Object> obj =
+        Factory::NewTypeError("illegal_invocation", HandleVector(&function, 1));
+    return Top::Throw(*obj);
+  }
+
+  Object* raw_call_data = fun_data->call_code();
+  if (!raw_call_data->IsUndefined()) {
+    CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
+    Object* callback_obj = call_data->callback();
+    v8::InvocationCallback callback =
+        v8::ToCData<v8::InvocationCallback>(callback_obj);
+    Object* data_obj = call_data->data();
+    Object* result;
+
+    v8::Local<v8::Object> self =
+        v8::Utils::ToLocal(Handle<JSObject>::cast(receiver));
+    Handle<Object> data_handle(data_obj);
+    v8::Local<v8::Value> data = v8::Utils::ToLocal(data_handle);
+    ASSERT(raw_holder->IsJSObject());
+    v8::Local<v8::Function> callee = v8::Utils::ToLocal(function);
+    Handle<JSObject> holder_handle(JSObject::cast(raw_holder));
+    v8::Local<v8::Object> holder = v8::Utils::ToLocal(holder_handle);
+    LOG(ApiObjectAccess("call", JSObject::cast(*receiver)));
+    v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
+        data,
+        holder,
+        callee,
+        is_construct,
+        reinterpret_cast<void**>(&args[0] - 1),
+        args.length() - 1);
+
+    v8::Handle<v8::Value> value;
+    {
+      // Leaving JavaScript.
+      VMState state(EXTERNAL);
+      value = callback(new_args);
+    }
+    if (value.IsEmpty()) {
+      result = Heap::undefined_value();
+    } else {
+      result = *reinterpret_cast<Object**>(*value);
+    }
+
+    RETURN_IF_SCHEDULED_EXCEPTION();
+    if (!is_construct || result->IsJSObject()) return result;
+  }
+
+  return *receiver;
+}
+BUILTIN_END
+
+
+// Helper function to handle calls to non-function objects created through the
+// API. The object can be called as either a constructor (using new) or just as
+// a function (without new).
+static Object* HandleApiCallAsFunctionOrConstructor(bool is_construct_call,
+                                                    Arguments args) {
+  // Non-functions are never called as constructors. Even if this is an object
+  // called as a constructor the delegate call is not a construct call.
+  ASSERT(!CalledAsConstructor());
+
+  Handle<Object> receiver = args.at<Object>(0);
+
+  // Get the object called.
+  JSObject* obj = JSObject::cast(*receiver);
+
+  // Get the invocation callback from the function descriptor that was
+  // used to create the called object.
+  ASSERT(obj->map()->has_instance_call_handler());
+  JSFunction* constructor = JSFunction::cast(obj->map()->constructor());
+  Object* template_info = constructor->shared()->function_data();
+  Object* handler =
+      FunctionTemplateInfo::cast(template_info)->instance_call_handler();
+  ASSERT(!handler->IsUndefined());
+  CallHandlerInfo* call_data = CallHandlerInfo::cast(handler);
+  Object* callback_obj = call_data->callback();
+  v8::InvocationCallback callback =
+      v8::ToCData<v8::InvocationCallback>(callback_obj);
+
+  // Get the data for the call and perform the callback.
+  Object* data_obj = call_data->data();
+  Object* result;
+  { HandleScope scope;
+    v8::Local<v8::Object> self =
+        v8::Utils::ToLocal(Handle<JSObject>::cast(receiver));
+    Handle<Object> data_handle(data_obj);
+    v8::Local<v8::Value> data = v8::Utils::ToLocal(data_handle);
+    Handle<JSFunction> callee_handle(constructor);
+    v8::Local<v8::Function> callee = v8::Utils::ToLocal(callee_handle);
+    LOG(ApiObjectAccess("call non-function", JSObject::cast(*receiver)));
+    v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
+        data,
+        self,
+        callee,
+        is_construct_call,
+        reinterpret_cast<void**>(&args[0] - 1),
+        args.length() - 1);
+    v8::Handle<v8::Value> value;
+    {
+      // Leaving JavaScript.
+      VMState state(EXTERNAL);
+      value = callback(new_args);
+    }
+    if (value.IsEmpty()) {
+      result = Heap::undefined_value();
+    } else {
+      result = *reinterpret_cast<Object**>(*value);
+    }
+  }
+  // Check for exceptions and return result.
+  RETURN_IF_SCHEDULED_EXCEPTION();
+  return result;
+}
+
+
+// Handle calls to non-function objects created through the API. This delegate
+// function is used when the call is a normal function call.
+BUILTIN(HandleApiCallAsFunction) {
+  return HandleApiCallAsFunctionOrConstructor(false, args);
+}
+BUILTIN_END
+
+
+// Handle calls to non-function objects created through the API. This delegate
+// function is used when the call is a construct call.
+BUILTIN(HandleApiCallAsConstructor) {
+  return HandleApiCallAsFunctionOrConstructor(true, args);
+}
+BUILTIN_END
+
+
+// TODO(1238487): This is a nasty hack. We need to improve the way we
+// call builtins considerably to get rid of this and the hairy macros
+// in builtins.cc.
+Object* Builtins::builtin_passed_function;
+
+
+
+static void Generate_LoadIC_ArrayLength(MacroAssembler* masm) {
+  LoadIC::GenerateArrayLength(masm);
+}
+
+
+static void Generate_LoadIC_StringLength(MacroAssembler* masm) {
+  LoadIC::GenerateStringLength(masm);
+}
+
+
+static void Generate_LoadIC_FunctionPrototype(MacroAssembler* masm) {
+  LoadIC::GenerateFunctionPrototype(masm);
+}
+
+
+static void Generate_LoadIC_Initialize(MacroAssembler* masm) {
+  LoadIC::GenerateInitialize(masm);
+}
+
+
+static void Generate_LoadIC_PreMonomorphic(MacroAssembler* masm) {
+  LoadIC::GeneratePreMonomorphic(masm);
+}
+
+
+static void Generate_LoadIC_Miss(MacroAssembler* masm) {
+  LoadIC::GenerateMiss(masm);
+}
+
+
+static void Generate_LoadIC_Megamorphic(MacroAssembler* masm) {
+  LoadIC::GenerateMegamorphic(masm);
+}
+
+
+static void Generate_LoadIC_Normal(MacroAssembler* masm) {
+  LoadIC::GenerateNormal(masm);
+}
+
+
+static void Generate_KeyedLoadIC_Initialize(MacroAssembler* masm) {
+  KeyedLoadIC::GenerateInitialize(masm);
+}
+
+
+static void Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
+  KeyedLoadIC::GenerateMiss(masm);
+}
+
+
+static void Generate_KeyedLoadIC_Generic(MacroAssembler* masm) {
+  KeyedLoadIC::GenerateGeneric(masm);
+}
+
+
+static void Generate_KeyedLoadIC_PreMonomorphic(MacroAssembler* masm) {
+  KeyedLoadIC::GeneratePreMonomorphic(masm);
+}
+
+
+static void Generate_StoreIC_Initialize(MacroAssembler* masm) {
+  StoreIC::GenerateInitialize(masm);
+}
+
+
+static void Generate_StoreIC_Miss(MacroAssembler* masm) {
+  StoreIC::GenerateMiss(masm);
+}
+
+
+static void Generate_StoreIC_ExtendStorage(MacroAssembler* masm) {
+  StoreIC::GenerateExtendStorage(masm);
+}
+
+static void Generate_StoreIC_Megamorphic(MacroAssembler* masm) {
+  StoreIC::GenerateMegamorphic(masm);
+}
+
+
+static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateGeneric(masm);
+}
+
+
+static void Generate_KeyedStoreIC_ExtendStorage(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateExtendStorage(masm);
+}
+
+
+static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateMiss(masm);
+}
+
+
+static void Generate_KeyedStoreIC_Initialize(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateInitialize(masm);
+}
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+static void Generate_LoadIC_DebugBreak(MacroAssembler* masm) {
+  Debug::GenerateLoadICDebugBreak(masm);
+}
+
+
+static void Generate_StoreIC_DebugBreak(MacroAssembler* masm) {
+  Debug::GenerateStoreICDebugBreak(masm);
+}
+
+
+static void Generate_KeyedLoadIC_DebugBreak(MacroAssembler* masm) {
+  Debug::GenerateKeyedLoadICDebugBreak(masm);
+}
+
+
+static void Generate_KeyedStoreIC_DebugBreak(MacroAssembler* masm) {
+  Debug::GenerateKeyedStoreICDebugBreak(masm);
+}
+
+
+static void Generate_ConstructCall_DebugBreak(MacroAssembler* masm) {
+  Debug::GenerateConstructCallDebugBreak(masm);
+}
+
+
+static void Generate_Return_DebugBreak(MacroAssembler* masm) {
+  Debug::GenerateReturnDebugBreak(masm);
+}
+
+
+static void Generate_StubNoRegisters_DebugBreak(MacroAssembler* masm) {
+  Debug::GenerateStubNoRegistersDebugBreak(masm);
+}
+#endif
+
+Object* Builtins::builtins_[builtin_count] = { NULL, };
+const char* Builtins::names_[builtin_count] = { NULL, };
+
+#define DEF_ENUM_C(name) FUNCTION_ADDR(Builtin_##name),
+  Address Builtins::c_functions_[cfunction_count] = {
+    BUILTIN_LIST_C(DEF_ENUM_C)
+  };
+#undef DEF_ENUM_C
+
+#define DEF_JS_NAME(name, ignore) #name,
+#define DEF_JS_ARGC(ignore, argc) argc,
+const char* Builtins::javascript_names_[id_count] = {
+  BUILTINS_LIST_JS(DEF_JS_NAME)
+};
+
+int Builtins::javascript_argc_[id_count] = {
+  BUILTINS_LIST_JS(DEF_JS_ARGC)
+};
+#undef DEF_JS_NAME
+#undef DEF_JS_ARGC
+
+static bool is_initialized = false;
+void Builtins::Setup(bool create_heap_objects) {
+  ASSERT(!is_initialized);
+
+  // Create a scope for the handles in the builtins.
+  HandleScope scope;
+
+  struct BuiltinDesc {
+    byte* generator;
+    byte* c_code;
+    const char* s_name;  // name is only used for generating log information.
+    int name;
+    Code::Flags flags;
+  };
+
+#define DEF_FUNCTION_PTR_C(name)         \
+    { FUNCTION_ADDR(Generate_Adaptor),   \
+      FUNCTION_ADDR(Builtin_##name),     \
+      #name,                             \
+      c_##name,                          \
+      Code::ComputeFlags(Code::BUILTIN)  \
+    },
+
+#define DEF_FUNCTION_PTR_A(name, kind, state)              \
+    { FUNCTION_ADDR(Generate_##name),                      \
+      NULL,                                                \
+      #name,                                               \
+      name,                                                \
+      Code::ComputeFlags(Code::kind, NOT_IN_LOOP, state)   \
+    },
+
+  // Define array of pointers to generators and C builtin functions.
+  static BuiltinDesc functions[] = {
+      BUILTIN_LIST_C(DEF_FUNCTION_PTR_C)
+      BUILTIN_LIST_A(DEF_FUNCTION_PTR_A)
+      BUILTIN_LIST_DEBUG_A(DEF_FUNCTION_PTR_A)
+      // Terminator:
+      { NULL, NULL, NULL, builtin_count, static_cast<Code::Flags>(0) }
+  };
+
+#undef DEF_FUNCTION_PTR_C
+#undef DEF_FUNCTION_PTR_A
+
+  // For now we generate builtin adaptor code into a stack-allocated
+  // buffer, before copying it into individual code objects.
+  byte buffer[4*KB];
+
+  // Traverse the list of builtins and generate an adaptor in a
+  // separate code object for each one.
+  for (int i = 0; i < builtin_count; i++) {
+    if (create_heap_objects) {
+      MacroAssembler masm(buffer, sizeof buffer);
+      // Generate the code/adaptor.
+      typedef void (*Generator)(MacroAssembler*, int);
+      Generator g = FUNCTION_CAST<Generator>(functions[i].generator);
+      // We pass all arguments to the generator, but it may not use all of
+      // them.  This works because the first arguments are on top of the
+      // stack.
+      g(&masm, functions[i].name);
+      // Move the code into the object heap.
+      CodeDesc desc;
+      masm.GetCode(&desc);
+      Code::Flags flags = functions[i].flags;
+      Object* code;
+      {
+        // During startup it's OK to always allocate and defer GC to later.
+        // This simplifies things because we don't need to retry.
+        AlwaysAllocateScope __scope__;
+        code = Heap::CreateCode(desc, NULL, flags, masm.CodeObject());
+        if (code->IsFailure()) {
+          v8::internal::V8::FatalProcessOutOfMemory("CreateCode");
+        }
+      }
+      // Add any unresolved jumps or calls to the fixup list in the
+      // bootstrapper.
+      Bootstrapper::AddFixup(Code::cast(code), &masm);
+      // Log the event and add the code to the builtins array.
+      LOG(CodeCreateEvent(Logger::BUILTIN_TAG,
+                          Code::cast(code), functions[i].s_name));
+      builtins_[i] = code;
+#ifdef ENABLE_DISASSEMBLER
+      if (FLAG_print_builtin_code) {
+        PrintF("Builtin: %s\n", functions[i].s_name);
+        Code::cast(code)->Disassemble(functions[i].s_name);
+        PrintF("\n");
+      }
+#endif
+    } else {
+      // Deserializing. The values will be filled in during IterateBuiltins.
+      builtins_[i] = NULL;
+    }
+    names_[i] = functions[i].s_name;
+  }
+
+  // Mark as initialized.
+  is_initialized = true;
+}
+
+
+void Builtins::TearDown() {
+  is_initialized = false;
+}
+
+
+void Builtins::IterateBuiltins(ObjectVisitor* v) {
+  v->VisitPointers(&builtins_[0], &builtins_[0] + builtin_count);
+}
+
+
+const char* Builtins::Lookup(byte* pc) {
+  if (is_initialized) {  // may be called during initialization (disassembler!)
+    for (int i = 0; i < builtin_count; i++) {
+      Code* entry = Code::cast(builtins_[i]);
+      if (entry->contains(pc)) {
+        return names_[i];
+      }
+    }
+  }
+  return NULL;
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/builtins.h b/src/builtins.h
new file mode 100644
index 0000000..141d5b7
--- /dev/null
+++ b/src/builtins.h
@@ -0,0 +1,229 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_BUILTINS_H_
+#define V8_BUILTINS_H_
+
+namespace v8 {
+namespace internal {
+
+// Define list of builtins implemented in C.
+#define BUILTIN_LIST_C(V)                          \
+  V(Illegal)                                       \
+                                                   \
+  V(EmptyFunction)                                 \
+                                                   \
+  V(ArrayCodeGeneric)                              \
+                                                   \
+  V(ArrayPush)                                     \
+  V(ArrayPop)                                      \
+                                                   \
+  V(HandleApiCall)                                 \
+  V(HandleApiCallAsFunction)                       \
+  V(HandleApiCallAsConstructor)
+
+
+// Define list of builtins implemented in assembly.
+#define BUILTIN_LIST_A(V)                                      \
+  V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED)        \
+  V(JSConstructCall,            BUILTIN, UNINITIALIZED)        \
+  V(JSConstructStubGeneric,     BUILTIN, UNINITIALIZED)        \
+  V(JSEntryTrampoline,          BUILTIN, UNINITIALIZED)        \
+  V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED)        \
+                                                               \
+  V(LoadIC_Miss,                BUILTIN, UNINITIALIZED)        \
+  V(KeyedLoadIC_Miss,           BUILTIN, UNINITIALIZED)        \
+  V(StoreIC_Miss,               BUILTIN, UNINITIALIZED)        \
+  V(KeyedStoreIC_Miss,          BUILTIN, UNINITIALIZED)        \
+                                                               \
+  V(StoreIC_ExtendStorage,      BUILTIN, UNINITIALIZED)        \
+  V(KeyedStoreIC_ExtendStorage, BUILTIN, UNINITIALIZED)        \
+                                                               \
+  V(LoadIC_Initialize,          LOAD_IC, UNINITIALIZED)        \
+  V(LoadIC_PreMonomorphic,      LOAD_IC, PREMONOMORPHIC)       \
+  V(LoadIC_Normal,              LOAD_IC, MONOMORPHIC)          \
+  V(LoadIC_ArrayLength,         LOAD_IC, MONOMORPHIC)          \
+  V(LoadIC_StringLength,        LOAD_IC, MONOMORPHIC)          \
+  V(LoadIC_FunctionPrototype,   LOAD_IC, MONOMORPHIC)          \
+  V(LoadIC_Megamorphic,         LOAD_IC, MEGAMORPHIC)          \
+                                                               \
+  V(KeyedLoadIC_Initialize,     KEYED_LOAD_IC, UNINITIALIZED)  \
+  V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC) \
+  V(KeyedLoadIC_Generic,        KEYED_LOAD_IC, MEGAMORPHIC)    \
+                                                               \
+  V(StoreIC_Initialize,         STORE_IC, UNINITIALIZED)       \
+  V(StoreIC_Megamorphic,        STORE_IC, MEGAMORPHIC)         \
+                                                               \
+  V(KeyedStoreIC_Initialize,    KEYED_STORE_IC, UNINITIALIZED) \
+  V(KeyedStoreIC_Generic,       KEYED_STORE_IC, MEGAMORPHIC)   \
+                                                               \
+  /* Uses KeyedLoadIC_Initialize; must be after in list. */    \
+  V(FunctionCall,               BUILTIN, UNINITIALIZED)        \
+  V(FunctionApply,              BUILTIN, UNINITIALIZED)        \
+                                                               \
+  V(ArrayCode,                  BUILTIN, UNINITIALIZED)        \
+  V(ArrayConstructCode,         BUILTIN, UNINITIALIZED)
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+// Define list of builtins used by the debugger implemented in assembly.
+#define BUILTIN_LIST_DEBUG_A(V)                                \
+  V(Return_DebugBreak,          BUILTIN, DEBUG_BREAK)          \
+  V(ConstructCall_DebugBreak,   BUILTIN, DEBUG_BREAK)          \
+  V(StubNoRegisters_DebugBreak, BUILTIN, DEBUG_BREAK)          \
+  V(LoadIC_DebugBreak,          LOAD_IC, DEBUG_BREAK)          \
+  V(KeyedLoadIC_DebugBreak,     KEYED_LOAD_IC, DEBUG_BREAK)    \
+  V(StoreIC_DebugBreak,         STORE_IC, DEBUG_BREAK)         \
+  V(KeyedStoreIC_DebugBreak,    KEYED_STORE_IC, DEBUG_BREAK)
+#else
+#define BUILTIN_LIST_DEBUG_A(V)
+#endif
+
+// Define list of builtins implemented in JavaScript.
+#define BUILTINS_LIST_JS(V)              \
+  V(EQUALS, 1)                           \
+  V(STRICT_EQUALS, 1)                    \
+  V(COMPARE, 2)                          \
+  V(ADD, 1)                              \
+  V(SUB, 1)                              \
+  V(MUL, 1)                              \
+  V(DIV, 1)                              \
+  V(MOD, 1)                              \
+  V(BIT_OR, 1)                           \
+  V(BIT_AND, 1)                          \
+  V(BIT_XOR, 1)                          \
+  V(UNARY_MINUS, 0)                      \
+  V(BIT_NOT, 0)                          \
+  V(SHL, 1)                              \
+  V(SAR, 1)                              \
+  V(SHR, 1)                              \
+  V(DELETE, 1)                           \
+  V(IN, 1)                               \
+  V(INSTANCE_OF, 1)                      \
+  V(GET_KEYS, 0)                         \
+  V(FILTER_KEY, 1)                       \
+  V(CALL_NON_FUNCTION, 0)                \
+  V(CALL_NON_FUNCTION_AS_CONSTRUCTOR, 0) \
+  V(TO_OBJECT, 0)                        \
+  V(TO_NUMBER, 0)                        \
+  V(TO_STRING, 0)                        \
+  V(STRING_ADD_LEFT, 1)                  \
+  V(STRING_ADD_RIGHT, 1)                 \
+  V(APPLY_PREPARE, 1)                    \
+  V(APPLY_OVERFLOW, 1)
+
+
+class ObjectVisitor;
+
+
+class Builtins : public AllStatic {
+ public:
+  // Generate all builtin code objects. Should be called once during
+  // VM initialization.
+  static void Setup(bool create_heap_objects);
+  static void TearDown();
+
+  // Garbage collection support.
+  static void IterateBuiltins(ObjectVisitor* v);
+
+  // Disassembler support.
+  static const char* Lookup(byte* pc);
+
+  enum Name {
+#define DEF_ENUM_C(name) name,
+#define DEF_ENUM_A(name, kind, state) name,
+    BUILTIN_LIST_C(DEF_ENUM_C)
+    BUILTIN_LIST_A(DEF_ENUM_A)
+    BUILTIN_LIST_DEBUG_A(DEF_ENUM_A)
+#undef DEF_ENUM_C
+#undef DEF_ENUM_A
+    builtin_count
+  };
+
+  enum CFunctionId {
+#define DEF_ENUM_C(name) c_##name,
+    BUILTIN_LIST_C(DEF_ENUM_C)
+#undef DEF_ENUM_C
+    cfunction_count
+  };
+
+  enum JavaScript {
+#define DEF_ENUM(name, ignore) name,
+    BUILTINS_LIST_JS(DEF_ENUM)
+#undef DEF_ENUM
+    id_count
+  };
+
+  static Code* builtin(Name name) {
+    // Code::cast cannot be used here since we access builtins
+    // during the marking phase of mark sweep. See IC::Clear.
+    return reinterpret_cast<Code*>(builtins_[name]);
+  }
+
+  static Address builtin_address(Name name) {
+    return reinterpret_cast<Address>(&builtins_[name]);
+  }
+
+  static Address c_function_address(CFunctionId id) {
+    return c_functions_[id];
+  }
+
+  static const char* GetName(JavaScript id) { return javascript_names_[id]; }
+  static int GetArgumentsCount(JavaScript id) { return javascript_argc_[id]; }
+  static Handle<Code> GetCode(JavaScript id, bool* resolved);
+  static int NumberOfJavaScriptBuiltins() { return id_count; }
+
+  static Object* builtin_passed_function;
+
+ private:
+  // The external C++ functions called from the code.
+  static Address c_functions_[cfunction_count];
+
+  // Note: These are always Code objects, but to conform with
+  // IterateBuiltins() above which assumes Object**'s for the callback
+  // function f, we use an Object* array here.
+  static Object* builtins_[builtin_count];
+  static const char* names_[builtin_count];
+  static const char* javascript_names_[id_count];
+  static int javascript_argc_[id_count];
+
+  static void Generate_Adaptor(MacroAssembler* masm, CFunctionId id);
+  static void Generate_JSConstructCall(MacroAssembler* masm);
+  static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
+  static void Generate_JSEntryTrampoline(MacroAssembler* masm);
+  static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm);
+  static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
+
+  static void Generate_FunctionCall(MacroAssembler* masm);
+  static void Generate_FunctionApply(MacroAssembler* masm);
+
+  static void Generate_ArrayCode(MacroAssembler* masm);
+  static void Generate_ArrayConstructCode(MacroAssembler* masm);
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_BUILTINS_H_
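
The Name, CFunctionId, and JavaScript enums above are built with the X-macro pattern: each BUILTIN_LIST_* macro takes a macro V and applies it to every entry, so one list can be expanded into enum values, name tables, and argument counts. A minimal sketch of that pattern, using a hypothetical three-entry list rather than V8's real builtins:

#include <cstdio>

// Hypothetical list; each entry is (name, argument count).
#define MY_BUILTIN_LIST(V) \
  V(ADD, 1)                \
  V(SUB, 1)                \
  V(NEGATE, 0)

// Expansion 1: an enum with one value per entry plus a count.
enum MyBuiltin {
#define DEF_ENUM(name, argc) kBuiltin_##name,
  MY_BUILTIN_LIST(DEF_ENUM)
#undef DEF_ENUM
  kBuiltinCount
};

// Expansion 2: parallel tables of names and argument counts.
static const char* kNames[kBuiltinCount] = {
#define DEF_NAME(name, argc) #name,
  MY_BUILTIN_LIST(DEF_NAME)
#undef DEF_NAME
};

static const int kArgc[kBuiltinCount] = {
#define DEF_ARGC(name, argc) argc,
  MY_BUILTIN_LIST(DEF_ARGC)
#undef DEF_ARGC
};

int main() {
  for (int i = 0; i < kBuiltinCount; i++) {
    std::printf("%s takes %d argument(s)\n", kNames[i], kArgc[i]);
  }
  return 0;
}
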
diff --git a/src/bytecodes-irregexp.h b/src/bytecodes-irregexp.h
new file mode 100644
index 0000000..bcb34c8
--- /dev/null
+++ b/src/bytecodes-irregexp.h
@@ -0,0 +1,104 @@
+// Copyright 2008-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_BYTECODES_IRREGEXP_H_
+#define V8_BYTECODES_IRREGEXP_H_
+
+namespace v8 {
+namespace internal {
+
+
+static const int BYTECODE_MASK = 0xff;
+// The first argument is packed in with the bytecode in one word, so it
+// has 24 bits; but since it can be both positive and negative, only use
+// 23 bits for positive values.
+static const unsigned int MAX_FIRST_ARG = 0x7fffffu;
+static const int BYTECODE_SHIFT = 8;
+
+#define BYTECODE_ITERATOR(V)                                                   \
+V(BREAK,              0, 4)   /* bc8                                        */ \
+V(PUSH_CP,            1, 4)   /* bc8 pad24                                  */ \
+V(PUSH_BT,            2, 8)   /* bc8 pad24 offset32                         */ \
+V(PUSH_REGISTER,      3, 4)   /* bc8 reg_idx24                              */ \
+V(SET_REGISTER_TO_CP, 4, 8)   /* bc8 reg_idx24 offset32                     */ \
+V(SET_CP_TO_REGISTER, 5, 4)   /* bc8 reg_idx24                              */ \
+V(SET_REGISTER_TO_SP, 6, 4)   /* bc8 reg_idx24                              */ \
+V(SET_SP_TO_REGISTER, 7, 4)   /* bc8 reg_idx24                              */ \
+V(SET_REGISTER,       8, 8)   /* bc8 reg_idx24 value32                      */ \
+V(ADVANCE_REGISTER,   9, 8)   /* bc8 reg_idx24 value32                      */ \
+V(POP_CP,            10, 4)   /* bc8 pad24                                  */ \
+V(POP_BT,            11, 4)   /* bc8 pad24                                  */ \
+V(POP_REGISTER,      12, 4)   /* bc8 reg_idx24                              */ \
+V(FAIL,              13, 4)   /* bc8 pad24                                  */ \
+V(SUCCEED,           14, 4)   /* bc8 pad24                                  */ \
+V(ADVANCE_CP,        15, 4)   /* bc8 offset24                               */ \
+V(GOTO,              16, 8)   /* bc8 pad24 addr32                           */ \
+V(LOAD_CURRENT_CHAR, 17, 8)   /* bc8 offset24 addr32                        */ \
+V(LOAD_CURRENT_CHAR_UNCHECKED, 18, 4) /* bc8 offset24                       */ \
+V(LOAD_2_CURRENT_CHARS, 19, 8) /* bc8 offset24 addr32                       */ \
+V(LOAD_2_CURRENT_CHARS_UNCHECKED, 20, 4) /* bc8 offset24                    */ \
+V(LOAD_4_CURRENT_CHARS, 21, 8) /* bc8 offset24 addr32                       */ \
+V(LOAD_4_CURRENT_CHARS_UNCHECKED, 22, 4) /* bc8 offset24                    */ \
+V(CHECK_4_CHARS,     23, 12)  /* bc8 pad24 uint32 addr32                    */ \
+V(CHECK_CHAR,        24, 8)   /* bc8 pad8 uint16 addr32                     */ \
+V(CHECK_NOT_4_CHARS, 25, 12)  /* bc8 pad24 uint32 addr32                    */ \
+V(CHECK_NOT_CHAR,    26, 8)   /* bc8 pad8 uint16 addr32                     */ \
+V(AND_CHECK_4_CHARS, 27, 16)  /* bc8 pad24 uint32 uint32 addr32             */ \
+V(AND_CHECK_CHAR,    28, 12)  /* bc8 pad8 uint16 uint32 addr32              */ \
+V(AND_CHECK_NOT_4_CHARS, 29, 16) /* bc8 pad24 uint32 uint32 addr32          */ \
+V(AND_CHECK_NOT_CHAR, 30, 12) /* bc8 pad8 uint16 uint32 addr32              */ \
+V(MINUS_AND_CHECK_NOT_CHAR, 31, 12) /* bc8 pad8 uc16 uc16 addr32            */ \
+V(CHECK_LT,          32, 8)   /* bc8 pad8 uc16 addr32                       */ \
+V(CHECK_GT,          33, 8)   /* bc8 pad8 uc16 addr32                       */ \
+V(CHECK_NOT_BACK_REF, 34, 8)  /* bc8 reg_idx24 addr32                       */ \
+V(CHECK_NOT_BACK_REF_NO_CASE, 35, 8) /* bc8 reg_idx24 addr32                */ \
+V(CHECK_NOT_REGS_EQUAL, 36, 12) /* bc8 regidx24 reg_idx32 addr32            */ \
+V(LOOKUP_MAP1,       37, 12)  /* bc8 pad8 start16 bit_map_addr32 addr32     */ \
+V(LOOKUP_MAP2,       38, 96)  /* bc8 pad8 start16 half_nibble_map_addr32*   */ \
+V(LOOKUP_MAP8,       39, 96)  /* bc8 pad8  start16 byte_map addr32*         */ \
+V(LOOKUP_HI_MAP8,    40, 96)  /* bc8 start24 byte_map_addr32 addr32*        */ \
+V(CHECK_REGISTER_LT, 41, 12)  /* bc8 reg_idx24 value32 addr32               */ \
+V(CHECK_REGISTER_GE, 42, 12)  /* bc8 reg_idx24 value32 addr32               */ \
+V(CHECK_REGISTER_EQ_POS, 43, 8) /* bc8 reg_idx24 addr32                     */ \
+V(CHECK_AT_START,    44, 8)   /* bc8 pad24 addr32                           */ \
+V(CHECK_NOT_AT_START, 45, 8)  /* bc8 pad24 addr32                           */ \
+V(CHECK_GREEDY,      46, 8)   /* bc8 pad24 addr32                           */ \
+V(ADVANCE_CP_AND_GOTO, 47, 8) /* bc8 offset24 addr32                        */
+
+#define DECLARE_BYTECODES(name, code, length) \
+  static const int BC_##name = code;
+BYTECODE_ITERATOR(DECLARE_BYTECODES)
+#undef DECLARE_BYTECODES
+
+#define DECLARE_BYTECODE_LENGTH(name, code, length) \
+  static const int BC_##name##_LENGTH = length;
+BYTECODE_ITERATOR(DECLARE_BYTECODE_LENGTH)
+#undef DECLARE_BYTECODE_LENGTH
+} }
+
+#endif  // V8_BYTECODES_IRREGEXP_H_
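
The constants above imply that each bytecode word keeps the 8-bit opcode in its low byte (BYTECODE_MASK) and packs the first argument into the remaining 24 bits (BYTECODE_SHIFT). A rough sketch of how such a word could be encoded and decoded; the function names here are illustrative, not taken from the V8 interpreter:

#include <cassert>
#include <cstdint>

static const int kBytecodeMask = 0xff;  // mirrors BYTECODE_MASK
static const int kBytecodeShift = 8;    // mirrors BYTECODE_SHIFT

// Pack an 8-bit opcode and a signed 24-bit first argument into one word.
inline uint32_t PackBytecode(int opcode, int32_t first_arg) {
  return (static_cast<uint32_t>(first_arg) << kBytecodeShift) |
         (static_cast<uint32_t>(opcode) & kBytecodeMask);
}

inline int UnpackOpcode(uint32_t word) { return word & kBytecodeMask; }

// An arithmetic shift right recovers the sign of the 24-bit argument.
inline int32_t UnpackFirstArg(uint32_t word) {
  return static_cast<int32_t>(word) >> kBytecodeShift;
}

int main() {
  uint32_t w = PackBytecode(15 /* e.g. ADVANCE_CP */, -3);
  assert(UnpackOpcode(w) == 15);
  assert(UnpackFirstArg(w) == -3);
  return 0;
}
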
diff --git a/src/char-predicates-inl.h b/src/char-predicates-inl.h
new file mode 100644
index 0000000..fadbc9a
--- /dev/null
+++ b/src/char-predicates-inl.h
@@ -0,0 +1,86 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CHAR_PREDICATES_INL_H_
+#define V8_CHAR_PREDICATES_INL_H_
+
+#include "char-predicates.h"
+
+namespace v8 {
+namespace internal {
+
+
+inline bool IsCarriageReturn(uc32 c) {
+  return c == 0x000D;
+}
+
+
+inline bool IsLineFeed(uc32 c) {
+  return c == 0x000A;
+}
+
+
+static inline bool IsInRange(int value, int lower_limit, int higher_limit) {
+  ASSERT(lower_limit <= higher_limit);
+  return static_cast<unsigned int>(value - lower_limit) <=
+      static_cast<unsigned int>(higher_limit - lower_limit);
+}
+
+
+inline bool IsDecimalDigit(uc32 c) {
+  // ECMA-262, 3rd, 7.8.3 (p 16)
+  return IsInRange(c, '0', '9');
+}
+
+
+inline bool IsHexDigit(uc32 c) {
+  // ECMA-262, 3rd, 7.6 (p 15)
+  return IsDecimalDigit(c) || IsInRange(c | 0x20, 'a', 'f');
+}
+
+
+inline bool IsRegExpWord(uc16 c) {
+  return IsInRange(c | 0x20, 'a', 'z')
+      || IsDecimalDigit(c)
+      || (c == '_');
+}
+
+
+inline bool IsRegExpNewline(uc16 c) {
+  switch (c) {
+    //   LF           CR           LS           PS
+    case 0x000A: case 0x000D: case 0x2028: case 0x2029:
+      return false;
+    default:
+      return true;
+  }
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_CHAR_PREDICATES_INL_H_
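
IsInRange above relies on a common trick: subtracting the lower bound and doing a single unsigned comparison covers both bounds, because values below the lower limit wrap around to large unsigned numbers. A small standalone check of that equivalence, not tied to the V8 headers:

#include <cassert>

// Same trick as IsInRange: one unsigned compare replaces two signed ones.
static bool InRange(int value, int lo, int hi) {
  return static_cast<unsigned int>(value - lo) <=
         static_cast<unsigned int>(hi - lo);
}

int main() {
  for (int c = -200; c <= 200; c++) {
    bool naive = (c >= '0' && c <= '9');
    assert(InRange(c, '0', '9') == naive);
  }
  return 0;
}
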
diff --git a/src/char-predicates.h b/src/char-predicates.h
new file mode 100644
index 0000000..dac1eb8
--- /dev/null
+++ b/src/char-predicates.h
@@ -0,0 +1,65 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CHAR_PREDICATES_H_
+#define V8_CHAR_PREDICATES_H_
+
+namespace v8 {
+namespace internal {
+
+// Unicode character predicates as defined by ECMA-262, 3rd,
+// used for lexical analysis.
+
+inline bool IsCarriageReturn(uc32 c);
+inline bool IsLineFeed(uc32 c);
+inline bool IsDecimalDigit(uc32 c);
+inline bool IsHexDigit(uc32 c);
+inline bool IsRegExpWord(uc32 c);
+inline bool IsRegExpNewline(uc32 c);
+
+struct IdentifierStart {
+  static inline bool Is(uc32 c) {
+    switch (c) {
+      case '$': case '_': case '\\': return true;
+      default: return unibrow::Letter::Is(c);
+    }
+  }
+};
+
+
+struct IdentifierPart {
+  static inline bool Is(uc32 c) {
+    return IdentifierStart::Is(c)
+        || unibrow::Number::Is(c)
+        || unibrow::CombiningMark::Is(c)
+        || unibrow::ConnectorPunctuation::Is(c);
+  }
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_CHAR_PREDICATES_H_
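
IdentifierStart and IdentifierPart implement the ECMA-262 identifier grammar, with the Unicode classes supplied by unibrow. An ASCII-only sketch of how a scanner might use such predicates to validate an identifier; the unibrow tables (and the '\\' escape case) are deliberately left out, so this is an approximation:

#include <cassert>
#include <cctype>

// ASCII-only approximations; the real predicates also accept Unicode
// letters, combining marks, and connector punctuation.
static bool IsIdentifierStart(char c) {
  return c == '$' || c == '_' || std::isalpha(static_cast<unsigned char>(c));
}
static bool IsIdentifierPart(char c) {
  return IsIdentifierStart(c) || std::isdigit(static_cast<unsigned char>(c));
}

static bool IsIdentifier(const char* s) {
  if (*s == '\0' || !IsIdentifierStart(*s)) return false;
  for (const char* p = s + 1; *p != '\0'; p++) {
    if (!IsIdentifierPart(*p)) return false;
  }
  return true;
}

int main() {
  assert(IsIdentifier("_foo$1"));
  assert(!IsIdentifier("1foo"));
  return 0;
}
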
diff --git a/src/checks.cc b/src/checks.cc
new file mode 100644
index 0000000..f8a2f24
--- /dev/null
+++ b/src/checks.cc
@@ -0,0 +1,98 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdarg.h>
+
+#include "v8.h"
+
+#include "platform.h"
+#include "top.h"
+
+static int fatal_error_handler_nesting_depth = 0;
+
+// Contains protection against recursive calls (faults while handling faults).
+extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
+  fatal_error_handler_nesting_depth++;
+  // First time we try to print an error message
+  if (fatal_error_handler_nesting_depth < 2) {
+    i::OS::PrintError("\n\n#\n# Fatal error in %s, line %d\n# ", file, line);
+    va_list arguments;
+    va_start(arguments, format);
+    i::OS::VPrintError(format, arguments);
+    va_end(arguments);
+    i::OS::PrintError("\n#\n\n");
+  }
+  // First two times we may try to print a stack dump.
+  if (fatal_error_handler_nesting_depth < 3) {
+    if (i::FLAG_stack_trace_on_abort) {
+      // Call this one twice on double fault
+      i::Top::PrintStack();
+    }
+  }
+  i::OS::Abort();
+}
+
+
+void CheckEqualsHelper(const char* file,
+                       int line,
+                       const char* expected_source,
+                       v8::Handle<v8::Value> expected,
+                       const char* value_source,
+                       v8::Handle<v8::Value> value) {
+  if (!expected->Equals(value)) {
+    v8::String::Utf8Value value_str(value);
+    v8::String::Utf8Value expected_str(expected);
+    V8_Fatal(file, line,
+             "CHECK_EQ(%s, %s) failed\n#   Expected: %s\n#   Found: %s",
+             expected_source, value_source, *expected_str, *value_str);
+  }
+}
+
+
+void CheckNonEqualsHelper(const char* file,
+                          int line,
+                          const char* unexpected_source,
+                          v8::Handle<v8::Value> unexpected,
+                          const char* value_source,
+                          v8::Handle<v8::Value> value) {
+  if (unexpected->Equals(value)) {
+    v8::String::Utf8Value value_str(value);
+    V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %s",
+             unexpected_source, value_source, *value_str);
+  }
+}
+
+
+void API_Fatal(const char* location, const char* format, ...) {
+  i::OS::PrintError("\n#\n# Fatal error in %s\n# ", location);
+  va_list arguments;
+  va_start(arguments, format);
+  i::OS::VPrintError(format, arguments);
+  va_end(arguments);
+  i::OS::PrintError("\n#\n\n");
+  i::OS::Abort();
+}
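
V8_Fatal guards against faults that occur while it is already reporting a fault by counting its own nesting depth and doing less work on each re-entry. A stripped-down sketch of that pattern with a hypothetical handler, not the V8 one:

#include <cstdio>
#include <cstdlib>

static int g_fatal_nesting = 0;

// The first entry prints the message; a nested entry skips straight to
// abort so a fault inside the reporting code cannot recurse forever.
static void FatalError(const char* msg) {
  g_fatal_nesting++;
  if (g_fatal_nesting < 2) {
    std::fprintf(stderr, "# Fatal error: %s\n", msg);
    // Printing a stack trace here could itself fault; if it does, the
    // nested call falls through to abort() below.
  }
  std::abort();
}

int main() {
  FatalError("example failure");  // never returns
}
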
diff --git a/src/checks.h b/src/checks.h
new file mode 100644
index 0000000..b302e5b
--- /dev/null
+++ b/src/checks.h
@@ -0,0 +1,263 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CHECKS_H_
+#define V8_CHECKS_H_
+
+#include <string.h>
+
+#include "flags.h"
+
+extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
+void API_Fatal(const char* location, const char* format, ...);
+
+// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
+// development, but they should not be relied on in the final product.
+#ifdef DEBUG
+#define FATAL(msg)                              \
+  V8_Fatal(__FILE__, __LINE__, "%s", (msg))
+#define UNIMPLEMENTED()                         \
+  V8_Fatal(__FILE__, __LINE__, "unimplemented code")
+#define UNREACHABLE()                           \
+  V8_Fatal(__FILE__, __LINE__, "unreachable code")
+#else
+#define FATAL(msg)                              \
+  V8_Fatal("", 0, "%s", (msg))
+#define UNIMPLEMENTED()                         \
+  V8_Fatal("", 0, "unimplemented code")
+#define UNREACHABLE() ((void) 0)
+#endif
+
+
+// Used by the CHECK macro -- should not be called directly.
+static inline void CheckHelper(const char* file,
+                               int line,
+                               const char* source,
+                               bool condition) {
+  if (!condition)
+    V8_Fatal(file, line, "CHECK(%s) failed", source);
+}
+
+
+// The CHECK macro checks that the given condition is true; if not, it
+// prints a message to stderr and aborts.
+#define CHECK(condition) CheckHelper(__FILE__, __LINE__, #condition, condition)
+
+
+// Helper function used by the CHECK_EQ function when given int
+// arguments.  Should not be called directly.
+static inline void CheckEqualsHelper(const char* file, int line,
+                                     const char* expected_source, int expected,
+                                     const char* value_source, int value) {
+  if (expected != value) {
+    V8_Fatal(file, line,
+             "CHECK_EQ(%s, %s) failed\n#   Expected: %i\n#   Found: %i",
+             expected_source, value_source, expected, value);
+  }
+}
+
+
+// Helper function used by the CHECK_NE function when given int
+// arguments.  Should not be called directly.
+static inline void CheckNonEqualsHelper(const char* file,
+                                        int line,
+                                        const char* unexpected_source,
+                                        int unexpected,
+                                        const char* value_source,
+                                        int value) {
+  if (unexpected == value) {
+    V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %i",
+             unexpected_source, value_source, value);
+  }
+}
+
+
+// Helper function used by the CHECK_EQ function when given string
+// arguments.  Should not be called directly.
+static inline void CheckEqualsHelper(const char* file,
+                                     int line,
+                                     const char* expected_source,
+                                     const char* expected,
+                                     const char* value_source,
+                                     const char* value) {
+  if (strcmp(expected, value) != 0) {
+    V8_Fatal(file, line,
+             "CHECK_EQ(%s, %s) failed\n#   Expected: %s\n#   Found: %s",
+             expected_source, value_source, expected, value);
+  }
+}
+
+
+static inline void CheckNonEqualsHelper(const char* file,
+                                        int line,
+                                        const char* expected_source,
+                                        const char* expected,
+                                        const char* value_source,
+                                        const char* value) {
+  if (expected == value ||
+      (expected != NULL && value != NULL && strcmp(expected, value) == 0)) {
+    V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %s",
+             expected_source, value_source, value);
+  }
+}
+
+
+// Helper function used by the CHECK_EQ function when given pointer
+// arguments.  Should not be called directly.
+static inline void CheckEqualsHelper(const char* file,
+                                     int line,
+                                     const char* expected_source,
+                                     void* expected,
+                                     const char* value_source,
+                                     void* value) {
+  if (expected != value) {
+    V8_Fatal(file, line,
+             "CHECK_EQ(%s, %s) failed\n#   Expected: %p\n#   Found: %p",
+             expected_source, value_source,
+             expected, value);
+  }
+}
+
+
+static inline void CheckNonEqualsHelper(const char* file,
+                                        int line,
+                                        const char* expected_source,
+                                        void* expected,
+                                        const char* value_source,
+                                        void* value) {
+  if (expected == value) {
+    V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %p",
+             expected_source, value_source, value);
+  }
+}
+
+
+// Helper function used by the CHECK_EQ function when given floating
+// point arguments.  Should not be called directly.
+static inline void CheckEqualsHelper(const char* file,
+                                     int line,
+                                     const char* expected_source,
+                                     double expected,
+                                     const char* value_source,
+                                     double value) {
+  // Force values to 64 bit memory to truncate 80 bit precision on IA32.
+  volatile double* exp = new double[1];
+  *exp = expected;
+  volatile double* val = new double[1];
+  *val = value;
+  if (*exp != *val) {
+    V8_Fatal(file, line,
+             "CHECK_EQ(%s, %s) failed\n#   Expected: %f\n#   Found: %f",
+             expected_source, value_source, *exp, *val);
+  }
+  delete[] exp;
+  delete[] val;
+}
+
+
+namespace v8 {
+  class Value;
+  template <class T> class Handle;
+}
+
+
+void CheckNonEqualsHelper(const char* file,
+                          int line,
+                          const char* unexpected_source,
+                          v8::Handle<v8::Value> unexpected,
+                          const char* value_source,
+                          v8::Handle<v8::Value> value);
+
+
+void CheckEqualsHelper(const char* file,
+                       int line,
+                       const char* expected_source,
+                       v8::Handle<v8::Value> expected,
+                       const char* value_source,
+                       v8::Handle<v8::Value> value);
+
+
+#define CHECK_EQ(expected, value) CheckEqualsHelper(__FILE__, __LINE__, \
+  #expected, expected, #value, value)
+
+
+#define CHECK_NE(unexpected, value) CheckNonEqualsHelper(__FILE__, __LINE__, \
+  #unexpected, unexpected, #value, value)
+
+
+#define CHECK_GT(a, b) CHECK((a) > (b))
+#define CHECK_GE(a, b) CHECK((a) >= (b))
+
+
+// This is inspired by the static assertion facility in boost.  This
+// is pretty magical.  If it causes you trouble on a platform you may
+// find a fix in the boost code.
+template <bool> class StaticAssertion;
+template <> class StaticAssertion<true> { };
+// This macro joins two tokens.  If one of the tokens is a macro the
+// helper call causes it to be resolved before joining.
+#define SEMI_STATIC_JOIN(a, b) SEMI_STATIC_JOIN_HELPER(a, b)
+#define SEMI_STATIC_JOIN_HELPER(a, b) a##b
+// Causes an error during compilation if the condition is not
+// statically known to be true.  It is formulated as a typedef so that
+// it can be used wherever a typedef can be used.  Beware that this
+// actually causes each use to introduce a new defined type with a
+// name depending on the source line.
+template <int> class StaticAssertionHelper { };
+#define STATIC_CHECK(test)                                                  \
+  typedef                                                                   \
+    StaticAssertionHelper<sizeof(StaticAssertion<static_cast<bool>(test)>)> \
+    SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__)
+
+
+// The ASSERT macro is equivalent to CHECK except that it only
+// generates code in debug builds.  Ditto STATIC_ASSERT.
+#ifdef DEBUG
+#define ASSERT_RESULT(expr)  CHECK(expr)
+#define ASSERT(condition)    CHECK(condition)
+#define ASSERT_EQ(v1, v2)    CHECK_EQ(v1, v2)
+#define ASSERT_NE(v1, v2)    CHECK_NE(v1, v2)
+#define STATIC_ASSERT(test)  STATIC_CHECK(test)
+#define SLOW_ASSERT(condition) if (FLAG_enable_slow_asserts) CHECK(condition)
+#else
+#define ASSERT_RESULT(expr)     (expr)
+#define ASSERT(condition)      ((void) 0)
+#define ASSERT_EQ(v1, v2)      ((void) 0)
+#define ASSERT_NE(v1, v2)      ((void) 0)
+#define STATIC_ASSERT(test)    ((void) 0)
+#define SLOW_ASSERT(condition) ((void) 0)
+#endif
+
+
+#define ASSERT_TAG_ALIGNED(address) \
+  ASSERT((reinterpret_cast<intptr_t>(address) & kHeapObjectTagMask) == 0)
+
+#define ASSERT_SIZE_TAG_ALIGNED(size) ASSERT((size & kHeapObjectTagMask) == 0)
+
+#define ASSERT_NOT_NULL(p)  ASSERT_NE(NULL, p)
+
+#endif  // V8_CHECKS_H_
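
STATIC_CHECK above turns a compile-time boolean into a type: StaticAssertion<true> is defined, StaticAssertion<false> is only declared, so taking sizeof of the false case fails to compile. A condensed sketch of the mechanism using a hypothetical MY_STATIC_CHECK macro (pre-C++11 style; static_assert supersedes it today):

// Only the true specialization is complete; sizeof on the false case
// is a compile error, which is the whole point.
template <bool> class Assertion;            // declared only
template <> class Assertion<true> {};       // defined

template <int> class AssertionHelper {};    // absorbs the sizeof result

#define MY_JOIN_2(a, b) a##b
#define MY_JOIN(a, b) MY_JOIN_2(a, b)
#define MY_STATIC_CHECK(cond) \
  typedef AssertionHelper<sizeof(Assertion<static_cast<bool>(cond)>)> \
      MY_JOIN(StaticCheckTypedef_, __LINE__)

MY_STATIC_CHECK(sizeof(int) >= 2);     // compiles
// MY_STATIC_CHECK(sizeof(int) == 1);  // would fail: Assertion<false> is incomplete

int main() { return 0; }
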
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
new file mode 100644
index 0000000..9c24c60
--- /dev/null
+++ b/src/code-stubs.cc
@@ -0,0 +1,148 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "code-stubs.h"
+#include "factory.h"
+#include "macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<Code> CodeStub::GetCode() {
+  uint32_t key = GetKey();
+  int index = Heap::code_stubs()->FindEntry(key);
+  if (index == NumberDictionary::kNotFound) {
+    HandleScope scope;
+
+    // Update the static counter each time a new code stub is generated.
+    Counters::code_stubs.Increment();
+
+    // Generate the new code.
+    MacroAssembler masm(NULL, 256);
+
+    // Nested stubs are not allowed for leaves.
+    masm.set_allow_stub_calls(AllowsStubCalls());
+
+    // Generate the code for the stub.
+    masm.set_generating_stub(true);
+    Generate(&masm);
+
+    // Create the code object.
+    CodeDesc desc;
+    masm.GetCode(&desc);
+
+    // Copy the generated code into a heap object, and store the major key.
+    Code::Flags flags = Code::ComputeFlags(Code::STUB, InLoop());
+    Handle<Code> code = Factory::NewCode(desc, NULL, flags, masm.CodeObject());
+    code->set_major_key(MajorKey());
+
+    // Add unresolved entries in the code to the fixup list.
+    Bootstrapper::AddFixup(*code, &masm);
+
+    LOG(CodeCreateEvent(Logger::STUB_TAG, *code, GetName()));
+    Counters::total_stubs_code_size.Increment(code->instruction_size());
+
+#ifdef ENABLE_DISASSEMBLER
+    if (FLAG_print_code_stubs) {
+#ifdef DEBUG
+      Print();
+#endif
+      code->Disassemble(GetName());
+      PrintF("\n");
+    }
+#endif
+
+    // Update the dictionary and the root in Heap.
+    Handle<NumberDictionary> dict =
+        Factory::DictionaryAtNumberPut(
+            Handle<NumberDictionary>(Heap::code_stubs()),
+            key,
+            code);
+    Heap::public_set_code_stubs(*dict);
+    index = Heap::code_stubs()->FindEntry(key);
+  }
+  ASSERT(index != NumberDictionary::kNotFound);
+
+  return Handle<Code>(Code::cast(Heap::code_stubs()->ValueAt(index)));
+}
+
+
+const char* CodeStub::MajorName(CodeStub::Major major_key) {
+  switch (major_key) {
+    case CallFunction:
+      return "CallFunction";
+    case GenericBinaryOp:
+      return "GenericBinaryOp";
+    case SmiOp:
+      return "SmiOp";
+    case Compare:
+      return "Compare";
+    case RecordWrite:
+      return "RecordWrite";
+    case StackCheck:
+      return "StackCheck";
+    case UnarySub:
+      return "UnarySub";
+    case RevertToNumber:
+      return "RevertToNumber";
+    case ToBoolean:
+      return "ToBoolean";
+    case Instanceof:
+      return "Instanceof";
+    case CounterOp:
+      return "CounterOp";
+    case ArgumentsAccess:
+      return "ArgumentsAccess";
+    case Runtime:
+      return "Runtime";
+    case CEntry:
+      return "CEntry";
+    case JSEntry:
+      return "JSEntry";
+    case GetProperty:
+      return "GetProperty";
+    case SetProperty:
+      return "SetProperty";
+    case InvokeBuiltin:
+      return "InvokeBuiltin";
+    case JSExit:
+      return "JSExit";
+    case ConvertToDouble:
+      return "ConvertToDouble";
+    case WriteInt32ToHeapNumber:
+      return "WriteInt32ToHeapNumber";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+} }  // namespace v8::internal
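
CodeStub::GetCode is a generate-on-miss cache: it looks the stub's 32-bit key up in a dictionary, generates and inserts the code only if the key is absent, and then returns whatever the dictionary holds. A simplified sketch of that control flow using an ordinary std::unordered_map in place of V8's heap-allocated NumberDictionary; the names here are illustrative:

#include <cstdint>
#include <string>
#include <unordered_map>

// Stand-in for the generated Code object.
struct StubCode { std::string body; };

static std::unordered_map<uint32_t, StubCode> stub_cache;

static StubCode GenerateStub(uint32_t key) {
  // In V8 this is where the MacroAssembler runs; here we fake it.
  return StubCode{"code for key " + std::to_string(key)};
}

// Return the cached code for `key`, generating it on the first request.
static const StubCode& GetCode(uint32_t key) {
  auto it = stub_cache.find(key);
  if (it == stub_cache.end()) {
    it = stub_cache.emplace(key, GenerateStub(key)).first;
  }
  return it->second;
}

int main() {
  const StubCode& a = GetCode(42);  // generated
  const StubCode& b = GetCode(42);  // served from the cache
  return &a == &b ? 0 : 1;
}
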
diff --git a/src/code-stubs.h b/src/code-stubs.h
new file mode 100644
index 0000000..ae86c20
--- /dev/null
+++ b/src/code-stubs.h
@@ -0,0 +1,117 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CODE_STUBS_H_
+#define V8_CODE_STUBS_H_
+
+namespace v8 {
+namespace internal {
+
+
+// CodeStub is the base class of all stubs.
+class CodeStub BASE_EMBEDDED {
+ public:
+  enum Major {
+    CallFunction,
+    GenericBinaryOp,
+    SmiOp,
+    Compare,
+    RecordWrite,  // Last stub that allows stub calls inside.
+    ConvertToDouble,
+    WriteInt32ToHeapNumber,
+    StackCheck,
+    UnarySub,
+    RevertToNumber,
+    ToBoolean,
+    Instanceof,
+    CounterOp,
+    ArgumentsAccess,
+    Runtime,
+    CEntry,
+    JSEntry,
+    GetProperty,   // ARM only
+    SetProperty,   // ARM only
+    InvokeBuiltin,  // ARM only
+    JSExit,        // ARM only
+    RegExpCEntry,  // ARM only
+    NUMBER_OF_IDS
+  };
+
+  // Retrieve the code for the stub. Generate the code if needed.
+  Handle<Code> GetCode();
+
+  static Major MajorKeyFromKey(uint32_t key) {
+    return static_cast<Major>(MajorKeyBits::decode(key));
+  }
+  static int MinorKeyFromKey(uint32_t key) {
+    return MinorKeyBits::decode(key);
+  }
+  static const char* MajorName(Major major_key);
+
+  virtual ~CodeStub() {}
+
+ protected:
+  static const int kMajorBits = 5;
+  static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
+
+ private:
+  // Generates the assembler code for the stub.
+  virtual void Generate(MacroAssembler* masm) = 0;
+
+  // Returns information for computing the number key.
+  virtual Major MajorKey() = 0;
+  virtual int MinorKey() = 0;
+
+  // The CallFunctionStub needs to override this so it can encode whether a
+  // lazily generated function should be fully optimized or not.
+  virtual InLoopFlag InLoop() { return NOT_IN_LOOP; }
+
+  // Returns a name for logging/debugging purposes.
+  virtual const char* GetName() { return MajorName(MajorKey()); }
+
+#ifdef DEBUG
+  virtual void Print() { PrintF("%s\n", GetName()); }
+#endif
+
+  // Computes the key based on major and minor.
+  uint32_t GetKey() {
+    ASSERT(static_cast<int>(MajorKey()) < NUMBER_OF_IDS);
+    return MinorKeyBits::encode(MinorKey()) |
+           MajorKeyBits::encode(MajorKey());
+  }
+
+  bool AllowsStubCalls() { return MajorKey() <= RecordWrite; }
+
+  class MajorKeyBits: public BitField<uint32_t, 0, kMajorBits> {};
+  class MinorKeyBits: public BitField<uint32_t, kMajorBits, kMinorBits> {};
+
+  friend class BreakPointIterator;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_CODE_STUBS_H_
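
GetKey packs the 5-bit major key and the remaining minor-key bits into a single uint32_t via the BitField helpers. A minimal standalone version of that encode/decode scheme; the BitField template below is a simplified re-creation, not V8's:

#include <cassert>
#include <cstdint>

// Simplified bit-field helper: `size` bits starting at bit `shift`.
template <typename T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1u) << shift;
  static uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << shift) & kMask;
  }
  static T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> shift);
  }
};

static const int kMajorBits = 5;
typedef BitField<int, 0, kMajorBits> MajorKeyBits;                // low 5 bits
typedef BitField<int, kMajorBits, 32 - kMajorBits> MinorKeyBits;  // the rest

int main() {
  uint32_t key = MinorKeyBits::encode(1234) | MajorKeyBits::encode(7);
  assert(MajorKeyBits::decode(key) == 7);
  assert(MinorKeyBits::decode(key) == 1234);
  return 0;
}
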
diff --git a/src/code.h b/src/code.h
new file mode 100644
index 0000000..072344b
--- /dev/null
+++ b/src/code.h
@@ -0,0 +1,68 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CODE_H_
+#define V8_CODE_H_
+
+namespace v8 {
+namespace internal {
+
+
+// Wrapper class for passing expected and actual parameter counts as
+// either registers or immediate values. Used to make sure that the
+// caller provides exactly the expected number of parameters to the
+// callee.
+class ParameterCount BASE_EMBEDDED {
+ public:
+  explicit ParameterCount(Register reg)
+      : reg_(reg), immediate_(0) { }
+  explicit ParameterCount(int immediate)
+      : reg_(no_reg), immediate_(immediate) { }
+
+  bool is_reg() const { return !reg_.is(no_reg); }
+  bool is_immediate() const { return !is_reg(); }
+
+  Register reg() const {
+    ASSERT(is_reg());
+    return reg_;
+  }
+  int immediate() const {
+    ASSERT(is_immediate());
+    return immediate_;
+  }
+
+ private:
+  const Register reg_;
+  const int immediate_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ParameterCount);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_CODE_H_
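
ParameterCount is a small either/or wrapper: it carries a parameter count either as a register or as an immediate, never both, and asserts that the matching accessor is used. A sketch of the same pattern with a plain struct standing in for V8's Register type; everything here is illustrative:

#include <cassert>

// Stand-in for V8's Register; -1 plays the role of no_reg.
struct Reg { int code; };
static const Reg kNoReg = {-1};

class Count {
 public:
  explicit Count(Reg reg) : reg_(reg), immediate_(0) {}
  explicit Count(int immediate) : reg_(kNoReg), immediate_(immediate) {}

  bool is_reg() const { return reg_.code != kNoReg.code; }
  bool is_immediate() const { return !is_reg(); }

  Reg reg() const { assert(is_reg()); return reg_; }
  int immediate() const { assert(is_immediate()); return immediate_; }

 private:
  Reg reg_;
  int immediate_;
};

int main() {
  Count expected(2);     // known at compile time: immediate
  Count actual(Reg{3});  // only known at run time: in a register
  assert(expected.is_immediate() && expected.immediate() == 2);
  assert(actual.is_reg() && actual.reg().code == 3);
  return 0;
}
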
diff --git a/src/codegen-inl.h b/src/codegen-inl.h
new file mode 100644
index 0000000..bee237d
--- /dev/null
+++ b/src/codegen-inl.h
@@ -0,0 +1,88 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_CODEGEN_INL_H_
+#define V8_CODEGEN_INL_H_
+
+#include "codegen.h"
+#include "register-allocator-inl.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/codegen-ia32-inl.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/codegen-x64-inl.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/codegen-arm-inl.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// -----------------------------------------------------------------------------
+// Support for "structured" code comments.
+//
+// By selecting matching brackets in disassembler output,
+// code segments can be identified more easily.
+
+#ifdef DEBUG
+
+class Comment BASE_EMBEDDED {
+ public:
+  Comment(MacroAssembler* masm, const char* msg) : masm_(masm), msg_(msg) {
+    __ RecordComment(msg);
+  }
+
+  ~Comment() {
+    if (msg_[0] == '[') __ RecordComment("]");
+  }
+
+ private:
+  MacroAssembler* masm_;
+  const char* msg_;
+};
+
+#else
+
+class Comment BASE_EMBEDDED {
+ public:
+  Comment(MacroAssembler*, const char*)  {}
+};
+
+#endif  // DEBUG
+
+#undef __
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_CODEGEN_INL_H_
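
The Comment class above is a small RAII helper: constructing it records the comment, and if the text opens with '[' the destructor records a matching ']' so nested code regions bracket correctly in the disassembly. A sketch of the same idea against a dummy assembler, purely illustrative:

#include <cstdio>

// Dummy stand-in for MacroAssembler::RecordComment.
struct Assembler {
  void RecordComment(const char* msg) { std::printf("%s\n", msg); }
};

// Emits the comment on entry; if it starts with '[', emits "]" on exit.
class ScopedComment {
 public:
  ScopedComment(Assembler* masm, const char* msg) : masm_(masm), msg_(msg) {
    masm_->RecordComment(msg);
  }
  ~ScopedComment() {
    if (msg_[0] == '[') masm_->RecordComment("]");
  }

 private:
  Assembler* masm_;
  const char* msg_;
};

int main() {
  Assembler masm;
  {
    ScopedComment outer(&masm, "[ Allocate locals");
    {
      ScopedComment inner(&masm, "[ Zero-fill");
    }  // prints "]"
  }    // prints "]"
  return 0;
}
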
diff --git a/src/codegen.cc b/src/codegen.cc
new file mode 100644
index 0000000..a18fa0f
--- /dev/null
+++ b/src/codegen.cc
@@ -0,0 +1,524 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "debug.h"
+#include "oprofile-agent.h"
+#include "prettyprinter.h"
+#include "register-allocator-inl.h"
+#include "rewriter.h"
+#include "runtime.h"
+#include "scopeinfo.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+CodeGenerator* CodeGeneratorScope::top_ = NULL;
+
+
+DeferredCode::DeferredCode()
+    : masm_(CodeGeneratorScope::Current()->masm()),
+      statement_position_(masm_->current_statement_position()),
+      position_(masm_->current_position()) {
+  ASSERT(statement_position_ != RelocInfo::kNoPosition);
+  ASSERT(position_ != RelocInfo::kNoPosition);
+
+  CodeGeneratorScope::Current()->AddDeferred(this);
+#ifdef DEBUG
+  comment_ = "";
+#endif
+
+  // Copy the register locations from the code generator's frame.
+  // These are the registers that will be spilled on entry to the
+  // deferred code and restored on exit.
+  VirtualFrame* frame = CodeGeneratorScope::Current()->frame();
+  int sp_offset = frame->fp_relative(frame->stack_pointer_);
+  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+    int loc = frame->register_location(i);
+    if (loc == VirtualFrame::kIllegalIndex) {
+      registers_[i] = kIgnore;
+    } else if (frame->elements_[loc].is_synced()) {
+      // Needs to be restored on exit but not saved on entry.
+      registers_[i] = frame->fp_relative(loc) | kSyncedFlag;
+    } else {
+      int offset = frame->fp_relative(loc);
+      registers_[i] = (offset < sp_offset) ? kPush : offset;
+    }
+  }
+}
+
+
+void CodeGenerator::ProcessDeferred() {
+  while (!deferred_.is_empty()) {
+    DeferredCode* code = deferred_.RemoveLast();
+    ASSERT(masm_ == code->masm());
+    // Record position of deferred code stub.
+    masm_->RecordStatementPosition(code->statement_position());
+    if (code->position() != RelocInfo::kNoPosition) {
+      masm_->RecordPosition(code->position());
+    }
+    // Generate the code.
+    Comment cmnt(masm_, code->comment());
+    masm_->bind(code->entry_label());
+    code->SaveRegisters();
+    code->Generate();
+    code->RestoreRegisters();
+    masm_->jmp(code->exit_label());
+  }
+}
+
+
+void CodeGenerator::SetFrame(VirtualFrame* new_frame,
+                             RegisterFile* non_frame_registers) {
+  RegisterFile saved_counts;
+  if (has_valid_frame()) {
+    frame_->DetachFromCodeGenerator();
+    // The remaining register reference counts are the non-frame ones.
+    allocator_->SaveTo(&saved_counts);
+  }
+
+  if (new_frame != NULL) {
+    // Restore the non-frame register references that go with the new frame.
+    allocator_->RestoreFrom(non_frame_registers);
+    new_frame->AttachToCodeGenerator();
+  }
+
+  frame_ = new_frame;
+  saved_counts.CopyTo(non_frame_registers);
+}
+
+
+void CodeGenerator::DeleteFrame() {
+  if (has_valid_frame()) {
+    frame_->DetachFromCodeGenerator();
+    frame_ = NULL;
+  }
+}
+
+
+// Generate the code. Takes a function literal, generates code for it, and
+// assembles all the pieces into a Code object. This function should only be
+// called by the compiler.cc code.
+Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* flit,
+                                     Handle<Script> script,
+                                     bool is_eval) {
+#ifdef ENABLE_DISASSEMBLER
+  bool print_code = Bootstrapper::IsActive()
+      ? FLAG_print_builtin_code
+      : FLAG_print_code;
+#endif
+
+#ifdef DEBUG
+  bool print_source = false;
+  bool print_ast = false;
+  const char* ftype;
+
+  if (Bootstrapper::IsActive()) {
+    print_source = FLAG_print_builtin_source;
+    print_ast = FLAG_print_builtin_ast;
+    ftype = "builtin";
+  } else {
+    print_source = FLAG_print_source;
+    print_ast = FLAG_print_ast;
+    ftype = "user-defined";
+  }
+
+  if (FLAG_trace_codegen || print_source || print_ast) {
+    PrintF("*** Generate code for %s function: ", ftype);
+    flit->name()->ShortPrint();
+    PrintF(" ***\n");
+  }
+
+  if (print_source) {
+    PrintF("--- Source from AST ---\n%s\n", PrettyPrinter().PrintProgram(flit));
+  }
+
+  if (print_ast) {
+    PrintF("--- AST ---\n%s\n", AstPrinter().PrintProgram(flit));
+  }
+#endif  // DEBUG
+
+  // Generate code.
+  const int initial_buffer_size = 4 * KB;
+  CodeGenerator cgen(initial_buffer_size, script, is_eval);
+  CodeGeneratorScope scope(&cgen);
+  cgen.GenCode(flit);
+  if (cgen.HasStackOverflow()) {
+    ASSERT(!Top::has_pending_exception());
+    return Handle<Code>::null();
+  }
+
+  // Allocate and install the code.  Time the rest of this function as
+  // code creation.
+  HistogramTimerScope timer(&Counters::code_creation);
+  CodeDesc desc;
+  cgen.masm()->GetCode(&desc);
+  ZoneScopeInfo sinfo(flit->scope());
+  InLoopFlag in_loop = (cgen.loop_nesting() != 0) ? IN_LOOP : NOT_IN_LOOP;
+  Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, in_loop);
+  Handle<Code> code = Factory::NewCode(desc,
+                                       &sinfo,
+                                       flags,
+                                       cgen.masm()->CodeObject());
+
+  // Add unresolved entries in the code to the fixup list.
+  Bootstrapper::AddFixup(*code, cgen.masm());
+
+#ifdef ENABLE_DISASSEMBLER
+  if (print_code) {
+    // Print the source code if available.
+    if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+      PrintF("--- Raw source ---\n");
+      StringInputBuffer stream(String::cast(script->source()));
+      stream.Seek(flit->start_position());
+      // flit->end_position() points to the last character in the stream. We
+      // need to compensate by adding one to calculate the length.
+      int source_len = flit->end_position() - flit->start_position() + 1;
+      for (int i = 0; i < source_len; i++) {
+        if (stream.has_more()) PrintF("%c", stream.GetNext());
+      }
+      PrintF("\n\n");
+    }
+    PrintF("--- Code ---\n");
+    code->Disassemble(*flit->name()->ToCString());
+  }
+#endif  // ENABLE_DISASSEMBLER
+
+  if (!code.is_null()) {
+    Counters::total_compiled_code_size.Increment(code->instruction_size());
+  }
+
+  return code;
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+bool CodeGenerator::ShouldGenerateLog(Expression* type) {
+  ASSERT(type != NULL);
+  if (!Logger::is_logging()) return false;
+  Handle<String> name = Handle<String>::cast(type->AsLiteral()->handle());
+  if (FLAG_log_regexp) {
+    static Vector<const char> kRegexp = CStrVector("regexp");
+    if (name->IsEqualTo(kRegexp))
+      return true;
+  }
+  return false;
+}
+
+#endif
+
+
+// Sets the function info on a function.
+// The start_position points to the first '(' character after the function name
+// in the full script source. When counting characters in the script source,
+// the first character is number 0 (not 1).
+void CodeGenerator::SetFunctionInfo(Handle<JSFunction> fun,
+                                    FunctionLiteral* lit,
+                                    bool is_toplevel,
+                                    Handle<Script> script) {
+  fun->shared()->set_length(lit->num_parameters());
+  fun->shared()->set_formal_parameter_count(lit->num_parameters());
+  fun->shared()->set_script(*script);
+  fun->shared()->set_function_token_position(lit->function_token_position());
+  fun->shared()->set_start_position(lit->start_position());
+  fun->shared()->set_end_position(lit->end_position());
+  fun->shared()->set_is_expression(lit->is_expression());
+  fun->shared()->set_is_toplevel(is_toplevel);
+  fun->shared()->set_inferred_name(*lit->inferred_name());
+  fun->shared()->SetThisPropertyAssignmentsInfo(
+      lit->has_only_this_property_assignments(),
+      lit->has_only_simple_this_property_assignments(),
+      *lit->this_property_assignments());
+}
+
+
+static Handle<Code> ComputeLazyCompile(int argc) {
+  CALL_HEAP_FUNCTION(StubCache::ComputeLazyCompile(argc), Code);
+}
+
+
+Handle<JSFunction> CodeGenerator::BuildBoilerplate(FunctionLiteral* node) {
+#ifdef DEBUG
+  // We should not try to compile the same function literal more than
+  // once.
+  node->mark_as_compiled();
+#endif
+
+  // Determine if the function can be lazily compiled. This is
+  // necessary to allow some of our builtin JS files to be lazily
+  // compiled. These builtins cannot be handled lazily by the parser,
+  // since we have to know if a function uses the special natives
+  // syntax, which is something the parser records.
+  bool allow_lazy = node->AllowsLazyCompilation();
+
+  // Generate code
+  Handle<Code> code;
+  if (FLAG_lazy && allow_lazy) {
+    code = ComputeLazyCompile(node->num_parameters());
+  } else {
+    // The bodies of function literals have not yet been visited by
+    // the AST optimizer/analyzer.
+    if (!Rewriter::Optimize(node)) {
+      return Handle<JSFunction>::null();
+    }
+
+    code = MakeCode(node, script_, false);
+
+    // Check for stack-overflow exception.
+    if (code.is_null()) {
+      SetStackOverflow();
+      return Handle<JSFunction>::null();
+    }
+
+    // Function compilation complete.
+    LOG(CodeCreateEvent(Logger::FUNCTION_TAG, *code, *node->name()));
+
+#ifdef ENABLE_OPROFILE_AGENT
+    OProfileAgent::CreateNativeCodeRegion(*node->name(),
+                                          code->instruction_start(),
+                                          code->instruction_size());
+#endif
+  }
+
+  // Create a boilerplate function.
+  Handle<JSFunction> function =
+      Factory::NewFunctionBoilerplate(node->name(),
+                                      node->materialized_literal_count(),
+                                      node->contains_array_literal(),
+                                      code);
+  CodeGenerator::SetFunctionInfo(function, node, false, script_);
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Notify debugger that a new function has been added.
+  Debugger::OnNewFunction(function);
+#endif
+
+  // Set the expected number of properties for instances and return
+  // the resulting function.
+  SetExpectedNofPropertiesFromEstimate(function,
+                                       node->expected_property_count());
+  return function;
+}
+
+
+Handle<Code> CodeGenerator::ComputeCallInitialize(
+    int argc,
+    InLoopFlag in_loop) {
+  if (in_loop == IN_LOOP) {
+    // Force the creation of the corresponding stub outside loops,
+    // because it may be used when clearing the ICs later - it is
+    // possible for a series of IC transitions to lose the in-loop
+    // information, and the IC clearing code cannot generate the stub
+    // it needs, so we make sure it has been generated in advance.
+    ComputeCallInitialize(argc, NOT_IN_LOOP);
+  }
+  CALL_HEAP_FUNCTION(StubCache::ComputeCallInitialize(argc, in_loop), Code);
+}
+
+
+void CodeGenerator::ProcessDeclarations(ZoneList<Declaration*>* declarations) {
+  int length = declarations->length();
+  int globals = 0;
+  for (int i = 0; i < length; i++) {
+    Declaration* node = declarations->at(i);
+    Variable* var = node->proxy()->var();
+    Slot* slot = var->slot();
+
+    // If it was not possible to allocate the variable at compile
+    // time, we need to "declare" it at runtime to make sure it
+    // actually exists in the local context.
+    if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
+      VisitDeclaration(node);
+    } else {
+      // Count global variables and functions for later processing
+      globals++;
+    }
+  }
+
+  // Return early if there are no declared global functions or variables.
+  if (globals == 0) return;
+
+  // Compute array of global variable and function declarations.
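+  // The array stores pairs: each variable name is followed by its initial
+  // value - the compiled function boilerplate, the hole for const variables,
+  // or undefined.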
+  Handle<FixedArray> array = Factory::NewFixedArray(2 * globals, TENURED);
+  for (int j = 0, i = 0; i < length; i++) {
+    Declaration* node = declarations->at(i);
+    Variable* var = node->proxy()->var();
+    Slot* slot = var->slot();
+
+    if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
+      // Skip - already processed.
+    } else {
+      array->set(j++, *(var->name()));
+      if (node->fun() == NULL) {
+        if (var->mode() == Variable::CONST) {
+          // If this is a const declaration, use the hole value.
+          array->set_the_hole(j++);
+        } else {
+          array->set_undefined(j++);
+        }
+      } else {
+        Handle<JSFunction> function = BuildBoilerplate(node->fun());
+        // Check for stack-overflow exception.
+        if (HasStackOverflow()) return;
+        array->set(j++, *function);
+      }
+    }
+  }
+
+  // Invoke the platform-dependent code generator to do the actual
+  // declaration of the global variables and functions.
+  DeclareGlobals(array);
+}
+
+
+
+// Special cases: These 'runtime calls' manipulate the current
+// frame and are only used in one or two places, so we generate them
+// inline instead of generating calls to them.  They are used
+// for implementing Function.prototype.call() and
+// Function.prototype.apply().
+CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = {
+  {&CodeGenerator::GenerateIsSmi, "_IsSmi"},
+  {&CodeGenerator::GenerateIsNonNegativeSmi, "_IsNonNegativeSmi"},
+  {&CodeGenerator::GenerateIsArray, "_IsArray"},
+  {&CodeGenerator::GenerateIsConstructCall, "_IsConstructCall"},
+  {&CodeGenerator::GenerateArgumentsLength, "_ArgumentsLength"},
+  {&CodeGenerator::GenerateArgumentsAccess, "_Arguments"},
+  {&CodeGenerator::GenerateClassOf, "_ClassOf"},
+  {&CodeGenerator::GenerateValueOf, "_ValueOf"},
+  {&CodeGenerator::GenerateSetValueOf, "_SetValueOf"},
+  {&CodeGenerator::GenerateFastCharCodeAt, "_FastCharCodeAt"},
+  {&CodeGenerator::GenerateObjectEquals, "_ObjectEquals"},
+  {&CodeGenerator::GenerateLog, "_Log"},
+  {&CodeGenerator::GenerateRandomPositiveSmi, "_RandomPositiveSmi"},
+  {&CodeGenerator::GenerateMathSin, "_Math_sin"},
+  {&CodeGenerator::GenerateMathCos, "_Math_cos"}
+};
+
+
+CodeGenerator::InlineRuntimeLUT* CodeGenerator::FindInlineRuntimeLUT(
+    Handle<String> name) {
+  const int entries_count =
+      sizeof(kInlineRuntimeLUT) / sizeof(InlineRuntimeLUT);
+  for (int i = 0; i < entries_count; i++) {
+    InlineRuntimeLUT* entry = &kInlineRuntimeLUT[i];
+    if (name->IsEqualTo(CStrVector(entry->name))) {
+      return entry;
+    }
+  }
+  return NULL;
+}
+
+
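+// Runtime calls whose names start with '_' refer to entries in the inline
+// runtime table above; for those, code is generated inline instead of
+// emitting a call to the runtime system.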
+bool CodeGenerator::CheckForInlineRuntimeCall(CallRuntime* node) {
+  ZoneList<Expression*>* args = node->arguments();
+  Handle<String> name = node->name();
+  if (name->length() > 0 && name->Get(0) == '_') {
+    InlineRuntimeLUT* entry = FindInlineRuntimeLUT(name);
+    if (entry != NULL) {
+      ((*this).*(entry->method))(args);
+      return true;
+    }
+  }
+  return false;
+}
+
+
+bool CodeGenerator::PatchInlineRuntimeEntry(Handle<String> name,
+    const CodeGenerator::InlineRuntimeLUT& new_entry,
+    CodeGenerator::InlineRuntimeLUT* old_entry) {
+  InlineRuntimeLUT* entry = FindInlineRuntimeLUT(name);
+  if (entry == NULL) return false;
+  if (old_entry != NULL) {
+    old_entry->name = entry->name;
+    old_entry->method = entry->method;
+  }
+  entry->name = new_entry.name;
+  entry->method = new_entry.method;
+  return true;
+}
+
+
+static inline void RecordPositions(CodeGenerator* cgen, int pos) {
+  if (pos != RelocInfo::kNoPosition) {
+    cgen->masm()->RecordStatementPosition(pos);
+    cgen->masm()->RecordPosition(pos);
+  }
+}
+
+
+void CodeGenerator::CodeForFunctionPosition(FunctionLiteral* fun) {
+  if (FLAG_debug_info) RecordPositions(this, fun->start_position());
+}
+
+
+void CodeGenerator::CodeForReturnPosition(FunctionLiteral* fun) {
+  if (FLAG_debug_info) RecordPositions(this, fun->end_position());
+}
+
+
+void CodeGenerator::CodeForStatementPosition(Statement* stmt) {
+  if (FLAG_debug_info) RecordPositions(this, stmt->statement_pos());
+}
+
+
+void CodeGenerator::CodeForSourcePosition(int pos) {
+  if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
+    masm()->RecordPosition(pos);
+  }
+}
+
+
+const char* RuntimeStub::GetName() {
+  return Runtime::FunctionForId(id_)->stub_name;
+}
+
+
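+// A RuntimeStub simply tail-calls the corresponding runtime function with the
+// recorded number of arguments and expected result size.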
+void RuntimeStub::Generate(MacroAssembler* masm) {
+  Runtime::Function* f = Runtime::FunctionForId(id_);
+  masm->TailCallRuntime(ExternalReference(f),
+                        num_arguments_,
+                        f->result_size);
+}
+
+
+void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
+  switch (type_) {
+    case READ_LENGTH: GenerateReadLength(masm); break;
+    case READ_ELEMENT: GenerateReadElement(masm); break;
+    case NEW_OBJECT: GenerateNewObject(masm); break;
+  }
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/codegen.h b/src/codegen.h
new file mode 100644
index 0000000..d03f4b6
--- /dev/null
+++ b/src/codegen.h
@@ -0,0 +1,395 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CODEGEN_H_
+#define V8_CODEGEN_H_
+
+#include "ast.h"
+#include "code-stubs.h"
+#include "runtime.h"
+
+// Include the declaration of the architecture-defined class CodeGenerator.
+// The contract to the shared code is that the CodeGenerator is a subclass
+// of Visitor and that the following methods are available publicly:
+//   MakeCode
+//   SetFunctionInfo
+//   masm
+//   frame
+//   has_valid_frame
+//   SetFrame
+//   DeleteFrame
+//   allocator
+//   AddDeferred
+//   in_spilled_code
+//   set_in_spilled_code
+//
+// These methods are either used privately by the shared code or implemented as
+// shared code:
+//   CodeGenerator
+//   ~CodeGenerator
+//   ProcessDeferred
+//   GenCode
+//   BuildBoilerplate
+//   ComputeCallInitialize
+//   ComputeCallInitializeInLoop
+//   ProcessDeclarations
+//   DeclareGlobals
+//   FindInlineRuntimeLUT
+//   CheckForInlineRuntimeCall
+//   PatchInlineRuntimeEntry
+//   CodeForFunctionPosition
+//   CodeForReturnPosition
+//   CodeForStatementPosition
+//   CodeForSourcePosition
+
+
+// Mode to overwrite BinaryExpression values.
+enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
+
+// Types of uncatchable exceptions.
+enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION };
+
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/codegen-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/codegen-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/codegen-arm.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+#include "register-allocator.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Code generation can be nested.  Code generation scopes form a stack
+// of active code generators.
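+// Constructing a scope pushes a code generator onto the stack and the
+// destructor pops it again; Current() returns the innermost code generator.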
+class CodeGeneratorScope BASE_EMBEDDED {
+ public:
+  explicit CodeGeneratorScope(CodeGenerator* cgen) {
+    previous_ = top_;
+    top_ = cgen;
+  }
+
+  ~CodeGeneratorScope() {
+    top_ = previous_;
+  }
+
+  static CodeGenerator* Current() {
+    ASSERT(top_ != NULL);
+    return top_;
+  }
+
+ private:
+  static CodeGenerator* top_;
+  CodeGenerator* previous_;
+};
+
+
+// Deferred code objects are small pieces of code that are compiled
+// out of line. They are used to defer the compilation of uncommon
+// paths, thereby avoiding expensive jumps around uncommon code parts.
+class DeferredCode: public ZoneObject {
+ public:
+  DeferredCode();
+  virtual ~DeferredCode() { }
+
+  virtual void Generate() = 0;
+
+  MacroAssembler* masm() { return masm_; }
+
+  int statement_position() const { return statement_position_; }
+  int position() const { return position_; }
+
+  Label* entry_label() { return &entry_label_; }
+  Label* exit_label() { return &exit_label_; }
+
+#ifdef DEBUG
+  void set_comment(const char* comment) { comment_ = comment; }
+  const char* comment() const { return comment_; }
+#else
+  void set_comment(const char* comment) { }
+  const char* comment() const { return ""; }
+#endif
+
+  inline void Jump();
+  inline void Branch(Condition cc);
+  void BindExit() { masm_->bind(&exit_label_); }
+
+  void SaveRegisters();
+  void RestoreRegisters();
+
+ protected:
+  MacroAssembler* masm_;
+
+ private:
+  // Constants indicating special actions.  They should not be multiples
+  // of kPointerSize so they will not collide with valid offsets from
+  // the frame pointer.
+  static const int kIgnore = -1;
+  static const int kPush = 1;
+
+  // This flag is or'ed with a valid offset from the frame pointer, so
+  // it should fit in the low zero bits of a valid offset.
+  static const int kSyncedFlag = 2;
+
+  int statement_position_;
+  int position_;
+
+  Label entry_label_;
+  Label exit_label_;
+
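+  // Per-register information used by SaveRegisters() and RestoreRegisters();
+  // each entry is either one of the special actions above (kIgnore, kPush) or
+  // a frame pointer offset, possibly tagged with kSyncedFlag.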
+  int registers_[RegisterAllocator::kNumRegisters];
+
+#ifdef DEBUG
+  const char* comment_;
+#endif
+  DISALLOW_COPY_AND_ASSIGN(DeferredCode);
+};
+
+
+// RuntimeStub models code stubs calling entry points in the Runtime class.
+class RuntimeStub : public CodeStub {
+ public:
+  explicit RuntimeStub(Runtime::FunctionId id, int num_arguments)
+      : id_(id), num_arguments_(num_arguments) { }
+
+  void Generate(MacroAssembler* masm);
+
+  // Disassembler support.  It is useful to be able to print the name
+  // of the runtime function called through this stub.
+  static const char* GetNameFromMinorKey(int minor_key) {
+    return Runtime::FunctionForId(IdField::decode(minor_key))->stub_name;
+  }
+
+ private:
+  Runtime::FunctionId id_;
+  int num_arguments_;
+
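+  // The stub's minor key packs the argument count into the low 16 bits and
+  // the runtime function id into the remaining minor key bits.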
+  class ArgumentField: public BitField<int, 0, 16> {};
+  class IdField: public BitField<Runtime::FunctionId, 16, kMinorBits - 16> {};
+
+  Major MajorKey() { return Runtime; }
+  int MinorKey() {
+    return IdField::encode(id_) | ArgumentField::encode(num_arguments_);
+  }
+
+  const char* GetName();
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("RuntimeStub (id %s)\n", Runtime::FunctionForId(id_)->name);
+  }
+#endif
+};
+
+
+class StackCheckStub : public CodeStub {
+ public:
+  StackCheckStub() { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+
+  const char* GetName() { return "StackCheckStub"; }
+
+  Major MajorKey() { return StackCheck; }
+  int MinorKey() { return 0; }
+};
+
+
+class InstanceofStub: public CodeStub {
+ public:
+  InstanceofStub() { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Major MajorKey() { return Instanceof; }
+  int MinorKey() { return 0; }
+};
+
+
+class UnarySubStub : public CodeStub {
+ public:
+  explicit UnarySubStub(bool overwrite)
+      : overwrite_(overwrite) { }
+
+ private:
+  bool overwrite_;
+  Major MajorKey() { return UnarySub; }
+  int MinorKey() { return overwrite_ ? 1 : 0; }
+  void Generate(MacroAssembler* masm);
+
+  const char* GetName() { return "UnarySubStub"; }
+};
+
+
+class CompareStub: public CodeStub {
+ public:
+  CompareStub(Condition cc, bool strict) : cc_(cc), strict_(strict) { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Condition cc_;
+  bool strict_;
+
+  Major MajorKey() { return Compare; }
+
+  int MinorKey();
+
+  // Branch to the label if the given object isn't a symbol.
+  void BranchIfNonSymbol(MacroAssembler* masm,
+                         Label* label,
+                         Register object,
+                         Register scratch);
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("CompareStub (cc %d), (strict %s)\n",
+           static_cast<int>(cc_),
+           strict_ ? "true" : "false");
+  }
+#endif
+};
+
+
+class CEntryStub : public CodeStub {
+ public:
+  explicit CEntryStub(int result_size) : result_size_(result_size) { }
+
+  void Generate(MacroAssembler* masm) { GenerateBody(masm, false); }
+
+ protected:
+  void GenerateBody(MacroAssembler* masm, bool is_debug_break);
+  void GenerateCore(MacroAssembler* masm,
+                    Label* throw_normal_exception,
+                    Label* throw_termination_exception,
+                    Label* throw_out_of_memory_exception,
+                    StackFrame::Type frame_type,
+                    bool do_gc,
+                    bool always_allocate_scope);
+  void GenerateThrowTOS(MacroAssembler* masm);
+  void GenerateThrowUncatchable(MacroAssembler* masm,
+                                UncatchableExceptionType type);
+ private:
+  // Number of pointers/values returned.
+  int result_size_;
+
+  Major MajorKey() { return CEntry; }
+  // The minor key must differ if different result_size_ values mean that
+  // different code is generated.
+  int MinorKey();
+
+  const char* GetName() { return "CEntryStub"; }
+};
+
+
+class CEntryDebugBreakStub : public CEntryStub {
+ public:
+  CEntryDebugBreakStub() : CEntryStub(1) { }
+
+  void Generate(MacroAssembler* masm) { GenerateBody(masm, true); }
+
+ private:
+  int MinorKey() { return 1; }
+
+  const char* GetName() { return "CEntryDebugBreakStub"; }
+};
+
+
+class JSEntryStub : public CodeStub {
+ public:
+  JSEntryStub() { }
+
+  void Generate(MacroAssembler* masm) { GenerateBody(masm, false); }
+
+ protected:
+  void GenerateBody(MacroAssembler* masm, bool is_construct);
+
+ private:
+  Major MajorKey() { return JSEntry; }
+  int MinorKey() { return 0; }
+
+  const char* GetName() { return "JSEntryStub"; }
+};
+
+
+class JSConstructEntryStub : public JSEntryStub {
+ public:
+  JSConstructEntryStub() { }
+
+  void Generate(MacroAssembler* masm) { GenerateBody(masm, true); }
+
+ private:
+  int MinorKey() { return 1; }
+
+  const char* GetName() { return "JSConstructEntryStub"; }
+};
+
+
+class ArgumentsAccessStub: public CodeStub {
+ public:
+  enum Type {
+    READ_LENGTH,
+    READ_ELEMENT,
+    NEW_OBJECT
+  };
+
+  explicit ArgumentsAccessStub(Type type) : type_(type) { }
+
+ private:
+  Type type_;
+
+  Major MajorKey() { return ArgumentsAccess; }
+  int MinorKey() { return type_; }
+
+  void Generate(MacroAssembler* masm);
+  void GenerateReadLength(MacroAssembler* masm);
+  void GenerateReadElement(MacroAssembler* masm);
+  void GenerateNewObject(MacroAssembler* masm);
+
+  const char* GetName() { return "ArgumentsAccessStub"; }
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("ArgumentsAccessStub (type %d)\n", type_);
+  }
+#endif
+};
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CODEGEN_H_
diff --git a/src/compilation-cache.cc b/src/compilation-cache.cc
new file mode 100644
index 0000000..8dd9ec1
--- /dev/null
+++ b/src/compilation-cache.cc
@@ -0,0 +1,488 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "compilation-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+// The number of sub-caches covering the different types to cache.
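+// (One each for scripts, global evals, contextual evals and regular
+// expressions; see the statically allocated sub-caches below.)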
+static const int kSubCacheCount = 4;
+
+// The number of generations for each sub-cache.
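+// Android builds keep only a single generation per sub-cache, presumably to
+// reduce the memory footprint of the caches.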
+#if defined(ANDROID)
+static const int kScriptGenerations = 1;
+static const int kEvalGlobalGenerations = 1;
+static const int kEvalContextualGenerations = 1;
+static const int kRegExpGenerations = 1;
+#else
+static const int kScriptGenerations = 5;
+static const int kEvalGlobalGenerations = 2;
+static const int kEvalContextualGenerations = 2;
+static const int kRegExpGenerations = 2;
+#endif
+
+// Initial size of each compilation cache table allocated.
+static const int kInitialCacheSize = 64;
+
+// The compilation cache consists of several generational sub-caches which use
+// this class as a base class. A sub-cache contains a compilation cache table
+// for each generation of the sub-cache. Since the same source code string has
+// different compiled code for scripts and evals, we use separate internal
+// sub-caches to avoid getting the wrong kind of result when looking up.
+class CompilationSubCache {
+ public:
+  explicit CompilationSubCache(int generations): generations_(generations) {
+    tables_ = NewArray<Object*>(generations);
+  }
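+  // Note: the slots in tables_ are initialized (set to undefined) via Clear(),
+  // which is also used to set up the cache at startup.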
+
+  ~CompilationSubCache() { DeleteArray(tables_); }
+
+  // Get the compilation cache tables for a specific generation.
+  Handle<CompilationCacheTable> GetTable(int generation);
+
+  // Age the sub-cache by evicting the oldest generation and creating a new
+  // young generation.
+  void Age();
+
+  // GC support.
+  void Iterate(ObjectVisitor* v);
+
+  // Clear this sub-cache, evicting all its content.
+  void Clear();
+
+  // Number of generations in this sub-cache.
+  inline int generations() { return generations_; }
+
+ private:
+  int generations_;  // Number of generations.
+  Object** tables_;  // Compilation cache tables - one for each generation.
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationSubCache);
+};
+
+
+// Sub-cache for scripts.
+class CompilationCacheScript : public CompilationSubCache {
+ public:
+  explicit CompilationCacheScript(int generations)
+      : CompilationSubCache(generations) { }
+
+  Handle<JSFunction> Lookup(Handle<String> source,
+                            Handle<Object> name,
+                            int line_offset,
+                            int column_offset);
+  void Put(Handle<String> source, Handle<JSFunction> boilerplate);
+
+ private:
+  bool HasOrigin(Handle<JSFunction> boilerplate,
+                 Handle<Object> name,
+                 int line_offset,
+                 int column_offset);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheScript);
+};
+
+
+// Sub-cache for eval scripts.
+class CompilationCacheEval: public CompilationSubCache {
+ public:
+  explicit CompilationCacheEval(int generations)
+      : CompilationSubCache(generations) { }
+
+  Handle<JSFunction> Lookup(Handle<String> source, Handle<Context> context);
+
+  void Put(Handle<String> source,
+           Handle<Context> context,
+           Handle<JSFunction> boilerplate);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
+};
+
+
+// Sub-cache for regular expressions.
+class CompilationCacheRegExp: public CompilationSubCache {
+ public:
+  explicit CompilationCacheRegExp(int generations)
+      : CompilationSubCache(generations) { }
+
+  Handle<FixedArray> Lookup(Handle<String> source, JSRegExp::Flags flags);
+
+  void Put(Handle<String> source,
+           JSRegExp::Flags flags,
+           Handle<FixedArray> data);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp);
+};
+
+
+// Statically allocate all the sub-caches.
+static CompilationCacheScript script(kScriptGenerations);
+static CompilationCacheEval eval_global(kEvalGlobalGenerations);
+static CompilationCacheEval eval_contextual(kEvalContextualGenerations);
+static CompilationCacheRegExp reg_exp(kRegExpGenerations);
+static CompilationSubCache* subcaches[kSubCacheCount] =
+    {&script, &eval_global, &eval_contextual, &reg_exp};
+
+
+// Current enable state of the compilation cache.
+static bool enabled = true;
+static inline bool IsEnabled() {
+  return FLAG_compilation_cache && enabled;
+}
+
+
+static Handle<CompilationCacheTable> AllocateTable(int size) {
+  CALL_HEAP_FUNCTION(CompilationCacheTable::Allocate(size),
+                     CompilationCacheTable);
+}
+
+
+Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
+  ASSERT(generation < generations_);
+  Handle<CompilationCacheTable> result;
+  if (tables_[generation]->IsUndefined()) {
+    result = AllocateTable(kInitialCacheSize);
+    tables_[generation] = *result;
+  } else {
+    CompilationCacheTable* table =
+        CompilationCacheTable::cast(tables_[generation]);
+    result = Handle<CompilationCacheTable>(table);
+  }
+  return result;
+}
+
+
+void CompilationSubCache::Age() {
+  // Age the generations, implicitly killing off the oldest.
+  for (int i = generations_ - 1; i > 0; i--) {
+    tables_[i] = tables_[i - 1];
+  }
+
+  // Set the first generation as unborn.
+  tables_[0] = Heap::undefined_value();
+}
+
+
+void CompilationSubCache::Iterate(ObjectVisitor* v) {
+  v->VisitPointers(&tables_[0], &tables_[generations_]);
+}
+
+
+void CompilationSubCache::Clear() {
+  for (int i = 0; i < generations_; i++) {
+    tables_[i] = Heap::undefined_value();
+  }
+}
+
+
+// We only re-use a cached function for some script source code if the
+// script originates from the same place. This is to avoid issues
+// when reporting errors, etc.
+bool CompilationCacheScript::HasOrigin(Handle<JSFunction> boilerplate,
+                                       Handle<Object> name,
+                                       int line_offset,
+                                       int column_offset) {
+  Handle<Script> script =
+      Handle<Script>(Script::cast(boilerplate->shared()->script()));
+  // If the script name isn't set, the boilerplate script should have
+  // an undefined name to have the same origin.
+  if (name.is_null()) {
+    return script->name()->IsUndefined();
+  }
+  // Do the fast bailout checks first.
+  if (line_offset != script->line_offset()->value()) return false;
+  if (column_offset != script->column_offset()->value()) return false;
+  // Check that both names are strings. If not, no match.
+  if (!name->IsString() || !script->name()->IsString()) return false;
+  // Compare the two name strings for equality.
+  return String::cast(*name)->Equals(String::cast(script->name()));
+}
+
+
+// TODO(245): Need to allow identical code from different contexts to
+// be cached in the same script generation. Currently the first use
+// will be cached, but subsequent code from different source / line
+// won't.
+Handle<JSFunction> CompilationCacheScript::Lookup(Handle<String> source,
+                                                  Handle<Object> name,
+                                                  int line_offset,
+                                                  int column_offset) {
+  Object* result = NULL;
+  int generation;
+
+  // Probe the script generation tables. Make sure not to leak handles
+  // into the caller's handle scope.
+  { HandleScope scope;
+    for (generation = 0; generation < generations(); generation++) {
+      Handle<CompilationCacheTable> table = GetTable(generation);
+      Handle<Object> probe(table->Lookup(*source));
+      if (probe->IsJSFunction()) {
+        Handle<JSFunction> boilerplate = Handle<JSFunction>::cast(probe);
+        // Break when we've found a suitable boilerplate function that
+        // matches the origin.
+        if (HasOrigin(boilerplate, name, line_offset, column_offset)) {
+          result = *boilerplate;
+          break;
+        }
+      }
+    }
+  }
+
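+  // The histogram is created lazily on the first lookup and records the
+  // generation in which each lookup is resolved.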
+  static void* script_histogram = StatsTable::CreateHistogram(
+      "V8.ScriptCache",
+      0,
+      kScriptGenerations,
+      kScriptGenerations + 1);
+
+  if (script_histogram != NULL) {
+    // A sample at level kScriptGenerations is equivalent to a cache miss.
+    StatsTable::AddHistogramSample(script_histogram, generation);
+  }
+
+  // Once outside the manacles of the handle scope, we need to recheck
+  // to see if we actually found a cached script. If so, we return a
+  // handle created in the caller's handle scope.
+  if (result != NULL) {
+    Handle<JSFunction> boilerplate(JSFunction::cast(result));
+    ASSERT(HasOrigin(boilerplate, name, line_offset, column_offset));
+    // If the script was found in a later generation, we promote it to
+    // the first generation to let it survive longer in the cache.
+    if (generation != 0) Put(source, boilerplate);
+    Counters::compilation_cache_hits.Increment();
+    return boilerplate;
+  } else {
+    Counters::compilation_cache_misses.Increment();
+    return Handle<JSFunction>::null();
+  }
+}
+
+
+void CompilationCacheScript::Put(Handle<String> source,
+                                 Handle<JSFunction> boilerplate) {
+  HandleScope scope;
+  ASSERT(boilerplate->IsBoilerplate());
+  Handle<CompilationCacheTable> table = GetTable(0);
+  CALL_HEAP_FUNCTION_VOID(table->Put(*source, *boilerplate));
+}
+
+
+Handle<JSFunction> CompilationCacheEval::Lookup(Handle<String> source,
+                                                Handle<Context> context) {
+  // Make sure not to leak the table into the surrounding handle
+  // scope. Otherwise, we risk keeping old tables around even after
+  // having cleared the cache.
+  Object* result = NULL;
+  int generation;
+  { HandleScope scope;
+    for (generation = 0; generation < generations(); generation++) {
+      Handle<CompilationCacheTable> table = GetTable(generation);
+      result = table->LookupEval(*source, *context);
+      if (result->IsJSFunction()) {
+        break;
+      }
+    }
+  }
+  if (result->IsJSFunction()) {
+    Handle<JSFunction> boilerplate(JSFunction::cast(result));
+    if (generation != 0) {
+      Put(source, context, boilerplate);
+    }
+    Counters::compilation_cache_hits.Increment();
+    return boilerplate;
+  } else {
+    Counters::compilation_cache_misses.Increment();
+    return Handle<JSFunction>::null();
+  }
+}
+
+
+void CompilationCacheEval::Put(Handle<String> source,
+                               Handle<Context> context,
+                               Handle<JSFunction> boilerplate) {
+  HandleScope scope;
+  ASSERT(boilerplate->IsBoilerplate());
+  Handle<CompilationCacheTable> table = GetTable(0);
+  CALL_HEAP_FUNCTION_VOID(table->PutEval(*source, *context, *boilerplate));
+}
+
+
+Handle<FixedArray> CompilationCacheRegExp::Lookup(Handle<String> source,
+                                                  JSRegExp::Flags flags) {
+  // Make sure not to leak the table into the surrounding handle
+  // scope. Otherwise, we risk keeping old tables around even after
+  // having cleared the cache.
+  Object* result = NULL;
+  int generation;
+  { HandleScope scope;
+    for (generation = 0; generation < generations(); generation++) {
+      Handle<CompilationCacheTable> table = GetTable(generation);
+      result = table->LookupRegExp(*source, flags);
+      if (result->IsFixedArray()) {
+        break;
+      }
+    }
+  }
+  if (result->IsFixedArray()) {
+    Handle<FixedArray> data(FixedArray::cast(result));
+    if (generation != 0) {
+      Put(source, flags, data);
+    }
+    Counters::compilation_cache_hits.Increment();
+    return data;
+  } else {
+    Counters::compilation_cache_misses.Increment();
+    return Handle<FixedArray>::null();
+  }
+}
+
+
+void CompilationCacheRegExp::Put(Handle<String> source,
+                                 JSRegExp::Flags flags,
+                                 Handle<FixedArray> data) {
+  HandleScope scope;
+  Handle<CompilationCacheTable> table = GetTable(0);
+  CALL_HEAP_FUNCTION_VOID(table->PutRegExp(*source, flags, *data));
+}
+
+
+Handle<JSFunction> CompilationCache::LookupScript(Handle<String> source,
+                                                  Handle<Object> name,
+                                                  int line_offset,
+                                                  int column_offset) {
+  if (!IsEnabled()) {
+    return Handle<JSFunction>::null();
+  }
+
+  return script.Lookup(source, name, line_offset, column_offset);
+}
+
+
+Handle<JSFunction> CompilationCache::LookupEval(Handle<String> source,
+                                                Handle<Context> context,
+                                                bool is_global) {
+  if (!IsEnabled()) {
+    return Handle<JSFunction>::null();
+  }
+
+  Handle<JSFunction> result;
+  if (is_global) {
+    result = eval_global.Lookup(source, context);
+  } else {
+    result = eval_contextual.Lookup(source, context);
+  }
+  return result;
+}
+
+
+Handle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
+                                                  JSRegExp::Flags flags) {
+  if (!IsEnabled()) {
+    return Handle<FixedArray>::null();
+  }
+
+  return reg_exp.Lookup(source, flags);
+}
+
+
+void CompilationCache::PutScript(Handle<String> source,
+                                 Handle<JSFunction> boilerplate) {
+  if (!IsEnabled()) {
+    return;
+  }
+
+  ASSERT(boilerplate->IsBoilerplate());
+  script.Put(source, boilerplate);
+}
+
+
+void CompilationCache::PutEval(Handle<String> source,
+                               Handle<Context> context,
+                               bool is_global,
+                               Handle<JSFunction> boilerplate) {
+  if (!IsEnabled()) {
+    return;
+  }
+
+  HandleScope scope;
+  ASSERT(boilerplate->IsBoilerplate());
+  if (is_global) {
+    eval_global.Put(source, context, boilerplate);
+  } else {
+    eval_contextual.Put(source, context, boilerplate);
+  }
+}
+
+
+
+void CompilationCache::PutRegExp(Handle<String> source,
+                                 JSRegExp::Flags flags,
+                                 Handle<FixedArray> data) {
+  if (!IsEnabled()) {
+    return;
+  }
+
+  reg_exp.Put(source, flags, data);
+}
+
+
+void CompilationCache::Clear() {
+  for (int i = 0; i < kSubCacheCount; i++) {
+    subcaches[i]->Clear();
+  }
+}
+
+
+void CompilationCache::Iterate(ObjectVisitor* v) {
+  for (int i = 0; i < kSubCacheCount; i++) {
+    subcaches[i]->Iterate(v);
+  }
+}
+
+
+void CompilationCache::MarkCompactPrologue() {
+  for (int i = 0; i < kSubCacheCount; i++) {
+    subcaches[i]->Age();
+  }
+}
+
+
+void CompilationCache::Enable() {
+  enabled = true;
+}
+
+
+void CompilationCache::Disable() {
+  enabled = false;
+  Clear();
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/compilation-cache.h b/src/compilation-cache.h
new file mode 100644
index 0000000..3487c08
--- /dev/null
+++ b/src/compilation-cache.h
@@ -0,0 +1,98 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_COMPILATION_CACHE_H_
+#define V8_COMPILATION_CACHE_H_
+
+namespace v8 {
+namespace internal {
+
+
+// The compilation cache keeps function boilerplates for compiled
+// scripts and evals. The boilerplates are looked up using the source
+// string as the key. For regular expressions the compilation data is cached.
+class CompilationCache {
+ public:
+  // Finds the script function boilerplate for a source
+  // string. Returns an empty handle if the cache doesn't contain a
+  // script for the given source string with the right origin.
+  static Handle<JSFunction> LookupScript(Handle<String> source,
+                                         Handle<Object> name,
+                                         int line_offset,
+                                         int column_offset);
+
+  // Finds the function boilerplate for a source string for eval in a
+  // given context.  Returns an empty handle if the cache doesn't
+  // contain a script for the given source string.
+  static Handle<JSFunction> LookupEval(Handle<String> source,
+                                       Handle<Context> context,
+                                       bool is_global);
+
+  // Returns the regexp data associated with the given regexp if it
+  // is in cache, otherwise an empty handle.
+  static Handle<FixedArray> LookupRegExp(Handle<String> source,
+                                         JSRegExp::Flags flags);
+
+  // Associate the (source, kind) pair with the boilerplate. This may
+  // overwrite an existing mapping.
+  static void PutScript(Handle<String> source,
+                        Handle<JSFunction> boilerplate);
+
+  // Associate the (source, context->closure()->shared(), kind) triple
+  // with the boilerplate. This may overwrite an existing mapping.
+  static void PutEval(Handle<String> source,
+                      Handle<Context> context,
+                      bool is_global,
+                      Handle<JSFunction> boilerplate);
+
+  // Associate the (source, flags) pair with the given regexp data.
+  // This may overwrite an existing mapping.
+  static void PutRegExp(Handle<String> source,
+                        JSRegExp::Flags flags,
+                        Handle<FixedArray> data);
+
+  // Clear the cache - also used to initialize the cache at startup.
+  static void Clear();
+
+  // GC support.
+  static void Iterate(ObjectVisitor* v);
+
+  // Notify the cache that a mark-sweep garbage collection is about to
+  // take place. This is used to retire entries from the cache to
+  // avoid keeping them alive too long without using them.
+  static void MarkCompactPrologue();
+
+  // Enable/disable the compilation cache. Used by the debugger to disable the
+  // cache during debugging so that new scripts are always compiled.
+  static void Enable();
+  static void Disable();
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_COMPILATION_CACHE_H_
diff --git a/src/compiler.cc b/src/compiler.cc
new file mode 100644
index 0000000..6ba7a9a
--- /dev/null
+++ b/src/compiler.cc
@@ -0,0 +1,420 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "compilation-cache.h"
+#include "compiler.h"
+#include "debug.h"
+#include "oprofile-agent.h"
+#include "rewriter.h"
+#include "scopes.h"
+#include "usage-analyzer.h"
+
+namespace v8 {
+namespace internal {
+
+static Handle<Code> MakeCode(FunctionLiteral* literal,
+                             Handle<Script> script,
+                             Handle<Context> context,
+                             bool is_eval) {
+  ASSERT(literal != NULL);
+
+  // Rewrite the AST by introducing .result assignments where needed.
+  if (!Rewriter::Process(literal) || !AnalyzeVariableUsage(literal)) {
+    // Signal a stack overflow by returning a null handle.  The stack
+    // overflow exception will be thrown by the caller.
+    return Handle<Code>::null();
+  }
+
+  {
+    // Compute top scope and allocate variables. For lazy compilation
+    // the top scope only contains the single lazily compiled function,
+    // so this doesn't re-allocate variables repeatedly.
+    HistogramTimerScope timer(&Counters::variable_allocation);
+    Scope* top = literal->scope();
+    while (top->outer_scope() != NULL) top = top->outer_scope();
+    top->AllocateVariables(context);
+  }
+
+#ifdef DEBUG
+  if (Bootstrapper::IsActive() ?
+      FLAG_print_builtin_scopes :
+      FLAG_print_scopes) {
+    literal->scope()->Print();
+  }
+#endif
+
+  // Optimize the AST.
+  if (!Rewriter::Optimize(literal)) {
+    // Signal a stack overflow by returning a null handle.  The stack
+    // overflow exception will be thrown by the caller.
+    return Handle<Code>::null();
+  }
+
+  // Generate code and return it.
+  Handle<Code> result = CodeGenerator::MakeCode(literal, script, is_eval);
+  return result;
+}
+
+
+static bool IsValidJSON(FunctionLiteral* lit) {
+  if (lit->body()->length() != 1)
+    return false;
+  Statement* stmt = lit->body()->at(0);
+  if (stmt->AsExpressionStatement() == NULL)
+    return false;
+  Expression* expr = stmt->AsExpressionStatement()->expression();
+  return expr->IsValidJSON();
+}
+
+
+static Handle<JSFunction> MakeFunction(bool is_global,
+                                       bool is_eval,
+                                       Compiler::ValidationState validate,
+                                       Handle<Script> script,
+                                       Handle<Context> context,
+                                       v8::Extension* extension,
+                                       ScriptDataImpl* pre_data) {
+  CompilationZoneScope zone_scope(DELETE_ON_EXIT);
+
+  PostponeInterruptsScope postpone;
+
+  ASSERT(!i::Top::global_context().is_null());
+  script->set_context_data((*i::Top::global_context())->data());
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  bool is_json = (validate == Compiler::VALIDATE_JSON);
+  if (is_eval || is_json) {
+    script->set_compilation_type(
+        is_json ? Smi::FromInt(Script::COMPILATION_TYPE_JSON) :
+                  Smi::FromInt(Script::COMPILATION_TYPE_EVAL));
+    // For eval scripts add information on the function from which eval was
+    // called.
+    if (is_eval) {
+      JavaScriptFrameIterator it;
+      script->set_eval_from_function(it.frame()->function());
+      int offset = it.frame()->pc() - it.frame()->code()->instruction_start();
+      script->set_eval_from_instructions_offset(Smi::FromInt(offset));
+    }
+  }
+
+  // Notify debugger
+  Debugger::OnBeforeCompile(script);
+#endif
+
+  // Only allow non-global compiles for eval.
+  ASSERT(is_eval || is_global);
+
+  // Build AST.
+  FunctionLiteral* lit = MakeAST(is_global, script, extension, pre_data);
+
+  // Check for parse errors.
+  if (lit == NULL) {
+    ASSERT(Top::has_pending_exception());
+    return Handle<JSFunction>::null();
+  }
+
+  // When parsing JSON we do an ordinary parse and afterwards check the
+  // AST to ensure it was well-formed.  If not, we give a syntax error.
+  if (validate == Compiler::VALIDATE_JSON && !IsValidJSON(lit)) {
+    HandleScope scope;
+    Handle<JSArray> args = Factory::NewJSArray(1);
+    Handle<Object> source(script->source());
+    SetElement(args, 0, source);
+    Handle<Object> result = Factory::NewSyntaxError("invalid_json", args);
+    Top::Throw(*result, NULL);
+    return Handle<JSFunction>::null();
+  }
+
+  // Measure how long it takes to do the compilation; only take the
+  // rest of the function into account to avoid overlap with the
+  // parsing statistics.
+  HistogramTimer* rate = is_eval
+      ? &Counters::compile_eval
+      : &Counters::compile;
+  HistogramTimerScope timer(rate);
+
+  // Compile the code.
+  Handle<Code> code = MakeCode(lit, script, context, is_eval);
+
+  // Check for stack-overflow exceptions.
+  if (code.is_null()) {
+    Top::StackOverflow();
+    return Handle<JSFunction>::null();
+  }
+
+#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
+  // Log the code generation for the script. Check explicitly whether logging
+  // is enabled to avoid allocating when it is not required.
+  if (Logger::is_logging() || OProfileAgent::is_enabled()) {
+    if (script->name()->IsString()) {
+      SmartPointer<char> data =
+          String::cast(script->name())->ToCString(DISALLOW_NULLS);
+      LOG(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG,
+                          *code, *data));
+      OProfileAgent::CreateNativeCodeRegion(*data,
+                                            code->instruction_start(),
+                                            code->instruction_size());
+    } else {
+      LOG(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG,
+                          *code, ""));
+      OProfileAgent::CreateNativeCodeRegion(is_eval ? "Eval" : "Script",
+                                            code->instruction_start(),
+                                            code->instruction_size());
+    }
+  }
+#endif
+
+  // Allocate function.
+  Handle<JSFunction> fun =
+      Factory::NewFunctionBoilerplate(lit->name(),
+                                      lit->materialized_literal_count(),
+                                      lit->contains_array_literal(),
+                                      code);
+
+  ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
+  CodeGenerator::SetFunctionInfo(fun, lit, true, script);
+
+  // Give the runtime system a hint for allocating the initial property
+  // space by setting the expected number of properties for instances of
+  // the function.
+  SetExpectedNofPropertiesFromEstimate(fun, lit->expected_property_count());
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Notify debugger
+  Debugger::OnAfterCompile(script, fun);
+#endif
+
+  return fun;
+}
+
+
+static StaticResource<SafeStringInputBuffer> safe_string_input_buffer;
+
+
+Handle<JSFunction> Compiler::Compile(Handle<String> source,
+                                     Handle<Object> script_name,
+                                     int line_offset, int column_offset,
+                                     v8::Extension* extension,
+                                     ScriptDataImpl* input_pre_data) {
+  int source_length = source->length();
+  Counters::total_load_size.Increment(source_length);
+  Counters::total_compile_size.Increment(source_length);
+
+  // The VM is in the COMPILER state until exiting this function.
+  VMState state(COMPILER);
+
+  // Do a lookup in the compilation cache but not for extensions.
+  Handle<JSFunction> result;
+  if (extension == NULL) {
+    result = CompilationCache::LookupScript(source,
+                                            script_name,
+                                            line_offset,
+                                            column_offset);
+  }
+
+  if (result.is_null()) {
+    // No cache entry found. Do pre-parsing and compile the script.
+    ScriptDataImpl* pre_data = input_pre_data;
+    if (pre_data == NULL && source_length >= FLAG_min_preparse_length) {
+      Access<SafeStringInputBuffer> buf(&safe_string_input_buffer);
+      buf->Reset(source.location());
+      pre_data = PreParse(source, buf.value(), extension);
+    }
+
+    // Create a script object describing the script to be compiled.
+    Handle<Script> script = Factory::NewScript(source);
+    if (!script_name.is_null()) {
+      script->set_name(*script_name);
+      script->set_line_offset(Smi::FromInt(line_offset));
+      script->set_column_offset(Smi::FromInt(column_offset));
+    }
+
+    // Compile the function and add it to the cache.
+    result = MakeFunction(true,
+                          false,
+                          DONT_VALIDATE_JSON,
+                          script,
+                          Handle<Context>::null(),
+                          extension,
+                          pre_data);
+    if (extension == NULL && !result.is_null()) {
+      CompilationCache::PutScript(source, result);
+    }
+
+    // Get rid of the pre-parsing data (if necessary).
+    if (input_pre_data == NULL && pre_data != NULL) {
+      delete pre_data;
+    }
+  }
+
+  if (result.is_null()) Top::ReportPendingMessages();
+  return result;
+}
+
+
+Handle<JSFunction> Compiler::CompileEval(Handle<String> source,
+                                         Handle<Context> context,
+                                         bool is_global,
+                                         ValidationState validate) {
+  // Note that if validation is required, then no path through this
+  // function is allowed to return a value without validating that
+  // the input is legal JSON.
+
+  int source_length = source->length();
+  Counters::total_eval_size.Increment(source_length);
+  Counters::total_compile_size.Increment(source_length);
+
+  // The VM is in the COMPILER state until exiting this function.
+  VMState state(COMPILER);
+
+  // Do a lookup in the compilation cache; if the entry is not there,
+  // invoke the compiler and add the result to the cache.  If we're
+  // evaluating JSON we bypass the cache, since we can't be sure a
+  // potential value in the cache has been validated.
+  Handle<JSFunction> result;
+  if (validate == DONT_VALIDATE_JSON)
+    result = CompilationCache::LookupEval(source, context, is_global);
+
+  if (result.is_null()) {
+    // Create a script object describing the script to be compiled.
+    Handle<Script> script = Factory::NewScript(source);
+    result = MakeFunction(is_global,
+                          true,
+                          validate,
+                          script,
+                          context,
+                          NULL,
+                          NULL);
+    if (!result.is_null() && validate != VALIDATE_JSON) {
+      // For JSON it's unlikely that we'll ever see exactly the same
+      // string again, so we don't use the compilation cache.
+      CompilationCache::PutEval(source, context, is_global, result);
+    }
+  }
+
+  return result;
+}
+
+
+bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared,
+                           int loop_nesting) {
+  CompilationZoneScope zone_scope(DELETE_ON_EXIT);
+
+  // The VM is in the COMPILER state until exiting this function.
+  VMState state(COMPILER);
+
+  PostponeInterruptsScope postpone;
+
+  // Compute name, source code and script data.
+  Handle<String> name(String::cast(shared->name()));
+  Handle<Script> script(Script::cast(shared->script()));
+
+  int start_position = shared->start_position();
+  int end_position = shared->end_position();
+  bool is_expression = shared->is_expression();
+  Counters::total_compile_size.Increment(end_position - start_position);
+
+  // Generate the AST for the lazily compiled function. The AST may be
+  // NULL in case of parser stack overflow.
+  FunctionLiteral* lit = MakeLazyAST(script, name,
+                                     start_position,
+                                     end_position,
+                                     is_expression);
+
+  // Check for parse errors.
+  if (lit == NULL) {
+    ASSERT(Top::has_pending_exception());
+    return false;
+  }
+
+  // Update the loop nesting in the function literal.
+  lit->set_loop_nesting(loop_nesting);
+
+  // Measure how long it takes to do the lazy compilation; only take
+  // the rest of the function into account to avoid overlap with the
+  // lazy parsing statistics.
+  HistogramTimerScope timer(&Counters::compile_lazy);
+
+  // Compile the code.
+  Handle<Code> code = MakeCode(lit, script, Handle<Context>::null(), false);
+
+  // Check for stack-overflow exception.
+  if (code.is_null()) {
+    Top::StackOverflow();
+    return false;
+  }
+
+#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
+  // Log the code generation. If source information is available, include the
+  // script name and line number. Check explicitly whether logging is enabled,
+  // as finding the line number is not free.
+  if (Logger::is_logging() || OProfileAgent::is_enabled()) {
+    Handle<String> func_name(name->length() > 0 ?
+                             *name : shared->inferred_name());
+    if (script->name()->IsString()) {
+      int line_num = GetScriptLineNumber(script, start_position) + 1;
+      LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG, *code, *func_name,
+                          String::cast(script->name()), line_num));
+      OProfileAgent::CreateNativeCodeRegion(*func_name,
+                                            String::cast(script->name()),
+                                            line_num,
+                                            code->instruction_start(),
+                                            code->instruction_size());
+    } else {
+      LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG, *code, *func_name));
+      OProfileAgent::CreateNativeCodeRegion(*func_name,
+                                            code->instruction_start(),
+                                            code->instruction_size());
+    }
+  }
+#endif
+
+  // Update the shared function info with the compiled code.
+  shared->set_code(*code);
+
+  // Set the expected number of properties for instances.
+  SetExpectedNofPropertiesFromEstimate(shared, lit->expected_property_count());
+
+  // Set the optimization hints after performing lazy compilation, as these
+  // are not set when the function is set up as a lazily compiled function.
+  shared->SetThisPropertyAssignmentsInfo(
+      lit->has_only_this_property_assignments(),
+      lit->has_only_simple_this_property_assignments(),
+      *lit->this_property_assignments());
+
+  // Check that the function has compiled code.
+  ASSERT(shared->is_compiled());
+  return true;
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/compiler.h b/src/compiler.h
new file mode 100644
index 0000000..579970b
--- /dev/null
+++ b/src/compiler.h
@@ -0,0 +1,94 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_COMPILER_H_
+#define V8_COMPILER_H_
+
+#include "frame-element.h"
+#include "parser.h"
+#include "zone.h"
+
+namespace v8 {
+namespace internal {
+
+// The V8 compiler
+//
+// General strategy: Source code is translated into an anonymous function
+// without parameters which can then be executed. If the source code contains
+// other functions, they will be compiled and allocated as part of the
+// compilation of the source code.
+
+// Please note that this interface returns function boilerplates.
+// This means you need to call Factory::NewFunctionFromBoilerplate
+// before you have a real function with a context (see the usage
+// sketch below the class).
+
+class Compiler : public AllStatic {
+ public:
+  enum ValidationState { VALIDATE_JSON, DONT_VALIDATE_JSON };
+
+  // All routines return a JSFunction.
+  // If an error occurs, an exception is raised and
+  // the returned handle contains NULL.
+
+  // Compile a String source within a context.
+  static Handle<JSFunction> Compile(Handle<String> source,
+                                    Handle<Object> script_name,
+                                    int line_offset, int column_offset,
+                                    v8::Extension* extension,
+                                    ScriptDataImpl* script_data);
+
+  // Compile a String source within a context for Eval.
+  static Handle<JSFunction> CompileEval(Handle<String> source,
+                                        Handle<Context> context,
+                                        bool is_global,
+                                        ValidationState validation);
+
+  // Compile from function info (used for lazy compilation). Returns
+  // true on success and false if the compilation resulted in a stack
+  // overflow.
+  static bool CompileLazy(Handle<SharedFunctionInfo> shared, int loop_nesting);
+};
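+
+// A minimal usage sketch (comments only; the Factory call is assumed from
+// its declaration elsewhere in this tree): a successful Compile returns a
+// boilerplate which must be paired with a context before it can be invoked.
+//
+//   Handle<JSFunction> boilerplate =
+//       Compiler::Compile(source, script_name, 0, 0, NULL, NULL);
+//   if (boilerplate.is_null()) return;  // an exception has been scheduled
+//   Handle<JSFunction> fun =
+//       Factory::NewFunctionFromBoilerplate(boilerplate, context);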
+
+
+// During compilation we need a global list of handles to constants
+// for frame elements.  When the zone gets deleted, we make sure to
+// clear this list of handles as well.
+class CompilationZoneScope : public ZoneScope {
+ public:
+  explicit CompilationZoneScope(ZoneScopeMode mode) : ZoneScope(mode) { }
+  virtual ~CompilationZoneScope() {
+    if (ShouldDeleteOnExit()) {
+      FrameElement::ClearConstantList();
+      Result::ClearConstantList();
+    }
+  }
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_COMPILER_H_
diff --git a/src/contexts.cc b/src/contexts.cc
new file mode 100644
index 0000000..ead73ee
--- /dev/null
+++ b/src/contexts.cc
@@ -0,0 +1,253 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "debug.h"
+#include "scopeinfo.h"
+
+namespace v8 {
+namespace internal {
+
+JSBuiltinsObject* Context::builtins() {
+  GlobalObject* object = global();
+  if (object->IsJSGlobalObject()) {
+    return JSGlobalObject::cast(object)->builtins();
+  } else {
+    ASSERT(object->IsJSBuiltinsObject());
+    return JSBuiltinsObject::cast(object);
+  }
+}
+
+
+Context* Context::global_context() {
+  // Fast case: the global object for this context has been set.  In
+  // that case, the global object has a direct pointer to the global
+  // context.
+  if (global()->IsGlobalObject()) {
+    return global()->global_context();
+  }
+  // During bootstrapping, the global object might not be set and we
+  // have to search the context chain to find the global context.
+  Context* current = this;
+  while (!current->IsGlobalContext()) {
+    current = Context::cast(JSFunction::cast(current->closure())->context());
+  }
+  return current;
+}
+
+
+JSObject* Context::global_proxy() {
+  return global_context()->global_proxy_object();
+}
+
+void Context::set_global_proxy(JSObject* object) {
+  global_context()->set_global_proxy_object(object);
+}
+
+
+Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
+                               int* index_, PropertyAttributes* attributes) {
+  Handle<Context> context(this);
+
+  bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0;
+  *index_ = -1;
+  *attributes = ABSENT;
+
+  if (FLAG_trace_contexts) {
+    PrintF("Context::Lookup(");
+    name->ShortPrint();
+    PrintF(")\n");
+  }
+
+  do {
+    if (FLAG_trace_contexts) {
+      PrintF(" - looking in context %p", *context);
+      if (context->IsGlobalContext()) PrintF(" (global context)");
+      PrintF("\n");
+    }
+
+    // check extension/with object
+    if (context->has_extension()) {
+      Handle<JSObject> extension = Handle<JSObject>(context->extension());
+      // Context extension objects need to behave as if they have no
+      // prototype.  So even if we want to follow prototype chains, we
+      // need to only do a local lookup for context extension objects.
+      if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
+          extension->IsJSContextExtensionObject()) {
+        *attributes = extension->GetLocalPropertyAttribute(*name);
+      } else {
+        *attributes = extension->GetPropertyAttribute(*name);
+      }
+      if (*attributes != ABSENT) {
+        // property found
+        if (FLAG_trace_contexts) {
+          PrintF("=> found property in context object %p\n", *extension);
+        }
+        return extension;
+      }
+    }
+
+    if (context->is_function_context()) {
+      // we have context-local slots
+
+      // check non-parameter locals in context
+      Handle<Code> code(context->closure()->code());
+      Variable::Mode mode;
+      int index = ScopeInfo<>::ContextSlotIndex(*code, *name, &mode);
+      ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
+      if (index >= 0) {
+        // slot found
+        if (FLAG_trace_contexts) {
+          PrintF("=> found local in context slot %d (mode = %d)\n",
+                 index, mode);
+        }
+        *index_ = index;
+        // Note: Fixed context slots are statically allocated by the compiler.
+        // Statically allocated variables always have a statically known mode,
+        // which is the mode with which they were declared when added to the
+        // scope. Thus, the DYNAMIC mode (which corresponds to dynamically
+        // declared variables that were introduced through declaration nodes)
+        // must not appear here.
+        switch (mode) {
+          case Variable::INTERNAL:  // fall through
+          case Variable::VAR: *attributes = NONE; break;
+          case Variable::CONST: *attributes = READ_ONLY; break;
+          case Variable::DYNAMIC: UNREACHABLE(); break;
+          case Variable::DYNAMIC_GLOBAL: UNREACHABLE(); break;
+          case Variable::DYNAMIC_LOCAL: UNREACHABLE(); break;
+          case Variable::TEMPORARY: UNREACHABLE(); break;
+        }
+        return context;
+      }
+
+      // check parameter locals in context
+      int param_index = ScopeInfo<>::ParameterIndex(*code, *name);
+      if (param_index >= 0) {
+        // slot found.
+        int index =
+            ScopeInfo<>::ContextSlotIndex(*code,
+                                          Heap::arguments_shadow_symbol(),
+                                          NULL);
+        ASSERT(index >= 0);  // arguments must exist and be in the heap context
+        Handle<JSObject> arguments(JSObject::cast(context->get(index)));
+        ASSERT(arguments->HasLocalProperty(Heap::length_symbol()));
+        if (FLAG_trace_contexts) {
+          PrintF("=> found parameter %d in arguments object\n", param_index);
+        }
+        *index_ = param_index;
+        *attributes = NONE;
+        return arguments;
+      }
+
+      // check intermediate context (holding only the function name variable)
+      if (follow_context_chain) {
+        int index = ScopeInfo<>::FunctionContextSlotIndex(*code, *name);
+        if (index >= 0) {
+          // slot found
+          if (FLAG_trace_contexts) {
+            PrintF("=> found intermediate function in context slot %d\n",
+                   index);
+          }
+          *index_ = index;
+          *attributes = READ_ONLY;
+          return context;
+        }
+      }
+    }
+
+    // proceed with enclosing context
+    if (context->IsGlobalContext()) {
+      follow_context_chain = false;
+    } else if (context->is_function_context()) {
+      context = Handle<Context>(Context::cast(context->closure()->context()));
+    } else {
+      context = Handle<Context>(context->previous());
+    }
+  } while (follow_context_chain);
+
+  // slot not found
+  if (FLAG_trace_contexts) {
+    PrintF("=> no property/slot found\n");
+  }
+  return Handle<Object>::null();
+}
+
+
+bool Context::GlobalIfNotShadowedByEval(Handle<String> name) {
+  Context* context = this;
+
+  // Check that there is no local with the given name in contexts
+  // before the global context and check that there are no context
+  // extension objects (conservative check for with statements).
+  while (!context->IsGlobalContext()) {
+    // Check if the context is potentially a with context.
+    if (context->has_extension()) return false;
+
+    // Not a with context so it must be a function context.
+    ASSERT(context->is_function_context());
+
+    // Check non-parameter locals.
+    Handle<Code> code(context->closure()->code());
+    Variable::Mode mode;
+    int index = ScopeInfo<>::ContextSlotIndex(*code, *name, &mode);
+    ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
+    if (index >= 0) return false;
+
+    // Check parameter locals.
+    int param_index = ScopeInfo<>::ParameterIndex(*code, *name);
+    if (param_index >= 0) return false;
+
+    // Check context only holding the function name variable.
+    index = ScopeInfo<>::FunctionContextSlotIndex(*code, *name);
+    if (index >= 0) return false;
+    context = Context::cast(context->closure()->context());
+  }
+
+  // No local or potential with statement found so the variable is
+  // global unless it is shadowed by an eval-introduced variable.
+  return true;
+}
+
+
+#ifdef DEBUG
+bool Context::IsBootstrappingOrContext(Object* object) {
+  // During bootstrapping we allow all objects to pass as
+  // contexts. This is necessary to fix circular dependencies.
+  return Bootstrapper::IsActive() || object->IsContext();
+}
+
+
+bool Context::IsBootstrappingOrGlobalObject(Object* object) {
+  // During bootstrapping we allow all objects to pass as global
+  // objects. This is necessary to fix circular dependencies.
+  return Bootstrapper::IsActive() || object->IsGlobalObject();
+}
+#endif
+
+} }  // namespace v8::internal
diff --git a/src/contexts.h b/src/contexts.h
new file mode 100644
index 0000000..bdfc40b
--- /dev/null
+++ b/src/contexts.h
@@ -0,0 +1,343 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CONTEXTS_H_
+#define V8_CONTEXTS_H_
+
+namespace v8 {
+namespace internal {
+
+
+enum ContextLookupFlags {
+  FOLLOW_CONTEXT_CHAIN = 1,
+  FOLLOW_PROTOTYPE_CHAIN = 2,
+
+  DONT_FOLLOW_CHAINS = 0,
+  FOLLOW_CHAINS = FOLLOW_CONTEXT_CHAIN | FOLLOW_PROTOTYPE_CHAIN
+};
+
+
+// Heap-allocated activation contexts.
+//
+// Contexts are implemented as FixedArray objects; the Context
+// class is a convenience interface cast onto a FixedArray object.
+//
+// Note: Context must have no virtual functions and Context objects
+// must always be allocated via Heap::AllocateContext() or
+// Factory::NewContext.
+
+// Comment for special_function_table:
+// Table for providing optimized/specialized functions.
+// The array contains triplets [object, general_function, optimized_function].
+// Primarily added to support built-in optimized variants of
+// Array.prototype.{push,pop}.
+
+#define GLOBAL_CONTEXT_FIELDS(V) \
+  V(GLOBAL_PROXY_INDEX, JSObject, global_proxy_object) \
+  V(SECURITY_TOKEN_INDEX, Object, security_token) \
+  V(BOOLEAN_FUNCTION_INDEX, JSFunction, boolean_function) \
+  V(NUMBER_FUNCTION_INDEX, JSFunction, number_function) \
+  V(STRING_FUNCTION_INDEX, JSFunction, string_function) \
+  V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
+  V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
+  V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
+  V(JSON_OBJECT_INDEX, JSObject, json_object) \
+  V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
+  V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \
+  V(CREATE_DATE_FUN_INDEX, JSFunction, create_date_fun) \
+  V(TO_NUMBER_FUN_INDEX, JSFunction, to_number_fun) \
+  V(TO_STRING_FUN_INDEX, JSFunction, to_string_fun) \
+  V(TO_DETAIL_STRING_FUN_INDEX, JSFunction, to_detail_string_fun) \
+  V(TO_OBJECT_FUN_INDEX, JSFunction, to_object_fun) \
+  V(TO_INTEGER_FUN_INDEX, JSFunction, to_integer_fun) \
+  V(TO_UINT32_FUN_INDEX, JSFunction, to_uint32_fun) \
+  V(TO_INT32_FUN_INDEX, JSFunction, to_int32_fun) \
+  V(TO_BOOLEAN_FUN_INDEX, JSFunction, to_boolean_fun) \
+  V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun) \
+  V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun) \
+  V(FUNCTION_MAP_INDEX, Map, function_map) \
+  V(FUNCTION_INSTANCE_MAP_INDEX, Map, function_instance_map) \
+  V(JS_ARRAY_MAP_INDEX, Map, js_array_map) \
+  V(SPECIAL_FUNCTION_TABLE_INDEX, FixedArray, special_function_table) \
+  V(ARGUMENTS_BOILERPLATE_INDEX, JSObject, arguments_boilerplate) \
+  V(MESSAGE_LISTENERS_INDEX, JSObject, message_listeners) \
+  V(MAKE_MESSAGE_FUN_INDEX, JSFunction, make_message_fun) \
+  V(GET_STACK_TRACE_LINE_INDEX, JSFunction, get_stack_trace_line_fun) \
+  V(CONFIGURE_GLOBAL_INDEX, JSFunction, configure_global_fun) \
+  V(FUNCTION_CACHE_INDEX, JSObject, function_cache) \
+  V(RUNTIME_CONTEXT_INDEX, Context, runtime_context) \
+  V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \
+  V(CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, JSFunction, \
+    call_as_constructor_delegate) \
+  V(EMPTY_SCRIPT_INDEX, Script, empty_script) \
+  V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
+  V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
+  V(OUT_OF_MEMORY_INDEX, Object, out_of_memory) \
+  V(MAP_CACHE_INDEX, Object, map_cache) \
+  V(CONTEXT_DATA_INDEX, Object, data)
+
+// JSFunctions are pairs (context, function code), sometimes also called
+// closures. A Context object is used to represent function contexts and
+// dynamically pushed 'with' contexts (or 'scopes' in ECMA-262 speak).
+//
+// At runtime, the contexts build a stack in parallel to the execution
+// stack, with the top-most context being the current context. All contexts
+// have the following slots:
+//
+// [ closure   ]  This is the current function. It is the same for all
+//                contexts inside a function. It provides access to the
+//                incoming context (i.e., the outer context, which may
+//                or may not become the current function's context), and
+//                it provides access to the function's code and thus its
+//                scope information, which in turn contains the names of
+//                statically allocated context slots. The names are needed
+//                for dynamic lookups in the presence of 'with' or 'eval'.
+//
+// [ fcontext  ]  A pointer to the innermost enclosing function context.
+//                It is the same for all contexts *allocated* inside a
+//                function, and the function context's fcontext points
+//                to itself. It is only needed for fast access of the
+//                function context (used for declarations, and static
+//                context slot access).
+//
+// [ previous  ]  A pointer to the previous context. It is NULL for
+//                function contexts, and non-NULL for 'with' contexts.
+//                Used to implement the 'with' statement.
+//
+// [ extension ]  A pointer to an extension JSObject, or NULL. Used to
+//                implement 'with' statements and dynamic declarations
+//                (through 'eval'). The object in a 'with' statement is
+//                stored in the extension slot of a 'with' context.
+//                Dynamically declared variables/functions are also added
+//                to the lazily allocated extension object. Context::Lookup
+//                searches the extension object for properties.
+//
+// [ global    ]  A pointer to the global object. Provided for quick
+//                access to the global object from inside the code (since
+//                we always have a context pointer).
+//
+// In addition, function contexts may have statically allocated context slots
+// to store local variables/functions that are accessed from inner functions
+// (via static context addresses) or through 'eval' (dynamic context lookups).
+// Finally, the global context contains additional slots for fast access to
+// global properties.
+//
+// We may be able to simplify the implementation:
+//
+// - We may be able to get rid of 'fcontext': We can always use the fact that
+//   previous == NULL for function contexts and so we can search for them. They
+//   are only needed when doing dynamic declarations, and the context chains
+//   tend to be very very short (depth of nesting of 'with' statements). At
+//   the moment we also use it in generated code for context slot accesses -
+//   and there we don't want a loop because of code bloat - but we may not
+//   need it there after all (see comment in codegen_*.cc).
+//
+// - If we cannot get rid of fcontext, consider making 'previous' never NULL
+//   except for the global context. This could simplify Context::Lookup.
+
+class Context: public FixedArray {
+ public:
+  // Conversions.
+  static Context* cast(Object* context) {
+    ASSERT(context->IsContext());
+    return reinterpret_cast<Context*>(context);
+  }
+
+  // The default context slot layout; indices are FixedArray slot indices.
+  enum {
+    // These slots are in all contexts.
+    CLOSURE_INDEX,
+    FCONTEXT_INDEX,
+    PREVIOUS_INDEX,
+    EXTENSION_INDEX,
+    GLOBAL_INDEX,
+    MIN_CONTEXT_SLOTS,
+
+    // These slots are only in global contexts.
+    GLOBAL_PROXY_INDEX = MIN_CONTEXT_SLOTS,
+    SECURITY_TOKEN_INDEX,
+    ARGUMENTS_BOILERPLATE_INDEX,
+    JS_ARRAY_MAP_INDEX,
+    FUNCTION_MAP_INDEX,
+    FUNCTION_INSTANCE_MAP_INDEX,
+    INITIAL_OBJECT_PROTOTYPE_INDEX,
+    BOOLEAN_FUNCTION_INDEX,
+    NUMBER_FUNCTION_INDEX,
+    STRING_FUNCTION_INDEX,
+    OBJECT_FUNCTION_INDEX,
+    ARRAY_FUNCTION_INDEX,
+    DATE_FUNCTION_INDEX,
+    JSON_OBJECT_INDEX,
+    REGEXP_FUNCTION_INDEX,
+    CREATE_DATE_FUN_INDEX,
+    TO_NUMBER_FUN_INDEX,
+    TO_STRING_FUN_INDEX,
+    TO_DETAIL_STRING_FUN_INDEX,
+    TO_OBJECT_FUN_INDEX,
+    TO_INTEGER_FUN_INDEX,
+    TO_UINT32_FUN_INDEX,
+    TO_INT32_FUN_INDEX,
+    TO_BOOLEAN_FUN_INDEX,
+    INSTANTIATE_FUN_INDEX,
+    CONFIGURE_INSTANCE_FUN_INDEX,
+    SPECIAL_FUNCTION_TABLE_INDEX,
+    MESSAGE_LISTENERS_INDEX,
+    MAKE_MESSAGE_FUN_INDEX,
+    GET_STACK_TRACE_LINE_INDEX,
+    CONFIGURE_GLOBAL_INDEX,
+    FUNCTION_CACHE_INDEX,
+    RUNTIME_CONTEXT_INDEX,
+    CALL_AS_FUNCTION_DELEGATE_INDEX,
+    CALL_AS_CONSTRUCTOR_DELEGATE_INDEX,
+    EMPTY_SCRIPT_INDEX,
+    SCRIPT_FUNCTION_INDEX,
+    CONTEXT_EXTENSION_FUNCTION_INDEX,
+    OUT_OF_MEMORY_INDEX,
+    MAP_CACHE_INDEX,
+    CONTEXT_DATA_INDEX,
+    GLOBAL_CONTEXT_SLOTS
+  };
+
+  // Direct slot access.
+  JSFunction* closure() { return JSFunction::cast(get(CLOSURE_INDEX)); }
+  void set_closure(JSFunction* closure) { set(CLOSURE_INDEX, closure); }
+
+  Context* fcontext() { return Context::cast(get(FCONTEXT_INDEX)); }
+  void set_fcontext(Context* context) { set(FCONTEXT_INDEX, context); }
+
+  Context* previous() {
+    Object* result = unchecked_previous();
+    ASSERT(IsBootstrappingOrContext(result));
+    return reinterpret_cast<Context*>(result);
+  }
+  void set_previous(Context* context) { set(PREVIOUS_INDEX, context); }
+
+  bool has_extension() { return unchecked_extension() != NULL; }
+  JSObject* extension() { return JSObject::cast(unchecked_extension()); }
+  void set_extension(JSObject* object) { set(EXTENSION_INDEX, object); }
+
+  GlobalObject* global() {
+    Object* result = get(GLOBAL_INDEX);
+    ASSERT(IsBootstrappingOrGlobalObject(result));
+    return reinterpret_cast<GlobalObject*>(result);
+  }
+  void set_global(GlobalObject* global) { set(GLOBAL_INDEX, global); }
+
+  // Returns a JSGlobalProxy object or null.
+  JSObject* global_proxy();
+  void set_global_proxy(JSObject* global);
+
+  // The builtins object.
+  JSBuiltinsObject* builtins();
+
+  // Compute the global context by traversing the context chain.
+  Context* global_context();
+
+  // Tells if this is a function context (as opposed to a 'with' context).
+  bool is_function_context() { return unchecked_previous() == NULL; }
+
+  // Tells whether the global context is marked with out of memory.
+  bool has_out_of_memory() {
+    return global_context()->out_of_memory() == Heap::true_value();
+  }
+
+  // Mark the global context with out of memory.
+  void mark_out_of_memory() {
+    global_context()->set_out_of_memory(Heap::true_value());
+  }
+
+  // The exception holder is the object used as a with object in
+  // the implementation of a catch block.
+  bool is_exception_holder(Object* object) {
+    return IsCatchContext() && extension() == object;
+  }
+
+#define GLOBAL_CONTEXT_FIELD_ACCESSORS(index, type, name) \
+  void  set_##name(type* value) {                         \
+    ASSERT(IsGlobalContext());                            \
+    set(index, value);                                    \
+  }                                                       \
+  type* name() {                                          \
+    ASSERT(IsGlobalContext());                            \
+    return type::cast(get(index));                        \
+  }
+  GLOBAL_CONTEXT_FIELDS(GLOBAL_CONTEXT_FIELD_ACCESSORS)
+#undef GLOBAL_CONTEXT_FIELD_ACCESSORS
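+
+  // For illustration, the accessor pair generated above for the entry
+  // (BOOLEAN_FUNCTION_INDEX, JSFunction, boolean_function) expands to:
+  //
+  //   void set_boolean_function(JSFunction* value) {
+  //     ASSERT(IsGlobalContext());
+  //     set(BOOLEAN_FUNCTION_INDEX, value);
+  //   }
+  //   JSFunction* boolean_function() {
+  //     ASSERT(IsGlobalContext());
+  //     return JSFunction::cast(get(BOOLEAN_FUNCTION_INDEX));
+  //   }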
+
+  // Look up the slot called name, starting with the current context.
+  // There are 4 possible outcomes:
+  //
+  // 1) index_ >= 0 && result->IsContext():
+  //    most common case, the result is a Context, and index is the
+  //    context slot index, and the slot exists.
+  //    attributes == READ_ONLY for the function name variable, NONE otherwise.
+  //
+  // 2) index_ >= 0 && result->IsJSObject():
+  //    the result is the JSObject arguments object, the index is the parameter
+  //    index, i.e., key into the arguments object, and the property exists.
+  //    attributes != ABSENT.
+  //
+  // 3) index_ < 0 && result->IsJSObject():
+  //    the result is the JSObject extension context or the global object,
+  //    and the name is the property name, and the property exists.
+  //    attributes != ABSENT.
+  //
+  // 4) index_ < 0 && result.is_null():
+  //    there was no context found with the corresponding property.
+  //    attributes == ABSENT.
+  Handle<Object> Lookup(Handle<String> name, ContextLookupFlags flags,
+                        int* index_, PropertyAttributes* attributes);
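+  //
+  // Illustrative caller sketch (comments only; 'context' and 'name' are
+  // assumed handles, not part of this interface):
+  //
+  //   int index;
+  //   PropertyAttributes attributes;
+  //   Handle<Object> holder =
+  //       context->Lookup(name, FOLLOW_CHAINS, &index, &attributes);
+  //   if (holder.is_null()) {
+  //     // case 4: nothing found, attributes == ABSENT
+  //   } else if (index >= 0 && holder->IsContext()) {
+  //     // case 1: context slot 'index' in the returned context
+  //   } else if (index >= 0) {
+  //     // case 2: parameter 'index' in the arguments object
+  //   } else {
+  //     // case 3: named property on an extension or global object
+  //   }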
+
+  // Determine if a local variable with the given name exists in a
+  // context.  Do not consider context extension objects.  This is
+  // used for compiling code using eval.  If the context surrounding
+  // the eval call does not have a local variable with this name and
+  // does not contain a with statement, the property is global unless
+  // it is shadowed by a property in an extension object introduced by
+  // eval.
+  bool GlobalIfNotShadowedByEval(Handle<String> name);
+
+  // Code generation support.
+  static int SlotOffset(int index) {
+    return kHeaderSize + index * kPointerSize - kHeapObjectTag;
+  }
+
+ private:
+  // Unchecked access to the slots.
+  Object* unchecked_previous() { return get(PREVIOUS_INDEX); }
+  Object* unchecked_extension() { return get(EXTENSION_INDEX); }
+
+#ifdef DEBUG
+  // Bootstrapping-aware type checks.
+  static bool IsBootstrappingOrContext(Object* object);
+  static bool IsBootstrappingOrGlobalObject(Object* object);
+#endif
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_CONTEXTS_H_
diff --git a/src/conversions-inl.h b/src/conversions-inl.h
new file mode 100644
index 0000000..8c875d7
--- /dev/null
+++ b/src/conversions-inl.h
@@ -0,0 +1,95 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CONVERSIONS_INL_H_
+#define V8_CONVERSIONS_INL_H_
+
+#include <math.h>
+#include <float.h>         // required for DBL_MAX and on Win32 for finite()
+#include <stdarg.h>
+
+// ----------------------------------------------------------------------------
+// Extra POSIX/ANSI functions for Win32/MSVC.
+
+#include "conversions.h"
+#include "platform.h"
+
+namespace v8 {
+namespace internal {
+
+// The fast double-to-int conversion routine does not guarantee
+// rounding towards zero.
+static inline int FastD2I(double x) {
+#ifdef __USE_ISOC99
+  // The ISO C99 standard defines the lrint() function which rounds a
+  // double to an integer according to the current rounding direction.
+  return lrint(x);
+#else
+  // This is incredibly slow on Intel x86. The reason is that rounding
+  // towards zero is implied by the C standard. This means that the
+  // status register of the FPU has to be changed with the 'fldcw'
+  // instruction. This completely stalls the pipeline and takes many
+  // hundreds of clock cycles.
+  return static_cast<int>(x);
+#endif
+}
+
+
+static inline double DoubleToInteger(double x) {
+  if (isnan(x)) return 0;
+  if (!isfinite(x) || x == 0) return x;
+  return (x >= 0) ? floor(x) : ceil(x);
+}
+
+
+int32_t NumberToInt32(Object* number) {
+  if (number->IsSmi()) return Smi::cast(number)->value();
+  return DoubleToInt32(number->Number());
+}
+
+
+uint32_t NumberToUint32(Object* number) {
+  if (number->IsSmi()) return Smi::cast(number)->value();
+  return DoubleToUint32(number->Number());
+}
+
+
+int32_t DoubleToInt32(double x) {
+  int32_t i = FastD2I(x);
+  if (FastI2D(i) == x) return i;
+  static const double two32 = 4294967296.0;
+  static const double two31 = 2147483648.0;
+  if (!isfinite(x) || x == 0) return 0;
+  if (x < 0 || x >= two32) x = fmod(x, two32);
+  x = (x >= 0) ? floor(x) : ceil(x) + two32;
+  return (int32_t) ((x >= two31) ? x - two32 : x);
+}
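+
+
+// Illustrative values (comments only) for the ECMA-262 ToInt32 wrap-around
+// implemented above:
+//   DoubleToInt32(3.0)           == 3            (fast path)
+//   DoubleToInt32(-1.5)          == -1           (truncation towards zero)
+//   DoubleToInt32(4294967299.0)  == 3            (reduced modulo 2^32)
+//   DoubleToInt32(3000000000.0)  == -1294967296  (mapped into [-2^31, 2^31))
+//   DoubleToInt32(NaN), DoubleToInt32(+/-Infinity) == 0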
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_CONVERSIONS_INL_H_
diff --git a/src/conversions.cc b/src/conversions.cc
new file mode 100644
index 0000000..2a3db7b
--- /dev/null
+++ b/src/conversions.cc
@@ -0,0 +1,708 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdarg.h>
+
+#include "v8.h"
+
+#include "conversions-inl.h"
+#include "factory.h"
+#include "scanner.h"
+
+namespace v8 {
+namespace internal {
+
+int HexValue(uc32 c) {
+  if ('0' <= c && c <= '9')
+    return c - '0';
+  if ('a' <= c && c <= 'f')
+    return c - 'a' + 10;
+  if ('A' <= c && c <= 'F')
+    return c - 'A' + 10;
+  return -1;
+}
+
+
+// Provide a common interface for getting a character at a certain
+// index from a char* or a String object.
+static inline int GetChar(const char* str, int index) {
+  ASSERT(index >= 0 && index < static_cast<int>(strlen(str)));
+  return str[index];
+}
+
+
+static inline int GetChar(String* str, int index) {
+  return str->Get(index);
+}
+
+
+static inline int GetLength(const char* str) {
+  return strlen(str);
+}
+
+
+static inline int GetLength(String* str) {
+  return str->length();
+}
+
+
+static inline const char* GetCString(const char* str, int index) {
+  return str + index;
+}
+
+
+static inline const char* GetCString(String* str, int index) {
+  int length = str->length();
+  char* result = NewArray<char>(length + 1);
+  for (int i = index; i < length; i++) {
+    uc16 c = str->Get(i);
+    if (c <= 127) {
+      result[i - index] = static_cast<char>(c);
+    } else {
+      result[i - index] = 127;  // Force number parsing to fail.
+    }
+  }
+  result[length - index] = '\0';
+  return result;
+}
+
+
+static inline void ReleaseCString(const char* original, const char* str) {
+}
+
+
+static inline void ReleaseCString(String* original, const char* str) {
+  DeleteArray(const_cast<char *>(str));
+}
+
+
+static inline bool IsSpace(const char* str, int index) {
+  ASSERT(index >= 0 && index < static_cast<int>(strlen(str)));
+  return Scanner::kIsWhiteSpace.get(str[index]);
+}
+
+
+static inline bool IsSpace(String* str, int index) {
+  return Scanner::kIsWhiteSpace.get(str->Get(index));
+}
+
+
+static inline bool SubStringEquals(const char* str,
+                                   int index,
+                                   const char* other) {
+  // strncmp returns 0 when the strings match.
+  return strncmp(str + index, other, strlen(other)) == 0;
+}
+
+
+static inline bool SubStringEquals(String* str, int index, const char* other) {
+  HandleScope scope;
+  int str_length = str->length();
+  int other_length = strlen(other);
+  int end = index + other_length < str_length ?
+            index + other_length :
+            str_length;
+  Handle<String> slice =
+      Factory::NewStringSlice(Handle<String>(str), index, end);
+  return slice->IsEqualTo(Vector<const char>(other, other_length));
+}
+
+
+// Check if a string should be parsed as an octal number.  The string
+// can be either a char* or a String*.
+template<class S>
+static bool ShouldParseOctal(S* s, int i) {
+  int index = i;
+  int len = GetLength(s);
+  if (index < len && GetChar(s, index) != '0') return false;
+
+  // If the first real character (following '0') is not an octal
+  // digit, bail out early. This also takes care of numbers of the
+  // forms 0.xxx and 0exxx by not allowing the first 0 to be
+  // interpreted as an octal.
+  index++;
+  if (index < len) {
+    int d = GetChar(s, index) - '0';
+    if (d < 0 || d > 7) return false;
+  } else {
+    return false;
+  }
+
+  // Traverse all digits (including the first). If there is an octal
+  // prefix which is not a part of a longer decimal prefix, we return
+  // true. Otherwise, false is returned.
+  while (index < len) {
+    int d = GetChar(s, index++) - '0';
+    if (d == 8 || d == 9) return false;
+    if (d <  0 || d >  7) return true;
+  }
+  return true;
+}
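+
+
+// A few illustrative inputs for the check above (comments only):
+//   "077"   -> true   (leading 0 followed by octal digits only)
+//   "089"   -> false  (an 8 or 9 makes it a decimal number)
+//   "0.25"  -> false  ('.' is not an octal digit, so the 0 is not octal)
+//   "123"   -> false  (no leading 0)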
+
+
+extern "C" double gay_strtod(const char* s00, const char** se);
+
+
+// Parse an int from a string starting a given index and in a given
+// radix.  The string can be either a char* or a String*.
+template <class S>
+static int InternalStringToInt(S* s, int i, int radix, double* value) {
+  int len = GetLength(s);
+
+  // Set up limits for computing the value.
+  ASSERT(2 <= radix && radix <= 36);
+  int lim_0 = '0' + (radix < 10 ? radix : 10);
+  int lim_a = 'a' + (radix - 10);
+  int lim_A = 'A' + (radix - 10);
+
+  // NOTE: The code for computing the value may seem a bit complex at
+  // first glance. It is structured to use 32-bit multiply-and-add
+  // loops as long as possible to avoid losing precision.
+
+  double v = 0.0;
+  int j;
+  for (j = i; j < len;) {
+    // Parse the longest part of the string starting at index j
+    // possible while keeping the multiplier, and thus the part
+    // itself, within 32 bits.
+    uint32_t part = 0, multiplier = 1;
+    int k;
+    for (k = j; k < len; k++) {
+      int c = GetChar(s, k);
+      if (c >= '0' && c < lim_0) {
+        c = c - '0';
+      } else if (c >= 'a' && c < lim_a) {
+        c = c - 'a' + 10;
+      } else if (c >= 'A' && c < lim_A) {
+        c = c - 'A' + 10;
+      } else {
+        break;
+      }
+
+      // Update the value of the part as long as the multiplier fits
+      // in 32 bits. When we can't guarantee that the next iteration
+      // will not overflow the multiplier, we stop parsing the part
+      // by leaving the loop.
+      static const uint32_t kMaximumMultiplier = 0xffffffffU / 36;
+      uint32_t m = multiplier * radix;
+      if (m > kMaximumMultiplier) break;
+      part = part * radix + c;
+      multiplier = m;
+      ASSERT(multiplier > part);
+    }
+
+    // Compute the number of part digits. If no digits were parsed,
+    // we're done parsing the entire string.
+    int digits = k - j;
+    if (digits == 0) break;
+
+    // Update the value and skip the part in the string.
+    ASSERT(multiplier ==
+           pow(static_cast<double>(radix), static_cast<double>(digits)));
+    v = v * multiplier + part;
+    j = k;
+  }
+
+  // If the resulting value is larger than 2^53 the value does not fit
+  // in the mantissa of the double and there is a loss of precision.
+  // When the value is larger than 2^53 the rounding depends on the
+  // code generation.  If the code generator spills the double value
+  // it uses 64 bits and if it does not it uses 80 bits.
+  //
+  // If there is a potential for overflow we resort to strtod for
+  // radix 10 numbers to get higher precision.  For numbers in another
+  // radix we live with the loss of precision.
+  static const double kPreciseConversionLimit = 9007199254740992.0;
+  if (radix == 10 && v > kPreciseConversionLimit) {
+    const char* cstr = GetCString(s, i);
+    const char* end;
+    v = gay_strtod(cstr, &end);
+    ReleaseCString(s, cstr);
+  }
+
+  *value = v;
+  return j;
+}
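+
+
+// Worked example for the chunking above (comments only): with radix 10 and
+// kMaximumMultiplier as defined, the inner loop consumes at most eight
+// digits per part, so "1234567890123" is accumulated in two passes:
+//   v = (0 * 1e8 + 12345678) * 1e5 + 90123  -- parts of 8 and 5 digits.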
+
+
+int StringToInt(String* str, int index, int radix, double* value) {
+  return InternalStringToInt(str, index, radix, value);
+}
+
+
+int StringToInt(const char* str, int index, int radix, double* value) {
+  return InternalStringToInt(const_cast<char*>(str), index, radix, value);
+}
+
+
+static const double JUNK_STRING_VALUE = OS::nan_value();
+
+
+// Convert a string to a double value.  The string can be either a
+// char* or a String*.
+template<class S>
+static double InternalStringToDouble(S* str,
+                                     int flags,
+                                     double empty_string_val) {
+  double result = 0.0;
+  int index = 0;
+
+  int len = GetLength(str);
+
+  // Skip leading spaces.
+  while ((index < len) && IsSpace(str, index)) index++;
+
+  // Is the string empty?
+  if (index >= len) return empty_string_val;
+
+  // Get the first character.
+  uint16_t first = GetChar(str, index);
+
+  // Numbers can only start with '-', '+', '.', 'I' (Infinity), or a digit.
+  if (first != '-' && first != '+' && first != '.' && first != 'I' &&
+      (first > '9' || first < '0')) {
+    return JUNK_STRING_VALUE;
+  }
+
+  // Compute sign of result based on first character.
+  int sign = 1;
+  if (first == '-') {
+    sign = -1;
+    index++;
+    // A string containing only a '-' is a junk string.
+    if (index == len) return JUNK_STRING_VALUE;
+  }
+
+  // do we have a hex number?
+  // (since the string is 0-terminated, it's ok to look one char beyond the end)
+  if ((flags & ALLOW_HEX) != 0 &&
+      (index + 1) < len &&
+      GetChar(str, index) == '0' &&
+      (GetChar(str, index + 1) == 'x' || GetChar(str, index + 1) == 'X')) {
+    index += 2;
+    index = StringToInt(str, index, 16, &result);
+  } else if ((flags & ALLOW_OCTALS) != 0 && ShouldParseOctal(str, index)) {
+    // NOTE: We optimistically try to parse the number as an octal (if
+    // we're allowed to), even though this is not as dictated by
+    // ECMA-262. The reason for doing this is compatibility with IE and
+    // Firefox.
+    index = StringToInt(str, index, 8, &result);
+  } else {
+    const char* cstr = GetCString(str, index);
+    const char* end;
+    // Optimistically parse the number and then, if that fails,
+    // check if it might have been {+,-,}Infinity.
+    result = gay_strtod(cstr, &end);
+    ReleaseCString(str, cstr);
+    if (result != 0.0 || end != cstr) {
+      // It appears that strtod worked
+      index += end - cstr;
+    } else {
+      // Check for {+,-,}Infinity
+      bool is_negative = (GetChar(str, index) == '-');
+      if (GetChar(str, index) == '+' || GetChar(str, index) == '-')
+        index++;
+      if (!SubStringEquals(str, index, "Infinity"))
+        return JUNK_STRING_VALUE;
+      result = is_negative ? -V8_INFINITY : V8_INFINITY;
+      index += 8;
+    }
+  }
+
+  if ((flags & ALLOW_TRAILING_JUNK) == 0) {
+    // skip trailing spaces
+    while ((index < len) && IsSpace(str, index)) index++;
+    // string ending with junk?
+    if (index < len) return JUNK_STRING_VALUE;
+  }
+
+  return sign * result;
+}
+
+
+double StringToDouble(String* str, int flags, double empty_string_val) {
+  return InternalStringToDouble(str, flags, empty_string_val);
+}
+
+
+double StringToDouble(const char* str, int flags, double empty_string_val) {
+  return InternalStringToDouble(str, flags, empty_string_val);
+}
+
+
+extern "C" char* dtoa(double d, int mode, int ndigits,
+                      int* decpt, int* sign, char** rve);
+
+extern "C" void freedtoa(char* s);
+
+const char* DoubleToCString(double v, Vector<char> buffer) {
+  StringBuilder builder(buffer.start(), buffer.length());
+
+  switch (fpclassify(v)) {
+    case FP_NAN:
+      builder.AddString("NaN");
+      break;
+
+    case FP_INFINITE:
+      if (v < 0.0) {
+        builder.AddString("-Infinity");
+      } else {
+        builder.AddString("Infinity");
+      }
+      break;
+
+    case FP_ZERO:
+      builder.AddCharacter('0');
+      break;
+
+    default: {
+      int decimal_point;
+      int sign;
+
+      char* decimal_rep = dtoa(v, 0, 0, &decimal_point, &sign, NULL);
+      int length = strlen(decimal_rep);
+
+      if (sign) builder.AddCharacter('-');
+
+      if (length <= decimal_point && decimal_point <= 21) {
+        // ECMA-262 section 9.8.1 step 6.
+        builder.AddString(decimal_rep);
+        builder.AddPadding('0', decimal_point - length);
+
+      } else if (0 < decimal_point && decimal_point <= 21) {
+        // ECMA-262 section 9.8.1 step 7.
+        builder.AddSubstring(decimal_rep, decimal_point);
+        builder.AddCharacter('.');
+        builder.AddString(decimal_rep + decimal_point);
+
+      } else if (decimal_point <= 0 && decimal_point > -6) {
+        // ECMA-262 section 9.8.1 step 8.
+        builder.AddString("0.");
+        builder.AddPadding('0', -decimal_point);
+        builder.AddString(decimal_rep);
+
+      } else {
+        // ECMA-262 section 9.8.1 step 9 and 10 combined.
+        builder.AddCharacter(decimal_rep[0]);
+        if (length != 1) {
+          builder.AddCharacter('.');
+          builder.AddString(decimal_rep + 1);
+        }
+        builder.AddCharacter('e');
+        builder.AddCharacter((decimal_point >= 0) ? '+' : '-');
+        int exponent = decimal_point - 1;
+        if (exponent < 0) exponent = -exponent;
+        builder.AddFormatted("%d", exponent);
+      }
+
+      freedtoa(decimal_rep);
+    }
+  }
+  return builder.Finalize();
+}
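+
+
+// Illustrative outputs for the ECMA-262 9.8.1 branches above (comments only):
+//   DoubleToCString(123.0, buf)      -> "123"        (step 6)
+//   DoubleToCString(123.45, buf)     -> "123.45"     (step 7)
+//   DoubleToCString(0.0000123, buf)  -> "0.0000123"  (step 8)
+//   DoubleToCString(1.23e21, buf)    -> "1.23e+21"   (steps 9 and 10)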
+
+
+const char* IntToCString(int n, Vector<char> buffer) {
+  bool negative = false;
+  if (n < 0) {
+    // We must not negate the most negative int.
+    if (n == kMinInt) return DoubleToCString(n, buffer);
+    negative = true;
+    n = -n;
+  }
+  // Build the string backwards from the least significant digit.
+  int i = buffer.length();
+  buffer[--i] = '\0';
+  do {
+    buffer[--i] = '0' + (n % 10);
+    n /= 10;
+  } while (n);
+  if (negative) buffer[--i] = '-';
+  return buffer.start() + i;
+}
+
+
+char* DoubleToFixedCString(double value, int f) {
+  ASSERT(f >= 0);
+
+  bool negative = false;
+  double abs_value = value;
+  if (value < 0) {
+    abs_value = -value;
+    negative = true;
+  }
+
+  if (abs_value >= 1e21) {
+    char arr[100];
+    Vector<char> buffer(arr, ARRAY_SIZE(arr));
+    return StrDup(DoubleToCString(value, buffer));
+  }
+
+  // Find a sufficiently precise decimal representation of n.
+  int decimal_point;
+  int sign;
+  char* decimal_rep = dtoa(abs_value, 3, f, &decimal_point, &sign, NULL);
+  int decimal_rep_length = strlen(decimal_rep);
+
+  // Create a representation that is padded with zeros if needed.
+  int zero_prefix_length = 0;
+  int zero_postfix_length = 0;
+
+  if (decimal_point <= 0) {
+    zero_prefix_length = -decimal_point + 1;
+    decimal_point = 1;
+  }
+
+  if (zero_prefix_length + decimal_rep_length < decimal_point + f) {
+    zero_postfix_length = decimal_point + f - decimal_rep_length -
+                          zero_prefix_length;
+  }
+
+  unsigned rep_length =
+      zero_prefix_length + decimal_rep_length + zero_postfix_length;
+  StringBuilder rep_builder(rep_length + 1);
+  rep_builder.AddPadding('0', zero_prefix_length);
+  rep_builder.AddString(decimal_rep);
+  rep_builder.AddPadding('0', zero_postfix_length);
+  char* rep = rep_builder.Finalize();
+  freedtoa(decimal_rep);
+
+  // Create the result string by appending a minus and putting in a
+  // decimal point if needed.
+  unsigned result_size = decimal_point + f + 2;
+  StringBuilder builder(result_size + 1);
+  if (negative) builder.AddCharacter('-');
+  builder.AddSubstring(rep, decimal_point);
+  if (f > 0) {
+    builder.AddCharacter('.');
+    builder.AddSubstring(rep + decimal_point, f);
+  }
+  DeleteArray(rep);
+  return builder.Finalize();
+}
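+
+
+// Illustrative results for the fixed-notation conversion above
+// (comments only):
+//   DoubleToFixedCString(3.14159, 2) -> "3.14"
+//   DoubleToFixedCString(0.5, 3)     -> "0.500"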
+
+
+static char* CreateExponentialRepresentation(char* decimal_rep,
+                                             int exponent,
+                                             bool negative,
+                                             int significant_digits) {
+  bool negative_exponent = false;
+  if (exponent < 0) {
+    negative_exponent = true;
+    exponent = -exponent;
+  }
+
+  // Leave room in the result for appending a minus, for a period, the
+  // letter 'e', a minus or a plus depending on the exponent, and a
+  // three digit exponent.
+  unsigned result_size = significant_digits + 7;
+  StringBuilder builder(result_size + 1);
+
+  if (negative) builder.AddCharacter('-');
+  builder.AddCharacter(decimal_rep[0]);
+  if (significant_digits != 1) {
+    builder.AddCharacter('.');
+    builder.AddString(decimal_rep + 1);
+    builder.AddPadding('0', significant_digits - strlen(decimal_rep));
+  }
+
+  builder.AddCharacter('e');
+  builder.AddCharacter(negative_exponent ? '-' : '+');
+  builder.AddFormatted("%d", exponent);
+  return builder.Finalize();
+}
+
+
+char* DoubleToExponentialCString(double value, int f) {
+  // f might be -1 to signal that f was undefined in JavaScript.
+  ASSERT(f >= -1 && f <= 20);
+
+  bool negative = false;
+  if (value < 0) {
+    value = -value;
+    negative = true;
+  }
+
+  // Find a sufficiently precise decimal representation of n.
+  int decimal_point;
+  int sign;
+  char* decimal_rep = NULL;
+  if (f == -1) {
+    decimal_rep = dtoa(value, 0, 0, &decimal_point, &sign, NULL);
+    f = strlen(decimal_rep) - 1;
+  } else {
+    decimal_rep = dtoa(value, 2, f + 1, &decimal_point, &sign, NULL);
+  }
+  int decimal_rep_length = strlen(decimal_rep);
+  ASSERT(decimal_rep_length > 0);
+  ASSERT(decimal_rep_length <= f + 1);
+  USE(decimal_rep_length);
+
+  int exponent = decimal_point - 1;
+  char* result =
+      CreateExponentialRepresentation(decimal_rep, exponent, negative, f+1);
+
+  freedtoa(decimal_rep);
+
+  return result;
+}
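+
+
+// Illustrative results for the exponential conversion above (comments only):
+//   DoubleToExponentialCString(123456.0, 2) -> "1.23e+5"
+//   DoubleToExponentialCString(0.5, -1)     -> "5e-1"   (f undefined in JS)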
+
+
+char* DoubleToPrecisionCString(double value, int p) {
+  ASSERT(p >= 1 && p <= 21);
+
+  bool negative = false;
+  if (value < 0) {
+    value = -value;
+    negative = true;
+  }
+
+  // Find a sufficiently precise decimal representation of n.
+  int decimal_point;
+  int sign;
+  char* decimal_rep = dtoa(value, 2, p, &decimal_point, &sign, NULL);
+  int decimal_rep_length = strlen(decimal_rep);
+  ASSERT(decimal_rep_length <= p);
+
+  int exponent = decimal_point - 1;
+
+  char* result = NULL;
+
+  if (exponent < -6 || exponent >= p) {
+    result =
+        CreateExponentialRepresentation(decimal_rep, exponent, negative, p);
+  } else {
+    // Use fixed notation.
+    //
+    // Leave room in the result for appending a minus, a period and in
+    // the case where decimal_point is not positive for a zero in
+    // front of the period.
+    unsigned result_size = (decimal_point <= 0)
+        ? -decimal_point + p + 3
+        : p + 2;
+    StringBuilder builder(result_size + 1);
+    if (negative) builder.AddCharacter('-');
+    if (decimal_point <= 0) {
+      builder.AddString("0.");
+      builder.AddPadding('0', -decimal_point);
+      builder.AddString(decimal_rep);
+      builder.AddPadding('0', p - decimal_rep_length);
+    } else {
+      const int m = Min(decimal_rep_length, decimal_point);
+      builder.AddSubstring(decimal_rep, m);
+      builder.AddPadding('0', decimal_point - decimal_rep_length);
+      if (decimal_point < p) {
+        builder.AddCharacter('.');
+        const int extra = negative ? 2 : 1;
+        if (decimal_rep_length > decimal_point) {
+          const int len = strlen(decimal_rep + decimal_point);
+          const int n = Min(len, p - (builder.position() - extra));
+          builder.AddSubstring(decimal_rep + decimal_point, n);
+        }
+        builder.AddPadding('0', extra + (p - builder.position()));
+      }
+    }
+    result = builder.Finalize();
+  }
+
+  freedtoa(decimal_rep);
+  return result;
+}
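+
+
+// Illustrative results for the precision conversion above (comments only):
+//   DoubleToPrecisionCString(123456.0, 3) -> "1.23e+5"  (exponent >= p)
+//   DoubleToPrecisionCString(0.000555, 3) -> "0.000555" (fixed notation)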
+
+
+char* DoubleToRadixCString(double value, int radix) {
+  ASSERT(radix >= 2 && radix <= 36);
+
+  // Character array used for conversion.
+  static const char chars[] = "0123456789abcdefghijklmnopqrstuvwxyz";
+
+  // Buffer for the integer part of the result. 1024 chars is enough
+  // for max integer value in radix 2.  We need room for a sign too.
+  static const int kBufferSize = 1100;
+  char integer_buffer[kBufferSize];
+  integer_buffer[kBufferSize - 1] = '\0';
+
+  // Buffer for the decimal part of the result.  We only generate up
+  // to kBufferSize - 1 chars for the decimal part.
+  char decimal_buffer[kBufferSize];
+  decimal_buffer[kBufferSize - 1] = '\0';
+
+  // Make sure the value is positive.
+  bool is_negative = value < 0.0;
+  if (is_negative) value = -value;
+
+  // Get the integer part and the decimal part.
+  double integer_part = floor(value);
+  double decimal_part = value - integer_part;
+
+  // Convert the integer part starting from the back.  Always generate
+  // at least one digit.
+  int integer_pos = kBufferSize - 2;
+  do {
+    integer_buffer[integer_pos--] =
+        chars[static_cast<int>(fmod(integer_part, radix))];
+    integer_part /= radix;
+  } while (integer_part >= 1.0);
+  // Sanity check.
+  ASSERT(integer_pos > 0);
+  // Add sign if needed.
+  if (is_negative) integer_buffer[integer_pos--] = '-';
+
+  // Convert the decimal part.  Repeatedly multiply by the radix to
+  // generate the next char.  Never generate more than kBufferSize - 1
+  // chars.
+  //
+  // TODO(1093998): We will often generate a full decimal_buffer of
+  // chars because hitting zero will often not happen.  The right
+  // solution would be to continue until the string representation can
+  // be read back and yield the original value.  To implement this
+  // efficiently, we probably have to modify dtoa.
+  int decimal_pos = 0;
+  while ((decimal_part > 0.0) && (decimal_pos < kBufferSize - 1)) {
+    decimal_part *= radix;
+    decimal_buffer[decimal_pos++] =
+        chars[static_cast<int>(floor(decimal_part))];
+    decimal_part -= floor(decimal_part);
+  }
+  decimal_buffer[decimal_pos] = '\0';
+
+  // Compute the result size.
+  int integer_part_size = kBufferSize - 2 - integer_pos;
+  // Make room for zero termination.
+  unsigned result_size = integer_part_size + decimal_pos;
+  // If the number has a decimal part, leave room for the period.
+  if (decimal_pos > 0) result_size++;
+  // Allocate result and fill in the parts.
+  StringBuilder builder(result_size + 1);
+  builder.AddSubstring(integer_buffer + integer_pos + 1, integer_part_size);
+  if (decimal_pos > 0) builder.AddCharacter('.');
+  builder.AddSubstring(decimal_buffer, decimal_pos);
+  return builder.Finalize();
+}
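+
+
+// Illustrative results for the radix conversion above (comments only):
+//   DoubleToRadixCString(255.5, 16) -> "ff.8"
+//   DoubleToRadixCString(-10.0, 2)  -> "-1010"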
+
+
+} }  // namespace v8::internal
diff --git a/src/conversions.h b/src/conversions.h
new file mode 100644
index 0000000..b6589cb
--- /dev/null
+++ b/src/conversions.h
@@ -0,0 +1,117 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CONVERSIONS_H_
+#define V8_CONVERSIONS_H_
+
+namespace v8 {
+namespace internal {
+
+// The fast double-to-int conversion routine does not guarantee
+// rounding towards zero.
+// The result is unspecified if x is infinite or NaN, or if the rounded
+// integer value is outside the range of type int.
+static inline int FastD2I(double x);
+
+
+static inline double FastI2D(int x) {
+  // There is no rounding involved in converting an integer to a
+  // double, so this code should compile to a few instructions without
+  // any FPU pipeline stalls.
+  return static_cast<double>(x);
+}
+
+
+static inline double FastUI2D(unsigned x) {
+  // There is no rounding involved in converting an unsigned integer to a
+  // double, so this code should compile to a few instructions without
+  // any FPU pipeline stalls.
+  return static_cast<double>(x);
+}
+
+
+// This function should match the exact semantics of ECMA-262 9.4.
+static inline double DoubleToInteger(double x);
+
+
+// This function should match the exact semantics of ECMA-262 9.5.
+static inline int32_t DoubleToInt32(double x);
+
+
+// This function should match the exact semantics of ECMA-262 9.6.
+static inline uint32_t DoubleToUint32(double x) {
+  return static_cast<uint32_t>(DoubleToInt32(x));
+}
+
+
+// Returns the value (0 .. 15) of a hexadecimal character c.
+// If c is not a legal hexadecimal character, returns a value < 0.
+int HexValue(uc32 c);
+
+
+// Enumeration for allowing octals and ignoring junk when converting
+// strings to numbers.
+enum ConversionFlags {
+  NO_FLAGS = 0,
+  ALLOW_HEX = 1,
+  ALLOW_OCTALS = 2,
+  ALLOW_TRAILING_JUNK = 4
+};
+
+
+// Convert from Number object to C integer.
+static inline int32_t NumberToInt32(Object* number);
+static inline uint32_t NumberToUint32(Object* number);
+
+
+// Converts a string into a double value according to ECMA-262 9.3.1
+double StringToDouble(const char* str, int flags, double empty_string_val = 0);
+double StringToDouble(String* str, int flags, double empty_string_val = 0);
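+
+// Illustrative note (not part of the original change): the ConversionFlags
+// values above are bit flags intended to be or-ed together, so a call such
+// as StringToDouble("0x2A junk", ALLOW_HEX | ALLOW_TRAILING_JUNK) would be
+// expected to parse the hexadecimal prefix and ignore the trailing text.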
+
+// Converts a string into an integer.
+int StringToInt(String* str, int index, int radix, double* value);
+int StringToInt(const char* str, int index, int radix, double* value);
+
+// Converts a double to a string value according to ECMA-262 9.8.1.
+// The buffer should be large enough for any floating point number.
+// 100 characters is enough.
+const char* DoubleToCString(double value, Vector<char> buffer);
+
+// Convert an int to a null-terminated string. The returned string is
+// located inside the buffer, but not necessarily at the start.
+const char* IntToCString(int n, Vector<char> buffer);
+
+// Additional number to string conversions for the number type.
+// The caller is responsible for calling free on the returned pointer.
+char* DoubleToFixedCString(double value, int f);
+char* DoubleToExponentialCString(double value, int f);
+char* DoubleToPrecisionCString(double value, int f);
+char* DoubleToRadixCString(double value, int radix);
+
+} }  // namespace v8::internal
+
+#endif  // V8_CONVERSIONS_H_
diff --git a/src/counters.cc b/src/counters.cc
new file mode 100644
index 0000000..239a5f7
--- /dev/null
+++ b/src/counters.cc
@@ -0,0 +1,78 @@
+// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "counters.h"
+#include "platform.h"
+
+namespace v8 {
+namespace internal {
+
+CounterLookupCallback StatsTable::lookup_function_ = NULL;
+CreateHistogramCallback StatsTable::create_histogram_function_ = NULL;
+AddHistogramSampleCallback StatsTable::add_histogram_sample_function_ = NULL;
+
+// Start the timer.
+void StatsCounterTimer::Start() {
+  if (!counter_.Enabled())
+    return;
+  stop_time_ = 0;
+  start_time_ = OS::Ticks();
+}
+
+// Stop the timer and record the results.
+void StatsCounterTimer::Stop() {
+  if (!counter_.Enabled())
+    return;
+  stop_time_ = OS::Ticks();
+
+  // Compute the delta between start and stop, in milliseconds.
+  int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000;
+  counter_.Increment(milliseconds);
+}
+
+// Start the timer.
+void HistogramTimer::Start() {
+  if (GetHistogram() != NULL) {
+    stop_time_ = 0;
+    start_time_ = OS::Ticks();
+  }
+}
+
+// Stop the timer and record the results.
+void HistogramTimer::Stop() {
+  if (histogram_ != NULL) {
+    stop_time_ = OS::Ticks();
+
+    // Compute the delta between start and stop, in milliseconds.
+    int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000;
+    StatsTable::AddHistogramSample(histogram_, milliseconds);
+  }
+}
+
+} }  // namespace v8::internal
diff --git a/src/counters.h b/src/counters.h
new file mode 100644
index 0000000..5f4dca9
--- /dev/null
+++ b/src/counters.h
@@ -0,0 +1,239 @@
+// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_COUNTERS_H_
+#define V8_COUNTERS_H_
+
+namespace v8 {
+namespace internal {
+
+// StatsCounters is an interface for plugging into external
+// counters for monitoring.  Counters can be looked up and
+// manipulated by name.
+
+class StatsTable : public AllStatic {
+ public:
+  // Register an application-defined function where
+  // counters can be looked up.
+  static void SetCounterFunction(CounterLookupCallback f) {
+    lookup_function_ = f;
+  }
+
+  // Register an application-defined function to create
+  // a histogram for passing to the AddHistogramSample function
+  static void SetCreateHistogramFunction(CreateHistogramCallback f) {
+    create_histogram_function_ = f;
+  }
+
+  // Register an application-defined function to add a sample
+  // to a histogram created with CreateHistogram function
+  static void SetAddHistogramSampleFunction(AddHistogramSampleCallback f) {
+    add_histogram_sample_function_ = f;
+  }
+
+  static bool HasCounterFunction() {
+    return lookup_function_ != NULL;
+  }
+
+  // Lookup the location of a counter by name.  If the lookup
+  // is successful, returns a non-NULL pointer for writing the
+  // value of the counter.  Each thread calling this function
+  // may receive a different location to store its counter.
+  // The return value must not be cached and re-used across
+  // threads, although a single thread is free to cache it.
+  static int *FindLocation(const char* name) {
+    if (!lookup_function_) return NULL;
+    return lookup_function_(name);
+  }
+
+  // Create a histogram by name. If the create is successful,
+  // returns a non-NULL pointer for use with AddHistogramSample
+  // function. min and max define the expected minimum and maximum
+  // sample values. buckets is the maximum number of buckets
+  // that the samples will be grouped into.
+  static void* CreateHistogram(const char* name,
+                               int min,
+                               int max,
+                               size_t buckets) {
+    if (!create_histogram_function_) return NULL;
+    return create_histogram_function_(name, min, max, buckets);
+  }
+
+  // Add a sample to a histogram created with the CreateHistogram
+  // function.
+  static void AddHistogramSample(void* histogram, int sample) {
+    if (!add_histogram_sample_function_) return;
+    return add_histogram_sample_function_(histogram, sample);
+  }
+
+ private:
+  static CounterLookupCallback lookup_function_;
+  static CreateHistogramCallback create_histogram_function_;
+  static AddHistogramSampleCallback add_histogram_sample_function_;
+};
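+
+// Illustrative sketch (not part of the original change): an embedder would
+// typically register its callbacks once at startup, e.g.
+//   StatsTable::SetCounterFunction(&MyCounterLookup);
+//   int* loc = StatsTable::FindLocation("c:MyApp.SomeCounter");
+// where MyCounterLookup is a hypothetical application-defined
+// CounterLookupCallback.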
+
+// StatsCounters are dynamically created values which can be tracked in
+// the StatsTable.  They are designed to be lightweight to create and
+// easy to use.
+//
+// Internally, a counter represents a value in a row of a StatsTable.
+// The row has a 32-bit value for each process/thread in the table and also
+// a name (stored in the table metadata).  Since the storage location can be
+// thread-specific, this class cannot be shared across threads.
+//
+// This class is designed to be POD initialized.  It will be registered with
+// the counter system on first use.  For example:
+//   StatsCounter c = { "c:myctr", NULL, false };
+struct StatsCounter {
+  const char* name_;
+  int* ptr_;
+  bool lookup_done_;
+
+  // Sets the counter to a specific value.
+  void Set(int value) {
+    int* loc = GetPtr();
+    if (loc) *loc = value;
+  }
+
+  // Increments the counter.
+  void Increment() {
+    int* loc = GetPtr();
+    if (loc) (*loc)++;
+  }
+
+  void Increment(int value) {
+    int* loc = GetPtr();
+    if (loc)
+      (*loc) += value;
+  }
+
+  // Decrements the counter.
+  void Decrement() {
+    int* loc = GetPtr();
+    if (loc) (*loc)--;
+  }
+
+  void Decrement(int value) {
+    int* loc = GetPtr();
+    if (loc) (*loc) -= value;
+  }
+
+  // Is this counter enabled?
+  // Returns false if table is full.
+  bool Enabled() {
+    return GetPtr() != NULL;
+  }
+
+  // Get the internal pointer to the counter. This is used
+  // by the code generator to emit code that manipulates a
+  // given counter without calling the runtime system.
+  int* GetInternalPointer() {
+    int* loc = GetPtr();
+    ASSERT(loc != NULL);
+    return loc;
+  }
+
+ protected:
+  // Returns the cached address of this counter location.
+  int* GetPtr() {
+    if (lookup_done_)
+      return ptr_;
+    lookup_done_ = true;
+    ptr_ = StatsTable::FindLocation(name_);
+    return ptr_;
+  }
+};
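+
+// Illustrative usage (not part of the original change), with a counter name
+// of the embedder's choosing:
+//   StatsCounter compile_count = { "c:V8.Example", NULL, false };
+//   compile_count.Increment();   // first use triggers FindLocation()
+//   if (compile_count.Enabled()) compile_count.Set(0);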
+
+// StatsCounterTimer t = { { "t:foo", NULL, false }, 0, 0 };
+struct StatsCounterTimer {
+  StatsCounter counter_;
+
+  int64_t start_time_;
+  int64_t stop_time_;
+
+  // Start the timer.
+  void Start();
+
+  // Stop the timer and record the results.
+  void Stop();
+
+  // Returns true if the timer is running.
+  bool Running() {
+    return counter_.Enabled() && start_time_ != 0 && stop_time_ == 0;
+  }
+};
+
+// A HistogramTimer allows distributions of results to be created, e.g.:
+// HistogramTimer t = { "foo", NULL, false, 0, 0 };
+struct HistogramTimer {
+  const char* name_;
+  void* histogram_;
+  bool lookup_done_;
+
+  int64_t start_time_;
+  int64_t stop_time_;
+
+  // Start the timer.
+  void Start();
+
+  // Stop the timer and record the results.
+  void Stop();
+
+  // Returns true if the timer is running.
+  bool Running() {
+    return (histogram_ != NULL) && (start_time_ != 0) && (stop_time_ == 0);
+  }
+
+ protected:
+  // Returns the handle to the histogram.
+  void* GetHistogram() {
+    if (!lookup_done_) {
+      lookup_done_ = true;
+      histogram_ = StatsTable::CreateHistogram(name_, 0, 10000, 50);
+    }
+    return histogram_;
+  }
+};
+
+// Helper class for scoping a HistogramTimer.
+class HistogramTimerScope BASE_EMBEDDED {
+ public:
+  explicit HistogramTimerScope(HistogramTimer* timer)
+      : timer_(timer) {
+    timer_->Start();
+  }
+  ~HistogramTimerScope() {
+    timer_->Stop();
+  }
+ private:
+  HistogramTimer* timer_;
+};
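+
+// Illustrative usage (not part of the original change): the scope brackets a
+// timed region with Start()/Stop(), e.g.
+//   HistogramTimer parse_timer = { "V8.Example", NULL, false, 0, 0 };
+//   { HistogramTimerScope timer_scope(&parse_timer); /* timed work */ }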
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_COUNTERS_H_
diff --git a/src/cpu.h b/src/cpu.h
new file mode 100644
index 0000000..ddc402f
--- /dev/null
+++ b/src/cpu.h
@@ -0,0 +1,65 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This module contains the architecture-specific code. This makes the rest of
+// the code less dependent on differences between different processor
+// architectures.
+// The classes have the same definition for all architectures. The
+// implementation for a particular architecture is put in cpu_<arch>.cc.
+// The build system then uses the implementation for the target architecture.
+//
+
+#ifndef V8_CPU_H_
+#define V8_CPU_H_
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// CPU
+//
+// This class has static methods for the architecture specific functions. Add
+// methods here to cope with differences between the supported architectures.
+//
+// For each architecture the file cpu_<arch>.cc contains the implementation of
+// these functions.
+
+class CPU : public AllStatic {
+ public:
+  // Initializes the cpu architecture support. Called once at VM startup.
+  static void Setup();
+
+  // Flush instruction cache.
+  static void FlushICache(void* start, size_t size);
+
+  // Try to activate a system level debugger.
+  static void DebugBreak();
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_CPU_H_
diff --git a/src/d8-debug.cc b/src/d8-debug.cc
new file mode 100644
index 0000000..4e0243a
--- /dev/null
+++ b/src/d8-debug.cc
@@ -0,0 +1,345 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "d8.h"
+#include "d8-debug.h"
+#include "platform.h"
+#include "debug-agent.h"
+
+
+namespace v8 {
+
+
+void HandleDebugEvent(DebugEvent event,
+                      Handle<Object> exec_state,
+                      Handle<Object> event_data,
+                      Handle<Value> data) {
+  HandleScope scope;
+
+  // Check for handled event.
+  if (event != Break && event != Exception && event != AfterCompile) {
+    return;
+  }
+
+  TryCatch try_catch;
+
+  // Get the toJSONProtocol function on the event and get the JSON format.
+  Local<String> to_json_fun_name = String::New("toJSONProtocol");
+  Local<Function> to_json_fun =
+      Function::Cast(*event_data->Get(to_json_fun_name));
+  Local<Value> event_json = to_json_fun->Call(event_data, 0, NULL);
+  if (try_catch.HasCaught()) {
+    Shell::ReportException(&try_catch);
+    return;
+  }
+
+  // Print the event details.
+  Handle<Object> details =
+      Shell::DebugMessageDetails(Handle<String>::Cast(event_json));
+  if (try_catch.HasCaught()) {
+    Shell::ReportException(&try_catch);
+    return;
+  }
+  String::Utf8Value str(details->Get(String::New("text")));
+  if (str.length() == 0) {
+    // Empty string is used to signal not to process this event.
+    return;
+  }
+  printf("%s\n", *str);
+
+  // Get the debug command processor.
+  Local<String> fun_name = String::New("debugCommandProcessor");
+  Local<Function> fun = Function::Cast(*exec_state->Get(fun_name));
+  Local<Object> cmd_processor =
+      Object::Cast(*fun->Call(exec_state, 0, NULL));
+  if (try_catch.HasCaught()) {
+    Shell::ReportException(&try_catch);
+    return;
+  }
+
+  static const int kBufferSize = 256;
+  bool running = false;
+  while (!running) {
+    char command[kBufferSize];
+    printf("dbg> ");
+    char* str = fgets(command, kBufferSize, stdin);
+    if (str == NULL) break;
+
+    // Ignore empty commands.
+    if (strlen(command) == 0) continue;
+
+    TryCatch try_catch;
+
+    // Convert the debugger command to a JSON debugger request.
+    Handle<Value> request =
+        Shell::DebugCommandToJSONRequest(String::New(command));
+    if (try_catch.HasCaught()) {
+      Shell::ReportException(&try_catch);
+      continue;
+    }
+
+    // If undefined is returned the command was handled internally and there is
+    // no JSON to send.
+    if (request->IsUndefined()) {
+      continue;
+    }
+
+    Handle<String> fun_name;
+    Handle<Function> fun;
+    // All the functions used below take one argument.
+    static const int kArgc = 1;
+    Handle<Value> args[kArgc];
+
+    // Invoke the JavaScript to convert the debug command line to a JSON
+    // request, invoke the JSON request and convert the JSON response to a text
+    // representation.
+    fun_name = String::New("processDebugRequest");
+    fun = Handle<Function>::Cast(cmd_processor->Get(fun_name));
+    args[0] = request;
+    Handle<Value> response_val = fun->Call(cmd_processor, kArgc, args);
+    if (try_catch.HasCaught()) {
+      Shell::ReportException(&try_catch);
+      continue;
+    }
+    Handle<String> response = Handle<String>::Cast(response_val);
+
+    // Convert the debugger response into text details and the running state.
+    Handle<Object> response_details = Shell::DebugMessageDetails(response);
+    if (try_catch.HasCaught()) {
+      Shell::ReportException(&try_catch);
+      continue;
+    }
+    String::Utf8Value text_str(response_details->Get(String::New("text")));
+    if (text_str.length() > 0) {
+      printf("%s\n", *text_str);
+    }
+    running =
+        response_details->Get(String::New("running"))->ToBoolean()->Value();
+  }
+}
+
+
+void RunRemoteDebugger(int port) {
+  RemoteDebugger debugger(port);
+  debugger.Run();
+}
+
+
+void RemoteDebugger::Run() {
+  bool ok;
+
+  // Make sure that socket support is initialized.
+  ok = i::Socket::Setup();
+  if (!ok) {
+    printf("Unable to initialize socket support %d\n", i::Socket::LastError());
+    return;
+  }
+
+  // Connect to the debugger agent.
+  conn_ = i::OS::CreateSocket();
+  static const int kPortStrSize = 6;
+  char port_str[kPortStrSize];
+  i::OS::SNPrintF(i::Vector<char>(port_str, kPortStrSize), "%d", port_);
+  ok = conn_->Connect("localhost", port_str);
+  if (!ok) {
+    printf("Unable to connect to debug agent %d\n", i::Socket::LastError());
+    return;
+  }
+
+  // Start the receiver thread.
+  ReceiverThread receiver(this);
+  receiver.Start();
+
+  // Start the keyboard thread.
+  KeyboardThread keyboard(this);
+  keyboard.Start();
+
+  // Process events received from debugged VM and from the keyboard.
+  bool terminate = false;
+  while (!terminate) {
+    event_available_->Wait();
+    RemoteDebuggerEvent* event = GetEvent();
+    switch (event->type()) {
+      case RemoteDebuggerEvent::kMessage:
+        HandleMessageReceived(event->data());
+        break;
+      case RemoteDebuggerEvent::kKeyboard:
+        HandleKeyboardCommand(event->data());
+        break;
+      case RemoteDebuggerEvent::kDisconnect:
+        terminate = true;
+        break;
+
+      default:
+        UNREACHABLE();
+    }
+    delete event;
+  }
+
+  // Wait for the receiver thread to end.
+  receiver.Join();
+}
+
+
+void RemoteDebugger::MessageReceived(i::SmartPointer<char> message) {
+  RemoteDebuggerEvent* event =
+      new RemoteDebuggerEvent(RemoteDebuggerEvent::kMessage, message);
+  AddEvent(event);
+}
+
+
+void RemoteDebugger::KeyboardCommand(i::SmartPointer<char> command) {
+  RemoteDebuggerEvent* event =
+      new RemoteDebuggerEvent(RemoteDebuggerEvent::kKeyboard, command);
+  AddEvent(event);
+}
+
+
+void RemoteDebugger::ConnectionClosed() {
+  RemoteDebuggerEvent* event =
+      new RemoteDebuggerEvent(RemoteDebuggerEvent::kDisconnect,
+                              i::SmartPointer<char>());
+  AddEvent(event);
+}
+
+
+void RemoteDebugger::AddEvent(RemoteDebuggerEvent* event) {
+  i::ScopedLock lock(event_access_);
+  if (head_ == NULL) {
+    ASSERT(tail_ == NULL);
+    head_ = event;
+    tail_ = event;
+  } else {
+    ASSERT(tail_ != NULL);
+    tail_->set_next(event);
+    tail_ = event;
+  }
+  event_available_->Signal();
+}
+
+
+RemoteDebuggerEvent* RemoteDebugger::GetEvent() {
+  i::ScopedLock lock(event_access_);
+  ASSERT(head_ != NULL);
+  RemoteDebuggerEvent* result = head_;
+  head_ = head_->next();
+  if (head_ == NULL) {
+    ASSERT(tail_ == result);
+    tail_ = NULL;
+  }
+  return result;
+}
+
+
+void RemoteDebugger::HandleMessageReceived(char* message) {
+  HandleScope scope;
+
+  // Print the event details.
+  TryCatch try_catch;
+  Handle<Object> details =
+      Shell::DebugMessageDetails(Handle<String>::Cast(String::New(message)));
+  if (try_catch.HasCaught()) {
+      Shell::ReportException(&try_catch);
+    return;
+  }
+  String::Utf8Value str(details->Get(String::New("text")));
+  if (str.length() == 0) {
+    // Empty string is used to signal not to process this event.
+    return;
+  }
+  if (*str != NULL) {
+    printf("%s\n", *str);
+  } else {
+    printf("???\n");
+  }
+  printf("dbg> ");
+}
+
+
+void RemoteDebugger::HandleKeyboardCommand(char* command) {
+  HandleScope scope;
+
+  // Convert the debugger command to a JSON debugger request.
+  TryCatch try_catch;
+  Handle<Value> request =
+      Shell::DebugCommandToJSONRequest(String::New(command));
+  if (try_catch.HasCaught()) {
+    Shell::ReportException(&try_catch);
+    return;
+  }
+
+  // If undefined is returned the command was handled internally and there is
+  // no JSON to send.
+  if (request->IsUndefined()) {
+    return;
+  }
+
+  // Send the JSON debugger request.
+  i::DebuggerAgentUtil::SendMessage(conn_, Handle<String>::Cast(request));
+}
+
+
+void ReceiverThread::Run() {
+  // Receive the connect message (with empty body).
+  i::SmartPointer<char> message =
+    i::DebuggerAgentUtil::ReceiveMessage(remote_debugger_->conn());
+  ASSERT(*message == NULL);
+
+  while (true) {
+    // Receive a message.
+    i::SmartPointer<char> message =
+      i::DebuggerAgentUtil::ReceiveMessage(remote_debugger_->conn());
+    if (*message == NULL) {
+      remote_debugger_->ConnectionClosed();
+      return;
+    }
+
+    // Pass the message to the main thread.
+    remote_debugger_->MessageReceived(message);
+  }
+}
+
+
+void KeyboardThread::Run() {
+  static const int kBufferSize = 256;
+  while (true) {
+    // Read keyboard input.
+    char command[kBufferSize];
+    char* str = fgets(command, kBufferSize, stdin);
+    if (str == NULL) {
+      break;
+    }
+
+    // Pass the keyboard command to the main thread.
+    remote_debugger_->KeyboardCommand(
+        i::SmartPointer<char>(i::StrDup(command)));
+  }
+}
+
+
+}  // namespace v8
diff --git a/src/d8-debug.h b/src/d8-debug.h
new file mode 100644
index 0000000..c7acc2f
--- /dev/null
+++ b/src/d8-debug.h
@@ -0,0 +1,155 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_D8_DEBUG_H_
+#define V8_D8_DEBUG_H_
+
+
+#include "d8.h"
+#include "debug.h"
+
+
+namespace v8 {
+
+
+void HandleDebugEvent(DebugEvent event,
+                      Handle<Object> exec_state,
+                      Handle<Object> event_data,
+                      Handle<Value> data);
+
+// Start the remote debugger, connecting to a V8 debugger agent on the
+// specified port.
+void RunRemoteDebugger(int port);
+
+// Forward declarations.
+class RemoteDebuggerEvent;
+class ReceiverThread;
+
+
+// Remote debugging class.
+class RemoteDebugger {
+ public:
+  explicit RemoteDebugger(int port)
+      : port_(port),
+        event_access_(i::OS::CreateMutex()),
+        event_available_(i::OS::CreateSemaphore(0)),
+        head_(NULL), tail_(NULL) {}
+  void Run();
+
+  // Handle events from the subordinate threads.
+  void MessageReceived(i::SmartPointer<char> message);
+  void KeyboardCommand(i::SmartPointer<char> command);
+  void ConnectionClosed();
+
+ private:
+  // Add new debugger event to the list.
+  void AddEvent(RemoteDebuggerEvent* event);
+  // Read next debugger event from the list.
+  RemoteDebuggerEvent* GetEvent();
+
+  // Handle a message from the debugged V8.
+  void HandleMessageReceived(char* message);
+  // Handle a keyboard command.
+  void HandleKeyboardCommand(char* command);
+
+  // Get connection to agent in debugged V8.
+  i::Socket* conn() { return conn_; }
+
+  int port_;  // Port used to connect to the debugged V8.
+  i::Socket* conn_;  // Connection to debugger agent in debugged V8.
+
+  // Linked list of events from debugged V8 and from keyboard input. Access to
+// the list is guarded by a mutex, and a semaphore signals new items in the
+  // list.
+  i::Mutex* event_access_;
+  i::Semaphore* event_available_;
+  RemoteDebuggerEvent* head_;
+  RemoteDebuggerEvent* tail_;
+
+  friend class ReceiverThread;
+};
+
+
+// Thread reading from debugged V8 instance.
+class ReceiverThread: public i::Thread {
+ public:
+  explicit ReceiverThread(RemoteDebugger* remote_debugger)
+      : remote_debugger_(remote_debugger) {}
+  ~ReceiverThread() {}
+
+  void Run();
+
+ private:
+  RemoteDebugger* remote_debugger_;
+};
+
+
+// Thread reading keyboard input.
+class KeyboardThread: public i::Thread {
+ public:
+  explicit KeyboardThread(RemoteDebugger* remote_debugger)
+      : remote_debugger_(remote_debugger) {}
+  ~KeyboardThread() {}
+
+  void Run();
+
+ private:
+  RemoteDebugger* remote_debugger_;
+};
+
+
+// Events processed by the main debugger thread.
+class RemoteDebuggerEvent {
+ public:
+  RemoteDebuggerEvent(int type, i::SmartPointer<char> data)
+      : type_(type), data_(data), next_(NULL) {
+    ASSERT(type == kMessage || type == kKeyboard || type == kDisconnect);
+  }
+
+  static const int kMessage = 1;
+  static const int kKeyboard = 2;
+  static const int kDisconnect = 3;
+
+  int type() { return type_; }
+  char* data() { return *data_; }
+
+ private:
+  void set_next(RemoteDebuggerEvent* event) { next_ = event; }
+  RemoteDebuggerEvent* next() { return next_; }
+
+  int type_;
+  i::SmartPointer<char> data_;
+  RemoteDebuggerEvent* next_;
+
+  friend class RemoteDebugger;
+};
+
+
+}  // namespace v8
+
+
+#endif  // V8_D8_DEBUG_H_
diff --git a/src/d8-posix.cc b/src/d8-posix.cc
new file mode 100644
index 0000000..fe130ce
--- /dev/null
+++ b/src/d8-posix.cc
@@ -0,0 +1,675 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <time.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/wait.h>
+#include <signal.h>
+
+
+#include "d8.h"
+#include "d8-debug.h"
+#include "debug.h"
+
+
+namespace v8 {
+
+
+// If the buffer ends in the middle of a UTF-8 sequence then we return
+// the length of the string up to but not including the incomplete UTF-8
+// sequence.  If the buffer ends with a valid UTF-8 sequence then we
+// return the whole buffer.
+static int LengthWithoutIncompleteUtf8(char* buffer, int len) {
+  int answer = len;
+  // 1-byte encoding.
+  static const int kUtf8SingleByteMask = 0x80;
+  static const int kUtf8SingleByteValue = 0x00;
+  // 2-byte encoding.
+  static const int kUtf8TwoByteMask = 0xe0;
+  static const int kUtf8TwoByteValue = 0xc0;
+  // 3-byte encoding.
+  static const int kUtf8ThreeByteMask = 0xf0;
+  static const int kUtf8ThreeByteValue = 0xe0;
+  // 4-byte encoding.
+  static const int kUtf8FourByteMask = 0xf8;
+  static const int kUtf8FourByteValue = 0xf0;
+  // Subsequent bytes of a multi-byte encoding.
+  static const int kMultiByteMask = 0xc0;
+  static const int kMultiByteValue = 0x80;
+  int multi_byte_bytes_seen = 0;
+  while (answer > 0) {
+    int c = buffer[answer - 1];
+    // Ends in valid single-byte sequence?
+    if ((c & kUtf8SingleByteMask) == kUtf8SingleByteValue) return answer;
+    // Ends in one or more subsequent bytes of a multi-byte value?
+    if ((c & kMultiByteMask) == kMultiByteValue) {
+      multi_byte_bytes_seen++;
+      answer--;
+    } else {
+      if ((c & kUtf8TwoByteMask) == kUtf8TwoByteValue) {
+        if (multi_byte_bytes_seen >= 1) {
+          return answer + 2;
+        }
+        return answer - 1;
+      } else if ((c & kUtf8ThreeByteMask) == kUtf8ThreeByteValue) {
+        if (multi_byte_bytes_seen >= 2) {
+          return answer + 3;
+        }
+        return answer - 1;
+      } else if ((c & kUtf8FourByteMask) == kUtf8FourByteValue) {
+        if (multi_byte_bytes_seen >= 3) {
+          return answer + 4;
+        }
+        return answer - 1;
+      } else {
+        return answer;  // Malformed UTF-8.
+      }
+    }
+  }
+  return 0;
+}
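+
+// Illustrative example (not part of the original change): if a read stops
+// after the first two bytes of the three-byte sequence E2 82 AC (U+20AC),
+// e.g. in the buffer "ab\xE2\x82", the function returns 2 so that the
+// partial sequence is carried over into the next read.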
+
+
+// Suspends the thread until there is data available from the child process.
+// Returns false on timeout, true on data ready.
+static bool WaitOnFD(int fd,
+                     int read_timeout,
+                     int total_timeout,
+                     struct timeval& start_time) {
+  fd_set readfds, writefds, exceptfds;
+  struct timeval timeout;
+  int gone = 0;
+  if (total_timeout != -1) {
+    struct timeval time_now;
+    gettimeofday(&time_now, NULL);
+    int seconds = time_now.tv_sec - start_time.tv_sec;
+    gone = seconds * 1000 + (time_now.tv_usec - start_time.tv_usec) / 1000;
+    if (gone >= total_timeout) return false;
+  }
+  FD_ZERO(&readfds);
+  FD_ZERO(&writefds);
+  FD_ZERO(&exceptfds);
+  FD_SET(fd, &readfds);
+  FD_SET(fd, &exceptfds);
+  if (read_timeout == -1 ||
+      (total_timeout != -1 && total_timeout - gone < read_timeout)) {
+    read_timeout = total_timeout - gone;
+  }
+  timeout.tv_usec = (read_timeout % 1000) * 1000;
+  timeout.tv_sec = read_timeout / 1000;
+  int number_of_fds_ready = select(fd + 1,
+                                   &readfds,
+                                   &writefds,
+                                   &exceptfds,
+                                   read_timeout != -1 ? &timeout : NULL);
+  return number_of_fds_ready == 1;
+}
+
+
+// Checks whether we ran out of time on the timeout.  Returns true if we ran out
+// of time, false if we still have time.
+static bool TimeIsOut(const struct timeval& start_time, const int& total_time) {
+  if (total_time == -1) return false;
+  struct timeval time_now;
+  gettimeofday(&time_now, NULL);
+  // Careful about overflow.
+  int seconds = time_now.tv_sec - start_time.tv_sec;
+  if (seconds > 100) {
+    if (seconds * 1000 > total_time) return true;
+    return false;
+  }
+  int useconds = time_now.tv_usec - start_time.tv_usec;
+  if (seconds * 1000000 + useconds > total_time * 1000) {
+    return true;
+  }
+  return false;
+}
+
+
+// A utility class that does a non-hanging waitpid on the child process if we
+// bail out of the System() function early.  If you don't ever do a waitpid on
+// a subprocess then it turns into one of those annoying 'zombie processes'.
+class ZombieProtector {
+ public:
+  explicit ZombieProtector(int pid): pid_(pid) { }
+  ~ZombieProtector() { if (pid_ != 0) waitpid(pid_, NULL, 0); }
+  void ChildIsDeadNow() { pid_ = 0; }
+ private:
+  int pid_;
+};
+
+
+// A utility class that closes a file descriptor when it goes out of scope.
+class OpenFDCloser {
+ public:
+  explicit OpenFDCloser(int fd): fd_(fd) { }
+  ~OpenFDCloser() { close(fd_); }
+ private:
+  int fd_;
+};
+
+
+// A utility class that takes the array of command arguments and puts them in an
+// array of new[]ed UTF-8 C strings.  Deallocates them again when it goes out of
+// scope.
+class ExecArgs {
+ public:
+  ExecArgs() {
+    exec_args_[0] = NULL;
+  }
+  bool Init(Handle<Value> arg0, Handle<Array> command_args) {
+    String::Utf8Value prog(arg0);
+    if (*prog == NULL) {
+      const char* message =
+          "os.system(): String conversion of program name failed";
+      ThrowException(String::New(message));
+      return false;
+    }
+    int len = prog.length() + 3;
+    char* c_arg = new char[len];
+    snprintf(c_arg, len, "%s", *prog);
+    exec_args_[0] = c_arg;
+    int i = 1;
+    for (unsigned j = 0; j < command_args->Length(); i++, j++) {
+      Handle<Value> arg(command_args->Get(Integer::New(j)));
+      String::Utf8Value utf8_arg(arg);
+      if (*utf8_arg == NULL) {
+        exec_args_[i] = NULL;  // Consistent state for destructor.
+        const char* message =
+            "os.system(): String conversion of argument failed.";
+        ThrowException(String::New(message));
+        return false;
+      }
+      int len = utf8_arg.length() + 1;
+      char* c_arg = new char[len];
+      snprintf(c_arg, len, "%s", *utf8_arg);
+      exec_args_[i] = c_arg;
+    }
+    exec_args_[i] = NULL;
+    return true;
+  }
+  ~ExecArgs() {
+    for (unsigned i = 0; i < kMaxArgs; i++) {
+      if (exec_args_[i] == NULL) {
+        return;
+      }
+      delete [] exec_args_[i];
+      exec_args_[i] = 0;
+    }
+  }
+  static const unsigned kMaxArgs = 1000;
+  char** arg_array() { return exec_args_; }
+  char* arg0() { return exec_args_[0]; }
+ private:
+  char* exec_args_[kMaxArgs + 1];
+};
+
+
+// Gets the optional timeouts from the arguments to the system() call.
+static bool GetTimeouts(const Arguments& args,
+                        int* read_timeout,
+                        int* total_timeout) {
+  if (args.Length() > 3) {
+    if (args[3]->IsNumber()) {
+      *total_timeout = args[3]->Int32Value();
+    } else {
+      ThrowException(String::New("system: Argument 4 must be a number"));
+      return false;
+    }
+  }
+  if (args.Length() > 2) {
+    if (args[2]->IsNumber()) {
+      *read_timeout = args[2]->Int32Value();
+    } else {
+      ThrowException(String::New("system: Argument 3 must be a number"));
+      return false;
+    }
+  }
+  return true;
+}
+
+
+static const int kReadFD = 0;
+static const int kWriteFD = 1;
+
+
+// This is run in the child process after fork() but before exec().  It normally
+// ends with the child process being replaced with the desired child program.
+// It only returns if an error occurred.
+static void ExecSubprocess(int* exec_error_fds,
+                           int* stdout_fds,
+                           ExecArgs& exec_args) {
+  close(exec_error_fds[kReadFD]);  // Don't need this in the child.
+  close(stdout_fds[kReadFD]);      // Don't need this in the child.
+  close(1);                        // Close stdout.
+  dup2(stdout_fds[kWriteFD], 1);   // Dup pipe fd to stdout.
+  close(stdout_fds[kWriteFD]);     // Don't need the original fd now.
+  fcntl(exec_error_fds[kWriteFD], F_SETFD, FD_CLOEXEC);
+  execvp(exec_args.arg0(), exec_args.arg_array());
+  // Only get here if the exec failed.  Write errno to the parent to tell
+  // them it went wrong.  If it went well the pipe is closed.
+  int err = errno;
+  int bytes_written;
+  do {
+    bytes_written = write(exec_error_fds[kWriteFD], &err, sizeof(err));
+  } while (bytes_written == -1 && errno == EINTR);
+  // Return (and exit child process).
+}
+
+
+// Runs in the parent process.  Checks that the child was able to exec (closing
+// the file descriptor), or reports an error if it failed.
+static bool ChildLaunchedOK(int* exec_error_fds) {
+  int bytes_read;
+  int err;
+  do {
+    bytes_read = read(exec_error_fds[kReadFD], &err, sizeof(err));
+  } while (bytes_read == -1 && errno == EINTR);
+  if (bytes_read != 0) {
+    ThrowException(String::New(strerror(err)));
+    return false;
+  }
+  return true;
+}
+
+
+// Accumulates the output from the child in a string handle.  Returns the
+// accumulated output, or the result of throwing an exception on timeout or
+// error.
+static Handle<Value> GetStdout(int child_fd,
+                               struct timeval& start_time,
+                               int read_timeout,
+                               int total_timeout) {
+  Handle<String> accumulator = String::Empty();
+  const char* source = "function(a, b) { return a + b; }";
+  Handle<Value> cons_as_obj(Script::Compile(String::New(source))->Run());
+  Handle<Function> cons_function(Function::Cast(*cons_as_obj));
+  Handle<Value> cons_args[2];
+
+  int fullness = 0;
+  static const int kStdoutReadBufferSize = 4096;
+  char buffer[kStdoutReadBufferSize];
+
+  if (fcntl(child_fd, F_SETFL, O_NONBLOCK) != 0) {
+    return ThrowException(String::New(strerror(errno)));
+  }
+
+  int bytes_read;
+  do {
+    bytes_read = read(child_fd,
+                      buffer + fullness,
+                      kStdoutReadBufferSize - fullness);
+    if (bytes_read == -1) {
+      if (errno == EAGAIN) {
+        if (!WaitOnFD(child_fd,
+                      read_timeout,
+                      total_timeout,
+                      start_time) ||
+            (TimeIsOut(start_time, total_timeout))) {
+          return ThrowException(String::New("Timed out waiting for output"));
+        }
+        continue;
+      } else if (errno == EINTR) {
+        continue;
+      } else {
+        break;
+      }
+    }
+    if (bytes_read + fullness > 0) {
+      int length = bytes_read == 0 ?
+                   bytes_read + fullness :
+                   LengthWithoutIncompleteUtf8(buffer, bytes_read + fullness);
+      Handle<String> addition = String::New(buffer, length);
+      cons_args[0] = accumulator;
+      cons_args[1] = addition;
+      accumulator = Handle<String>::Cast(cons_function->Call(
+          Shell::utility_context()->Global(),
+          2,
+          cons_args));
+      fullness = bytes_read + fullness - length;
+      memcpy(buffer, buffer + length, fullness);
+    }
+  } while (bytes_read != 0);
+  return accumulator;
+}
+
+
+// Modern Linux has the waitid call, which is like waitpid, but more useful
+// if you want a timeout.  If we don't have waitid we can't limit the time
+// waiting for the process to exit without losing the information about
+// whether it exited normally.  In the common case this doesn't matter because
+// we don't get here before the child has closed stdout and most programs don't
+// do that before they exit.
+//
+// We're disabling usage of waitid in Mac OS X because it doesn't work for us:
+// a parent process hangs on waiting while a child process is already a zombie.
+// See http://code.google.com/p/v8/issues/detail?id=401.
+#if defined(WNOWAIT) && !defined(ANDROID) && !defined(__APPLE__)
+#define HAS_WAITID 1
+#endif
+
+
+// Get exit status of child.
+static bool WaitForChild(int pid,
+                         ZombieProtector& child_waiter,
+                         struct timeval& start_time,
+                         int read_timeout,
+                         int total_timeout) {
+#ifdef HAS_WAITID
+
+  siginfo_t child_info;
+  child_info.si_pid = 0;
+  int useconds = 1;
+  // Wait for child to exit.
+  while (child_info.si_pid == 0) {
+    waitid(P_PID, pid, &child_info, WEXITED | WNOHANG | WNOWAIT);
+    usleep(useconds);
+    if (useconds < 1000000) useconds <<= 1;
+    if ((read_timeout != -1 && useconds / 1000 > read_timeout) ||
+        (TimeIsOut(start_time, total_timeout))) {
+      ThrowException(String::New("Timed out waiting for process to terminate"));
+      kill(pid, SIGINT);
+      return false;
+    }
+  }
+  if (child_info.si_code == CLD_KILLED) {
+    char message[999];
+    snprintf(message,
+             sizeof(message),
+             "Child killed by signal %d",
+             child_info.si_status);
+    ThrowException(String::New(message));
+    return false;
+  }
+  if (child_info.si_code == CLD_EXITED && child_info.si_status != 0) {
+    char message[999];
+    snprintf(message,
+             sizeof(message),
+             "Child exited with status %d",
+             child_info.si_status);
+    ThrowException(String::New(message));
+    return false;
+  }
+
+#else  // No waitid call.
+
+  int child_status;
+  waitpid(pid, &child_status, 0);  // We hang here if the child doesn't exit.
+  child_waiter.ChildIsDeadNow();
+  if (WIFSIGNALED(child_status)) {
+    char message[999];
+    snprintf(message,
+             sizeof(message),
+             "Child killed by signal %d",
+             WTERMSIG(child_status));
+    ThrowException(String::New(message));
+    return false;
+  }
+  if (WEXITSTATUS(child_status) != 0) {
+    char message[999];
+    int exit_status = WEXITSTATUS(child_status);
+    snprintf(message,
+             sizeof(message),
+             "Child exited with status %d",
+             exit_status);
+    ThrowException(String::New(message));
+    return false;
+  }
+
+#endif  // No waitid call.
+
+  return true;
+}
+
+
+// Implementation of the system() function (see d8.h for details).
+Handle<Value> Shell::System(const Arguments& args) {
+  HandleScope scope;
+  int read_timeout = -1;
+  int total_timeout = -1;
+  if (!GetTimeouts(args, &read_timeout, &total_timeout)) return v8::Undefined();
+  Handle<Array> command_args;
+  if (args.Length() > 1) {
+    if (!args[1]->IsArray()) {
+      return ThrowException(String::New("system: Argument 2 must be an array"));
+    }
+    command_args = Handle<Array>::Cast(args[1]);
+  } else {
+    command_args = Array::New(0);
+  }
+  if (command_args->Length() > ExecArgs::kMaxArgs) {
+    return ThrowException(String::New("Too many arguments to system()"));
+  }
+  if (args.Length() < 1) {
+    return ThrowException(String::New("Too few arguments to system()"));
+  }
+
+  struct timeval start_time;
+  gettimeofday(&start_time, NULL);
+
+  ExecArgs exec_args;
+  if (!exec_args.Init(args[0], command_args)) {
+    return v8::Undefined();
+  }
+  int exec_error_fds[2];
+  int stdout_fds[2];
+
+  if (pipe(exec_error_fds) != 0) {
+    return ThrowException(String::New("pipe syscall failed."));
+  }
+  if (pipe(stdout_fds) != 0) {
+    return ThrowException(String::New("pipe syscall failed."));
+  }
+
+  pid_t pid = fork();
+  if (pid == 0) {  // Child process.
+    ExecSubprocess(exec_error_fds, stdout_fds, exec_args);
+    exit(1);
+  }
+
+  // Parent process.  Ensure that we clean up if we exit this function early.
+  ZombieProtector child_waiter(pid);
+  close(exec_error_fds[kWriteFD]);
+  close(stdout_fds[kWriteFD]);
+  OpenFDCloser error_read_closer(exec_error_fds[kReadFD]);
+  OpenFDCloser stdout_read_closer(stdout_fds[kReadFD]);
+
+  if (!ChildLaunchedOK(exec_error_fds)) return v8::Undefined();
+
+  Handle<Value> accumulator = GetStdout(stdout_fds[kReadFD],
+                                        start_time,
+                                        read_timeout,
+                                        total_timeout);
+  if (accumulator->IsUndefined()) {
+    kill(pid, SIGINT);  // On timeout, kill the subprocess.
+    return accumulator;
+  }
+
+  if (!WaitForChild(pid,
+                    child_waiter,
+                    start_time,
+                    read_timeout,
+                    total_timeout)) {
+    return v8::Undefined();
+  }
+
+  return scope.Close(accumulator);
+}
+
+
+Handle<Value> Shell::ChangeDirectory(const Arguments& args) {
+  if (args.Length() != 1) {
+    const char* message = "chdir() takes one argument";
+    return ThrowException(String::New(message));
+  }
+  String::Utf8Value directory(args[0]);
+  if (*directory == NULL) {
+    const char* message = "os.chdir(): String conversion of argument failed.";
+    return ThrowException(String::New(message));
+  }
+  if (chdir(*directory) != 0) {
+    return ThrowException(String::New(strerror(errno)));
+  }
+  return v8::Undefined();
+}
+
+
+Handle<Value> Shell::SetUMask(const Arguments& args) {
+  if (args.Length() != 1) {
+    const char* message = "umask() takes one argument";
+    return ThrowException(String::New(message));
+  }
+  if (args[0]->IsNumber()) {
+    mode_t mask = args[0]->Int32Value();
+    int previous = umask(mask);
+    return Number::New(previous);
+  } else {
+    const char* message = "umask() argument must be numeric";
+    return ThrowException(String::New(message));
+  }
+}
+
+
+static bool CheckItsADirectory(char* directory) {
+  struct stat stat_buf;
+  int stat_result = stat(directory, &stat_buf);
+  if (stat_result != 0) {
+    ThrowException(String::New(strerror(errno)));
+    return false;
+  }
+  if ((stat_buf.st_mode & S_IFDIR) != 0) return true;
+  ThrowException(String::New(strerror(EEXIST)));
+  return false;
+}
+
+
+// Returns true for success.  Creates intermediate directories as needed.  No
+// error if the directory exists already.
+static bool mkdirp(char* directory, mode_t mask) {
+  int result = mkdir(directory, mask);
+  if (result == 0) return true;
+  if (errno == EEXIST) {
+    return CheckItsADirectory(directory);
+  } else if (errno == ENOENT) {  // Intermediate path element is missing.
+    char* last_slash = strrchr(directory, '/');
+    if (last_slash == NULL) {
+      ThrowException(String::New(strerror(errno)));
+      return false;
+    }
+    *last_slash = 0;
+    if (!mkdirp(directory, mask)) return false;
+    *last_slash = '/';
+    result = mkdir(directory, mask);
+    if (result == 0) return true;
+    if (errno == EEXIST) {
+      return CheckItsADirectory(directory);
+    }
+    ThrowException(String::New(strerror(errno)));
+    return false;
+  } else {
+    ThrowException(String::New(strerror(errno)));
+    return false;
+  }
+}
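+
+// Illustrative example (not part of the original change): for a missing path
+// such as "out/tmp/cache", the ENOENT branch above recurses on "out/tmp"
+// (and then on "out") before retrying the full path, so all intermediate
+// directories get created.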
+
+
+Handle<Value> Shell::MakeDirectory(const Arguments& args) {
+  mode_t mask = 0777;
+  if (args.Length() == 2) {
+    if (args[1]->IsNumber()) {
+      mask = args[1]->Int32Value();
+    } else {
+      const char* message = "mkdirp() second argument must be numeric";
+      return ThrowException(String::New(message));
+    }
+  } else if (args.Length() != 1) {
+    const char* message = "mkdirp() takes one or two arguments";
+    return ThrowException(String::New(message));
+  }
+  String::Utf8Value directory(args[0]);
+  if (*directory == NULL) {
+    const char* message = "os.mkdirp(): String conversion of argument failed.";
+    return ThrowException(String::New(message));
+  }
+  mkdirp(*directory, mask);
+  return v8::Undefined();
+}
+
+
+Handle<Value> Shell::RemoveDirectory(const Arguments& args) {
+  if (args.Length() != 1) {
+    const char* message = "rmdir() takes one or two arguments";
+    return ThrowException(String::New(message));
+  }
+  String::Utf8Value directory(args[0]);
+  if (*directory == NULL) {
+    const char* message = "os.rmdir(): String conversion of argument failed.";
+    return ThrowException(String::New(message));
+  }
+  rmdir(*directory);
+  return v8::Undefined();
+}
+
+
+Handle<Value> Shell::SetEnvironment(const Arguments& args) {
+  if (args.Length() != 2) {
+    const char* message = "setenv() takes two arguments";
+    return ThrowException(String::New(message));
+  }
+  String::Utf8Value var(args[0]);
+  String::Utf8Value value(args[1]);
+  if (*var == NULL) {
+    const char* message =
+        "os.setenv(): String conversion of variable name failed.";
+    return ThrowException(String::New(message));
+  }
+  if (*value == NULL) {
+    const char* message =
+        "os.setenv(): String conversion of variable contents failed.";
+    return ThrowException(String::New(message));
+  }
+  setenv(*var, *value, 1);
+  return v8::Undefined();
+}
+
+
+void Shell::AddOSMethods(Handle<ObjectTemplate> os_templ) {
+  os_templ->Set(String::New("system"), FunctionTemplate::New(System));
+  os_templ->Set(String::New("chdir"), FunctionTemplate::New(ChangeDirectory));
+  os_templ->Set(String::New("setenv"), FunctionTemplate::New(SetEnvironment));
+  os_templ->Set(String::New("umask"), FunctionTemplate::New(SetUMask));
+  os_templ->Set(String::New("mkdirp"), FunctionTemplate::New(MakeDirectory));
+  os_templ->Set(String::New("rmdir"), FunctionTemplate::New(RemoveDirectory));
+}
+
+}  // namespace v8
diff --git a/src/d8-readline.cc b/src/d8-readline.cc
new file mode 100644
index 0000000..34b7b60
--- /dev/null
+++ b/src/d8-readline.cc
@@ -0,0 +1,128 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include <cstdio>  // NOLINT
+#include <readline/readline.h>
+#include <readline/history.h>
+
+
+#include "d8.h"
+
+
+// There are incompatibilities between different versions and different
+// implementations of readline.  This smooths out one known incompatibility.
+#if RL_READLINE_VERSION >= 0x0500
+#define completion_matches rl_completion_matches
+#endif
+
+
+namespace v8 {
+
+
+class ReadLineEditor: public LineEditor {
+ public:
+  ReadLineEditor() : LineEditor(LineEditor::READLINE, "readline") { }
+  virtual i::SmartPointer<char> Prompt(const char* prompt);
+  virtual bool Open();
+  virtual bool Close();
+  virtual void AddHistory(const char* str);
+ private:
+  static char** AttemptedCompletion(const char* text, int start, int end);
+  static char* CompletionGenerator(const char* text, int state);
+  static char kWordBreakCharacters[];
+};
+
+
+static ReadLineEditor read_line_editor;
+char ReadLineEditor::kWordBreakCharacters[] = {' ', '\t', '\n', '"',
+    '\\', '\'', '`', '@', '.', '>', '<', '=', ';', '|', '&', '{', '(',
+    '\0'};
+
+
+bool ReadLineEditor::Open() {
+  rl_initialize();
+  rl_attempted_completion_function = AttemptedCompletion;
+  rl_completer_word_break_characters = kWordBreakCharacters;
+  rl_bind_key('\t', rl_complete);
+  using_history();
+  return read_history(Shell::kHistoryFileName) == 0;
+}
+
+
+bool ReadLineEditor::Close() {
+  return write_history(Shell::kHistoryFileName) == 0;
+}
+
+
+i::SmartPointer<char> ReadLineEditor::Prompt(const char* prompt) {
+  char* result = readline(prompt);
+  return i::SmartPointer<char>(result);
+}
+
+
+void ReadLineEditor::AddHistory(const char* str) {
+  add_history(str);
+}
+
+
+char** ReadLineEditor::AttemptedCompletion(const char* text,
+                                           int start,
+                                           int end) {
+  char** result = completion_matches(text, CompletionGenerator);
+  rl_attempted_completion_over = true;
+  return result;
+}
+
+
+char* ReadLineEditor::CompletionGenerator(const char* text, int state) {
+  static unsigned current_index;
+  static Persistent<Array> current_completions;
+  if (state == 0) {
+    i::SmartPointer<char> full_text(i::StrNDup(rl_line_buffer, rl_point));
+    HandleScope scope;
+    Handle<Array> completions =
+      Shell::GetCompletions(String::New(text), String::New(*full_text));
+    current_completions = Persistent<Array>::New(completions);
+    current_index = 0;
+  }
+  if (current_index < current_completions->Length()) {
+    HandleScope scope;
+    Handle<Integer> index = Integer::New(current_index);
+    Handle<Value> str_obj = current_completions->Get(index);
+    current_index++;
+    String::Utf8Value str(str_obj);
+    return strdup(*str);
+  } else {
+    current_completions.Dispose();
+    current_completions.Clear();
+    return NULL;
+  }
+}
+
+
+}  // namespace v8
diff --git a/src/d8-windows.cc b/src/d8-windows.cc
new file mode 100644
index 0000000..eeb4735
--- /dev/null
+++ b/src/d8-windows.cc
@@ -0,0 +1,42 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "d8.h"
+#include "d8-debug.h"
+#include "debug.h"
+#include "api.h"
+
+
+namespace v8 {
+
+
+void Shell::AddOSMethods(Handle<ObjectTemplate> os_templ) {
+}
+
+
+}  // namespace v8
diff --git a/src/d8.cc b/src/d8.cc
new file mode 100644
index 0000000..e4658b1
--- /dev/null
+++ b/src/d8.cc
@@ -0,0 +1,772 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include <stdlib.h>
+#include <errno.h>
+
+#include "d8.h"
+#include "d8-debug.h"
+#include "debug.h"
+#include "api.h"
+#include "natives.h"
+#include "platform.h"
+
+
+namespace v8 {
+
+
+const char* Shell::kHistoryFileName = ".d8_history";
+const char* Shell::kPrompt = "d8> ";
+
+
+LineEditor *LineEditor::first_ = NULL;
+
+
+LineEditor::LineEditor(Type type, const char* name)
+    : type_(type),
+      name_(name),
+      next_(first_) {
+  first_ = this;
+}
+
+
+LineEditor* LineEditor::Get() {
+  LineEditor* current = first_;
+  LineEditor* best = current;
+  while (current != NULL) {
+    if (current->type_ > best->type_)
+      best = current;
+    current = current->next_;
+  }
+  return best;
+}
+
+
+class DumbLineEditor: public LineEditor {
+ public:
+  DumbLineEditor() : LineEditor(LineEditor::DUMB, "dumb") { }
+  virtual i::SmartPointer<char> Prompt(const char* prompt);
+};
+
+
+static DumbLineEditor dumb_line_editor;
+
+
+i::SmartPointer<char> DumbLineEditor::Prompt(const char* prompt) {
+  static const int kBufferSize = 256;
+  char buffer[kBufferSize];
+  printf("%s", prompt);
+  char* str = fgets(buffer, kBufferSize, stdin);
+  return i::SmartPointer<char>(str ? i::StrDup(str) : str);
+}
+
+
+CounterMap* Shell::counter_map_;
+i::OS::MemoryMappedFile* Shell::counters_file_ = NULL;
+CounterCollection Shell::local_counters_;
+CounterCollection* Shell::counters_ = &local_counters_;
+Persistent<Context> Shell::utility_context_;
+Persistent<Context> Shell::evaluation_context_;
+
+
+bool CounterMap::Match(void* key1, void* key2) {
+  const char* name1 = reinterpret_cast<const char*>(key1);
+  const char* name2 = reinterpret_cast<const char*>(key2);
+  return strcmp(name1, name2) == 0;
+}
+
+
+// Converts a V8 value to a C string.
+const char* ToCString(const v8::String::Utf8Value& value) {
+  return *value ? *value : "<string conversion failed>";
+}
+
+
+// Executes a string within the current v8 context.
+bool Shell::ExecuteString(Handle<String> source,
+                          Handle<Value> name,
+                          bool print_result,
+                          bool report_exceptions) {
+  HandleScope handle_scope;
+  TryCatch try_catch;
+  if (i::FLAG_debugger) {
+    // When debugging make exceptions appear to be uncaught.
+    try_catch.SetVerbose(true);
+  }
+  Handle<Script> script = Script::Compile(source, name);
+  if (script.IsEmpty()) {
+    // Print errors that happened during compilation.
+    if (report_exceptions && !i::FLAG_debugger)
+      ReportException(&try_catch);
+    return false;
+  } else {
+    Handle<Value> result = script->Run();
+    if (result.IsEmpty()) {
+      // Print errors that happened during execution.
+      if (report_exceptions && !i::FLAG_debugger)
+        ReportException(&try_catch);
+      return false;
+    } else {
+      if (print_result && !result->IsUndefined()) {
+        // If all went well and the result wasn't undefined then print
+        // the returned value.
+        v8::String::Utf8Value str(result);
+        const char* cstr = ToCString(str);
+        printf("%s\n", cstr);
+      }
+      return true;
+    }
+  }
+}
+
+
+Handle<Value> Shell::Print(const Arguments& args) {
+  Handle<Value> val = Write(args);
+  printf("\n");
+  return val;
+}
+
+
+Handle<Value> Shell::Write(const Arguments& args) {
+  for (int i = 0; i < args.Length(); i++) {
+    HandleScope handle_scope;
+    if (i != 0) {
+      printf(" ");
+    }
+    v8::String::Utf8Value str(args[i]);
+    fwrite(*str, sizeof(**str), str.length(), stdout);
+  }
+  return Undefined();
+}
+
+
+Handle<Value> Shell::Read(const Arguments& args) {
+  String::Utf8Value file(args[0]);
+  if (*file == NULL) {
+    return ThrowException(String::New("Error loading file"));
+  }
+  Handle<String> source = ReadFile(*file);
+  if (source.IsEmpty()) {
+    return ThrowException(String::New("Error loading file"));
+  }
+  return source;
+}
+
+
+Handle<Value> Shell::ReadLine(const Arguments& args) {
+  i::SmartPointer<char> line(i::ReadLine(""));
+  if (*line == NULL) {
+    return Null();
+  }
+  size_t len = strlen(*line);
+  if (len > 0 && line[len - 1] == '\n') {
+    --len;
+  }
+  return String::New(*line, len);
+}
+
+
+Handle<Value> Shell::Load(const Arguments& args) {
+  for (int i = 0; i < args.Length(); i++) {
+    HandleScope handle_scope;
+    String::Utf8Value file(args[i]);
+    if (*file == NULL) {
+      return ThrowException(String::New("Error loading file"));
+    }
+    Handle<String> source = ReadFile(*file);
+    if (source.IsEmpty()) {
+      return ThrowException(String::New("Error loading file"));
+    }
+    if (!ExecuteString(source, String::New(*file), false, false)) {
+      return ThrowException(String::New("Error executing  file"));
+    }
+  }
+  return Undefined();
+}
+
+
+Handle<Value> Shell::Yield(const Arguments& args) {
+  v8::Unlocker unlocker;
+  return Undefined();
+}
+
+
+Handle<Value> Shell::Quit(const Arguments& args) {
+  int exit_code = args[0]->Int32Value();
+  OnExit();
+  exit(exit_code);
+  return Undefined();
+}
+
+
+Handle<Value> Shell::Version(const Arguments& args) {
+  return String::New(V8::GetVersion());
+}
+
+
+void Shell::ReportException(v8::TryCatch* try_catch) {
+  HandleScope handle_scope;
+  v8::String::Utf8Value exception(try_catch->Exception());
+  const char* exception_string = ToCString(exception);
+  Handle<Message> message = try_catch->Message();
+  if (message.IsEmpty()) {
+    // V8 didn't provide any extra information about this error; just
+    // print the exception.
+    printf("%s\n", exception_string);
+  } else {
+    // Print (filename):(line number): (message).
+    v8::String::Utf8Value filename(message->GetScriptResourceName());
+    const char* filename_string = ToCString(filename);
+    int linenum = message->GetLineNumber();
+    printf("%s:%i: %s\n", filename_string, linenum, exception_string);
+    // Print line of source code.
+    v8::String::Utf8Value sourceline(message->GetSourceLine());
+    const char* sourceline_string = ToCString(sourceline);
+    printf("%s\n", sourceline_string);
+    // Print wavy underline (GetUnderline is deprecated).
+    int start = message->GetStartColumn();
+    for (int i = 0; i < start; i++) {
+      printf(" ");
+    }
+    int end = message->GetEndColumn();
+    for (int i = start; i < end; i++) {
+      printf("^");
+    }
+    printf("\n");
+  }
+}
+
+
+Handle<Array> Shell::GetCompletions(Handle<String> text, Handle<String> full) {
+  HandleScope handle_scope;
+  Context::Scope context_scope(utility_context_);
+  Handle<Object> global = utility_context_->Global();
+  Handle<Value> fun = global->Get(String::New("GetCompletions"));
+  static const int kArgc = 3;
+  Handle<Value> argv[kArgc] = { evaluation_context_->Global(), text, full };
+  Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
+  return handle_scope.Close(Handle<Array>::Cast(val));
+}
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+Handle<Object> Shell::DebugMessageDetails(Handle<String> message) {
+  Context::Scope context_scope(utility_context_);
+  Handle<Object> global = utility_context_->Global();
+  Handle<Value> fun = global->Get(String::New("DebugMessageDetails"));
+  static const int kArgc = 1;
+  Handle<Value> argv[kArgc] = { message };
+  Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
+  return Handle<Object>::Cast(val);
+}
+
+
+Handle<Value> Shell::DebugCommandToJSONRequest(Handle<String> command) {
+  Context::Scope context_scope(utility_context_);
+  Handle<Object> global = utility_context_->Global();
+  Handle<Value> fun = global->Get(String::New("DebugCommandToJSONRequest"));
+  static const int kArgc = 1;
+  Handle<Value> argv[kArgc] = { command };
+  Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
+  return val;
+}
+#endif
+
+
+int32_t* Counter::Bind(const char* name, bool is_histogram) {
+  int i;
+  for (i = 0; i < kMaxNameSize - 1 && name[i]; i++)
+    name_[i] = static_cast<char>(name[i]);
+  name_[i] = '\0';
+  is_histogram_ = is_histogram;
+  return ptr();
+}
+
+
+void Counter::AddSample(int32_t sample) {
+  count_++;
+  sample_total_ += sample;
+}
+
+
+CounterCollection::CounterCollection() {
+  magic_number_ = 0xDEADFACE;
+  max_counters_ = kMaxCounters;
+  max_name_size_ = Counter::kMaxNameSize;
+  counters_in_use_ = 0;
+}
+
+
+Counter* CounterCollection::GetNextCounter() {
+  if (counters_in_use_ == kMaxCounters) return NULL;
+  return &counters_[counters_in_use_++];
+}
+
+
+void Shell::MapCounters(const char* name) {
+  counters_file_ = i::OS::MemoryMappedFile::create(name,
+    sizeof(CounterCollection), &local_counters_);
+  void* memory = (counters_file_ == NULL) ?
+      NULL : counters_file_->memory();
+  if (memory == NULL) {
+    printf("Could not map counters file %s\n", name);
+    exit(1);
+  }
+  counters_ = static_cast<CounterCollection*>(memory);
+  V8::SetCounterFunction(LookupCounter);
+  V8::SetCreateHistogramFunction(CreateHistogram);
+  V8::SetAddHistogramSampleFunction(AddHistogramSample);
+}
+
+
+int CounterMap::Hash(const char* name) {
+  int h = 0;
+  int c;
+  while ((c = *name++) != 0) {
+    h += h << 5;
+    h += c;
+  }
+  return h;
+}
+
+
+Counter* Shell::GetCounter(const char* name, bool is_histogram) {
+  Counter* counter = counter_map_->Lookup(name);
+
+  if (counter == NULL) {
+    counter = counters_->GetNextCounter();
+    if (counter != NULL) {
+      counter_map_->Set(name, counter);
+      counter->Bind(name, is_histogram);
+    }
+  } else {
+    ASSERT(counter->is_histogram() == is_histogram);
+  }
+  return counter;
+}
+
+
+int* Shell::LookupCounter(const char* name) {
+  Counter* counter = GetCounter(name, false);
+
+  if (counter != NULL) {
+    return counter->ptr();
+  } else {
+    return NULL;
+  }
+}
+
+
+void* Shell::CreateHistogram(const char* name,
+                             int min,
+                             int max,
+                             size_t buckets) {
+  return GetCounter(name, true);
+}
+
+
+void Shell::AddHistogramSample(void* histogram, int sample) {
+  Counter* counter = reinterpret_cast<Counter*>(histogram);
+  counter->AddSample(sample);
+}
+
+
+void Shell::Initialize() {
+  Shell::counter_map_ = new CounterMap();
+  // Set up counters
+  if (i::FLAG_map_counters != NULL)
+    MapCounters(i::FLAG_map_counters);
+  if (i::FLAG_dump_counters) {
+    V8::SetCounterFunction(LookupCounter);
+    V8::SetCreateHistogramFunction(CreateHistogram);
+    V8::SetAddHistogramSampleFunction(AddHistogramSample);
+  }
+
+  // Initialize the global objects
+  HandleScope scope;
+  Handle<ObjectTemplate> global_template = ObjectTemplate::New();
+  global_template->Set(String::New("print"), FunctionTemplate::New(Print));
+  global_template->Set(String::New("write"), FunctionTemplate::New(Write));
+  global_template->Set(String::New("read"), FunctionTemplate::New(Read));
+  global_template->Set(String::New("readline"),
+                       FunctionTemplate::New(ReadLine));
+  global_template->Set(String::New("load"), FunctionTemplate::New(Load));
+  global_template->Set(String::New("quit"), FunctionTemplate::New(Quit));
+  global_template->Set(String::New("version"), FunctionTemplate::New(Version));
+
+  Handle<ObjectTemplate> os_templ = ObjectTemplate::New();
+  AddOSMethods(os_templ);
+  global_template->Set(String::New("os"), os_templ);
+
+  utility_context_ = Context::New(NULL, global_template);
+  utility_context_->SetSecurityToken(Undefined());
+  Context::Scope utility_scope(utility_context_);
+
+  i::JSArguments js_args = i::FLAG_js_arguments;
+  i::Handle<i::FixedArray> arguments_array =
+      i::Factory::NewFixedArray(js_args.argc());
+  for (int j = 0; j < js_args.argc(); j++) {
+    i::Handle<i::String> arg =
+        i::Factory::NewStringFromUtf8(i::CStrVector(js_args[j]));
+    arguments_array->set(j, *arg);
+  }
+  i::Handle<i::JSArray> arguments_jsarray =
+      i::Factory::NewJSArrayWithElements(arguments_array);
+  global_template->Set(String::New("arguments"),
+                       Utils::ToLocal(arguments_jsarray));
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Install the debugger object in the utility scope
+  i::Debug::Load();
+  i::JSObject* debug = i::Debug::debug_context()->global();
+  utility_context_->Global()->Set(String::New("$debug"),
+                                  Utils::ToLocal(&debug));
+#endif
+
+  // Run the d8 shell utility script in the utility context
+  int source_index = i::NativesCollection<i::D8>::GetIndex("d8");
+  i::Vector<const char> shell_source
+      = i::NativesCollection<i::D8>::GetScriptSource(source_index);
+  i::Vector<const char> shell_source_name
+      = i::NativesCollection<i::D8>::GetScriptName(source_index);
+  Handle<String> source = String::New(shell_source.start(),
+                                      shell_source.length());
+  Handle<String> name = String::New(shell_source_name.start(),
+                                    shell_source_name.length());
+  Handle<Script> script = Script::Compile(source, name);
+  script->Run();
+
+  // Mark the d8 shell script as native to avoid it showing up as normal source
+  // in the debugger.
+  i::Handle<i::JSFunction> script_fun = Utils::OpenHandle(*script);
+  i::Handle<i::Script> script_object =
+      i::Handle<i::Script>(i::Script::cast(script_fun->shared()->script()));
+  script_object->set_type(i::Smi::FromInt(i::Script::TYPE_NATIVE));
+
+  // Create the evaluation context
+  evaluation_context_ = Context::New(NULL, global_template);
+  evaluation_context_->SetSecurityToken(Undefined());
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Set the security token of the debug context to allow access.
+  i::Debug::debug_context()->set_security_token(i::Heap::undefined_value());
+
+  // Start the debugger agent if requested.
+  if (i::FLAG_debugger_agent) {
+    v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port);
+  }
+
+  // Start the in-process debugger if requested.
+  if (i::FLAG_debugger && !i::FLAG_debugger_agent) {
+    v8::Debug::SetDebugEventListener(HandleDebugEvent);
+  }
+#endif
+}
+
+
+void Shell::OnExit() {
+  if (i::FLAG_dump_counters) {
+    ::printf("+----------------------------------------+-------------+\n");
+    ::printf("| Name                                   | Value       |\n");
+    ::printf("+----------------------------------------+-------------+\n");
+    for (CounterMap::Iterator i(counter_map_); i.More(); i.Next()) {
+      Counter* counter = i.CurrentValue();
+      if (counter->is_histogram()) {
+        ::printf("| c:%-36s | %11i |\n", i.CurrentKey(), counter->count());
+        ::printf("| t:%-36s | %11i |\n",
+                 i.CurrentKey(),
+                 counter->sample_total());
+      } else {
+        ::printf("| %-38s | %11i |\n", i.CurrentKey(), counter->count());
+      }
+    }
+    ::printf("+----------------------------------------+-------------+\n");
+  }
+  if (counters_file_ != NULL)
+    delete counters_file_;
+}
+
+
+static char* ReadChars(const char* name, int* size_out) {
+  v8::Unlocker unlocker;  // Release the V8 lock while reading files.
+  FILE* file = i::OS::FOpen(name, "rb");
+  if (file == NULL) return NULL;
+
+  fseek(file, 0, SEEK_END);
+  int size = ftell(file);
+  rewind(file);
+
+  char* chars = new char[size + 1];
+  chars[size] = '\0';
+  for (int i = 0; i < size;) {
+    int read = fread(&chars[i], 1, size - i, file);
+    i += read;
+  }
+  fclose(file);
+  *size_out = size;
+  return chars;
+}
+
+
+static char* ReadToken(char* data, char token) {
+  char* next = i::OS::StrChr(data, token);
+  if (next != NULL) {
+    *next = '\0';
+    return (next + 1);
+  }
+
+  return NULL;
+}
+
+
+static char* ReadLine(char* data) {
+  return ReadToken(data, '\n');
+}
+
+
+static char* ReadWord(char* data) {
+  return ReadToken(data, ' ');
+}
+
+
+// Reads a file into a v8 string.
+Handle<String> Shell::ReadFile(const char* name) {
+  int size = 0;
+  char* chars = ReadChars(name, &size);
+  if (chars == NULL) return Handle<String>();
+  Handle<String> result = String::New(chars);
+  delete[] chars;
+  return result;
+}
+
+
+void Shell::RunShell() {
+  LineEditor* editor = LineEditor::Get();
+  printf("V8 version %s [console: %s]\n", V8::GetVersion(), editor->name());
+  editor->Open();
+  while (true) {
+    Locker locker;
+    HandleScope handle_scope;
+    Context::Scope context_scope(evaluation_context_);
+    i::SmartPointer<char> input = editor->Prompt(Shell::kPrompt);
+    if (input.is_empty())
+      break;
+    editor->AddHistory(*input);
+    Handle<String> name = String::New("(d8)");
+    ExecuteString(String::New(*input), name, true, true);
+  }
+  editor->Close();
+  printf("\n");
+}
+
+
+class ShellThread : public i::Thread {
+ public:
+  ShellThread(int no, i::Vector<const char> files)
+    : no_(no), files_(files) { }
+  virtual void Run();
+ private:
+  int no_;
+  i::Vector<const char> files_;
+};
+
+
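+// The file handed to a ShellThread (via the -p option handled in Shell::Main
+// below) is parsed by Run(): lines starting with '#' are skipped, and each
+// remaining newline-separated line names space-separated script files that
+// are executed together in their own context.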
+void ShellThread::Run() {
+  // Prepare the context for this thread.
+  Locker locker;
+  HandleScope scope;
+  Handle<ObjectTemplate> global_template = ObjectTemplate::New();
+  global_template->Set(String::New("print"),
+                       FunctionTemplate::New(Shell::Print));
+  global_template->Set(String::New("write"),
+                       FunctionTemplate::New(Shell::Write));
+  global_template->Set(String::New("read"),
+                       FunctionTemplate::New(Shell::Read));
+  global_template->Set(String::New("readline"),
+                       FunctionTemplate::New(Shell::ReadLine));
+  global_template->Set(String::New("load"),
+                       FunctionTemplate::New(Shell::Load));
+  global_template->Set(String::New("yield"),
+                       FunctionTemplate::New(Shell::Yield));
+  global_template->Set(String::New("version"),
+                       FunctionTemplate::New(Shell::Version));
+
+  char* ptr = const_cast<char*>(files_.start());
+  while ((ptr != NULL) && (*ptr != '\0')) {
+    // For each newline-separated line.
+    char* next_line = ReadLine(ptr);
+
+    if (*ptr == '#') {
+      // Skip comment lines.
+      ptr = next_line;
+      continue;
+    }
+
+    Persistent<Context> thread_context = Context::New(NULL, global_template);
+    thread_context->SetSecurityToken(Undefined());
+    Context::Scope context_scope(thread_context);
+
+    while ((ptr != NULL) && (*ptr != '\0')) {
+      char* filename = ptr;
+      ptr = ReadWord(ptr);
+
+      // Skip empty strings.
+      if (strlen(filename) == 0) {
+        break;
+      }
+
+      Handle<String> str = Shell::ReadFile(filename);
+      if (str.IsEmpty()) {
+        printf("WARNING: %s not found\n", filename);
+        break;
+      }
+
+      Shell::ExecuteString(str, String::New(filename), false, false);
+    }
+
+    thread_context.Dispose();
+    ptr = next_line;
+  }
+}
+
+
+int Shell::Main(int argc, char* argv[]) {
+  i::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
+  if (i::FLAG_help) {
+    return 1;
+  }
+  Initialize();
+  bool run_shell = (argc == 1);
+
+  // Default is to use preemption if threads are created.
+  bool use_preemption = true;
+
+  // Default to the lowest possible thread preemption interval to test as many
+  // edge cases as possible.
+  int preemption_interval = 1;
+
+  i::List<i::Thread*> threads(1);
+
+  {
+    // Acquire the V8 lock once initialization has finished. Since the threads
+    // spawned below may access V8, holding the V8 lock here is mandatory.
+    Locker locker;
+    Context::Scope context_scope(evaluation_context_);
+    for (int i = 1; i < argc; i++) {
+      char* str = argv[i];
+      if (strcmp(str, "--shell") == 0) {
+        run_shell = true;
+      } else if (strcmp(str, "--preemption") == 0) {
+        use_preemption = true;
+      } else if (strcmp(str, "--no-preemption") == 0) {
+        use_preemption = false;
+      } else if (strcmp(str, "--preemption-interval") == 0) {
+        if (i + 1 < argc) {
+          char* end = NULL;
+          preemption_interval = strtol(argv[++i], &end, 10);  // NOLINT
+          if (preemption_interval <= 0 || *end != '\0' || errno == ERANGE) {
+            printf("Invalid value for --preemption-interval '%s'\n", argv[i]);
+            return 1;
+          }
+        } else {
+          printf("Missing value for --preemption-interval\n");
+          return 1;
+        }
+      } else if (strcmp(str, "-f") == 0) {
+        // Ignore any -f flags for compatibility with other stand-alone
+        // JavaScript engines.
+        continue;
+      } else if (strncmp(str, "--", 2) == 0) {
+        printf("Warning: unknown flag %s.\nTry --help for options\n", str);
+      } else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
+        // Execute argument given to -e option directly.
+        v8::HandleScope handle_scope;
+        v8::Handle<v8::String> file_name = v8::String::New("unnamed");
+        v8::Handle<v8::String> source = v8::String::New(argv[i + 1]);
+        if (!ExecuteString(source, file_name, false, true)) {
+          OnExit();
+          return 1;
+        }
+        i++;
+      } else if (strcmp(str, "-p") == 0 && i + 1 < argc) {
+        int size = 0;
+        const char* files = ReadChars(argv[++i], &size);
+        if (files == NULL) return 1;
+        ShellThread* thread =
+            new ShellThread(threads.length(),
+                            i::Vector<const char>(files, size));
+        thread->Start();
+        threads.Add(thread);
+      } else {
+        // Use all other arguments as names of files to load and run.
+        HandleScope handle_scope;
+        Handle<String> file_name = v8::String::New(str);
+        Handle<String> source = ReadFile(str);
+        if (source.IsEmpty()) {
+          printf("Error reading '%s'\n", str);
+          return 1;
+        }
+        if (!ExecuteString(source, file_name, false, true)) {
+          OnExit();
+          return 1;
+        }
+      }
+    }
+
+    // Start preemption if threads have been created and preemption is enabled.
+    if (threads.length() > 0 && use_preemption) {
+      Locker::StartPreemption(preemption_interval);
+    }
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+    // Run the remote debugger if requested.
+    if (i::FLAG_remote_debugger) {
+      RunRemoteDebugger(i::FLAG_debugger_port);
+      return 0;
+    }
+#endif
+  }
+  if (run_shell)
+    RunShell();
+  for (int i = 0; i < threads.length(); i++) {
+    i::Thread* thread = threads[i];
+    thread->Join();
+    delete thread;
+  }
+  OnExit();
+  return 0;
+}
+
+
+}  // namespace v8
+
+
+int main(int argc, char* argv[]) {
+  return v8::Shell::Main(argc, argv);
+}
diff --git a/src/d8.h b/src/d8.h
new file mode 100644
index 0000000..c93ea46
--- /dev/null
+++ b/src/d8.h
@@ -0,0 +1,225 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_D8_H_
+#define V8_D8_H_
+
+#include "v8.h"
+#include "hashmap.h"
+
+
+namespace v8 {
+
+
+namespace i = v8::internal;
+
+
+// A single counter in a counter collection.
+class Counter {
+ public:
+  static const int kMaxNameSize = 64;
+  int32_t* Bind(const char* name, bool histogram);
+  int32_t* ptr() { return &count_; }
+  int32_t count() { return count_; }
+  int32_t sample_total() { return sample_total_; }
+  bool is_histogram() { return is_histogram_; }
+  void AddSample(int32_t sample);
+ private:
+  int32_t count_;
+  int32_t sample_total_;
+  bool is_histogram_;
+  uint8_t name_[kMaxNameSize];
+};
+
+
+// A set of counters and associated information.  An instance of this
+// class is stored directly in the memory-mapped counters file if
+// the --map-counters option is used.
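+//
+// Illustrative usage (the exact flag syntax is assumed here, not taken from
+// this change):
+//
+//   d8 --map-counters=/tmp/d8.counters script.js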
+class CounterCollection {
+ public:
+  CounterCollection();
+  Counter* GetNextCounter();
+ private:
+  static const unsigned kMaxCounters = 256;
+  uint32_t magic_number_;
+  uint32_t max_counters_;
+  uint32_t max_name_size_;
+  uint32_t counters_in_use_;
+  Counter counters_[kMaxCounters];
+};
+
+
+class CounterMap {
+ public:
+  CounterMap(): hash_map_(Match) { }
+  Counter* Lookup(const char* name) {
+    i::HashMap::Entry* answer = hash_map_.Lookup(
+        const_cast<char*>(name),
+        Hash(name),
+        false);
+    if (!answer) return NULL;
+    return reinterpret_cast<Counter*>(answer->value);
+  }
+  void Set(const char* name, Counter* value) {
+    i::HashMap::Entry* answer = hash_map_.Lookup(
+        const_cast<char*>(name),
+        Hash(name),
+        true);
+    ASSERT(answer != NULL);
+    answer->value = value;
+  }
+  class Iterator {
+   public:
+    explicit Iterator(CounterMap* map)
+        : map_(&map->hash_map_), entry_(map_->Start()) { }
+    void Next() { entry_ = map_->Next(entry_); }
+    bool More() { return entry_ != NULL; }
+    const char* CurrentKey() { return static_cast<const char*>(entry_->key); }
+    Counter* CurrentValue() { return static_cast<Counter*>(entry_->value); }
+   private:
+    i::HashMap* map_;
+    i::HashMap::Entry* entry_;
+  };
+ private:
+  static int Hash(const char* name);
+  static bool Match(void* key1, void* key2);
+  i::HashMap hash_map_;
+};
+
+
+class Shell: public i::AllStatic {
+ public:
+  static bool ExecuteString(Handle<String> source,
+                            Handle<Value> name,
+                            bool print_result,
+                            bool report_exceptions);
+  static void ReportException(TryCatch* try_catch);
+  static void Initialize();
+  static void OnExit();
+  static int* LookupCounter(const char* name);
+  static void* CreateHistogram(const char* name,
+                               int min,
+                               int max,
+                               size_t buckets);
+  static void AddHistogramSample(void* histogram, int sample);
+  static void MapCounters(const char* name);
+  static Handle<String> ReadFile(const char* name);
+  static void RunShell();
+  static int Main(int argc, char* argv[]);
+  static Handle<Array> GetCompletions(Handle<String> text,
+                                      Handle<String> full);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  static Handle<Object> DebugMessageDetails(Handle<String> message);
+  static Handle<Value> DebugCommandToJSONRequest(Handle<String> command);
+#endif
+
+  static Handle<Value> Print(const Arguments& args);
+  static Handle<Value> Write(const Arguments& args);
+  static Handle<Value> Yield(const Arguments& args);
+  static Handle<Value> Quit(const Arguments& args);
+  static Handle<Value> Version(const Arguments& args);
+  static Handle<Value> Read(const Arguments& args);
+  static Handle<Value> ReadLine(const Arguments& args);
+  static Handle<Value> Load(const Arguments& args);
+  // The OS object on the global object contains methods for performing
+  // operating system calls:
+  //
+  // os.system("program_name", ["arg1", "arg2", ...], timeout1, timeout2) will
+  // run the command, passing the arguments to the program.  The standard output
+  // of the program will be picked up and returned as a multiline string.  If
+  // timeout1 is present then it should be a number.  -1 indicates no timeout
+  // and a positive number is used as a timeout in milliseconds that limits the
+  // time spent waiting between receiving output characters from the program.
+  // timeout2, if present, should be a number indicating the limit in
+  // milliseconds on the total running time of the program.  Exceptions are
+  // thrown on timeouts or other errors or if the exit status of the program
+  // indicates an error.
+  //
+  // os.chdir(dir) changes directory to the given directory.  Throws an
+  // exception on error.
+  //
+  // os.setenv(variable, value) sets an environment variable.  Repeated calls to
+  // this method leak memory due to the API of setenv in the standard C library.
+  //
+  // os.umask(value) calls the umask system call and returns the old umask.
+  //
+  // os.mkdirp(name, mask) creates a directory.  The mask (if present) is ANDed
+  // with the current umask.  Intermediate directories are created if necessary.
+  // An exception is not thrown if the directory already exists.  Analogous to
+  // the "mkdir -p" command.
+  static Handle<Value> OSObject(const Arguments& args);
+  static Handle<Value> System(const Arguments& args);
+  static Handle<Value> ChangeDirectory(const Arguments& args);
+  static Handle<Value> SetEnvironment(const Arguments& args);
+  static Handle<Value> SetUMask(const Arguments& args);
+  static Handle<Value> MakeDirectory(const Arguments& args);
+  static Handle<Value> RemoveDirectory(const Arguments& args);
+
+  static void AddOSMethods(Handle<ObjectTemplate> os_template);
+
+  static Handle<Context> utility_context() { return utility_context_; }
+
+  static const char* kHistoryFileName;
+  static const char* kPrompt;
+ private:
+  static Persistent<Context> utility_context_;
+  static Persistent<Context> evaluation_context_;
+  static CounterMap* counter_map_;
+  // We statically allocate a set of local counters to be used if we
+  // don't want to store the stats in a memory-mapped file.
+  static CounterCollection local_counters_;
+  static CounterCollection* counters_;
+  static i::OS::MemoryMappedFile* counters_file_;
+  static Counter* GetCounter(const char* name, bool is_histogram);
+};
+
+
+class LineEditor {
+ public:
+  enum Type { DUMB = 0, READLINE = 1 };
+  LineEditor(Type type, const char* name);
+  virtual ~LineEditor() { }
+
+  virtual i::SmartPointer<char> Prompt(const char* prompt) = 0;
+  virtual bool Open() { return true; }
+  virtual bool Close() { return true; }
+  virtual void AddHistory(const char* str) { }
+
+  const char* name() { return name_; }
+  static LineEditor* Get();
+ private:
+  Type type_;
+  const char* name_;
+  LineEditor* next_;
+  static LineEditor* first_;
+};
+
+
+}  // namespace v8
+
+
+#endif  // V8_D8_H_
diff --git a/src/d8.js b/src/d8.js
new file mode 100644
index 0000000..be4a051
--- /dev/null
+++ b/src/d8.js
@@ -0,0 +1,1625 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+String.prototype.startsWith = function (str) {
+  if (str.length > this.length)
+    return false;
+  return this.substr(0, str.length) == str;
+};
+
+function log10(num) {
+  return Math.log(num)/Math.log(10);
+}
+
+function ToInspectableObject(obj) {
+  if (!obj && typeof obj === 'object') {
+    return void 0;
+  } else {
+    return Object(obj);
+  }
+}
+
+function GetCompletions(global, last, full) {
+  var full_tokens = full.split();
+  full = full_tokens.pop();
+  var parts = full.split('.');
+  parts.pop();
+  var current = global;
+  for (var i = 0; i < parts.length; i++) {
+    var part = parts[i];
+    var next = current[part];
+    if (!next)
+      return [];
+    current = next;
+  }
+  var result = [];
+  current = ToInspectableObject(current);
+  while (typeof current !== 'undefined') {
+    var mirror = new $debug.ObjectMirror(current);
+    var properties = mirror.properties();
+    for (var i = 0; i < properties.length; i++) {
+      var name = properties[i].name();
+      if (typeof name === 'string' && name.startsWith(last))
+        result.push(name);
+    }
+    current = ToInspectableObject(current.__proto__);
+  }
+  return result;
+}
+
+
+// Global object holding debugger related constants and state.
+const Debug = {};
+
+
+// Debug events which can occur in the V8 JavaScript engine. These originate
+// from the API include file v8-debug.h.
+Debug.DebugEvent = { Break: 1,
+                     Exception: 2,
+                     NewFunction: 3,
+                     BeforeCompile: 4,
+                     AfterCompile: 5 };
+
+
+// The different types of scripts matching enum ScriptType in objects.h.
+Debug.ScriptType = { Native: 0,
+                     Extension: 1,
+                     Normal: 2 };
+
+
+// The different types of script compilations matching enum
+// Script::CompilationType in objects.h.
+Debug.ScriptCompilationType = { Host: 0,
+                                Eval: 1,
+                                JSON: 2 };
+
+
+// The different types of scopes matching constants in runtime.cc.
+Debug.ScopeType = { Global: 0,
+                    Local: 1,
+                    With: 2,
+                    Closure: 3,
+                    Catch: 4 };
+
+
+// Current debug state.
+const kNoFrame = -1;
+Debug.State = {
+  currentFrame: kNoFrame,
+  currentSourceLine: -1
+};
+var trace_compile = false;  // Tracing all compile events?
+
+
+// Process a debugger JSON message into a display text and a running status.
+// This function returns an object with properties "text" and "running" holding
+// this information.
+function DebugMessageDetails(message) {
+  // Convert the JSON string to an object.
+  var response = new ProtocolPackage(message);
+
+  if (response.type() == 'event') {
+    return DebugEventDetails(response);
+  } else {
+    return DebugResponseDetails(response);
+  }
+}
+
+function DebugEventDetails(response) {
+  var details = { text: '', running: false };
+
+  // Get the running state.
+  details.running = response.running();
+
+  var body = response.body();
+  var result = '';
+  switch (response.event()) {
+    case 'break':
+      if (body.breakpoints) {
+        result += 'breakpoint';
+        if (body.breakpoints.length > 1) {
+          result += 's';
+        }
+        result += ' #';
+        for (var i = 0; i < body.breakpoints.length; i++) {
+          if (i > 0) {
+            result += ', #';
+          }
+          result += body.breakpoints[i];
+        }
+      } else {
+        result += 'break';
+      }
+      result += ' in ';
+      result += body.invocationText;
+      result += ', ';
+      result += SourceInfo(body);
+      result += '\n';
+      result += SourceUnderline(body.sourceLineText, body.sourceColumn);
+      Debug.State.currentSourceLine = body.sourceLine;
+      Debug.State.currentFrame = 0;
+      details.text = result;
+      break;
+      
+    case 'exception':
+      if (body.uncaught) {
+        result += 'Uncaught: ';
+      } else {
+        result += 'Exception: ';
+      }
+      result += '"';
+      result += body.exception.text;
+      result += '"';
+      if (body.sourceLine >= 0) {
+        result += ', ';
+        result += SourceInfo(body);
+        result += '\n';
+        result += SourceUnderline(body.sourceLineText, body.sourceColumn);
+        Debug.State.currentSourceLine = body.sourceLine;
+        Debug.State.currentFrame = 0;
+      } else {
+        result += ' (empty stack)';
+        Debug.State.currentSourceLine = -1;
+        Debug.State.currentFrame = kNoFrame;
+      }
+      details.text = result;
+      break;
+
+    case 'afterCompile':
+      if (trace_compile) {
+        result = 'Source ' + body.script.name + ' compiled:\n'
+        var source = body.script.source;
+        if (!(source[source.length - 1] == '\n')) {
+          result += source;
+        } else {
+          result += source.substring(0, source.length - 1);
+        }
+      }
+      details.text = result;
+      break;
+
+    default:
+      details.text = 'Unknown debug event ' + response.event();
+  }
+
+  return details;
+};
+
+
+function SourceInfo(body) {
+  var result = '';
+  
+  if (body.script) {
+    if (body.script.name) {
+      result += body.script.name;
+    } else {
+      result += '[unnamed]';
+    }
+  }
+  result += ' line ';
+  result += body.sourceLine + 1;
+  result += ' column ';
+  result += body.sourceColumn + 1;
+  
+  return result;
+}
+
+
+function SourceUnderline(source_text, position) {
+  if (!source_text) {
+    return;
+  }
+
+  // Create an underline with a caret pointing to the source position. If the
+  // source contains a tab character the underline will have a tab character in
+  // the same place otherwise the underline will have a space character.
+  var underline = '';
+  for (var i = 0; i < position; i++) {
+    if (source_text[i] == '\t') {
+      underline += '\t';
+    } else {
+      underline += ' ';
+    }
+  }
+  underline += '^';
+
+  // Return the source line text with the underline beneath.
+  return source_text + '\n' + underline;
+};
+
+
+// Converts a text command to a JSON request.
+function DebugCommandToJSONRequest(cmd_line) {
+  return new DebugRequest(cmd_line).JSONRequest();
+};
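+// For example (illustrative), DebugCommandToJSONRequest('c') yields the JSON
+// text of a continue request, while DebugCommandToJSONRequest('help') returns
+// undefined because the help command is handled internally and produces no
+// JSON.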
+
+
+function DebugRequest(cmd_line) {
+  // If the very first character is a { assume that a JSON request has been
+  // entered as a command. Converting that to a JSON request is trivial.
+  if (cmd_line && cmd_line.length > 0 && cmd_line.charAt(0) == '{') {
+    this.request_ = cmd_line;
+    return;
+  }
+
+  // Trim string for leading and trailing whitespace.
+  cmd_line = cmd_line.replace(/^\s+|\s+$/g, '');
+
+  // Find the command.
+  var pos = cmd_line.indexOf(' ');
+  var cmd;
+  var args;
+  if (pos == -1) {
+    cmd = cmd_line;
+    args = '';
+  } else {
+    cmd = cmd_line.slice(0, pos);
+    args = cmd_line.slice(pos).replace(/^\s+|\s+$/g, '');
+  }
+
+  // Switch on command.
+  switch (cmd) {
+    case 'continue':
+    case 'c':
+      this.request_ = this.continueCommandToJSONRequest_(args);
+      break;
+
+    case 'step':
+    case 's':
+      this.request_ = this.stepCommandToJSONRequest_(args);
+      break;
+
+    case 'backtrace':
+    case 'bt':
+      this.request_ = this.backtraceCommandToJSONRequest_(args);
+      break;
+      
+    case 'frame':
+    case 'f':
+      this.request_ = this.frameCommandToJSONRequest_(args);
+      break;
+      
+    case 'scopes':
+      this.request_ = this.scopesCommandToJSONRequest_(args);
+      break;
+      
+    case 'scope':
+      this.request_ = this.scopeCommandToJSONRequest_(args);
+      break;
+      
+    case 'print':
+    case 'p':
+      this.request_ = this.printCommandToJSONRequest_(args);
+      break;
+
+    case 'dir':
+      this.request_ = this.dirCommandToJSONRequest_(args);
+      break;
+
+    case 'references':
+      this.request_ = this.referencesCommandToJSONRequest_(args);
+      break;
+
+    case 'instances':
+      this.request_ = this.instancesCommandToJSONRequest_(args);
+      break;
+
+    case 'source':
+      this.request_ = this.sourceCommandToJSONRequest_(args);
+      break;
+      
+    case 'scripts':
+      this.request_ = this.scriptsCommandToJSONRequest_(args);
+      break;
+      
+    case 'break':
+    case 'b':
+      this.request_ = this.breakCommandToJSONRequest_(args);
+      break;
+      
+    case 'clear':
+      this.request_ = this.clearCommandToJSONRequest_(args);
+      break;
+
+    case 'threads':
+      this.request_ = this.threadsCommandToJSONRequest_(args);
+      break;
+
+    case 'trace':
+      // Return undefined to indicate command handled internally (no JSON).
+      this.request_ = void 0;
+      this.traceCommand_(args);
+      break;
+
+    case 'help':
+    case '?':
+      this.helpCommand_(args);
+      // Return undefined to indicate command handled internally (no JSON).
+      this.request_ = void 0;
+      break;
+
+    default:
+      throw new Error('Unknown command "' + cmd + '"');
+  }
+  
+  last_cmd = cmd;
+}
+
+DebugRequest.prototype.JSONRequest = function() {
+  return this.request_;
+}
+
+
+function RequestPacket(command) {
+  this.seq = 0;
+  this.type = 'request';
+  this.command = command;
+}
+
+
+RequestPacket.prototype.toJSONProtocol = function() {
+  // Encode the protocol header.
+  var json = '{';
+  json += '"seq":' + this.seq;
+  json += ',"type":"' + this.type + '"';
+  if (this.command) {
+    json += ',"command":' + StringToJSON_(this.command);
+  }
+  if (this.arguments) {
+    json += ',"arguments":';
+    // Encode the arguments part.
+    if (this.arguments.toJSONProtocol) {
+      json += this.arguments.toJSONProtocol()
+    } else {
+      json += SimpleObjectToJSON_(this.arguments);
+    }
+  }
+  json += '}';
+  return json;
+}
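+// A packet with no arguments serializes to something like (illustrative):
+//   {"seq":0,"type":"request","command":"continue"}
+// with an "arguments" member appended when one has been set.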
+
+
+DebugRequest.prototype.createRequest = function(command) {
+  return new RequestPacket(command);
+};
+
+
+// Create a JSON request for the evaluation command.
+DebugRequest.prototype.makeEvaluateJSONRequest_ = function(expression) {
+  // Global variable used to store whether a handle was requested.
+  lookup_handle = null;
+  // Check if the expression is a handle id in the form #<handle>#.
+  var handle_match = expression.match(/^#([0-9]*)#$/);
+  if (handle_match) {
+    // Remember the handle requested in a global variable.
+    lookup_handle = parseInt(handle_match[1]);
+    // Build a lookup request.
+    var request = this.createRequest('lookup');
+    request.arguments = {};
+    request.arguments.handles = [ lookup_handle ];
+    return request.toJSONProtocol();
+  } else {
+    // Build an evaluate request.
+    var request = this.createRequest('evaluate');
+    request.arguments = {};
+    request.arguments.expression = expression;
+    // Request a global evaluation if there is no current frame.
+    if (Debug.State.currentFrame == kNoFrame) {
+      request.arguments.global = true;
+    }
+    return request.toJSONProtocol();
+  }
+};
+
+
+// Create a JSON request for the references/instances command.
+DebugRequest.prototype.makeReferencesJSONRequest_ = function(handle, type) {
+  // Build a references request.
+  var handle_match = handle.match(/^#([0-9]*)#$/);
+  if (handle_match) {
+    var request = this.createRequest('references');
+    request.arguments = {};
+    request.arguments.type = type;
+    request.arguments.handle = parseInt(handle_match[1]);
+    return request.toJSONProtocol();
+  } else {
+    throw new Error('Invalid object id.');
+  }
+};
+
+
+// Create a JSON request for the continue command.
+DebugRequest.prototype.continueCommandToJSONRequest_ = function(args) {
+  var request = this.createRequest('continue');
+  return request.toJSONProtocol();
+};
+
+
+// Create a JSON request for the step command.
+DebugRequest.prototype.stepCommandToJSONRequest_ = function(args) {
+  // A step is requested through the continue command with additional
+  // arguments.
+  var request = this.createRequest('continue');
+  request.arguments = {};
+
+  // Process arguments if any.
+  if (args && args.length > 0) {
+    args = args.split(/\s*[ ]+\s*/g);
+
+    if (args.length > 2) {
+      throw new Error('Invalid step arguments.');
+    }
+
+    if (args.length > 0) {
+      // Get step count argument if any.
+      if (args.length == 2) {
+        var stepcount = parseInt(args[1]);
+        if (isNaN(stepcount) || stepcount <= 0) {
+          throw new Error('Invalid step count argument "' + args[1] + '".');
+        }
+        request.arguments.stepcount = stepcount;
+      }
+
+      // Get the step action.
+      switch (args[0]) {
+        case 'in':
+        case 'i':
+          request.arguments.stepaction = 'in';
+          break;
+          
+        case 'min':
+        case 'm':
+          request.arguments.stepaction = 'min';
+          break;
+          
+        case 'next':
+        case 'n':
+          request.arguments.stepaction = 'next';
+          break;
+          
+        case 'out':
+        case 'o':
+          request.arguments.stepaction = 'out';
+          break;
+          
+        default:
+          throw new Error('Invalid step argument "' + args[0] + '".');
+      }
+    }
+  } else {
+    // Default is step next.
+    request.arguments.stepaction = 'next';
+  }
+
+  return request.toJSONProtocol();
+};
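+// Illustrative inputs: 'step' (or 's') alone uses the default 'next' action,
+// while 'step in 3' requests stepaction 'in' with stepcount 3.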
+
+
+// Create a JSON request for the backtrace command.
+DebugRequest.prototype.backtraceCommandToJSONRequest_ = function(args) {
+  // Build a backtrace request from the text command.
+  var request = this.createRequest('backtrace');
+  
+  // Default is to show top 10 frames.
+  request.arguments = {};
+  request.arguments.fromFrame = 0;
+  request.arguments.toFrame = 10;
+
+  args = args.split(/\s*[ ]+\s*/g);
+  if (args.length == 1 && args[0].length > 0) {
+    var frameCount = parseInt(args[0]);
+    if (frameCount > 0) {
+      // Show top frames.
+      request.arguments.fromFrame = 0;
+      request.arguments.toFrame = frameCount;
+    } else {
+      // Show bottom frames.
+      request.arguments.fromFrame = 0;
+      request.arguments.toFrame = -frameCount;
+      request.arguments.bottom = true;
+    }
+  } else if (args.length == 2) {
+    var fromFrame = parseInt(args[0]);
+    var toFrame = parseInt(args[1]);
+    if (isNaN(fromFrame) || fromFrame < 0) {
+      throw new Error('Invalid start frame argument "' + args[0] + '".');
+    }
+    if (isNaN(toFrame) || toFrame < 0) {
+      throw new Error('Invalid end frame argument "' + args[1] + '".');
+    }
+    if (fromFrame > toFrame) {
+      throw new Error('Invalid arguments: start frame cannot be larger ' +
+                      'than end frame.');
+    }
+    // Show frame range.
+    request.arguments.fromFrame = fromFrame;
+    request.arguments.toFrame = toFrame + 1;
+  } else if (args.length > 2) {
+    throw new Error('Invalid backtrace arguments.');
+  }
+
+  return request.toJSONProtocol();
+};
+
+
+// Create a JSON request for the frame command.
+DebugRequest.prototype.frameCommandToJSONRequest_ = function(args) {
+  // Build a frame request from the text command.
+  var request = this.createRequest('frame');
+  args = args.split(/\s*[ ]+\s*/g);
+  if (args.length > 0 && args[0].length > 0) {
+    request.arguments = {};
+    request.arguments.number = args[0];
+  }
+  return request.toJSONProtocol();
+};
+
+
+// Create a JSON request for the scopes command.
+DebugRequest.prototype.scopesCommandToJSONRequest_ = function(args) {
+  // Build a scopes request from the text command.
+  var request = this.createRequest('scopes');
+  return request.toJSONProtocol();
+};
+
+
+// Create a JSON request for the scope command.
+DebugRequest.prototype.scopeCommandToJSONRequest_ = function(args) {
+  // Build a scope request from the text command.
+  var request = this.createRequest('scope');
+  args = args.split(/\s*[ ]+\s*/g);
+  if (args.length > 0 && args[0].length > 0) {
+    request.arguments = {};
+    request.arguments.number = args[0];
+  }
+  return request.toJSONProtocol();
+};
+
+
+// Create a JSON request for the print command.
+DebugRequest.prototype.printCommandToJSONRequest_ = function(args) {
+  // Build an evaluate request from the text command.
+  if (args.length == 0) {
+    throw new Error('Missing expression.');
+  }
+  return this.makeEvaluateJSONRequest_(args);
+};
+
+
+// Create a JSON request for the dir command.
+DebugRequest.prototype.dirCommandToJSONRequest_ = function(args) {
+  // Build an evaluate request from the text command.
+  if (args.length == 0) {
+    throw new Error('Missing expression.');
+  }
+  return this.makeEvaluateJSONRequest_(args);
+};
+
+
+// Create a JSON request for the references command.
+DebugRequest.prototype.referencesCommandToJSONRequest_ = function(args) {
+  // Build an evaluate request from the text command.
+  if (args.length == 0) {
+    throw new Error('Missing object id.');
+  }
+  
+  return this.makeReferencesJSONRequest_(args, 'referencedBy');
+};
+
+
+// Create a JSON request for the instances command.
+DebugRequest.prototype.instancesCommandToJSONRequest_ = function(args) {
+  // Build an evaluate request from the text command.
+  if (args.length == 0) {
+    throw new Error('Missing object id.');
+  }
+  
+  // Build a references request.
+  return this.makeReferencesJSONRequest_(args, 'constructedBy');
+};
+
+
+// Create a JSON request for the source command.
+DebugRequest.prototype.sourceCommandToJSONRequest_ = function(args) {
+  // Build a source request from the text command.
+  var request = this.createRequest('source');
+
+  // Default is ten lines starting five lines before the current location.
+  var from = Debug.State.currentSourceLine - 5;
+  var lines = 10;
+
+  // Parse the arguments.
+  args = args.split(/\s*[ ]+\s*/g);
+  if (args.length > 1 && args[0].length > 0 && args[1].length > 0) {
+    from = parseInt(args[0]) - 1;
+    lines = parseInt(args[1]);
+  } else if (args.length > 0 && args[0].length > 0) {
+    from = parseInt(args[0]) - 1;
+  }
+
+  if (from < 0) from = 0;
+  if (lines < 0) lines = 10;
+
+  // Request source around the current source location.
+  request.arguments = {};
+  request.arguments.fromLine = from;
+  request.arguments.toLine = from + lines;
+
+  return request.toJSONProtocol();
+};
+
+
+// Create a JSON request for the scripts command.
+DebugRequest.prototype.scriptsCommandToJSONRequest_ = function(args) {
+  // Build a scripts request from the text command.
+  var request = this.createRequest('scripts');
+
+  // Process arguments if any.
+  if (args && args.length > 0) {
+    args = args.split(/\s*[ ]+\s*/g);
+
+    if (args.length > 1) {
+      throw new Error('Invalid scripts arguments.');
+    }
+
+    request.arguments = {};
+    switch (args[0]) {
+      case 'natives':
+        request.arguments.types = ScriptTypeFlag(Debug.ScriptType.Native);
+        break;
+        
+      case 'extensions':
+        request.arguments.types = ScriptTypeFlag(Debug.ScriptType.Extension);
+        break;
+        
+      case 'all':
+        request.arguments.types =
+            ScriptTypeFlag(Debug.ScriptType.Normal) |
+            ScriptTypeFlag(Debug.ScriptType.Native) |
+            ScriptTypeFlag(Debug.ScriptType.Extension);
+        break;
+        
+      default:
+        throw new Error('Invalid argument "' + args[0] + '".');
+    }
+  }
+
+  return request.toJSONProtocol();
+};
+
+
+// Create a JSON request for the break command.
+DebugRequest.prototype.breakCommandToJSONRequest_ = function(args) {
+  // Build a setbreakpoint request from the text command.
+  var request = this.createRequest('setbreakpoint');
+
+  // Process arguments if any.
+  if (args && args.length > 0) {
+    var target = args;
+    var type = 'function';
+    var line;
+    var column;
+    var condition;
+    var pos;
+
+    // Check for breakpoint condition.
+    pos = args.indexOf(' ');
+    if (pos > 0) {
+      target = args.substring(0, pos);
+      condition = args.substring(pos + 1, args.length);
+    }
+
+    // Check for script breakpoint (name:line[:column]). If no ':' in break
+    // specification it is considered a function break point.
+    pos = target.indexOf(':');
+    if (pos > 0) {
+      type = 'script';
+      var tmp = target.substring(pos + 1, target.length);
+      target = target.substring(0, pos);
+      
+      // Check for both line and column.
+      pos = tmp.indexOf(':');
+      if (pos > 0) {
+        column = parseInt(tmp.substring(pos + 1, tmp.length)) - 1;
+        line = parseInt(tmp.substring(0, pos)) - 1;
+      } else {
+        line = parseInt(tmp) - 1;
+      }
+    } else if (target[0] == '#' && target[target.length - 1] == '#') {
+      type = 'handle';
+      target = target.substring(1, target.length - 1);
+    } else {
+      type = 'function';
+    }
+  
+    request.arguments = {};
+    request.arguments.type = type;
+    request.arguments.target = target;
+    request.arguments.line = line;
+    request.arguments.column = column;
+    request.arguments.condition = condition;
+  } else {
+    throw new Error('Invalid break arguments.');
+  }
+
+  return request.toJSONProtocol();
+};
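+
+// For illustration (derived from the parsing above):
+//   'break math.js:10:5 x > 0' -> {type: 'script', target: 'math.js',
+//                                  line: 9, column: 4, condition: 'x > 0'}
+//   'break #42#'               -> {type: 'handle', target: '42'}
+//   'break compute'            -> {type: 'function', target: 'compute'}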
+
+
+// Create a JSON request for the clear command.
+DebugRequest.prototype.clearCommandToJSONRequest_ = function(args) {
+  // Build a clearbreakpoint request from the text command.
+  var request = this.createRequest('clearbreakpoint');
+
+  // Process arguments if any.
+  if (args && args.length > 0) {
+    request.arguments = {};
+    request.arguments.breakpoint = parseInt(args);
+  } else {
+    throw new Error('Invalid clear arguments.');
+  }
+
+  return request.toJSONProtocol();
+};
+
+
+// Create a JSON request for the threads command.
+DebugRequest.prototype.threadsCommandToJSONRequest_ = function(args) {
+  // Build a threads request from the text command.
+  var request = this.createRequest('threads');
+  return request.toJSONProtocol();
+};
+
+
+// Handle the trace command.
+DebugRequest.prototype.traceCommand_ = function(args) {
+  // Process arguments.
+  if (args && args.length > 0) {
+    if (args == 'compile') {
+      trace_compile = !trace_compile;
+      print('Tracing of compiled scripts ' + (trace_compile ? 'on' : 'off'));
+    } else {
+      throw new Error('Invalid trace arguments.');
+    }
+  } else {
+    throw new Error('Invalid trace arguments.');
+  }
+}
+
+// Handle the help command.
+DebugRequest.prototype.helpCommand_ = function(args) {
+  // Help is quite simple.
+  if (args && args.length > 0) {
+    print('warning: arguments to \'help\' are ignored');
+  }
+
+  print('break location [condition]');
+  print('  break on named function: location is a function name');
+  print('  break on function: location is #<id>#');
+  print('  break on script position: location is name:line[:column]');
+  print('clear <breakpoint #>');
+  print('backtrace [n] | [-n] | [from to]');
+  print('frame <frame #>');
+  print('scopes');
+  print('scope <scope #>');
+  print('step [in | next | out | min [step count]]');
+  print('print <expression>');
+  print('dir <expression>');
+  print('source [from line [num lines]]');
+  print('scripts');
+  print('continue');
+  print('trace compile');
+  print('help');
+}
+
+
+function formatHandleReference_(value) {
+  if (value.handle() >= 0) {
+    return '#' + value.handle() + '#';
+  } else {
+    return '#Transient#';
+  }
+}
+
+
+function formatObject_(value, include_properties) {
+  var result = '';
+  result += formatHandleReference_(value);
+  result += ', type: object';
+  result += ', constructor ';
+  var ctor = value.constructorFunctionValue();
+  result += formatHandleReference_(ctor);
+  result += ', __proto__ ';
+  var proto = value.protoObjectValue();
+  result += formatHandleReference_(proto);
+  result += ', ';
+  result += value.propertyCount();
+  result +=  ' properties.';
+  if (include_properties) {
+    result +=  '\n';
+    for (var i = 0; i < value.propertyCount(); i++) {
+      result += '  ';
+      result += value.propertyName(i);
+      result += ': ';
+      var property_value = value.propertyValue(i);
+      if (property_value instanceof ProtocolReference) {
+        result += '<no type>';
+      } else {
+        if (property_value && property_value.type()) {
+          result += property_value.type();
+        } else {
+          result += '<no type>';
+        }
+      }
+      result += ' ';
+      result += formatHandleReference_(property_value);
+      result += '\n';
+    }
+  }
+  return result;
+}
+
+
+function formatScope_(scope) {
+  var result = '';
+  var index = scope.index;
+  result += '#' + (index <= 9 ? '0' : '') + index;
+  result += ' ';
+  switch (scope.type) {
+    case Debug.ScopeType.Global:
+      result += 'Global, ';
+      result += '#' + scope.object.ref + '#';
+      break;
+    case Debug.ScopeType.Local:
+      result += 'Local';
+      break;
+    case Debug.ScopeType.With:
+      result += 'With, ';
+      result += '#' + scope.object.ref + '#';
+      break;
+    case Debug.ScopeType.Catch:
+      result += 'Catch, ';
+      result += '#' + scope.object.ref + '#';
+      break;
+    case Debug.ScopeType.Closure:
+      result += 'Closure';
+      break;
+    default:
+      result += 'UNKNOWN';
+  }
+  return result;
+}
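+
+// For illustration: a scope {index: 1, type: Debug.ScopeType.With,
+// object: {ref: 42}} formats as '#01 With, #42#', and a local scope with
+// index 0 formats as '#00 Local'.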
+
+
+// Convert a JSON response to text for display in a text based debugger.
+function DebugResponseDetails(response) {
+  var details = {text:'', running:false};
+
+  try {
+    if (!response.success()) {
+      details.text = response.message();
+      return details;
+    }
+
+    // Get the running state.
+    details.running = response.running();
+
+    var body = response.body();
+    var result = '';
+    switch (response.command()) {
+      case 'setbreakpoint':
+        result = 'set breakpoint #';
+        result += body.breakpoint;
+        details.text = result;
+        break;
+        
+      case 'clearbreakpoint':
+        result = 'cleared breakpoint #';
+        result += body.breakpoint;
+        details.text = result;
+        break;
+        
+      case 'backtrace':
+        if (body.totalFrames == 0) {
+          result = '(empty stack)';
+        } else {
+          var result = 'Frames #' + body.fromFrame + ' to #' +
+              (body.toFrame - 1) + ' of ' + body.totalFrames + '\n';
+          for (var i = 0; i < body.frames.length; i++) {
+            if (i != 0) result += '\n';
+            result += body.frames[i].text;
+          }
+        }
+        details.text = result;
+        break;
+        
+      case 'frame':
+        details.text = SourceUnderline(body.sourceLineText,
+                                       body.column);
+        Debug.State.currentSourceLine = body.line;
+        Debug.State.currentFrame = body.index;
+        break;
+        
+      case 'scopes':
+        if (body.totalScopes == 0) {
+          result = '(no scopes)';
+        } else {
+          result = 'Scopes #' + body.fromScope + ' to #' +
+                   (body.toScope - 1) + ' of ' + body.totalScopes + '\n';
+          for (var i = 0; i < body.scopes.length; i++) {
+            if (i != 0) {
+              result += '\n';
+            }
+            result += formatScope_(body.scopes[i]);
+          }
+        }
+        details.text = result;
+        break;
+
+      case 'scope':
+        result += formatScope_(body);
+        result += '\n';
+        var scope_object_value = response.lookup(body.object.ref);
+        result += formatObject_(scope_object_value, true);
+        details.text = result;
+        break;
+      
+      case 'evaluate':
+      case 'lookup':
+        if (last_cmd == 'p' || last_cmd == 'print') {
+          result = body.text;
+        } else {
+          var value;
+          if (lookup_handle) {
+            value = response.bodyValue(lookup_handle);
+          } else {
+            value = response.bodyValue();
+          }
+          if (value.isObject()) {
+            result += formatObject_(value, true);
+          } else {
+            result += 'type: ';
+            result += value.type();
+            if (!value.isUndefined() && !value.isNull()) {
+              result += ', ';
+              if (value.isString()) {
+                result += '"';
+              }
+              result += value.value();
+              if (value.isString()) {
+                result += '"';
+              }
+            }
+            result += '\n';
+          }
+        }
+        details.text = result;
+        break;
+
+      case 'references':
+        var count = body.length;
+        result += 'found ' + count + ' objects';
+        result += '\n';
+        for (var i = 0; i < count; i++) {
+          var value = response.bodyValue(i);
+          result += formatObject_(value, false);
+          result += '\n';
+        }
+        details.text = result;
+        break;
+        
+      case 'source':
+        // Get the source from the response.
+        var source = body.source;
+        var from_line = body.fromLine + 1;
+        var lines = source.split('\n');
+        var maxdigits = 1 + Math.floor(log10(from_line + lines.length));
+        if (maxdigits < 3) {
+          maxdigits = 3;
+        }
+        var result = '';
+        for (var num = 0; num < lines.length; num++) {
+          // Check if there's an extra newline at the end.
+          if (num == (lines.length - 1) && lines[num].length == 0) {
+            break;
+          }
+
+          var current_line = from_line + num;
+          var spacer = maxdigits - (1 + Math.floor(log10(current_line)));
+          if (current_line == Debug.State.currentSourceLine + 1) {
+            for (var i = 0; i < maxdigits; i++) {
+              result += '>';
+            }
+            result += '  ';
+          } else {
+            for (var i = 0; i < spacer; i++) {
+              result += ' ';
+            }
+            result += current_line + ': ';
+          }
+          result += lines[num];
+          result += '\n';
+        }
+        details.text = result;
+        break;
+        
+      case 'scripts':
+        var result = '';
+        for (var i = 0; i < body.length; i++) {
+          if (i != 0) result += '\n';
+          if (body[i].id) {
+            result += body[i].id;
+          } else {
+            result += '[no id]';
+          }
+          result += ', ';
+          if (body[i].name) {
+            result += body[i].name;
+          } else {
+            if (body[i].compilationType == Debug.ScriptCompilationType.Eval) {
+              result += 'eval from ';
+              var script_value = response.lookup(body[i].evalFromScript.ref);
+              result += ' ' + script_value.field('name');
+              result += ':' + (body[i].evalFromLocation.line + 1);
+              result += ':' + body[i].evalFromLocation.column;
+            } else if (body[i].compilationType ==
+                       Debug.ScriptCompilationType.JSON) {
+              result += 'JSON ';
+            } else {  // body[i].compilationType == Debug.ScriptCompilationType.Host
+              result += '[unnamed] ';
+            }
+          }
+          result += ' (lines: ';
+          result += body[i].lineCount;
+          result += ', length: ';
+          result += body[i].sourceLength;
+          if (body[i].type == Debug.ScriptType.Native) {
+            result += ', native';
+          } else if (body[i].type == Debug.ScriptType.Extension) {
+            result += ', extension';
+          }
+          result += '), [';
+          var sourceStart = body[i].sourceStart;
+          if (sourceStart.length > 40) {
+            sourceStart = sourceStart.substring(0, 37) + '...';
+          }
+          result += sourceStart;
+          result += ']';
+        }
+        details.text = result;
+        break;
+
+      case 'threads':
+        var result = 'Active V8 threads: ' + body.totalThreads + '\n';
+        body.threads.sort(function(a, b) { return a.id - b.id; });
+        for (var i = 0; i < body.threads.length; i++) {
+          result += body.threads[i].current ? '*' : ' ';
+          result += ' ';
+          result += body.threads[i].id;
+          result += '\n';
+        }
+        details.text = result;
+        break;
+
+      case 'continue':
+        details.text = "(running)";
+        break;
+        
+      default:
+        details.text =
+            'Response for unknown command \'' + response.command() + '\'' +
+            ' (' + json_response + ')';
+    }
+  } catch (e) {
+    details.text = 'Error: "' + e + '" formatting response';
+  }
+  
+  return details;
+};
+
+
+/**
+ * Protocol packages sent from the debugger.
+ * @param {string} json - raw protocol packet as JSON string.
+ * @constructor
+ */
+function ProtocolPackage(json) {
+  this.packet_ = JSON.parse(json);
+  this.refs_ = [];
+  if (this.packet_.refs) {
+    for (var i = 0; i < this.packet_.refs.length; i++) {
+      this.refs_[this.packet_.refs[i].handle] = this.packet_.refs[i];
+    }
+  }
+}
+
+
+/**
+ * Get the packet type.
+ * @return {String} the packet type
+ */
+ProtocolPackage.prototype.type = function() {
+  return this.packet_.type;
+}
+
+
+/**
+ * Get the packet event.
+ * @return {Object} the packet event
+ */
+ProtocolPackage.prototype.event = function() {
+  return this.packet_.event;
+}
+
+
+/**
+ * Get the packet request sequence.
+ * @return {number} the packet request sequence
+ */
+ProtocolPackage.prototype.requestSeq = function() {
+  return this.packet_.request_seq;
+}
+
+
+/**
+ * Get the running state.
+ * @return {boolean} true if the VM is running after processing this packet
+ */
+ProtocolPackage.prototype.running = function() {
+  return this.packet_.running ? true : false;
+}
+
+
+ProtocolPackage.prototype.success = function() {
+  return this.packet_.success ? true : false;
+}
+
+
+ProtocolPackage.prototype.message = function() {
+  return this.packet_.message;
+}
+
+
+ProtocolPackage.prototype.command = function() {
+  return this.packet_.command;
+}
+
+
+ProtocolPackage.prototype.body = function() {
+  return this.packet_.body;
+}
+
+
+ProtocolPackage.prototype.bodyValue = function(index) {
+  if (index != null) {
+    return new ProtocolValue(this.packet_.body[index], this);
+  } else {
+    return new ProtocolValue(this.packet_.body, this);
+  }
+}
+
+
+ProtocolPackage.prototype.lookup = function(handle) {
+  var value = this.refs_[handle];
+  if (value) {
+    return new ProtocolValue(value, this);
+  } else {
+    return new ProtocolReference(handle);
+  }
+}
+
+
+function ProtocolValue(value, packet) {
+  this.value_ = value;
+  this.packet_ = packet;
+}
+
+
+/**
+ * Get the value type.
+ * @return {String} the value type
+ */
+ProtocolValue.prototype.type = function() {
+  return this.value_.type;
+}
+
+
+/**
+ * Get a metadata field from a protocol value. 
+ * @return {Object} the metadata field value
+ */
+ProtocolValue.prototype.field = function(name) {
+  return this.value_[name];
+}
+
+
+/**
+ * Check if the value is a primitive value.
+ * @return {boolean} true if the value is primitive
+ */
+ProtocolValue.prototype.isPrimitive = function() {
+  return this.isUndefined() || this.isNull() || this.isBoolean() ||
+         this.isNumber() || this.isString();
+}
+
+
+/**
+ * Get the object handle.
+ * @return {number} the value handle
+ */
+ProtocolValue.prototype.handle = function() {
+  return this.value_.handle;
+}
+
+
+/**
+ * Check if the value is undefined.
+ * @return {boolean} true if the value is undefined
+ */
+ProtocolValue.prototype.isUndefined = function() {
+  return this.value_.type == 'undefined';
+}
+
+
+/**
+ * Check if the value is null.
+ * @return {boolean} true if the value is null
+ */
+ProtocolValue.prototype.isNull = function() {
+  return this.value_.type == 'null';
+}
+
+
+/**
+ * Check if the value is a boolean.
+ * @return {boolean} true if the value is a boolean
+ */
+ProtocolValue.prototype.isBoolean = function() {
+  return this.value_.type == 'boolean';
+}
+
+
+/**
+ * Check if the value is a number.
+ * @return {boolean} true if the value is a number
+ */
+ProtocolValue.prototype.isNumber = function() {
+  return this.value_.type == 'number';
+}
+
+
+/**
+ * Check if the value is a string.
+ * @return {boolean} true if the value is a string
+ */
+ProtocolValue.prototype.isString = function() {
+  return this.value_.type == 'string';
+}
+
+
+/**
+ * Check if the value is an object.
+ * @return {boolean} true if the value is an object
+ */
+ProtocolValue.prototype.isObject = function() {
+  return this.value_.type == 'object' || this.value_.type == 'function' ||
+         this.value_.type == 'error' || this.value_.type == 'regexp';
+}
+
+
+/**
+ * Get the constructor function
+ * @return {ProtocolValue} constructor function
+ */
+ProtocolValue.prototype.constructorFunctionValue = function() {
+  var ctor = this.value_.constructorFunction;
+  return this.packet_.lookup(ctor.ref);
+}
+
+
+/**
+ * Get the __proto__ value
+ * @return {ProtocolValue} __proto__ value
+ */
+ProtocolValue.prototype.protoObjectValue = function() {
+  var proto = this.value_.protoObject;
+  return this.packet_.lookup(proto.ref);
+}
+
+
+/**
+ * Get the number of properties.
+ * @return {number} the number of properties
+ */
+ProtocolValue.prototype.propertyCount = function() {
+  return this.value_.properties ? this.value_.properties.length : 0;
+}
+
+
+/**
+ * Get the specified property name.
+ * @return {string} property name
+ */
+ProtocolValue.prototype.propertyName = function(index) {
+  var property = this.value_.properties[index];
+  return property.name;
+}
+
+
+/**
+ * Return index for the property name.
+ * @param name The property name to look for
+ * @return {number} index for the property name
+ */
+ProtocolValue.prototype.propertyIndex = function(name) {
+  for (var i = 0; i < this.propertyCount(); i++) {
+    if (this.value_.properties[i].name == name) {
+      return i;
+    }
+  }
+  return null;
+}
+
+
+/**
+ * Get the specified property value.
+ * @return {ProtocolValue} property value
+ */
+ProtocolValue.prototype.propertyValue = function(index) {
+  var property = this.value_.properties[index];
+  return this.packet_.lookup(property.ref);
+}
+
+
+/**
+ * Get the primitive value.
+ * @return {*} the primitive value
+ */
+ProtocolValue.prototype.value = function() {
+  return this.value_.value;
+}
+
+
+function ProtocolReference(handle) {
+  this.handle_ = handle;
+}
+
+
+ProtocolReference.prototype.handle = function() {
+  return this.handle_;
+}
+
+
+function MakeJSONPair_(name, value) {
+  return '"' + name + '":' + value;
+}
+
+
+function ArrayToJSONObject_(content) {
+  return '{' + content.join(',') + '}';
+}
+
+
+function ArrayToJSONArray_(content) {
+  return '[' + content.join(',') + ']';
+}
+
+
+function BooleanToJSON_(value) {
+  return String(value); 
+}
+
+
+function NumberToJSON_(value) {
+  return String(value); 
+}
+
+
+// Mapping of some control characters to avoid the \uXXXX syntax for the most
+// commonly used control characters.
+const ctrlCharMap_ = {
+  '\b': '\\b',
+  '\t': '\\t',
+  '\n': '\\n',
+  '\f': '\\f',
+  '\r': '\\r',
+  '"' : '\\"',
+  '\\': '\\\\'
+};
+
+
+// Regular expression testing for ", \ and control characters (0x00 - 0x1F).
+const ctrlCharTest_ = new RegExp('["\\\\\x00-\x1F]');
+
+
+// Regular expression matching ", \ and control characters (0x00 - 0x1F)
+// globally.
+const ctrlCharMatch_ = new RegExp('["\\\\\x00-\x1F]', 'g');
+
+
+/**
+ * Convert a String to its JSON representation (see http://www.json.org/). To
+ * avoid depending on the String object this method calls the functions in
+ * string.js directly and not through the value.
+ * @param {String} value The String value to format as JSON
+ * @return {string} JSON formatted String value
+ */
+function StringToJSON_(value) {
+  // Check for" , \ and control characters (0x00 - 0x1F). No need to call
+  // RegExpTest as ctrlchar is constructed using RegExp.
+  if (ctrlCharTest_.test(value)) {
+    // Replace ", \ and control characters (0x00 - 0x1F).
+    return '"' +
+      value.replace(ctrlCharMatch_, function (char) {
+        // Use charmap if possible.
+        var mapped = ctrlCharMap_[char];
+        if (mapped) return mapped;
+        mapped = char.charCodeAt();
+        // Convert control character to unicode escape sequence.
+        return '\\u00' +
+          '0' + // TODO %NumberToRadixString(Math.floor(mapped / 16), 16) +
+          '0' // TODO %NumberToRadixString(mapped % 16, 16);
+      })
+    + '"';
+  }
+
+  // Simple string with no special characters.
+  return '"' + value + '"';
+}
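+
+// For illustration: StringToJSON_('say "hi"\n') returns '"say \\"hi\\"\\n"',
+// i.e. the embedded quote and newline are escaped via ctrlCharMap_. Until the
+// TODOs above are resolved, control characters without a map entry all
+// collapse to the placeholder escape \u0000.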
+
+
+/**
+ * Convert a Date to ISO 8601 format. To avoid depending on the Date object
+ * this method calls the functions in date.js directly and not through the
+ * value.
+ * @param {Date} value The Date value to format as JSON
+ * @return {string} JSON formatted Date value
+ */
+function DateToISO8601_(value) {
+  function f(n) {
+    return n < 10 ? '0' + n : n;
+  }
+  function g(n) {
+    return n < 10 ? '00' + n : n < 100 ? '0' + n : n;
+  }
+  return builtins.GetUTCFullYearFrom(value)         + '-' +
+          f(builtins.GetUTCMonthFrom(value) + 1)    + '-' +
+          f(builtins.GetUTCDateFrom(value))         + 'T' +
+          f(builtins.GetUTCHoursFrom(value))        + ':' +
+          f(builtins.GetUTCMinutesFrom(value))      + ':' +
+          f(builtins.GetUTCSecondsFrom(value))      + '.' +
+          g(builtins.GetUTCMillisecondsFrom(value)) + 'Z';
+}
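+
+// For illustration, assuming the builtins getters behave like the
+// corresponding Date UTC accessors: applied to the epoch this produces
+// '1970-01-01T00:00:00.000Z'.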
+
+
+/**
+ * Convert a Date to its JSON representation, i.e. the ISO 8601 string wrapped
+ * in double quotes.
+ * @param {Date} value The Date value to format as JSON
+ * @return {string} JSON formatted Date value
+ */
+function DateToJSON_(value) {
+  return '"' + DateToISO8601_(value) + '"';
+}
+
+
+/**
+ * Convert an Object to its JSON representation (see http://www.json.org/).
+ * This implementation simply runs through all string property names and adds
+ * each property to the JSON representation for some predefined types. For type
+ * "object" the function calls itself recursively unless the object has the
+ * function property "toJSONProtocol" in which case that is used. This is not
+ * a general implementation but sufficient for the debugger. Note that circular
+ * structures will cause infinite recursion.
+ * @param {Object} object The object to format as JSON
+ * @return {string} JSON formatted object value
+ */
+function SimpleObjectToJSON_(object) {
+  var content = [];
+  for (var key in object) {
+    // Only consider string keys.
+    if (typeof key == 'string') {
+      var property_value = object[key];
+
+      // Format the value based on its type.
+      var property_value_json;
+      switch (typeof property_value) {
+        case 'object':
+          if (typeof property_value.toJSONProtocol == 'function') {
+            property_value_json = property_value.toJSONProtocol(true);
+          } else if (property_value.constructor.name == 'Array') {
+            property_value_json = SimpleArrayToJSON_(property_value);
+          } else {
+            property_value_json = SimpleObjectToJSON_(property_value);
+          }
+          break;
+
+        case 'boolean':
+          property_value_json = BooleanToJSON_(property_value);
+          break;
+
+        case 'number':
+          property_value_json = NumberToJSON_(property_value);
+          break;
+
+        case 'string':
+          property_value_json = StringToJSON_(property_value);
+          break;
+
+        default:
+          property_value_json = null;
+      }
+
+      // Add the property if relevant.
+      if (property_value_json) {
+        content.push(StringToJSON_(key) + ':' + property_value_json);
+      }
+    }
+  }
+
+  // Make JSON object representation.
+  return '{' + content.join(',') + '}';
+}
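+
+// For illustration: SimpleObjectToJSON_({seq: 1, type: 'request', command:
+// 'continue'}) returns '{"seq":1,"type":"request","command":"continue"}'.
+// Properties whose values have no JSON mapping above (e.g. functions) are
+// simply skipped.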
+
+
+/**
+ * Convert an array to its JSON representation. This is a VERY simple
+ * implementation just to support what is needed for the debugger.
+ * @param {Array} array The array to format as JSON
+ * @return {string} JSON formatted array value
+ */
+function SimpleArrayToJSON_(array) {
+  // Make JSON array representation.
+  var json = '[';
+  for (var i = 0; i < array.length; i++) {
+    if (i != 0) {
+      json += ',';
+    }
+    var elem = array[i];
+    if (elem.toJSONProtocol) {
+      json += elem.toJSONProtocol(true);
+    } else if (typeof(elem) === 'object') {
+      json += SimpleObjectToJSON_(elem);
+    } else if (typeof(elem) === 'boolean') {
+      json += BooleanToJSON_(elem);
+    } else if (typeof(elem) === 'number') {
+      json += NumberToJSON_(elem);
+    } else if (typeof(elem) === 'string') {
+      json += StringToJSON_(elem);
+    } else {
+      json += elem;
+    }
+  }
+  json += ']';
+  return json;
+}
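+
+// For illustration: SimpleArrayToJSON_([1, 'two', true]) returns
+// '[1,"two",true]'; elements providing a toJSONProtocol function are
+// serialized through it instead.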
diff --git a/src/date-delay.js b/src/date-delay.js
new file mode 100644
index 0000000..0778dc9
--- /dev/null
+++ b/src/date-delay.js
@@ -0,0 +1,1173 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// This file relies on the fact that the following declarations have been made
+// in v8natives.js:
+// const $isFinite = GlobalIsFinite;
+
+// -------------------------------------------------------------------
+
+// This file contains date support implemented in JavaScript.
+
+
+// Keep reference to original values of some global properties.  This
+// has the added benefit that the code in this file is isolated from
+// changes to these properties.
+const $Date = global.Date;
+
+// Helper function to throw error.
+function ThrowDateTypeError() {
+  throw new $TypeError('this is not a Date object.');
+}
+
+// ECMA 262 - 15.9.1.2
+function Day(time) {
+  return FLOOR(time / msPerDay);
+}
+
+
+// ECMA 262 - 5.2
+function Modulo(value, remainder) {
+  var mod = value % remainder;
+  // Guard against returning -0.
+  if (mod == 0) return 0;
+  return mod >= 0 ? mod : mod + remainder;
+}
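+
+// For illustration: Modulo(-1, 7) returns 6 where the plain % operator gives
+// -1, and Modulo(-7, 7) returns 0 rather than -0.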
+
+
+function TimeWithinDay(time) {
+  return Modulo(time, msPerDay);
+}
+
+
+// ECMA 262 - 15.9.1.3
+function DaysInYear(year) {
+  if (year % 4 != 0) return 365;
+  if ((year % 100 == 0) && (year % 400 != 0)) return 365;
+  return 366;
+}
+
+
+function DayFromYear(year) {
+  return 365 * (year-1970)
+      + FLOOR((year-1969)/4)
+      - FLOOR((year-1901)/100)
+      + FLOOR((year-1601)/400);
+}
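+
+// For illustration: DayFromYear(1970) is 0, DayFromYear(1971) is 365 and
+// DayFromYear(1973) is 1096 (the leap year 1972 contributes 366 days).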
+
+
+function TimeFromYear(year) {
+  return msPerDay * DayFromYear(year);
+}
+
+
+function YearFromTime(time) {
+  return FromJulianDay(Day(time) + kDayZeroInJulianDay).year;
+}
+
+
+function InLeapYear(time) {
+  return DaysInYear(YearFromTime(time)) == 366 ? 1 : 0;
+}
+
+
+// ECMA 262 - 15.9.1.4
+function MonthFromTime(time) {
+  return FromJulianDay(Day(time) + kDayZeroInJulianDay).month;
+}
+
+
+function DayWithinYear(time) {
+  return Day(time) - DayFromYear(YearFromTime(time));
+}
+
+
+// ECMA 262 - 15.9.1.5
+function DateFromTime(time) {
+  return FromJulianDay(Day(time) + kDayZeroInJulianDay).date;
+}
+
+
+// ECMA 262 - 15.9.1.9
+function EquivalentYear(year) {
+  // Returns an equivalent year in the range [2008-2035] matching
+  // - leap year.
+  // - week day of first day.
+  var time = TimeFromYear(year);
+  var recent_year = (InLeapYear(time) == 0 ? 1967 : 1956) +
+      (WeekDay(time) * 12) % 28;
+  // Find the year in the range 2008..2037 that is equivalent mod 28.
+  // Add 3*28 to give a positive argument to the modulus operator.
+  return 2008 + (recent_year + 3*28 - 2008) % 28;
+}
+
+
+function EquivalentTime(t) {
+  // The issue here is that some library calls don't work right for dates
+  // that cannot be represented using a non-negative signed 32 bit integer
+  // (measured in whole seconds based on the 1970 epoch).
+  // We solve this by mapping the time to a year with same leap-year-ness
+  // and same starting day for the year.  The ECMAScript specification says
+  // we must do this, but for compatibility with other browsers, we use
+  // the actual year if it is in the range 1970..2037.
+  if (t >= 0 && t <= 2.1e12) return t;
+  var day = MakeDay(EquivalentYear(YearFromTime(t)), MonthFromTime(t), DateFromTime(t));
+  return TimeClip(MakeDate(day, TimeWithinDay(t)));
+}
+
+
+// Because computing the DST offset is a pretty expensive operation
+// we keep a cache of last computed offset along with a time interval
+// where we know the cache is valid.
+var DST_offset_cache = {
+  // Cached DST offset.
+  offset: 0,
+  // Time interval where the cached offset is valid.
+  start: 0, end: -1,
+  // Size of next interval expansion.
+  increment: 0
+};
+
+
+// NOTE: The implementation relies on the fact that no time zones have
+// more than one daylight savings offset change per month.
+// If this function is called with NaN it returns NaN.
+function DaylightSavingsOffset(t) {
+  // Load the cache object from the builtins object.
+  var cache = DST_offset_cache;
+
+  // Cache the start and the end in local variables for fast access.
+  var start = cache.start;
+  var end = cache.end;
+
+  if (start <= t) {
+    // If the time fits in the cached interval, return the cached offset.
+    if (t <= end) return cache.offset;
+
+    // Compute a possible new interval end.
+    var new_end = end + cache.increment;
+
+    if (t <= new_end) {
+      var end_offset = %DateDaylightSavingsOffset(EquivalentTime(new_end));
+      if (cache.offset == end_offset) {
+        // If the offset at the end of the new interval still matches
+        // the offset in the cache, we grow the cached time interval
+        // and return the offset.
+        cache.end = new_end;
+        cache.increment = msPerMonth;
+        return end_offset;
+      } else {
+        var offset = %DateDaylightSavingsOffset(EquivalentTime(t));
+        if (offset == end_offset) {
+          // The offset at the given time is equal to the offset at the
+          // new end of the interval, so that means that we've just skipped
+          // the point in time where the DST offset change occurred. Update
+          // the interval to reflect this and reset the increment.
+          cache.start = t;
+          cache.end = new_end;
+          cache.increment = msPerMonth;
+        } else {
+          // The interval contains a DST offset change and the given time is
+          // before it. Adjust the increment to avoid a linear search for
+          // the offset change point and change the end of the interval.
+          cache.increment /= 3;
+          cache.end = t;
+        }
+        // Update the offset in the cache and return it.
+        cache.offset = offset;
+        return offset;
+      }
+    }
+  }
+
+  // Compute the DST offset for the time and shrink the cache interval
+  // to only contain the time. This allows fast repeated DST offset
+  // computations for the same time.
+  var offset = %DateDaylightSavingsOffset(EquivalentTime(t));
+  cache.offset = offset;
+  cache.start = cache.end = t;
+  cache.increment = msPerMonth;
+  return offset;
+}
+
+
+var timezone_cache_time = $NaN;
+var timezone_cache_timezone;
+
+function LocalTimezone(t) {
+  if (NUMBER_IS_NAN(t)) return "";
+  if (t == timezone_cache_time) {
+    return timezone_cache_timezone;
+  }
+  var timezone = %DateLocalTimezone(EquivalentTime(t));
+  timezone_cache_time = t;
+  timezone_cache_timezone = timezone;
+  return timezone;
+}
+
+
+function WeekDay(time) {
+  return Modulo(Day(time) + 4, 7);
+}
+
+var local_time_offset = %DateLocalTimeOffset();
+
+function LocalTime(time) {
+  if (NUMBER_IS_NAN(time)) return time;
+  return time + local_time_offset + DaylightSavingsOffset(time);
+}
+
+function LocalTimeNoCheck(time) {
+  return time + local_time_offset + DaylightSavingsOffset(time);
+}
+
+
+function UTC(time) {
+  if (NUMBER_IS_NAN(time)) return time;
+  var tmp = time - local_time_offset;
+  return tmp - DaylightSavingsOffset(tmp);
+}
+
+
+// ECMA 262 - 15.9.1.10
+function HourFromTime(time) {
+  return Modulo(FLOOR(time / msPerHour), HoursPerDay);
+}
+
+
+function MinFromTime(time) {
+  return Modulo(FLOOR(time / msPerMinute), MinutesPerHour);
+}
+
+
+function SecFromTime(time) {
+  return Modulo(FLOOR(time / msPerSecond), SecondsPerMinute);
+}
+
+
+function msFromTime(time) {
+  return Modulo(time, msPerSecond);
+}
+
+
+// ECMA 262 - 15.9.1.11
+function MakeTime(hour, min, sec, ms) {
+  if (!$isFinite(hour)) return $NaN;
+  if (!$isFinite(min)) return $NaN;
+  if (!$isFinite(sec)) return $NaN;
+  if (!$isFinite(ms)) return $NaN;
+  return TO_INTEGER(hour) * msPerHour
+      + TO_INTEGER(min) * msPerMinute
+      + TO_INTEGER(sec) * msPerSecond
+      + TO_INTEGER(ms);
+}
+
+
+// ECMA 262 - 15.9.1.12
+function TimeInYear(year) {
+  return DaysInYear(year) * msPerDay;
+}
+
+
+// Compute modified Julian day from year, month, date.
+function ToJulianDay(year, month, date) {
+  var jy = (month > 1) ? year : year - 1;
+  var jm = (month > 1) ? month + 2 : month + 14;
+  var ja = FLOOR(jy / 100);
+  return FLOOR(FLOOR(365.25*jy) + FLOOR(30.6001*jm) + date + 1720995) + 2 - ja + FLOOR(0.25*ja);
+}
+
+var four_year_cycle_table = CalculateDateTable();
+
+
+function CalculateDateTable() {
+  var month_lengths = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31];
+  var four_year_cycle_table = new $Array(1461);
+
+  var cumulative = 0;
+  var position = 0;
+  var leap_position = 0;
+  for (var month = 0; month < 12; month++) {
+    var month_bits = month << kMonthShift;
+    var length = month_lengths[month];
+    for (var day = 1; day <= length; day++) {
+      four_year_cycle_table[leap_position] =
+        month_bits + day;
+      four_year_cycle_table[366 + position] =
+        (1 << kYearShift) + month_bits + day;
+      four_year_cycle_table[731 + position] =
+        (2 << kYearShift) + month_bits + day;
+      four_year_cycle_table[1096 + position] =
+        (3 << kYearShift) + month_bits + day;
+      leap_position++;
+      position++;
+    }
+    if (month == 1) {
+      four_year_cycle_table[leap_position++] = month_bits + 29;
+    }
+  }
+  return four_year_cycle_table;
+}
+
+
+// Constructor for creating objects holding year, month, and date.
+// Introduced to ensure the two return points in FromJulianDay share the same map.
+function DayTriplet(year, month, date) {
+  this.year = year;
+  this.month = month;
+  this.date = date;
+}
+
+var julian_day_cache_triplet;
+var julian_day_cache_day = $NaN;
+
+// Compute year, month, and day from modified Julian day.
+// The missing days in 1582 are ignored for JavaScript compatibility.
+function FromJulianDay(julian) {
+  if (julian_day_cache_day == julian) {
+    return julian_day_cache_triplet;
+  }
+  var result;
+  // Avoid floating point and non-Smi maths in the common case.  This is also a
+  // period of time where leap years are very regular.  The range is kept small
+  // enough to avoid overflow when doing the multiply-to-divide trick.
+  if (julian > kDayZeroInJulianDay &&
+      (julian - kDayZeroInJulianDay) < 40177) { // 1970 - 2080
+    var jsimple = (julian - kDayZeroInJulianDay) + 731; // Day 0 is 1st January 1968
+    var y = 1968;
+    // Divide by 1461 by multiplying with 22967 and shifting down by 25!
+    var after_1968 = (jsimple * 22967) >> 25;
+    y += after_1968 << 2;
+    jsimple -= 1461 * after_1968;
+    var four_year_cycle = four_year_cycle_table[jsimple];
+    result = new DayTriplet(y + (four_year_cycle >> kYearShift),
+                            (four_year_cycle & kMonthMask) >> kMonthShift,
+                            four_year_cycle & kDayMask);
+  } else {
+    var jalpha = FLOOR((julian - 1867216.25) / 36524.25);
+    var jb = julian + 1 + jalpha - FLOOR(0.25 * jalpha) + 1524;
+    var jc = FLOOR(6680.0 + ((jb-2439870) - 122.1)/365.25);
+    var jd = FLOOR(365 * jc + (0.25 * jc));
+    var je = FLOOR((jb - jd)/30.6001);
+    var m = je - 1;
+    if (m > 12) m -= 13;
+    var y = jc - 4715;
+    if (m > 2) { --y; --m; }
+    var d = jb - jd - FLOOR(30.6001 * je);
+    result = new DayTriplet(y, m, d);
+  }
+  julian_day_cache_day = julian;
+  julian_day_cache_triplet = result;
+  return result;
+}
+
+
+// Compute number of days given a year, month, date.
+// Note that month and date can lie outside the normal range.
+//   For example:
+//     MakeDay(2007, -4, 20) --> MakeDay(2006, 8, 20)
+//     MakeDay(2007, -33, 1) --> MakeDay(2004, 3, 1)
+//     MakeDay(2007, 14, -50) --> MakeDay(2007, 8, 11)
+function MakeDay(year, month, date) {
+  if (!$isFinite(year) || !$isFinite(month) || !$isFinite(date)) return $NaN;
+
+  // Conversion to integers.
+  year = TO_INTEGER(year);
+  month = TO_INTEGER(month);
+  date = TO_INTEGER(date);
+
+  // Overflow months into year.
+  year = year + FLOOR(month/12);
+  month = month % 12;
+  if (month < 0) {
+    month += 12;
+  }
+
+  // Return days relative to Jan 1 1970.
+  return ToJulianDay(year, month, date) - kDayZeroInJulianDay;
+}
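+
+// For illustration: MakeDay(1970, 0, 1) is 0 and MakeDay(1970, 1, 1) is 31,
+// since the result counts days relative to Jan 1 1970 and months are
+// zero-based.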
+
+
+// ECMA 262 - 15.9.1.13
+function MakeDate(day, time) {
+  if (!$isFinite(day)) return $NaN;
+  if (!$isFinite(time)) return $NaN;
+  return day * msPerDay + time;
+}
+
+
+// ECMA 262 - 15.9.1.14
+function TimeClip(time) {
+  if (!$isFinite(time)) return $NaN;
+  if ($abs(time) > 8.64E15) return $NaN;
+  return TO_INTEGER(time);
+}
+
+
+// The Date cache is used to limit the cost of parsing the same Date
+// strings over and over again.
+var Date_cache = {
+  // Cached time value.
+  time: $NaN,
+  // Cached year when interpreting the time as a local time. Only
+  // valid when the time matches cached time.
+  year: $NaN,
+  // String input for which the cached time is valid.
+  string: null
+};
+
+
+%SetCode($Date, function(year, month, date, hours, minutes, seconds, ms) {
+  if (!%_IsConstructCall()) {
+    // ECMA 262 - 15.9.2
+    return (new $Date()).toString();
+  }
+
+  // ECMA 262 - 15.9.3
+  var argc = %_ArgumentsLength();
+  var value;
+  if (argc == 0) {
+    value = %DateCurrentTime();
+
+  } else if (argc == 1) {
+    if (IS_NUMBER(year)) {
+      value = TimeClip(year);
+
+    } else if (IS_STRING(year)) {
+      // Probe the Date cache. If we already have a time value for the
+      // given string, we re-use that instead of parsing the string again.
+      var cache = Date_cache;
+      if (cache.string === year) {
+        value = cache.time;
+      } else {
+        value = DateParse(year);
+        if (!NUMBER_IS_NAN(value)) {
+          cache.time = value;
+          cache.year = YearFromTime(LocalTimeNoCheck(value));
+          cache.string = year;
+        }
+      }
+
+    } else {
+      // According to ECMA 262, no hint should be given for this
+      // conversion. However, ToPrimitive defaults to STRING_HINT for
+      // Date objects which will lose precision when the Date
+      // constructor is called with another Date object as its
+      // argument. We therefore use NUMBER_HINT for the conversion,
+      // which is the default for everything else than Date objects.
+      // This makes us behave like KJS and SpiderMonkey.
+      var time = ToPrimitive(year, NUMBER_HINT);
+      value = IS_STRING(time) ? DateParse(time) : TimeClip(ToNumber(time));
+    }
+
+  } else {
+    year = ToNumber(year);
+    month = ToNumber(month);
+    date = argc > 2 ? ToNumber(date) : 1;
+    hours = argc > 3 ? ToNumber(hours) : 0;
+    minutes = argc > 4 ? ToNumber(minutes) : 0;
+    seconds = argc > 5 ? ToNumber(seconds) : 0;
+    ms = argc > 6 ? ToNumber(ms) : 0;
+    year = (!NUMBER_IS_NAN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
+        ? 1900 + TO_INTEGER(year) : year;
+    var day = MakeDay(year, month, date);
+    var time = MakeTime(hours, minutes, seconds, ms);
+    value = TimeClip(UTC(MakeDate(day, time)));
+  }
+  %_SetValueOf(this, value);
+});
+
+
+// Helper functions.
+function GetTimeFrom(aDate) {
+  return DATE_VALUE(aDate);
+}
+
+
+function GetMillisecondsFrom(aDate) {
+  var t = DATE_VALUE(aDate);
+  if (NUMBER_IS_NAN(t)) return t;
+  return msFromTime(LocalTimeNoCheck(t));
+}
+
+
+function GetUTCMillisecondsFrom(aDate) {
+  var t = DATE_VALUE(aDate);
+  if (NUMBER_IS_NAN(t)) return t;
+  return msFromTime(t);
+}
+
+
+function GetSecondsFrom(aDate) {
+  var t = DATE_VALUE(aDate);
+  if (NUMBER_IS_NAN(t)) return t;
+  return SecFromTime(LocalTimeNoCheck(t));
+}
+
+
+function GetUTCSecondsFrom(aDate) {
+  var t = DATE_VALUE(aDate);
+  if (NUMBER_IS_NAN(t)) return t;
+  return SecFromTime(t);
+}
+
+
+function GetMinutesFrom(aDate) {
+  var t = DATE_VALUE(aDate);
+  if (NUMBER_IS_NAN(t)) return t;
+  return MinFromTime(LocalTimeNoCheck(t));
+}
+
+
+function GetUTCMinutesFrom(aDate) {
+  var t = DATE_VALUE(aDate);
+  if (NUMBER_IS_NAN(t)) return t;
+  return MinFromTime(t);
+}
+
+
+function GetHoursFrom(aDate) {
+  var t = DATE_VALUE(aDate);
+  if (NUMBER_IS_NAN(t)) return t;
+  return HourFromTime(LocalTimeNoCheck(t));
+}
+
+
+function GetUTCHoursFrom(aDate) {
+  var t = DATE_VALUE(aDate);
+  if (NUMBER_IS_NAN(t)) return t;
+  return HourFromTime(t);
+}
+
+
+function GetFullYearFrom(aDate) {
+  var t = DATE_VALUE(aDate);
+  if (NUMBER_IS_NAN(t)) return t;
+  var cache = Date_cache;
+  if (cache.time === t) return cache.year;
+  return YearFromTime(LocalTimeNoCheck(t));
+}
+
+
+function GetUTCFullYearFrom(aDate) {
+  var t = DATE_VALUE(aDate);
+  if (NUMBER_IS_NAN(t)) return t;
+  return YearFromTime(t);
+}
+
+
+function GetMonthFrom(aDate) {
+  var t = DATE_VALUE(aDate);
+  if (NUMBER_IS_NAN(t)) return t;
+  return MonthFromTime(LocalTimeNoCheck(t));
+}
+
+
+function GetUTCMonthFrom(aDate) {
+  var t = DATE_VALUE(aDate);
+  if (NUMBER_IS_NAN(t)) return t;
+  return MonthFromTime(t);
+}
+
+
+function GetDateFrom(aDate) {
+  var t = DATE_VALUE(aDate);
+  if (NUMBER_IS_NAN(t)) return t;
+  return DateFromTime(LocalTimeNoCheck(t));
+}
+
+
+function GetUTCDateFrom(aDate) {
+  var t = DATE_VALUE(aDate);
+  if (NUMBER_IS_NAN(t)) return t;
+  return DateFromTime(t);
+}
+
+
+%FunctionSetPrototype($Date, new $Date($NaN));
+
+
+var WeekDays = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'];
+var Months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'];
+
+
+function TwoDigitString(value) {
+  return value < 10 ? "0" + value : "" + value;
+}
+
+
+function DateString(time) {
+  var YMD = FromJulianDay(Day(time) + kDayZeroInJulianDay);
+  return WeekDays[WeekDay(time)] + ' '
+      + Months[YMD.month] + ' '
+      + TwoDigitString(YMD.date) + ' '
+      + YMD.year;
+}
+
+
+var LongWeekDays = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'];
+var LongMonths = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'];
+
+
+function LongDateString(time) {
+  var YMD = FromJulianDay(Day(time) + kDayZeroInJulianDay);
+  return LongWeekDays[WeekDay(time)] + ', '
+      + LongMonths[YMD.month] + ' '
+      + TwoDigitString(YMD.date) + ', '
+      + YMD.year;
+}
+
+
+function TimeString(time) {
+  return TwoDigitString(HourFromTime(time)) + ':'
+      + TwoDigitString(MinFromTime(time)) + ':'
+      + TwoDigitString(SecFromTime(time));
+}
+
+
+function LocalTimezoneString(time) {
+  var timezoneOffset =
+      (local_time_offset + DaylightSavingsOffset(time)) / msPerMinute;
+  var sign = (timezoneOffset >= 0) ? 1 : -1;
+  var hours = FLOOR((sign * timezoneOffset)/60);
+  var min   = FLOOR((sign * timezoneOffset)%60);
+  var gmt = ' GMT' + ((sign == 1) ? '+' : '-') +
+      TwoDigitString(hours) + TwoDigitString(min);
+  return gmt + ' (' +  LocalTimezone(time) + ')';
+}
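+
+// For illustration: with a local offset of +2 hours the string above has the
+// form ' GMT+0200 (<name>)', where <name> is whatever %DateLocalTimezone
+// reports (for example 'CEST').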
+
+
+function DatePrintString(time) {
+  return DateString(time) + ' ' + TimeString(time);
+}
+
+// -------------------------------------------------------------------
+
+// Reused output buffer. Used when parsing date strings.
+var parse_buffer = $Array(7);
+
+// ECMA 262 - 15.9.4.2
+function DateParse(string) {
+  var arr = %DateParseString(ToString(string), parse_buffer);
+  if (IS_NULL(arr)) return $NaN;
+
+  var day = MakeDay(arr[0], arr[1], arr[2]);
+  var time = MakeTime(arr[3], arr[4], arr[5], 0);
+  var date = MakeDate(day, time);
+
+  if (IS_NULL(arr[6])) {
+    return TimeClip(UTC(date));
+  } else {
+    return TimeClip(date - arr[6] * 1000);
+  }
+}
+
+
+// ECMA 262 - 15.9.4.3
+function DateUTC(year, month, date, hours, minutes, seconds, ms) {
+  year = ToNumber(year);
+  month = ToNumber(month);
+  var argc = %_ArgumentsLength();
+  date = argc > 2 ? ToNumber(date) : 1;
+  hours = argc > 3 ? ToNumber(hours) : 0;
+  minutes = argc > 4 ? ToNumber(minutes) : 0;
+  seconds = argc > 5 ? ToNumber(seconds) : 0;
+  ms = argc > 6 ? ToNumber(ms) : 0;
+  year = (!NUMBER_IS_NAN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
+      ? 1900 + TO_INTEGER(year) : year;
+  var day = MakeDay(year, month, date);
+  var time = MakeTime(hours, minutes, seconds, ms);
+  return %_SetValueOf(this, TimeClip(MakeDate(day, time)));
+}
+
+
+// Mozilla-specific extension. Returns the number of milliseconds
+// elapsed since 1 January 1970 00:00:00 UTC.
+function DateNow() {
+  return %DateCurrentTime();
+}
+
+
+// ECMA 262 - 15.9.5.2
+function DateToString() {
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return kInvalidDate;
+  return DatePrintString(LocalTimeNoCheck(t)) + LocalTimezoneString(t);
+}
+
+
+// ECMA 262 - 15.9.5.3
+function DateToDateString() {
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return kInvalidDate;
+  return DateString(LocalTimeNoCheck(t));
+}
+
+
+// ECMA 262 - 15.9.5.4
+function DateToTimeString() {
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return kInvalidDate;
+  var lt = LocalTimeNoCheck(t);
+  return TimeString(lt) + LocalTimezoneString(lt);
+}
+
+
+// ECMA 262 - 15.9.5.5
+function DateToLocaleString() {
+  return DateToString.call(this);
+}
+
+
+// ECMA 262 - 15.9.5.6
+function DateToLocaleDateString() {
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return kInvalidDate;
+  return LongDateString(LocalTimeNoCheck(t));
+}
+
+
+// ECMA 262 - 15.9.5.7
+function DateToLocaleTimeString() {
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return kInvalidDate;
+  var lt = LocalTimeNoCheck(t);
+  return TimeString(lt);
+}
+
+
+// ECMA 262 - 15.9.5.8
+function DateValueOf() {
+  return DATE_VALUE(this);
+}
+
+
+// ECMA 262 - 15.9.5.9
+function DateGetTime() {
+  return DATE_VALUE(this);
+}
+
+
+// ECMA 262 - 15.9.5.10
+function DateGetFullYear() {
+  return GetFullYearFrom(this);
+}
+
+
+// ECMA 262 - 15.9.5.11
+function DateGetUTCFullYear() {
+  return GetUTCFullYearFrom(this);
+}
+
+
+// ECMA 262 - 15.9.5.12
+function DateGetMonth() {
+  return GetMonthFrom(this);
+}
+
+
+// ECMA 262 - 15.9.5.13
+function DateGetUTCMonth() {
+  return GetUTCMonthFrom(this);
+}
+
+
+// ECMA 262 - 15.9.5.14
+function DateGetDate() {
+  return GetDateFrom(this);
+}
+
+
+// ECMA 262 - 15.9.5.15
+function DateGetUTCDate() {
+  return GetUTCDateFrom(this);
+}
+
+
+// ECMA 262 - 15.9.5.16
+function DateGetDay() {
+  var t = %_ValueOf(this);
+  if (NUMBER_IS_NAN(t)) return t;
+  return WeekDay(LocalTimeNoCheck(t));
+}
+
+
+// ECMA 262 - 15.9.5.17
+function DateGetUTCDay() {
+  var t = %_ValueOf(this);
+  if (NUMBER_IS_NAN(t)) return t;
+  return WeekDay(t);
+}
+
+
+// ECMA 262 - 15.9.5.18
+function DateGetHours() {
+  return GetHoursFrom(this);
+}
+
+
+// ECMA 262 - 15.9.5.19
+function DateGetUTCHours() {
+  return GetUTCHoursFrom(this);
+}
+
+
+// ECMA 262 - 15.9.5.20
+function DateGetMinutes() {
+  return GetMinutesFrom(this);
+}
+
+
+// ECMA 262 - 15.9.5.21
+function DateGetUTCMinutes() {
+  return GetUTCMinutesFrom(this);
+}
+
+
+// ECMA 262 - 15.9.5.22
+function DateGetSeconds() {
+  return GetSecondsFrom(this);
+}
+
+
+// ECMA 262 - 15.9.5.23
+function DateGetUTCSeconds() {
+  return GetUTCSecondsFrom(this);
+}
+
+
+// ECMA 262 - 15.9.5.24
+function DateGetMilliseconds() {
+  return GetMillisecondsFrom(this);
+}
+
+
+// ECMA 262 - 15.9.5.25
+function DateGetUTCMilliseconds() {
+  return GetUTCMillisecondsFrom(this);
+}
+
+
+// ECMA 262 - 15.9.5.26
+function DateGetTimezoneOffset() {
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return t;
+  return (t - LocalTimeNoCheck(t)) / msPerMinute;
+}
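+
+// For illustration: in a zone one hour east of UTC with no DST in effect,
+// LocalTimeNoCheck(t) is t + 3600000, so the result is -60, matching the sign
+// convention of Date.prototype.getTimezoneOffset.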
+
+
+// ECMA 262 - 15.9.5.27
+function DateSetTime(ms) {
+  if (!IS_DATE(this)) ThrowDateTypeError();
+  return %_SetValueOf(this, TimeClip(ToNumber(ms)));
+}
+
+
+// ECMA 262 - 15.9.5.28
+function DateSetMilliseconds(ms) {
+  var t = LocalTime(DATE_VALUE(this));
+  ms = ToNumber(ms);
+  var time = MakeTime(HourFromTime(t), MinFromTime(t), SecFromTime(t), ms);
+  return %_SetValueOf(this, TimeClip(UTC(MakeDate(Day(t), time))));
+}
+
+
+// ECMA 262 - 15.9.5.29
+function DateSetUTCMilliseconds(ms) {
+  var t = DATE_VALUE(this);
+  ms = ToNumber(ms);
+  var time = MakeTime(HourFromTime(t), MinFromTime(t), SecFromTime(t), ms);
+  return %_SetValueOf(this, TimeClip(MakeDate(Day(t), time)));
+}
+
+
+// ECMA 262 - 15.9.5.30
+function DateSetSeconds(sec, ms) {
+  var t = LocalTime(DATE_VALUE(this));
+  sec = ToNumber(sec);
+  ms = %_ArgumentsLength() < 2 ? GetMillisecondsFrom(this) : ToNumber(ms);
+  var time = MakeTime(HourFromTime(t), MinFromTime(t), sec, ms);
+  return %_SetValueOf(this, TimeClip(UTC(MakeDate(Day(t), time))));
+}
+
+
+// ECMA 262 - 15.9.5.31
+function DateSetUTCSeconds(sec, ms) {
+  var t = DATE_VALUE(this);
+  sec = ToNumber(sec);
+  ms = %_ArgumentsLength() < 2 ? GetUTCMillisecondsFrom(this) : ToNumber(ms);
+  var time = MakeTime(HourFromTime(t), MinFromTime(t), sec, ms);
+  return %_SetValueOf(this, TimeClip(MakeDate(Day(t), time)));
+}
+
+
+// ECMA 262 - 15.9.5.33
+function DateSetMinutes(min, sec, ms) {
+  var t = LocalTime(DATE_VALUE(this));
+  min = ToNumber(min);
+  var argc = %_ArgumentsLength();
+  sec = argc < 2 ? GetSecondsFrom(this) : ToNumber(sec);
+  ms = argc < 3 ? GetMillisecondsFrom(this) : ToNumber(ms);
+  var time = MakeTime(HourFromTime(t), min, sec, ms);
+  return %_SetValueOf(this, TimeClip(UTC(MakeDate(Day(t), time))));
+}
+
+
+// ECMA 262 - 15.9.5.34
+function DateSetUTCMinutes(min, sec, ms) {
+  var t = DATE_VALUE(this);
+  min = ToNumber(min);
+  var argc = %_ArgumentsLength();
+  sec = argc < 2 ? GetUTCSecondsFrom(this) : ToNumber(sec);
+  ms = argc < 3 ? GetUTCMillisecondsFrom(this) : ToNumber(ms);
+  var time = MakeTime(HourFromTime(t), min, sec, ms);
+  return %_SetValueOf(this, TimeClip(MakeDate(Day(t), time)));
+}
+
+
+// ECMA 262 - 15.9.5.35
+function DateSetHours(hour, min, sec, ms) {
+  var t = LocalTime(DATE_VALUE(this));
+  hour = ToNumber(hour);
+  var argc = %_ArgumentsLength();
+  min = argc < 2 ? GetMinutesFrom(this) : ToNumber(min);
+  sec = argc < 3 ? GetSecondsFrom(this) : ToNumber(sec);
+  ms = argc < 4 ? GetMillisecondsFrom(this) : ToNumber(ms);
+  var time = MakeTime(hour, min, sec, ms);
+  return %_SetValueOf(this, TimeClip(UTC(MakeDate(Day(t), time))));
+}
+
+
+// ECMA 262 - 15.9.5.34
+function DateSetUTCHours(hour, min, sec, ms) {
+  var t = DATE_VALUE(this);
+  hour = ToNumber(hour);
+  var argc = %_ArgumentsLength();
+  min = argc < 2 ? GetUTCMinutesFrom(this) : ToNumber(min);
+  sec = argc < 3 ? GetUTCSecondsFrom(this) : ToNumber(sec);
+  ms = argc < 4 ? GetUTCMillisecondsFrom(this) : ToNumber(ms);
+  var time = MakeTime(hour, min, sec, ms);
+  return %_SetValueOf(this, TimeClip(MakeDate(Day(t), time)));
+}
+
+
+// ECMA 262 - 15.9.5.36
+function DateSetDate(date) {
+  var t = LocalTime(DATE_VALUE(this));
+  date = ToNumber(date);
+  var day = MakeDay(YearFromTime(t), MonthFromTime(t), date);
+  return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
+}
+
+
+// ECMA 262 - 15.9.5.37
+function DateSetUTCDate(date) {
+  var t = DATE_VALUE(this);
+  date = ToNumber(date);
+  var day = MakeDay(YearFromTime(t), MonthFromTime(t), date);
+  return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
+}
+
+
+// ECMA 262 - 15.9.5.38
+function DateSetMonth(month, date) {
+  var t = LocalTime(DATE_VALUE(this));
+  month = ToNumber(month);
+  date = %_ArgumentsLength() < 2 ? GetDateFrom(this) : ToNumber(date);
+  var day = MakeDay(YearFromTime(t), month, date);
+  return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
+}
+
+
+// ECMA 262 - 15.9.5.39
+function DateSetUTCMonth(month, date) {
+  var t = DATE_VALUE(this);
+  month = ToNumber(month);
+  date = %_ArgumentsLength() < 2 ? GetUTCDateFrom(this) : ToNumber(date);
+  var day = MakeDay(YearFromTime(t), month, date);
+  return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
+}
+
+
+// ECMA 262 - 15.9.5.40
+function DateSetFullYear(year, month, date) {
+  var t = DATE_VALUE(this);
+  t = NUMBER_IS_NAN(t) ? 0 : LocalTimeNoCheck(t);
+  year = ToNumber(year);
+  var argc = %_ArgumentsLength();
+  month = argc < 2 ? MonthFromTime(t) : ToNumber(month);
+  date = argc < 3 ? DateFromTime(t) : ToNumber(date);
+  var day = MakeDay(year, month, date);
+  return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
+}
+
+
+// ECMA 262 - 15.9.5.41
+function DateSetUTCFullYear(year, month, date) {
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) t = 0;
+  var argc = %_ArgumentsLength();
+  year = ToNumber(year);
+  month = argc < 2 ? MonthFromTime(t) : ToNumber(month);
+  date = argc < 3 ? DateFromTime(t) : ToNumber(date);
+  var day = MakeDay(year, month, date);
+  return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
+}
+
+
+// ECMA 262 - 15.9.5.42
+function DateToUTCString() {
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return kInvalidDate;
+  // Return UTC string of the form: Sat, 31 Jan 1970 23:00:00 GMT
+  return WeekDays[WeekDay(t)] + ', '
+      + TwoDigitString(DateFromTime(t)) + ' '
+      + Months[MonthFromTime(t)] + ' '
+      + YearFromTime(t) + ' '
+      + TimeString(t) + ' GMT';
+}
+
+
+// ECMA 262 - B.2.4
+function DateGetYear() {
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return $NaN;
+  return YearFromTime(LocalTimeNoCheck(t)) - 1900;
+}
+
+
+// ECMA 262 - B.2.5
+function DateSetYear(year) {
+  var t = LocalTime(DATE_VALUE(this));
+  if (NUMBER_IS_NAN(t)) t = 0;
+  year = ToNumber(year);
+  if (NUMBER_IS_NAN(year)) return %_SetValueOf(this, $NaN);
+  year = (0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
+      ? 1900 + TO_INTEGER(year) : year;
+  var day = MakeDay(year, MonthFromTime(t), DateFromTime(t));
+  return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
+}
+
+
+// ECMA 262 - B.2.6
+//
+// Notice that this does not follow ECMA 262 completely.  ECMA 262
+// says that toGMTString should be the same Function object as
+// toUTCString.  JSC does not do this, so for compatibility we do not
+// do that either.  Instead, we create a new function whose name
+// property will return toGMTString.
+function DateToGMTString() {
+  return DateToUTCString.call(this);
+}
+
+
+function PadInt(n) {
+  // Format integers to have at least two digits.
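+  // For example, 7 becomes "07" while 12 is returned unchanged.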
+  return n < 10 ? '0' + n : n;
+}
+
+
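+// Returns a string like "1970-01-01T00:00:00Z"; milliseconds are omitted.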
+function DateToISOString() {
+  return this.getUTCFullYear() + '-' + PadInt(this.getUTCMonth() + 1) +
+      '-' + PadInt(this.getUTCDate()) + 'T' + PadInt(this.getUTCHours()) +
+      ':' + PadInt(this.getUTCMinutes()) + ':' + PadInt(this.getUTCSeconds()) +
+      'Z';
+}
+
+
+function DateToJSON(key) {
+  return CheckJSONPrimitive(this.toISOString());
+}
+
+
+// -------------------------------------------------------------------
+
+function SetupDate() {
+  // Setup non-enumerable properties of the Date object itself.
+  InstallFunctions($Date, DONT_ENUM, $Array(
+    "UTC", DateUTC,
+    "parse", DateParse,
+    "now", DateNow
+  ));
+
+  // Setup non-enumerable constructor property of the Date prototype object.
+  %SetProperty($Date.prototype, "constructor", $Date, DONT_ENUM);
+
+  // Setup non-enumerable functions of the Date prototype object and
+  // set their names.
+  InstallFunctionsOnHiddenPrototype($Date.prototype, DONT_ENUM, $Array(
+    "toString", DateToString,
+    "toDateString", DateToDateString,
+    "toTimeString", DateToTimeString,
+    "toLocaleString", DateToLocaleString,
+    "toLocaleDateString", DateToLocaleDateString,
+    "toLocaleTimeString", DateToLocaleTimeString,
+    "valueOf", DateValueOf,
+    "getTime", DateGetTime,
+    "getFullYear", DateGetFullYear,
+    "getUTCFullYear", DateGetUTCFullYear,
+    "getMonth", DateGetMonth,
+    "getUTCMonth", DateGetUTCMonth,
+    "getDate", DateGetDate,
+    "getUTCDate", DateGetUTCDate,
+    "getDay", DateGetDay,
+    "getUTCDay", DateGetUTCDay,
+    "getHours", DateGetHours,
+    "getUTCHours", DateGetUTCHours,
+    "getMinutes", DateGetMinutes,
+    "getUTCMinutes", DateGetUTCMinutes,
+    "getSeconds", DateGetSeconds,
+    "getUTCSeconds", DateGetUTCSeconds,
+    "getMilliseconds", DateGetMilliseconds,
+    "getUTCMilliseconds", DateGetUTCMilliseconds,
+    "getTimezoneOffset", DateGetTimezoneOffset,
+    "setTime", DateSetTime,
+    "setMilliseconds", DateSetMilliseconds,
+    "setUTCMilliseconds", DateSetUTCMilliseconds,
+    "setSeconds", DateSetSeconds,
+    "setUTCSeconds", DateSetUTCSeconds,
+    "setMinutes", DateSetMinutes,
+    "setUTCMinutes", DateSetUTCMinutes,
+    "setHours", DateSetHours,
+    "setUTCHours", DateSetUTCHours,
+    "setDate", DateSetDate,
+    "setUTCDate", DateSetUTCDate,
+    "setMonth", DateSetMonth,
+    "setUTCMonth", DateSetUTCMonth,
+    "setFullYear", DateSetFullYear,
+    "setUTCFullYear", DateSetUTCFullYear,
+    "toGMTString", DateToGMTString,
+    "toUTCString", DateToUTCString,
+    "getYear", DateGetYear,
+    "setYear", DateSetYear,
+    "toISOString", DateToISOString,
+    "toJSON", DateToJSON
+  ));
+}
+
+SetupDate();
diff --git a/src/dateparser-inl.h b/src/dateparser-inl.h
new file mode 100644
index 0000000..3d4161d
--- /dev/null
+++ b/src/dateparser-inl.h
@@ -0,0 +1,112 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_DATEPARSER_INL_H_
+#define V8_DATEPARSER_INL_H_
+
+namespace v8 {
+namespace internal {
+
+template <typename Char>
+bool DateParser::Parse(Vector<Char> str, FixedArray* out) {
+  ASSERT(out->length() >= OUTPUT_SIZE);
+  InputReader<Char> in(str);
+  TimeZoneComposer tz;
+  TimeComposer time;
+  DayComposer day;
+
+  while (!in.IsEnd()) {
+    if (in.IsAsciiDigit()) {
+      // Parse a number (possibly with 1 or 2 trailing colons).
+      int n = in.ReadUnsignedNumber();
+      if (in.Skip(':')) {
+        if (in.Skip(':')) {
+          // n + "::"
+          if (!time.IsEmpty()) return false;
+          time.Add(n);
+          time.Add(0);
+        } else {
+          // n + ":"
+          if (!time.Add(n)) return false;
+        }
+      } else if (tz.IsExpecting(n)) {
+        tz.SetAbsoluteMinute(n);
+      } else if (time.IsExpecting(n)) {
+        time.AddFinal(n);
+        // Require end or white space immediately after finalizing time.
+        if (!in.IsEnd() && !in.SkipWhiteSpace()) return false;
+      } else {
+        if (!day.Add(n)) return false;
+        in.Skip('-');  // Ignore suffix '-' for year, month, or day.
+      }
+    } else if (in.IsAsciiAlphaOrAbove()) {
+      // Parse a "word" (sequence of chars. >= 'A').
+      uint32_t pre[KeywordTable::kPrefixLength];
+      int len = in.ReadWord(pre, KeywordTable::kPrefixLength);
+      int index = KeywordTable::Lookup(pre, len);
+      KeywordType type = KeywordTable::GetType(index);
+
+      if (type == AM_PM && !time.IsEmpty()) {
+        time.SetHourOffset(KeywordTable::GetValue(index));
+      } else if (type == MONTH_NAME) {
+        day.SetNamedMonth(KeywordTable::GetValue(index));
+        in.Skip('-');  // Ignore suffix '-' for month names.
+      } else if (type == TIME_ZONE_NAME && in.HasReadNumber()) {
+        tz.Set(KeywordTable::GetValue(index));
+      } else {
+        // Garbage words are illegal if a number has been read.
+        if (in.HasReadNumber()) return false;
+      }
+    } else if (in.IsAsciiSign() && (tz.IsUTC() || !time.IsEmpty())) {
+      // Parse UTC offset (only after UTC or time).
+      tz.SetSign(in.GetAsciiSignValue());
+      in.Next();
+      int n = in.ReadUnsignedNumber();
+      if (in.Skip(':')) {
+        tz.SetAbsoluteHour(n);
+        tz.SetAbsoluteMinute(kNone);
+      } else {
+        tz.SetAbsoluteHour(n / 100);
+        tz.SetAbsoluteMinute(n % 100);
+      }
+    } else if (in.Is('(')) {
+      // Ignore anything from '(' to a matching ')' or end of string.
+      in.SkipParentheses();
+    } else if ((in.IsAsciiSign() || in.Is(')')) && in.HasReadNumber()) {
+      // Extra sign or ')' is illegal if a number has been read.
+      return false;
+    } else {
+      // Ignore other characters.
+      in.Next();
+    }
+  }
+  return day.Write(out) && time.Write(out) && tz.Write(out);
+}
+
+} }  // namespace v8::internal
+
+#endif  // V8_DATEPARSER_INL_H_
diff --git a/src/dateparser.cc b/src/dateparser.cc
new file mode 100644
index 0000000..1cc9aa1
--- /dev/null
+++ b/src/dateparser.cc
@@ -0,0 +1,186 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "dateparser.h"
+
+namespace v8 {
+namespace internal {
+
+bool DateParser::DayComposer::Write(FixedArray* output) {
+  int year = 0;  // Default year is 0 (=> 2000) for KJS compatibility.
+  int month = kNone;
+  int day = kNone;
+
+  if (named_month_ == kNone) {
+    if (index_ < 2) return false;
+    if (index_ == 3 && !IsDay(comp_[0])) {
+      // YMD
+      year = comp_[0];
+      month = comp_[1];
+      day = comp_[2];
+    } else {
+      // MD(Y)
+      month = comp_[0];
+      day = comp_[1];
+      if (index_ == 3) year = comp_[2];
+    }
+  } else {
+    month = named_month_;
+    if (index_ < 1) return false;
+    if (index_ == 1) {
+      // MD or DM
+      day = comp_[0];
+    } else if (!IsDay(comp_[0])) {
+      // YMD, MYD, or YDM
+      year = comp_[0];
+      day = comp_[1];
+    } else {
+      // DMY, MDY, or DYM
+      day = comp_[0];
+      year = comp_[1];
+    }
+  }
+
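+  // Window two-digit years: e.g. 49 becomes 2049 and 50 becomes 1950.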
+  if (Between(year, 0, 49)) year += 2000;
+  else if (Between(year, 50, 99)) year += 1900;
+
+  if (!Smi::IsValid(year) || !IsMonth(month) || !IsDay(day)) return false;
+
+  output->set(YEAR,
+              Smi::FromInt(year),
+              SKIP_WRITE_BARRIER);
+  output->set(MONTH,
+              Smi::FromInt(month - 1),
+              SKIP_WRITE_BARRIER);  // 0-based
+  output->set(DAY,
+              Smi::FromInt(day),
+              SKIP_WRITE_BARRIER);
+  return true;
+}
+
+
+bool DateParser::TimeComposer::Write(FixedArray* output) {
+  // All time slots default to 0
+  while (index_ < kSize) {
+    comp_[index_++] = 0;
+  }
+
+  int& hour = comp_[0];
+  int& minute = comp_[1];
+  int& second = comp_[2];
+
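+  // Apply am/pm: "12:30 am" gives hour 0 and "12:30 pm" gives hour 12.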
+  if (hour_offset_ != kNone) {
+    if (!IsHour12(hour)) return false;
+    hour %= 12;
+    hour += hour_offset_;
+  }
+
+  if (!IsHour(hour) || !IsMinute(minute) || !IsSecond(second)) return false;
+
+  output->set(HOUR,
+              Smi::FromInt(hour),
+              SKIP_WRITE_BARRIER);
+  output->set(MINUTE,
+              Smi::FromInt(minute),
+              SKIP_WRITE_BARRIER);
+  output->set(SECOND,
+              Smi::FromInt(second),
+              SKIP_WRITE_BARRIER);
+  return true;
+}
+
+bool DateParser::TimeZoneComposer::Write(FixedArray* output) {
+  if (sign_ != kNone) {
+    if (hour_ == kNone) hour_ = 0;
+    if (minute_ == kNone) minute_ = 0;
+    int total_seconds = sign_ * (hour_ * 3600 + minute_ * 60);
+    if (!Smi::IsValid(total_seconds)) return false;
+    output->set(UTC_OFFSET,
+                Smi::FromInt(total_seconds),
+                SKIP_WRITE_BARRIER);
+  } else {
+    output->set(UTC_OFFSET,
+                Heap::null_value(),
+                SKIP_WRITE_BARRIER);
+  }
+  return true;
+}
+
+const int8_t DateParser::KeywordTable::
+    array[][DateParser::KeywordTable::kEntrySize] = {
+  {'j', 'a', 'n', DateParser::MONTH_NAME, 1},
+  {'f', 'e', 'b', DateParser::MONTH_NAME, 2},
+  {'m', 'a', 'r', DateParser::MONTH_NAME, 3},
+  {'a', 'p', 'r', DateParser::MONTH_NAME, 4},
+  {'m', 'a', 'y', DateParser::MONTH_NAME, 5},
+  {'j', 'u', 'n', DateParser::MONTH_NAME, 6},
+  {'j', 'u', 'l', DateParser::MONTH_NAME, 7},
+  {'a', 'u', 'g', DateParser::MONTH_NAME, 8},
+  {'s', 'e', 'p', DateParser::MONTH_NAME, 9},
+  {'o', 'c', 't', DateParser::MONTH_NAME, 10},
+  {'n', 'o', 'v', DateParser::MONTH_NAME, 11},
+  {'d', 'e', 'c', DateParser::MONTH_NAME, 12},
+  {'a', 'm', '\0', DateParser::AM_PM, 0},
+  {'p', 'm', '\0', DateParser::AM_PM, 12},
+  {'u', 't', '\0', DateParser::TIME_ZONE_NAME, 0},
+  {'u', 't', 'c', DateParser::TIME_ZONE_NAME, 0},
+  {'g', 'm', 't', DateParser::TIME_ZONE_NAME, 0},
+  {'c', 'd', 't', DateParser::TIME_ZONE_NAME, -5},
+  {'c', 's', 't', DateParser::TIME_ZONE_NAME, -6},
+  {'e', 'd', 't', DateParser::TIME_ZONE_NAME, -4},
+  {'e', 's', 't', DateParser::TIME_ZONE_NAME, -5},
+  {'m', 'd', 't', DateParser::TIME_ZONE_NAME, -6},
+  {'m', 's', 't', DateParser::TIME_ZONE_NAME, -7},
+  {'p', 'd', 't', DateParser::TIME_ZONE_NAME, -7},
+  {'p', 's', 't', DateParser::TIME_ZONE_NAME, -8},
+  {'\0', '\0', '\0', DateParser::INVALID, 0},
+};
+
+
+// We could use perfect hashing here, but this is not a bottleneck.
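+// Words that match no entry return the index of the terminating INVALID row,
+// so callers see KeywordType INVALID for unknown words.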
+int DateParser::KeywordTable::Lookup(const uint32_t* pre, int len) {
+  int i;
+  for (i = 0; array[i][kTypeOffset] != INVALID; i++) {
+    int j = 0;
+    while (j < kPrefixLength &&
+           pre[j] == static_cast<uint32_t>(array[i][j])) {
+      j++;
+    }
+    // Check if we have a match and the length is legal.
+    // Word longer than keyword is only allowed for month names.
+    if (j == kPrefixLength &&
+        (len <= kPrefixLength || array[i][kTypeOffset] == MONTH_NAME)) {
+      return i;
+    }
+  }
+  return i;
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/dateparser.h b/src/dateparser.h
new file mode 100644
index 0000000..d339a4f
--- /dev/null
+++ b/src/dateparser.h
@@ -0,0 +1,240 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_DATEPARSER_H_
+#define V8_DATEPARSER_H_
+
+#include "scanner.h"
+
+namespace v8 {
+namespace internal {
+
+class DateParser : public AllStatic {
+ public:
+
+  // Parse the string as a date. If parsing succeeds, return true after
+  // filling out the output array as follows (all integers are Smis):
+  // [0]: year
+  // [1]: month (0 = Jan, 1 = Feb, ...)
+  // [2]: day
+  // [3]: hour
+  // [4]: minute
+  // [5]: second
+  // [6]: UTC offset in seconds, or null value if no timezone specified
+  // If parsing fails, return false (content of output array is not defined).
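+  // For example, "Dec 25 1995 13:30:00 GMT+0100" fills the output with
+  // [1995, 11, 25, 13, 30, 0, 3600].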
+  template <typename Char>
+  static bool Parse(Vector<Char> str, FixedArray* output);
+
+  enum {
+    YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, UTC_OFFSET, OUTPUT_SIZE
+  };
+
+ private:
+  // Range testing
+  static inline bool Between(int x, int lo, int hi) {
+    return static_cast<unsigned>(x - lo) <= static_cast<unsigned>(hi - lo);
+  }
+  // Indicates a missing value.
+  static const int kNone = kMaxInt;
+
+  // InputReader provides basic string parsing and character classification.
+  template <typename Char>
+  class InputReader BASE_EMBEDDED {
+   public:
+    explicit InputReader(Vector<Char> s)
+        : index_(0),
+          buffer_(s),
+          has_read_number_(false) {
+      Next();
+    }
+
+    // Advance to the next character of the string.
+    void Next() { ch_ = (index_ < buffer_.length()) ? buffer_[index_++] : 0; }
+
+    // Read a string of digits as an unsigned number (cap just below kMaxInt).
+    int ReadUnsignedNumber() {
+      has_read_number_ = true;
+      int n;
+      for (n = 0; IsAsciiDigit() && n < kMaxInt / 10 - 1; Next()) {
+        n = n * 10 + ch_ - '0';
+      }
+      return n;
+    }
+
+    // Read a word (sequence of chars. >= 'A'), fill the given buffer with a
+    // lower-case prefix, and pad any remainder of the buffer with zeroes.
+    // Return word length.
+    int ReadWord(uint32_t* prefix, int prefix_size) {
+      int len;
+      for (len = 0; IsAsciiAlphaOrAbove(); Next(), len++) {
+        if (len < prefix_size) prefix[len] = GetAsciiAlphaLower();
+      }
+      for (int i = len; i < prefix_size; i++) prefix[i] = 0;
+      return len;
+    }
+
+    // The skip methods return whether they actually skipped something.
+    bool Skip(uint32_t c) { return ch_ == c ?  (Next(), true) : false; }
+
+    bool SkipWhiteSpace() {
+      return Scanner::kIsWhiteSpace.get(ch_) ? (Next(), true) : false;
+    }
+
+    bool SkipParentheses() {
+      if (ch_ != '(') return false;
+      int balance = 0;
+      do {
+        if (ch_ == ')') --balance;
+        else if (ch_ == '(') ++balance;
+        Next();
+      } while (balance > 0 && ch_);
+      return true;
+    }
+
+    // Character testing/classification. Non-ASCII digits are not supported.
+    bool Is(uint32_t c) const { return ch_ == c; }
+    bool IsEnd() const { return ch_ == 0; }
+    bool IsAsciiDigit() const { return IsDecimalDigit(ch_); }
+    bool IsAsciiAlphaOrAbove() const { return ch_ >= 'A'; }
+    bool IsAsciiSign() const { return ch_ == '+' || ch_ == '-'; }
+
+    // Return 1 for '+' and -1 for '-'.
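+    // ('+' is ASCII 43 and '-' is ASCII 45, so 44 - ch_ gives +1 or -1.)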
+    int GetAsciiSignValue() const { return 44 - static_cast<int>(ch_); }
+
+    // Indicates whether any (possibly empty!) numbers have been read.
+    bool HasReadNumber() const { return has_read_number_; }
+
+   private:
+    // If current character is in 'A'-'Z' or 'a'-'z', return its lower-case.
+    // Else, return something outside of 'A'-'Z' and 'a'-'z'.
+    uint32_t GetAsciiAlphaLower() const { return ch_ | 32; }
+
+    int index_;
+    Vector<Char> buffer_;
+    bool has_read_number_;
+    uint32_t ch_;
+  };
+
+  enum KeywordType { INVALID, MONTH_NAME, TIME_ZONE_NAME, AM_PM };
+
+  // KeywordTable maps names of months, time zones, am/pm to numbers.
+  class KeywordTable : public AllStatic {
+   public:
+    // Look up a word in the keyword table and return an index.
+    // 'pre' contains a prefix of the word, zero-padded to size kPrefixLength
+    // and 'len' is the word length.
+    static int Lookup(const uint32_t* pre, int len);
+    // Get the type of the keyword at index i.
+    static KeywordType GetType(int i) {
+      return static_cast<KeywordType>(array[i][kTypeOffset]);
+    }
+    // Get the value of the keyword at index i.
+    static int GetValue(int i) { return array[i][kValueOffset]; }
+
+    static const int kPrefixLength = 3;
+    static const int kTypeOffset = kPrefixLength;
+    static const int kValueOffset = kTypeOffset + 1;
+    static const int kEntrySize = kValueOffset + 1;
+    static const int8_t array[][kEntrySize];
+  };
+
+  class TimeZoneComposer BASE_EMBEDDED {
+   public:
+    TimeZoneComposer() : sign_(kNone), hour_(kNone), minute_(kNone) {}
+    void Set(int offset_in_hours) {
+      sign_ = offset_in_hours < 0 ? -1 : 1;
+      hour_ = offset_in_hours * sign_;
+      minute_ = 0;
+    }
+    void SetSign(int sign) { sign_ = sign < 0 ? -1 : 1; }
+    void SetAbsoluteHour(int hour) { hour_ = hour; }
+    void SetAbsoluteMinute(int minute) { minute_ = minute; }
+    bool IsExpecting(int n) const {
+      return hour_ != kNone && minute_ == kNone && TimeComposer::IsMinute(n);
+    }
+    bool IsUTC() const { return hour_ == 0 && minute_ == 0; }
+    bool Write(FixedArray* output);
+   private:
+    int sign_;
+    int hour_;
+    int minute_;
+  };
+
+  class TimeComposer BASE_EMBEDDED {
+   public:
+    TimeComposer() : index_(0), hour_offset_(kNone) {}
+    bool IsEmpty() const { return index_ == 0; }
+    bool IsExpecting(int n) const {
+      return (index_ == 1 && IsMinute(n)) || (index_ == 2 && IsSecond(n));
+    }
+    bool Add(int n) {
+      return index_ < kSize ? (comp_[index_++] = n, true) : false;
+    }
+    bool AddFinal(int n) {
+      if (!Add(n)) return false;
+      while (index_ < kSize) comp_[index_++] = 0;
+      return true;
+    }
+    void SetHourOffset(int n) { hour_offset_ = n; }
+    bool Write(FixedArray* output);
+
+    static bool IsMinute(int x) { return Between(x, 0, 59); }
+   private:
+    static bool IsHour(int x) { return Between(x, 0, 23); }
+    static bool IsHour12(int x) { return Between(x, 0, 12); }
+    static bool IsSecond(int x) { return Between(x, 0, 59); }
+
+    static const int kSize = 3;
+    int comp_[kSize];
+    int index_;
+    int hour_offset_;
+  };
+
+  class DayComposer BASE_EMBEDDED {
+   public:
+    DayComposer() : index_(0), named_month_(kNone) {}
+    bool IsEmpty() const { return index_ == 0; }
+    bool Add(int n) {
+      return index_ < kSize ? (comp_[index_++] = n, true) : false;
+    }
+    void SetNamedMonth(int n) { named_month_ = n; }
+    bool Write(FixedArray* output);
+   private:
+    static bool IsMonth(int x) { return Between(x, 1, 12); }
+    static bool IsDay(int x) { return Between(x, 1, 31); }
+
+    static const int kSize = 3;
+    int comp_[kSize];
+    int index_;
+    int named_month_;
+  };
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_DATEPARSER_H_
diff --git a/src/debug-agent.cc b/src/debug-agent.cc
new file mode 100644
index 0000000..9d5cace
--- /dev/null
+++ b/src/debug-agent.cc
@@ -0,0 +1,423 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "v8.h"
+#include "debug-agent.h"
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+namespace v8 {
+namespace internal {
+
+// Public V8 debugger API message handler function. This function simply
+// delegates to the debugger agent instance.
+void DebuggerAgentMessageHandler(const v8::Debug::Message& message) {
+  DebuggerAgent::instance_->DebuggerMessage(message);
+}
+
+// static
+DebuggerAgent* DebuggerAgent::instance_ = NULL;
+
+// Debugger agent main thread.
+void DebuggerAgent::Run() {
+  const int kOneSecondInMicros = 1000000;
+
+  // Allow this socket to reuse port even if still in TIME_WAIT.
+  server_->SetReuseAddress(true);
+
+  // First bind the socket to the requested port.
+  bool bound = false;
+  while (!bound && !terminate_) {
+    bound = server_->Bind(port_);
+
+    // If an error occurred, wait a bit before retrying. The most common error
+    // is that the port is already in use, so this avoids a busy loop and lets
+    // the agent take over the port when it becomes free.
+    if (!bound) {
+      terminate_now_->Wait(kOneSecondInMicros);
+    }
+  }
+
+  // Accept connections on the bound port.
+  while (!terminate_) {
+    bool ok = server_->Listen(1);
+    listening_->Signal();
+    if (ok) {
+      // Accept the new connection.
+      Socket* client = server_->Accept();
+      ok = client != NULL;
+      if (ok) {
+        // Create and start a new session.
+        CreateSession(client);
+      }
+    }
+  }
+}
+
+
+void DebuggerAgent::Shutdown() {
+  // Set the termination flag.
+  terminate_ = true;
+
+  // Signal termination and make the server exit either its listen call or its
+  // binding loop. This makes sure that no new sessions can be established.
+  terminate_now_->Signal();
+  server_->Shutdown();
+  Join();
+
+  // Close existing session if any.
+  CloseSession();
+}
+
+
+void DebuggerAgent::WaitUntilListening() {
+  listening_->Wait();
+}
+
+void DebuggerAgent::CreateSession(Socket* client) {
+  ScopedLock with(session_access_);
+
+  // If another session is already established terminate this one.
+  if (session_ != NULL) {
+    static const char* message = "Remote debugging session already active\r\n";
+
+    client->Send(message, strlen(message));
+    delete client;
+    return;
+  }
+
+  // Create a new session and hook up the debug message handler.
+  session_ = new DebuggerAgentSession(this, client);
+  v8::Debug::SetMessageHandler2(DebuggerAgentMessageHandler);
+  session_->Start();
+}
+
+
+void DebuggerAgent::CloseSession() {
+  ScopedLock with(session_access_);
+
+  // Terminate the session.
+  if (session_ != NULL) {
+    session_->Shutdown();
+    session_->Join();
+    delete session_;
+    session_ = NULL;
+  }
+}
+
+
+void DebuggerAgent::DebuggerMessage(const v8::Debug::Message& message) {
+  ScopedLock with(session_access_);
+
+  // Forward the message handling to the session.
+  if (session_ != NULL) {
+    v8::String::Value val(message.GetJSON());
+    session_->DebuggerMessage(Vector<uint16_t>(const_cast<uint16_t*>(*val),
+                              val.length()));
+  }
+}
+
+
+void DebuggerAgent::OnSessionClosed(DebuggerAgentSession* session) {
+  // Don't do anything during termination.
+  if (terminate_) {
+    return;
+  }
+
+  // Terminate the session.
+  ScopedLock with(session_access_);
+  ASSERT(session == session_);
+  if (session == session_) {
+    CloseSession();
+  }
+}
+
+
+void DebuggerAgentSession::Run() {
+  // Send the hello message.
+  bool ok = DebuggerAgentUtil::SendConnectMessage(client_, *agent_->name_);
+  if (!ok) return;
+
+  while (true) {
+    // Read data from the debugger front end.
+    SmartPointer<char> message = DebuggerAgentUtil::ReceiveMessage(client_);
+    if (*message == NULL) {
+      // Session is closed.
+      agent_->OnSessionClosed(this);
+      return;
+    }
+
+    // Convert UTF-8 to UTF-16.
+    unibrow::Utf8InputBuffer<> buf(*message, strlen(*message));
+    int len = 0;
+    while (buf.has_more()) {
+      buf.GetNext();
+      len++;
+    }
+    int16_t* temp = NewArray<int16_t>(len + 1);
+    buf.Reset(*message, strlen(*message));
+    for (int i = 0; i < len; i++) {
+      temp[i] = buf.GetNext();
+    }
+
+    // Send the request received to the debugger.
+    v8::Debug::SendCommand(reinterpret_cast<const uint16_t *>(temp), len);
+    DeleteArray(temp);
+  }
+}
+
+
+void DebuggerAgentSession::DebuggerMessage(Vector<uint16_t> message) {
+  DebuggerAgentUtil::SendMessage(client_, message);
+}
+
+
+void DebuggerAgentSession::Shutdown() {
+  // Shutdown the socket to end the blocking receive.
+  client_->Shutdown();
+}
+
+
+const char* DebuggerAgentUtil::kContentLength = "Content-Length";
+int DebuggerAgentUtil::kContentLengthSize = strlen(kContentLength);
+
+
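+// A message consists of "Key: value\r\n" header lines terminated by an empty
+// "\r\n" line, followed by a UTF-8 body of exactly Content-Length bytes.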
+SmartPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
+  int received;
+
+  // Read header.
+  int content_length = 0;
+  while (true) {
+    const int kHeaderBufferSize = 80;
+    char header_buffer[kHeaderBufferSize];
+    int header_buffer_position = 0;
+    char c = '\0';  // One character receive buffer.
+    char prev_c = '\0';  // Previous character.
+
+    // Read until CRLF.
+    while (!(c == '\n' && prev_c == '\r')) {
+      prev_c = c;
+      received = conn->Receive(&c, 1);
+      if (received <= 0) {
+        PrintF("Error %d\n", Socket::LastError());
+        return SmartPointer<char>();
+      }
+
+      // Add character to header buffer.
+      if (header_buffer_position < kHeaderBufferSize) {
+        header_buffer[header_buffer_position++] = c;
+      }
+    }
+
+    // Check for end of header (empty header line).
+    if (header_buffer_position == 2) {  // Receive buffer contains CRLF.
+      break;
+    }
+
+    // Terminate header.
+    ASSERT(header_buffer_position > 1);  // At least CRLF is received.
+    ASSERT(header_buffer_position <= kHeaderBufferSize);
+    header_buffer[header_buffer_position - 2] = '\0';
+
+    // Split header.
+    char* key = header_buffer;
+    char* value = NULL;
+    for (int i = 0; header_buffer[i] != '\0'; i++) {
+      if (header_buffer[i] == ':') {
+        header_buffer[i] = '\0';
+        value = header_buffer + i + 1;
+        while (*value == ' ') {
+          value++;
+        }
+        break;
+      }
+    }
+
+    // Check that key is Content-Length.
+    if (strcmp(key, kContentLength) == 0) {
+      // Get the content length value if present and within a sensible range.
+      if (value == NULL || strlen(value) > 7) {
+        return SmartPointer<char>();
+      }
+      for (int i = 0; value[i] != '\0'; i++) {
+        // Bail out if illegal data.
+        if (value[i] < '0' || value[i] > '9') {
+          return SmartPointer<char>();
+        }
+        content_length = 10 * content_length + (value[i] - '0');
+      }
+    } else {
+      // For now just print all other headers than Content-Length.
+      PrintF("%s: %s\n", key, value != NULL ? value : "(no value)");
+    }
+  }
+
+  // Return now if no body.
+  if (content_length == 0) {
+    return SmartPointer<char>();
+  }
+
+  // Read body.
+  char* buffer = NewArray<char>(content_length + 1);
+  received = ReceiveAll(conn, buffer, content_length);
+  if (received < content_length) {
+    PrintF("Error %d\n", Socket::LastError());
+    return SmartPointer<char>();
+  }
+  buffer[content_length] = '\0';
+
+  return SmartPointer<char>(buffer);
+}
+
+
+bool DebuggerAgentUtil::SendConnectMessage(const Socket* conn,
+                                           const char* embedding_host) {
+  static const int kBufferSize = 80;
+  char buffer[kBufferSize];  // Sending buffer.
+  bool ok;
+  int len;
+
+  // Send the header.
+  len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
+                     "Type: connect\r\n");
+  ok = conn->Send(buffer, len);
+  if (!ok) return false;
+
+  len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
+                     "V8-Version: %s\r\n", v8::V8::GetVersion());
+  ok = conn->Send(buffer, len);
+  if (!ok) return false;
+
+  len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
+                     "Protocol-Version: 1\r\n");
+  ok = conn->Send(buffer, len);
+  if (!ok) return false;
+
+  if (embedding_host != NULL) {
+    len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
+                       "Embedding-Host: %s\r\n", embedding_host);
+    ok = conn->Send(buffer, len);
+    if (!ok) return false;
+  }
+
+  len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
+                     "%s: 0\r\n", kContentLength);
+  ok = conn->Send(buffer, len);
+  if (!ok) return false;
+
+  // Terminate header with empty line.
+  len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n");
+  ok = conn->Send(buffer, len);
+  if (!ok) return false;
+
+  // No body for connect message.
+
+  return true;
+}
+
+
+bool DebuggerAgentUtil::SendMessage(const Socket* conn,
+                                    const Vector<uint16_t> message) {
+  static const int kBufferSize = 80;
+  char buffer[kBufferSize];  // Sending buffer both for header and body.
+
+  // Calculate the message size in UTF-8 encoding.
+  int utf8_len = 0;
+  for (int i = 0; i < message.length(); i++) {
+    utf8_len += unibrow::Utf8::Length(message[i]);
+  }
+
+  // Send the header.
+  int len;
+  len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
+                     "%s: %d\r\n", kContentLength, utf8_len);
+  conn->Send(buffer, len);
+
+  // Terminate header with empty line.
+  len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n");
+  conn->Send(buffer, len);
+
+  // Send message body as UTF-8.
+  int buffer_position = 0;  // Current buffer position.
+  for (int i = 0; i < message.length(); i++) {
+    // Write next UTF-8 encoded character to buffer.
+    buffer_position +=
+        unibrow::Utf8::Encode(buffer + buffer_position, message[i]);
+    ASSERT(buffer_position < kBufferSize);
+
+    // Send buffer if full or last character is encoded.
+    if (kBufferSize - buffer_position < 3 || i == message.length() - 1) {
+      conn->Send(buffer, buffer_position);
+      buffer_position = 0;
+    }
+  }
+
+  return true;
+}
+
+
+bool DebuggerAgentUtil::SendMessage(const Socket* conn,
+                                    const v8::Handle<v8::String> request) {
+  static const int kBufferSize = 80;
+  char buffer[kBufferSize];  // Sending buffer both for header and body.
+
+  // Convert the request to UTF-8 encoding.
+  v8::String::Utf8Value utf8_request(request);
+
+  // Send the header.
+  int len;
+  len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
+                     "Content-Length: %d\r\n", utf8_request.length());
+  conn->Send(buffer, len);
+
+  // Terminate header with empty line.
+  len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n");
+  conn->Send(buffer, len);
+
+  // Send message body as UTF-8.
+  conn->Send(*utf8_request, utf8_request.length());
+
+  return true;
+}
+
+
+// Receive the full buffer before returning unless an error occurs.
+int DebuggerAgentUtil::ReceiveAll(const Socket* conn, char* data, int len) {
+  int total_received = 0;
+  while (total_received < len) {
+    int received = conn->Receive(data + total_received, len - total_received);
+    if (received <= 0) {
+      return total_received;
+    }
+    total_received += received;
+  }
+  return total_received;
+}
+
+} }  // namespace v8::internal
+
+#endif  // ENABLE_DEBUGGER_SUPPORT
diff --git a/src/debug-agent.h b/src/debug-agent.h
new file mode 100644
index 0000000..3647994
--- /dev/null
+++ b/src/debug-agent.h
@@ -0,0 +1,129 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_DEBUG_AGENT_H_
+#define V8_DEBUG_AGENT_H_
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+#include "../include/v8-debug.h"
+#include "platform.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class DebuggerAgentSession;
+
+
+// Debugger agent which starts a socket listener on the debugger port and
+// handles connections from a remote debugger.
+class DebuggerAgent: public Thread {
+ public:
+  explicit DebuggerAgent(const char* name, int port)
+      : name_(StrDup(name)), port_(port),
+        server_(OS::CreateSocket()), terminate_(false),
+        session_access_(OS::CreateMutex()), session_(NULL),
+        terminate_now_(OS::CreateSemaphore(0)),
+        listening_(OS::CreateSemaphore(0)) {
+    ASSERT(instance_ == NULL);
+    instance_ = this;
+  }
+  ~DebuggerAgent() {
+     instance_ = NULL;
+     delete server_;
+  }
+
+  void Shutdown();
+  void WaitUntilListening();
+
+ private:
+  void Run();
+  void CreateSession(Socket* socket);
+  void DebuggerMessage(const v8::Debug::Message& message);
+  void CloseSession();
+  void OnSessionClosed(DebuggerAgentSession* session);
+
+  SmartPointer<const char> name_;  // Name of the embedding application.
+  int port_;  // Port to use for the agent.
+  Socket* server_;  // Server socket for listen/accept.
+  bool terminate_;  // Termination flag.
+  Mutex* session_access_;  // Mutex guarding access to session_.
+  DebuggerAgentSession* session_;  // Current active session if any.
+  Semaphore* terminate_now_;  // Semaphore to signal termination.
+  Semaphore* listening_;
+
+  static DebuggerAgent* instance_;
+
+  friend class DebuggerAgentSession;
+  friend void DebuggerAgentMessageHandler(const v8::Debug::Message& message);
+
+  DISALLOW_COPY_AND_ASSIGN(DebuggerAgent);
+};
+
+
+// Debugger agent session. The session receives requests from the remote
+// debugger and sends debugger events/responses to the remote debugger.
+class DebuggerAgentSession: public Thread {
+ public:
+  DebuggerAgentSession(DebuggerAgent* agent, Socket* client)
+      : agent_(agent), client_(client) {}
+
+  void DebuggerMessage(Vector<uint16_t> message);
+  void Shutdown();
+
+ private:
+  void Run();
+
+  void DebuggerMessage(Vector<char> message);
+
+  DebuggerAgent* agent_;
+  Socket* client_;
+
+  DISALLOW_COPY_AND_ASSIGN(DebuggerAgentSession);
+};
+
+
+// Utility methods factored out to be used by the D8 shell as well.
+class DebuggerAgentUtil {
+ public:
+  static const char* kContentLength;
+  static int kContentLengthSize;
+
+  static SmartPointer<char> ReceiveMessage(const Socket* conn);
+  static bool SendConnectMessage(const Socket* conn,
+                                 const char* embedding_host);
+  static bool SendMessage(const Socket* conn, const Vector<uint16_t> message);
+  static bool SendMessage(const Socket* conn,
+                          const v8::Handle<v8::String> message);
+  static int ReceiveAll(const Socket* conn, char* data, int len);
+};
+
+} }  // namespace v8::internal
+
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
+#endif  // V8_DEBUG_AGENT_H_
diff --git a/src/debug-delay.js b/src/debug-delay.js
new file mode 100644
index 0000000..cb789be
--- /dev/null
+++ b/src/debug-delay.js
@@ -0,0 +1,2035 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Default number of frames to include in the response to a backtrace request.
+const kDefaultBacktraceLength = 10;
+
+const Debug = {};
+
+// Regular expression to skip "crud" at the beginning of a source line which is
+// not really code. Currently the regular expression matches whitespace and
+// comments.
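+// For example, on "  /* init */ x = 1;" the match covers the leading
+// whitespace and comment, which lets ScriptBreakPoint.set place a line
+// break point at the first real code on the line.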
+const sourceLineBeginningSkip = /^(?:\s*(?:\/\*.*?\*\/)*)*/;
+
+// Debug events which can occur in the V8 JavaScript engine. These originate
+// from the API include file debug.h.
+Debug.DebugEvent = { Break: 1,
+                     Exception: 2,
+                     NewFunction: 3,
+                     BeforeCompile: 4,
+                     AfterCompile: 5,
+                     ScriptCollected: 6 };
+
+// Types of exceptions that can be broken upon.
+Debug.ExceptionBreak = { All : 0,
+                         Uncaught: 1 };
+
+// The different types of steps.
+Debug.StepAction = { StepOut: 0,
+                     StepNext: 1,
+                     StepIn: 2,
+                     StepMin: 3,
+                     StepInMin: 4 };
+
+// The different types of scripts matching enum ScriptType in objects.h.
+Debug.ScriptType = { Native: 0,
+                     Extension: 1,
+                     Normal: 2 };
+
+// The different types of script compilations matching enum
+// Script::CompilationType in objects.h.
+Debug.ScriptCompilationType = { Host: 0,
+                                Eval: 1,
+                                JSON: 2 };
+
+// The different script break point types.
+Debug.ScriptBreakPointType = { ScriptId: 0,
+                               ScriptName: 1 };
+
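+// Converts a script type to a bit flag, e.g. Normal (2) yields 1 << 2 = 4.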
+function ScriptTypeFlag(type) {
+  return (1 << type);
+}
+
+// Globals.
+var next_response_seq = 0;
+var next_break_point_number = 1;
+var break_points = [];
+var script_break_points = [];
+
+
+// Create a new break point object and add it to the list of break points.
+function MakeBreakPoint(source_position, opt_line, opt_column, opt_script_break_point) {
+  var break_point = new BreakPoint(source_position, opt_line, opt_column, opt_script_break_point);
+  break_points.push(break_point);
+  return break_point;
+}
+
+
+// Object representing a break point.
+// NOTE: This object does not have a reference to the function with the break
+// point, as that would prevent the function from being garbage collected when
+// it is no longer used. We do not want break points to keep functions alive.
+function BreakPoint(source_position, opt_line, opt_column, opt_script_break_point) {
+  this.source_position_ = source_position;
+  this.source_line_ = opt_line;
+  this.source_column_ = opt_column;
+  if (opt_script_break_point) {
+    this.script_break_point_ = opt_script_break_point;
+  } else {
+    this.number_ = next_break_point_number++;
+  }
+  this.hit_count_ = 0;
+  this.active_ = true;
+  this.condition_ = null;
+  this.ignoreCount_ = 0;
+}
+
+
+BreakPoint.prototype.number = function() {
+  return this.number_;
+};
+
+
+BreakPoint.prototype.func = function() {
+  return this.func_;
+};
+
+
+BreakPoint.prototype.source_position = function() {
+  return this.source_position_;
+};
+
+
+BreakPoint.prototype.hit_count = function() {
+  return this.hit_count_;
+};
+
+
+BreakPoint.prototype.active = function() {
+  if (this.script_break_point()) {
+    return this.script_break_point().active();
+  }
+  return this.active_;
+};
+
+
+BreakPoint.prototype.condition = function() {
+  if (this.script_break_point() && this.script_break_point().condition()) {
+    return this.script_break_point().condition();
+  }
+  return this.condition_;
+};
+
+
+BreakPoint.prototype.ignoreCount = function() {
+  return this.ignoreCount_;
+};
+
+
+BreakPoint.prototype.script_break_point = function() {
+  return this.script_break_point_;
+};
+
+
+BreakPoint.prototype.enable = function() {
+  this.active_ = true;
+};
+
+
+BreakPoint.prototype.disable = function() {
+  this.active_ = false;
+};
+
+
+BreakPoint.prototype.setCondition = function(condition) {
+  this.condition_ = condition;
+};
+
+
+BreakPoint.prototype.setIgnoreCount = function(ignoreCount) {
+  this.ignoreCount_ = ignoreCount;
+};
+
+
+BreakPoint.prototype.isTriggered = function(exec_state) {
+  // Break point not active - not triggered.
+  if (!this.active()) return false;
+
+  // Check for conditional break point.
+  if (this.condition()) {
+    // If break point has condition try to evaluate it in the top frame.
+    try {
+      var mirror = exec_state.frame(0).evaluate(this.condition());
+      // If there is no sensible mirror or the value is not true, the break
+      // point is not triggered.
+      if (!(mirror instanceof ValueMirror) || !%ToBoolean(mirror.value_)) {
+        return false;
+      }
+    } catch (e) {
+      // Exception evaluating condition counts as not triggered.
+      return false;
+    }
+  }
+
+  // Update the hit count.
+  this.hit_count_++;
+  if (this.script_break_point_) {
+    this.script_break_point_.hit_count_++;
+  }
+
+  // If the break point has an ignore count it is not triggered.
+  if (this.ignoreCount_ > 0) {
+    this.ignoreCount_--;
+    return false;
+  }
+
+  // Break point triggered.
+  return true;
+};
+
+
+// Function called from the runtime when a break point is hit. Returns true if
+// the break point is triggered and supposed to break execution.
+function IsBreakPointTriggered(break_id, break_point) {
+  return break_point.isTriggered(MakeExecutionState(break_id));
+}
+
+
+// Object representing a script break point. The script is referenced by its
+// script name or script id and the break point is represented as line and
+// column.
+function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
+                          opt_groupId) {
+  this.type_ = type;
+  if (type == Debug.ScriptBreakPointType.ScriptId) {
+    this.script_id_ = script_id_or_name;
+  } else {  // type == Debug.ScriptBreakPointType.ScriptName
+    this.script_name_ = script_id_or_name;
+  }
+  this.line_ = opt_line || 0;
+  this.column_ = opt_column;
+  this.groupId_ = opt_groupId;
+  this.hit_count_ = 0;
+  this.active_ = true;
+  this.condition_ = null;
+  this.ignoreCount_ = 0;
+}
+
+
+ScriptBreakPoint.prototype.number = function() {
+  return this.number_;
+};
+
+
+ScriptBreakPoint.prototype.groupId = function() {
+  return this.groupId_;
+};
+
+
+ScriptBreakPoint.prototype.type = function() {
+  return this.type_;
+};
+
+
+ScriptBreakPoint.prototype.script_id = function() {
+  return this.script_id_;
+};
+
+
+ScriptBreakPoint.prototype.script_name = function() {
+  return this.script_name_;
+};
+
+
+ScriptBreakPoint.prototype.line = function() {
+  return this.line_;
+};
+
+
+ScriptBreakPoint.prototype.column = function() {
+  return this.column_;
+};
+
+
+ScriptBreakPoint.prototype.hit_count = function() {
+  return this.hit_count_;
+};
+
+
+ScriptBreakPoint.prototype.active = function() {
+  return this.active_;
+};
+
+
+ScriptBreakPoint.prototype.condition = function() {
+  return this.condition_;
+};
+
+
+ScriptBreakPoint.prototype.ignoreCount = function() {
+  return this.ignoreCount_;
+};
+
+
+ScriptBreakPoint.prototype.enable = function() {
+  this.active_ = true;
+};
+
+
+ScriptBreakPoint.prototype.disable = function() {
+  this.active_ = false;
+};
+
+
+ScriptBreakPoint.prototype.setCondition = function(condition) {
+  this.condition_ = condition;
+};
+
+
+ScriptBreakPoint.prototype.setIgnoreCount = function(ignoreCount) {
+  this.ignoreCount_ = ignoreCount;
+
+  // Set ignore count on all break points created from this script break point.
+  for (var i = 0; i < break_points.length; i++) {
+    if (break_points[i].script_break_point() === this) {
+      break_points[i].setIgnoreCount(ignoreCount);
+    }
+  }
+};
+
+
+// Check whether a script matches this script break point. The match is based
+// on the script id, or on the script name and line range, depending on the
+// break point type.
+ScriptBreakPoint.prototype.matchesScript = function(script) {
+  if (this.type_ == Debug.ScriptBreakPointType.ScriptId) {
+    return this.script_id_ == script.id;
+  } else {  // this.type_ == Debug.ScriptBreakPointType.ScriptName
+    return this.script_name_ == script.name &&
+           script.line_offset <= this.line_  &&
+           this.line_ < script.line_offset + script.lineCount();
+  }
+};
+
+
+// Set the script break point in a script.
+ScriptBreakPoint.prototype.set = function (script) {
+  var column = this.column();
+  var line = this.line();
+  // If the column is undefined the break is on the line. To help locate the
+  // first piece of breakable code on the line try to find the column on the
+  // line which contains some source.
+  if (IS_UNDEFINED(column)) {
+    var source_line = script.sourceLine(this.line());
+
+    // Allocate array for caching the columns where the actual source starts.
+    if (!script.sourceColumnStart_) {
+      script.sourceColumnStart_ = new Array(script.lineCount());
+    }
+
+    // Fill cache if needed and get column where the actual source starts.
+    if (IS_UNDEFINED(script.sourceColumnStart_[line])) {
+      script.sourceColumnStart_[line] =
+          source_line.match(sourceLineBeginningSkip)[0].length;
+    }
+    column = script.sourceColumnStart_[line];
+  }
+
+  // Convert the line and column into an absolute position within the script.
+  var pos = Debug.findScriptSourcePosition(script, this.line(), column);
+
+  // If the position is not found in the script (the script might be shorter
+  // than it used to be) just ignore it.
+  if (pos === null) return;
+
+  // Create a break point object and set the break point.
+  var break_point = MakeBreakPoint(pos, this.line(), this.column(), this);
+  break_point.setIgnoreCount(this.ignoreCount());
+  %SetScriptBreakPoint(script, pos, break_point);
+
+  return break_point;
+};
+
+
+// Clear all the break points created from this script break point
+ScriptBreakPoint.prototype.clear = function () {
+  var remaining_break_points = [];
+  for (var i = 0; i < break_points.length; i++) {
+    if (break_points[i].script_break_point() &&
+        break_points[i].script_break_point() === this) {
+      %ClearBreakPoint(break_points[i]);
+    } else {
+      remaining_break_points.push(break_points[i]);
+    }
+  }
+  break_points = remaining_break_points;
+};
+
+
+// Function called from runtime when a new script is compiled to set any script
+// break points set in this script.
+function UpdateScriptBreakPoints(script) {
+  for (var i = 0; i < script_break_points.length; i++) {
+    if (script_break_points[i].type() == Debug.ScriptBreakPointType.ScriptName &&
+        script_break_points[i].matchesScript(script)) {
+      script_break_points[i].set(script);
+    }
+  }
+}
+
+
+Debug.setListener = function(listener, opt_data) {
+  if (!IS_FUNCTION(listener) && !IS_UNDEFINED(listener) && !IS_NULL(listener)) {
+    throw new Error('Parameters have wrong types.');
+  }
+  %SetDebugEventListener(listener, opt_data);
+};
+
+
+Debug.breakExecution = function(f) {
+  %Break();
+};
+
+Debug.breakLocations = function(f) {
+  if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
+  return %GetBreakLocations(f);
+};
+
+// Returns a Script object. If the parameter is a function the return value
+// is the script in which the function is defined. If the parameter is a string
+// the return value is the script for which the script name has that string
+// value.  If it is a regexp and there is a unique script whose name matches
+// we return that, otherwise undefined.
+Debug.findScript = function(func_or_script_name) {
+  if (IS_FUNCTION(func_or_script_name)) {
+    return %FunctionGetScript(func_or_script_name);
+  } else if (IS_REGEXP(func_or_script_name)) {
+    var scripts = Debug.scripts();
+    var last_result = null;
+    var result_count = 0;
+    for (var i in scripts) {
+      var script = scripts[i];
+      if (func_or_script_name.test(script.name)) {
+        last_result = script;
+        result_count++;
+      }
+    }
+    // Return the unique script matching the regexp.  If more than one
+    // script matches we don't return a value since there is no good way
+    // to decide which one to return.  Returning a "random" one, say the
+    // first, would introduce nondeterminism (or something close to it)
+    // because the order is the heap iteration order.
+    if (result_count == 1) {
+      return last_result;
+    } else {
+      return undefined;
+    }
+  } else {
+    return %GetScript(func_or_script_name);
+  }
+};
+
+// Returns the script source. If the parameter is a function the return value
+// is the script source for the script in which the function is defined. If the
+// parameter is a string the return value is the script for which the script
+// name has that string value.
+Debug.scriptSource = function(func_or_script_name) {
+  return this.findScript(func_or_script_name).source;
+};
+
+Debug.source = function(f) {
+  if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
+  return %FunctionGetSourceCode(f);
+};
+
+Debug.disassemble = function(f) {
+  if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
+  return %DebugDisassembleFunction(f);
+};
+
+Debug.disassembleConstructor = function(f) {
+  if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
+  return %DebugDisassembleConstructor(f);
+};
+
+Debug.sourcePosition = function(f) {
+  if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
+  return %FunctionGetScriptSourcePosition(f);
+};
+
+
+Debug.findFunctionSourceLocation = function(func, opt_line, opt_column) {
+  var script = %FunctionGetScript(func);
+  var script_offset = %FunctionGetScriptSourcePosition(func);
+  return script.locationFromLine(opt_line, opt_column, script_offset);
+}
+
+
+// Returns the character position in a script based on a line number and an
+// optional position within that line.
+Debug.findScriptSourcePosition = function(script, opt_line, opt_column) {
+  var location = script.locationFromLine(opt_line, opt_column);
+  return location ? location.position : null;
+}
+
+
+Debug.findBreakPoint = function(break_point_number, remove) {
+  var break_point;
+  for (var i = 0; i < break_points.length; i++) {
+    if (break_points[i].number() == break_point_number) {
+      break_point = break_points[i];
+      // Remove the break point from the list if requested.
+      if (remove) {
+        break_points.splice(i, 1);
+      }
+      break;
+    }
+  }
+  if (break_point) {
+    return break_point;
+  } else {
+    return this.findScriptBreakPoint(break_point_number, remove);
+  }
+};
+
+
+Debug.setBreakPoint = function(func, opt_line, opt_column, opt_condition) {
+  if (!IS_FUNCTION(func)) throw new Error('Parameters have wrong types.');
+  // Break points in API functions are not supported.
+  if (%FunctionIsAPIFunction(func)) {
+    throw new Error('Cannot set break point in native code.');
+  }
+  // Find source position relative to start of the function
+  var break_position =
+      this.findFunctionSourceLocation(func, opt_line, opt_column).position;
+  var source_position = break_position - this.sourcePosition(func);
+  // Find the script for the function.
+  var script = %FunctionGetScript(func);
+  // Break in builtin JavaScript code is not supported.
+  if (script.type == Debug.ScriptType.Native) {
+    throw new Error('Cannot set break point in native code.');
+  }
+  // If the script for the function has an id convert this to a script break
+  // point.
+  if (script && script.id) {
+    // Adjust the source position to be script relative.
+    source_position += %FunctionGetScriptSourcePosition(func);
+    // Find line and column for the position in the script and set a script
+    // break point from that.
+    var location = script.locationFromPosition(source_position, false);
+    return this.setScriptBreakPointById(script.id,
+                                        location.line, location.column,
+                                        opt_condition);
+  } else {
+    // Set a break point directly on the function.
+    var break_point = MakeBreakPoint(source_position, opt_line, opt_column);
+    %SetFunctionBreakPoint(func, source_position, break_point);
+    break_point.setCondition(opt_condition);
+    return break_point.number();
+  }
+};
+
+
+Debug.enableBreakPoint = function(break_point_number) {
+  var break_point = this.findBreakPoint(break_point_number, false);
+  break_point.enable();
+};
+
+
+Debug.disableBreakPoint = function(break_point_number) {
+  var break_point = this.findBreakPoint(break_point_number, false);
+  break_point.disable();
+};
+
+
+Debug.changeBreakPointCondition = function(break_point_number, condition) {
+  var break_point = this.findBreakPoint(break_point_number, false);
+  break_point.setCondition(condition);
+};
+
+
+Debug.changeBreakPointIgnoreCount = function(break_point_number, ignoreCount) {
+  if (ignoreCount < 0) {
+    throw new Error('Invalid argument');
+  }
+  var break_point = this.findBreakPoint(break_point_number, false);
+  break_point.setIgnoreCount(ignoreCount);
+};
+
+
+Debug.clearBreakPoint = function(break_point_number) {
+  var break_point = this.findBreakPoint(break_point_number, true);
+  if (break_point) {
+    return %ClearBreakPoint(break_point);
+  } else {
+    break_point = this.findScriptBreakPoint(break_point_number, true);
+    if (!break_point) {
+      throw new Error('Invalid breakpoint');
+    }
+  }
+};
+
+
+Debug.clearAllBreakPoints = function() {
+  for (var i = 0; i < break_points.length; i++) {
+    var break_point = break_points[i];
+    %ClearBreakPoint(break_point);
+  }
+  break_points = [];
+};
+
+
+Debug.findScriptBreakPoint = function(break_point_number, remove) {
+  var script_break_point;
+  for (var i = 0; i < script_break_points.length; i++) {
+    if (script_break_points[i].number() == break_point_number) {
+      script_break_point = script_break_points[i];
+      // Remove the break point from the list if requested.
+      if (remove) {
+        script_break_point.clear();
+        script_break_points.splice(i,1);
+      }
+      break;
+    }
+  }
+  return script_break_point;
+}
+
+
+// Sets a breakpoint in a script identified through id or name at the
+// specified source line and column within that line.
+Debug.setScriptBreakPoint = function(type, script_id_or_name,
+                                     opt_line, opt_column, opt_condition,
+                                     opt_groupId) {
+  // Create script break point object.
+  var script_break_point =
+      new ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
+                           opt_groupId);
+
+  // Assign number to the new script break point and add it.
+  script_break_point.number_ = next_break_point_number++;
+  script_break_point.setCondition(opt_condition);
+  script_break_points.push(script_break_point);
+
+  // Run through all scripts to see if this script break point matches any
+  // loaded scripts.
+  var scripts = this.scripts();
+  for (var i = 0; i < scripts.length; i++) {
+    if (script_break_point.matchesScript(scripts[i])) {
+      script_break_point.set(scripts[i]);
+    }
+  }
+
+  return script_break_point.number();
+}
+
+
+Debug.setScriptBreakPointById = function(script_id,
+                                         opt_line, opt_column,
+                                         opt_condition, opt_groupId) {
+  return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId,
+                                  script_id, opt_line, opt_column,
+                                  opt_condition, opt_groupId);
+}
+
+
+Debug.setScriptBreakPointByName = function(script_name,
+                                           opt_line, opt_column,
+                                           opt_condition, opt_groupId) {
+  return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptName,
+                                  script_name, opt_line, opt_column,
+                                  opt_condition, opt_groupId);
+}
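+
+// Example usage (illustrative values):
+//   Debug.setScriptBreakPointByName('math.js', 10, 0, 'x > 1');
+// sets a conditional break point in any script named 'math.js'.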
+
+
+Debug.enableScriptBreakPoint = function(break_point_number) {
+  var script_break_point = this.findScriptBreakPoint(break_point_number, false);
+  script_break_point.enable();
+};
+
+
+Debug.disableScriptBreakPoint = function(break_point_number) {
+  var script_break_point = this.findScriptBreakPoint(break_point_number, false);
+  script_break_point.disable();
+};
+
+
+Debug.changeScriptBreakPointCondition = function(break_point_number, condition) {
+  var script_break_point = this.findScriptBreakPoint(break_point_number, false);
+  script_break_point.setCondition(condition);
+};
+
+
+Debug.changeScriptBreakPointIgnoreCount = function(break_point_number, ignoreCount) {
+  if (ignoreCount < 0) {
+    throw new Error('Invalid argument');
+  }
+  var script_break_point = this.findScriptBreakPoint(break_point_number, false);
+  script_break_point.setIgnoreCount(ignoreCount);
+};
+
+
+Debug.scriptBreakPoints = function() {
+  return script_break_points;
+}
+
+
+Debug.clearStepping = function() {
+  %ClearStepping();
+}
+
+Debug.setBreakOnException = function() {
+  return %ChangeBreakOnException(Debug.ExceptionBreak.All, true);
+};
+
+Debug.clearBreakOnException = function() {
+  return %ChangeBreakOnException(Debug.ExceptionBreak.All, false);
+};
+
+Debug.setBreakOnUncaughtException = function() {
+  return %ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, true);
+};
+
+Debug.clearBreakOnUncaughtException = function() {
+  return %ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, false);
+};
+
+Debug.showBreakPoints = function(f, full) {
+  if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
+  var source = full ? this.scriptSource(f) : this.source(f);
+  var offset = full ? this.sourcePosition(f) : 0;
+  var locations = this.breakLocations(f);
+  if (!locations) return source;
+  locations.sort(function(x, y) { return x - y; });
+  var result = "";
+  var prev_pos = 0;
+  var pos;
+  for (var i = 0; i < locations.length; i++) {
+    pos = locations[i] - offset;
+    result += source.slice(prev_pos, pos);
+    result += "[B" + i + "]";
+    prev_pos = pos;
+  }
+  pos = source.length;
+  result += source.substring(prev_pos, pos);
+  return result;
+};
+
+
+// Get all the scripts currently loaded. Locating all the scripts is based on
+// scanning the heap.
+Debug.scripts = function() {
+  // Collect all scripts in the heap.
+  return %DebugGetLoadedScripts();
+}
+
+function MakeExecutionState(break_id) {
+  return new ExecutionState(break_id);
+}
+
+function ExecutionState(break_id) {
+  this.break_id = break_id;
+  this.selected_frame = 0;
+}
+
+ExecutionState.prototype.prepareStep = function(opt_action, opt_count) {
+  var action = Debug.StepAction.StepIn;
+  if (!IS_UNDEFINED(opt_action)) action = %ToNumber(opt_action);
+  var count = opt_count ? %ToNumber(opt_count) : 1;
+
+  return %PrepareStep(this.break_id, action, count);
+}
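+
+// Example usage (illustrative): step using the 'next' action twice from a
+// break, e.g. exec_state.prepareStep(Debug.StepAction.StepNext, 2);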
+
+ExecutionState.prototype.evaluateGlobal = function(source, disable_break) {
+  return MakeMirror(
+      %DebugEvaluateGlobal(this.break_id, source, Boolean(disable_break)));
+};
+
+ExecutionState.prototype.frameCount = function() {
+  return %GetFrameCount(this.break_id);
+};
+
+ExecutionState.prototype.threadCount = function() {
+  return %GetThreadCount(this.break_id);
+};
+
+ExecutionState.prototype.frame = function(opt_index) {
+  // If no index supplied return the selected frame.
+  if (opt_index == null) opt_index = this.selected_frame;
+  return new FrameMirror(this.break_id, opt_index);
+};
+
+ExecutionState.prototype.cframesValue = function(opt_from_index, opt_to_index) {
+  return %GetCFrames(this.break_id);
+};
+
+ExecutionState.prototype.setSelectedFrame = function(index) {
+  var i = %ToNumber(index);
+  if (i < 0 || i >= this.frameCount()) throw new Error('Illegal frame index.');
+  this.selected_frame = i;
+};
+
+ExecutionState.prototype.selectedFrame = function() {
+  return this.selected_frame;
+};
+
+ExecutionState.prototype.debugCommandProcessor = function(protocol) {
+  return new DebugCommandProcessor(this, protocol);
+};
+
+
+function MakeBreakEvent(exec_state, break_points_hit) {
+  return new BreakEvent(exec_state, break_points_hit);
+}
+
+
+function BreakEvent(exec_state, break_points_hit) {
+  this.exec_state_ = exec_state;
+  this.break_points_hit_ = break_points_hit;
+}
+
+
+BreakEvent.prototype.executionState = function() {
+  return this.exec_state_;
+};
+
+
+BreakEvent.prototype.eventType = function() {
+  return Debug.DebugEvent.Break;
+};
+
+
+BreakEvent.prototype.func = function() {
+  return this.exec_state_.frame(0).func();
+};
+
+
+BreakEvent.prototype.sourceLine = function() {
+  return this.exec_state_.frame(0).sourceLine();
+};
+
+
+BreakEvent.prototype.sourceColumn = function() {
+  return this.exec_state_.frame(0).sourceColumn();
+};
+
+
+BreakEvent.prototype.sourceLineText = function() {
+  return this.exec_state_.frame(0).sourceLineText();
+};
+
+
+BreakEvent.prototype.breakPointsHit = function() {
+  return this.break_points_hit_;
+};
+
+
+BreakEvent.prototype.toJSONProtocol = function() {
+  var o = { seq: next_response_seq++,
+            type: "event",
+            event: "break",
+            body: { invocationText: this.exec_state_.frame(0).invocationText(),
+                  }
+          };
+
+  // Add script related information to the event if available.
+  var script = this.func().script();
+  if (script) {
+    o.body.sourceLine = this.sourceLine();
+    o.body.sourceColumn = this.sourceColumn();
+    o.body.sourceLineText = this.sourceLineText();
+    o.body.script = MakeScriptObject_(script, false);
+  }
+
+  // Add an Array of break points hit if any.
+  if (this.breakPointsHit()) {
+    o.body.breakpoints = [];
+    for (var i = 0; i < this.breakPointsHit().length; i++) {
+      // Find the break point number. For break points originating from a
+      // script break point supply the script break point number.
+      var breakpoint = this.breakPointsHit()[i];
+      var script_break_point = breakpoint.script_break_point();
+      var number;
+      if (script_break_point) {
+        number = script_break_point.number();
+      } else {
+        number = breakpoint.number();
+      }
+      o.body.breakpoints.push(number);
+    }
+  }
+  return JSON.stringify(ObjectToProtocolObject_(o));
+};
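+
+// The break event JSON produced above has roughly this shape (values are
+// illustrative):
+//   {"seq":1,"type":"event","event":"break",
+//    "body":{"invocationText":"...","sourceLine":42,"sourceColumn":0,
+//            "sourceLineText":"...","script":{...},"breakpoints":[1]}}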
+
+
+function MakeExceptionEvent(exec_state, exception, uncaught) {
+  return new ExceptionEvent(exec_state, exception, uncaught);
+}
+
+
+function ExceptionEvent(exec_state, exception, uncaught) {
+  this.exec_state_ = exec_state;
+  this.exception_ = exception;
+  this.uncaught_ = uncaught;
+}
+
+
+ExceptionEvent.prototype.executionState = function() {
+  return this.exec_state_;
+};
+
+
+ExceptionEvent.prototype.eventType = function() {
+  return Debug.DebugEvent.Exception;
+};
+
+
+ExceptionEvent.prototype.exception = function() {
+  return this.exception_;
+}
+
+
+ExceptionEvent.prototype.uncaught = function() {
+  return this.uncaught_;
+}
+
+
+ExceptionEvent.prototype.func = function() {
+  return this.exec_state_.frame(0).func();
+};
+
+
+ExceptionEvent.prototype.sourceLine = function() {
+  return this.exec_state_.frame(0).sourceLine();
+};
+
+
+ExceptionEvent.prototype.sourceColumn = function() {
+  return this.exec_state_.frame(0).sourceColumn();
+};
+
+
+ExceptionEvent.prototype.sourceLineText = function() {
+  return this.exec_state_.frame(0).sourceLineText();
+};
+
+
+ExceptionEvent.prototype.toJSONProtocol = function() {
+  var o = new ProtocolMessage();
+  o.event = "exception";
+  o.body = { uncaught: this.uncaught_,
+             exception: MakeMirror(this.exception_)
+           };
+
+  // Exceptions might happen without any JavaScript frames.
+  if (this.exec_state_.frameCount() > 0) {
+    o.body.sourceLine = this.sourceLine();
+    o.body.sourceColumn = this.sourceColumn();
+    o.body.sourceLineText = this.sourceLineText();
+
+    // Add script information to the event if available.
+    var script = this.func().script();
+    if (script) {
+      o.body.script = MakeScriptObject_(script, false);
+    }
+  } else {
+    o.body.sourceLine = -1;
+  }
+
+  return o.toJSONProtocol();
+};
+
+
+function MakeCompileEvent(exec_state, script, before) {
+  return new CompileEvent(exec_state, script, before);
+}
+
+
+function CompileEvent(exec_state, script, before) {
+  this.exec_state_ = exec_state;
+  this.script_ = MakeMirror(script);
+  this.before_ = before;
+}
+
+
+CompileEvent.prototype.executionState = function() {
+  return this.exec_state_;
+};
+
+
+CompileEvent.prototype.eventType = function() {
+  if (this.before_) {
+    return Debug.DebugEvent.BeforeCompile;
+  } else {
+    return Debug.DebugEvent.AfterCompile;
+  }
+};
+
+
+CompileEvent.prototype.script = function() {
+  return this.script_;
+};
+
+
+CompileEvent.prototype.toJSONProtocol = function() {
+  var o = new ProtocolMessage();
+  o.running = true;
+  if (this.before_) {
+    o.event = "beforeCompile";
+  } else {
+    o.event = "afterCompile";
+  }
+  o.body = {};
+  o.body.script = this.script_;
+
+  return o.toJSONProtocol();
+}
+
+
+function MakeNewFunctionEvent(func) {
+  return new NewFunctionEvent(func);
+}
+
+
+function NewFunctionEvent(func) {
+  this.func = func;
+}
+
+
+NewFunctionEvent.prototype.eventType = function() {
+  return Debug.DebugEvent.NewFunction;
+};
+
+
+NewFunctionEvent.prototype.name = function() {
+  return this.func.name;
+};
+
+
+NewFunctionEvent.prototype.setBreakPoint = function(p) {
+  Debug.setBreakPoint(this.func, p || 0);
+};
+
+
+function MakeScriptCollectedEvent(exec_state, id) {
+  return new ScriptCollectedEvent(exec_state, id);
+}
+
+
+function ScriptCollectedEvent(exec_state, id) {
+  this.exec_state_ = exec_state;
+  this.id_ = id;
+}
+
+
+ScriptCollectedEvent.prototype.id = function() {
+  return this.id_;
+};
+
+
+ScriptCollectedEvent.prototype.executionState = function() {
+  return this.exec_state_;
+};
+
+
+ScriptCollectedEvent.prototype.toJSONProtocol = function() {
+  var o = new ProtocolMessage();
+  o.running = true;
+  o.event = "scriptCollected";
+  o.body = {};
+  o.body.script = { id: this.id() };
+  return o.toJSONProtocol();
+}
+
+
+function MakeScriptObject_(script, include_source) {
+  var o = { id: script.id(),
+            name: script.name(),
+            lineOffset: script.lineOffset(),
+            columnOffset: script.columnOffset(),
+            lineCount: script.lineCount(),
+          };
+  if (!IS_UNDEFINED(script.data())) {
+    o.data = script.data();
+  }
+  if (include_source) {
+    o.source = script.source();
+  }
+  return o;
+};
+
+
+function DebugCommandProcessor(exec_state) {
+  this.exec_state_ = exec_state;
+  this.running_ = false;
+};
+
+
+DebugCommandProcessor.prototype.processDebugRequest = function (request) {
+  return this.processDebugJSONRequest(request);
+}
+
+
+function ProtocolMessage(request) {
+  // Update sequence number.
+  this.seq = next_response_seq++;
+
+  if (request) {
+    // If message is based on a request this is a response. Fill the initial
+    // response from the request.
+    this.type = 'response';
+    this.request_seq = request.seq;
+    this.command = request.command;
+  } else {
+    // If message is not based on a request it is a debugger generated event.
+    this.type = 'event';
+  }
+  this.success = true;
+  this.running = false;
+}
+
+
+ProtocolMessage.prototype.setOption = function(name, value) {
+  if (!this.options_) {
+    this.options_ = {};
+  }
+  this.options_[name] = value;
+}
+
+
+ProtocolMessage.prototype.failed = function(message) {
+  this.success = false;
+  this.message = message;
+}
+
+
+ProtocolMessage.prototype.toJSONProtocol = function() {
+  // Encode the protocol header.
+  var json = {};
+  json.seq = this.seq;
+  if (this.request_seq) {
+    json.request_seq = this.request_seq;
+  }
+  json.type = this.type;
+  if (this.event) {
+    json.event = this.event;
+  }
+  if (this.command) {
+    json.command = this.command;
+  }
+  if (this.success) {
+    json.success = this.success;
+  } else {
+    json.success = false;
+  }
+  if (this.body) {
+    // Encode the body part.
+    var bodyJson;
+    var serializer = MakeMirrorSerializer(true, this.options_);
+    if (this.body instanceof Mirror) {
+      bodyJson = serializer.serializeValue(this.body);
+    } else if (this.body instanceof Array) {
+      bodyJson = [];
+      for (var i = 0; i < this.body.length; i++) {
+        if (this.body[i] instanceof Mirror) {
+          bodyJson.push(serializer.serializeValue(this.body[i]));
+        } else {
+          bodyJson.push(ObjectToProtocolObject_(this.body[i], serializer));
+        }
+      }
+    } else {
+      bodyJson = ObjectToProtocolObject_(this.body, serializer);
+    }
+    json.body = bodyJson;
+    json.refs = serializer.serializeReferencedObjects();
+  }
+  if (this.message) {
+    json.message = this.message;
+  }
+  if (this.running) {
+    json.running = true;
+  } else {
+    json.running = false;
+  }
+  return JSON.stringify(json);
+}
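+
+// A response serialized above has roughly this shape (values illustrative):
+//   {"seq":2,"request_seq":1,"type":"response","command":"continue",
+//    "success":true,"running":true}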
+
+
+DebugCommandProcessor.prototype.createResponse = function(request) {
+  return new ProtocolMessage(request);
+};
+
+
+DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request) {
+  var request;  // Current request.
+  var response;  // Generated response.
+  try {
+    try {
+      // Convert the JSON string to an object.
+      request = %CompileString('(' + json_request + ')', false)();
+
+      // Create an initial response.
+      response = this.createResponse(request);
+
+      if (!request.type) {
+        throw new Error('Type not specified');
+      }
+
+      if (request.type != 'request') {
+        throw new Error("Illegal type '" + request.type + "' in request");
+      }
+
+      if (!request.command) {
+        throw new Error('Command not specified');
+      }
+
+      // TODO(yurys): remove request.arguments.compactFormat check once
+      // ChromeDevTools are switched to 'inlineRefs'
+      if (request.arguments && (request.arguments.inlineRefs ||
+                                request.arguments.compactFormat)) {
+        response.setOption('inlineRefs', true);
+      }
+
+      if (request.command == 'continue') {
+        this.continueRequest_(request, response);
+      } else if (request.command == 'break') {
+        this.breakRequest_(request, response);
+      } else if (request.command == 'setbreakpoint') {
+        this.setBreakPointRequest_(request, response);
+      } else if (request.command == 'changebreakpoint') {
+        this.changeBreakPointRequest_(request, response);
+      } else if (request.command == 'clearbreakpoint') {
+        this.clearBreakPointRequest_(request, response);
+      } else if (request.command == 'clearbreakpointgroup') {
+        this.clearBreakPointGroupRequest_(request, response);
+      } else if (request.command == 'backtrace') {
+        this.backtraceRequest_(request, response);
+      } else if (request.command == 'frame') {
+        this.frameRequest_(request, response);
+      } else if (request.command == 'scopes') {
+        this.scopesRequest_(request, response);
+      } else if (request.command == 'scope') {
+        this.scopeRequest_(request, response);
+      } else if (request.command == 'evaluate') {
+        this.evaluateRequest_(request, response);
+      } else if (request.command == 'lookup') {
+        this.lookupRequest_(request, response);
+      } else if (request.command == 'references') {
+        this.referencesRequest_(request, response);
+      } else if (request.command == 'source') {
+        this.sourceRequest_(request, response);
+      } else if (request.command == 'scripts') {
+        this.scriptsRequest_(request, response);
+      } else if (request.command == 'threads') {
+        this.threadsRequest_(request, response);
+      } else {
+        throw new Error('Unknown command "' + request.command + '" in request');
+      }
+    } catch (e) {
+      // If there is no response object create one (without command).
+      if (!response) {
+        response = this.createResponse();
+      }
+      response.success = false;
+      response.message = %ToString(e);
+    }
+
+    // Return the response as a JSON encoded string.
+    try {
+      this.running_ = response.running;  // Store the running state.
+      return response.toJSONProtocol();
+    } catch (e) {
+      // Failed to generate response - return generic error.
+      return '{"seq":' + response.seq + ',' +
+              '"request_seq":' + request.seq + ',' +
+              '"type":"response",' +
+              '"success":false,' +
+              '"message":"Internal error: ' + %ToString(e) + '"}';
+    }
+  } catch (e) {
+    // Failed in one of the catch blocks above - most generic error.
+    return '{"seq":0,"type":"response","success":false,"message":"Internal error"}';
+  }
+};
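+
+// Example request handled above (illustrative):
+//   {"seq":117,"type":"request","command":"frame","arguments":{"number":0}}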
+
+
+DebugCommandProcessor.prototype.continueRequest_ = function(request, response) {
+  // Check for arguments for continue.
+  if (request.arguments) {
+    var count = 1;
+    var action = Debug.StepAction.StepIn;
+
+    // Pull out arguments.
+    var stepaction = request.arguments.stepaction;
+    var stepcount = request.arguments.stepcount;
+
+    // Get the stepcount argument if any.
+    if (stepcount) {
+      count = %ToNumber(stepcount);
+      if (count < 0) {
+        throw new Error('Invalid stepcount argument "' + stepcount + '".');
+      }
+    }
+
+    // Get the stepaction argument.
+    if (stepaction) {
+      if (stepaction == 'in') {
+        action = Debug.StepAction.StepIn;
+      } else if (stepaction == 'min') {
+        action = Debug.StepAction.StepMin;
+      } else if (stepaction == 'next') {
+        action = Debug.StepAction.StepNext;
+      } else if (stepaction == 'out') {
+        action = Debug.StepAction.StepOut;
+      } else {
+        throw new Error('Invalid stepaction argument "' + stepaction + '".');
+      }
+    }
+
+    // Setup the VM for stepping.
+    this.exec_state_.prepareStep(action, count);
+  }
+
+  // VM should be running after executing this request.
+  response.running = true;
+};
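+
+// Example 'continue' arguments handled above (illustrative):
+//   {"stepaction":"next","stepcount":2}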
+
+
+DebugCommandProcessor.prototype.breakRequest_ = function(request, response) {
+  // Ignore the request as the break command does nothing when execution is
+  // already broken.
+};
+
+
+DebugCommandProcessor.prototype.setBreakPointRequest_ =
+    function(request, response) {
+  // Check for legal request.
+  if (!request.arguments) {
+    response.failed('Missing arguments');
+    return;
+  }
+
+  // Pull out arguments.
+  var type = request.arguments.type;
+  var target = request.arguments.target;
+  var line = request.arguments.line;
+  var column = request.arguments.column;
+  var enabled = IS_UNDEFINED(request.arguments.enabled) ?
+      true : request.arguments.enabled;
+  var condition = request.arguments.condition;
+  var ignoreCount = request.arguments.ignoreCount;
+  var groupId = request.arguments.groupId;
+
+  // Check for legal arguments.
+  if (!type || IS_UNDEFINED(target)) {
+    response.failed('Missing argument "type" or "target"');
+    return;
+  }
+  if (type != 'function' && type != 'handle' &&
+      type != 'script' && type != 'scriptId') {
+    response.failed('Illegal type "' + type + '"');
+    return;
+  }
+
+  // Either function or script break point.
+  var break_point_number;
+  if (type == 'function') {
+    // Handle function break point.
+    if (!IS_STRING(target)) {
+      response.failed('Argument "target" is not a string value');
+      return;
+    }
+    var f;
+    try {
+      // Find the function through a global evaluate.
+      f = this.exec_state_.evaluateGlobal(target).value();
+    } catch (e) {
+      response.failed('Error: "' + %ToString(e) +
+                      '" evaluating "' + target + '"');
+      return;
+    }
+    if (!IS_FUNCTION(f)) {
+      response.failed('"' + target + '" does not evaluate to a function');
+      return;
+    }
+
+    // Set function break point.
+    break_point_number = Debug.setBreakPoint(f, line, column, condition);
+  } else if (type == 'handle') {
+    // Find the object pointed by the specified handle.
+    var handle = parseInt(target, 10);
+    var mirror = LookupMirror(handle);
+    if (!mirror) {
+      return response.failed('Object #' + handle + '# not found');
+    }
+    if (!mirror.isFunction()) {
+      return response.failed('Object #' + handle + '# is not a function');
+    }
+
+    // Set function break point.
+    break_point_number = Debug.setBreakPoint(mirror.value(),
+                                             line, column, condition);
+  } else if (type == 'script') {
+    // set script break point.
+    break_point_number =
+        Debug.setScriptBreakPointByName(target, line, column, condition,
+                                        groupId);
+  } else {  // type == 'scriptId'.
+    break_point_number =
+        Debug.setScriptBreakPointById(target, line, column, condition, groupId);
+  }
+
+  // Set additional break point properties.
+  var break_point = Debug.findBreakPoint(break_point_number);
+  if (ignoreCount) {
+    Debug.changeBreakPointIgnoreCount(break_point_number, ignoreCount);
+  }
+  if (!enabled) {
+    Debug.disableBreakPoint(break_point_number);
+  }
+
+  // Add the break point number to the response.
+  response.body = { type: type,
+                    breakpoint: break_point_number };
+
+  // Add break point information to the response.
+  if (break_point instanceof ScriptBreakPoint) {
+    if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) {
+      response.body.type = 'scriptId';
+      response.body.script_id = break_point.script_id();
+    } else {
+      response.body.type = 'scriptName';
+      response.body.script_name = break_point.script_name();
+    }
+    response.body.line = break_point.line();
+    response.body.column = break_point.column();
+  } else {
+    response.body.type = 'function';
+  }
+};
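+
+// Example 'setbreakpoint' arguments handled above (illustrative):
+//   {"type":"script","target":"test.js","line":12,"condition":"x > 0"}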
+
+
+DebugCommandProcessor.prototype.changeBreakPointRequest_ = function(request, response) {
+  // Check for legal request.
+  if (!request.arguments) {
+    response.failed('Missing arguments');
+    return;
+  }
+
+  // Pull out arguments.
+  var break_point = %ToNumber(request.arguments.breakpoint);
+  var enabled = request.arguments.enabled;
+  var condition = request.arguments.condition;
+  var ignoreCount = request.arguments.ignoreCount;
+
+  // Check for legal arguments.
+  if (!break_point) {
+    response.failed('Missing argument "breakpoint"');
+    return;
+  }
+
+  // Change enabled state if supplied.
+  if (!IS_UNDEFINED(enabled)) {
+    if (enabled) {
+      Debug.enableBreakPoint(break_point);
+    } else {
+      Debug.disableBreakPoint(break_point);
+    }
+  }
+
+  // Change condition if supplied
+  if (!IS_UNDEFINED(condition)) {
+    Debug.changeBreakPointCondition(break_point, condition);
+  }
+
+  // Change ignore count if supplied
+  if (!IS_UNDEFINED(ignoreCount)) {
+    Debug.changeBreakPointIgnoreCount(break_point, ignoreCount);
+  }
+}
+
+
+DebugCommandProcessor.prototype.clearBreakPointGroupRequest_ = function(request, response) {
+  // Check for legal request.
+  if (!request.arguments) {
+    response.failed('Missing arguments');
+    return;
+  }
+
+  // Pull out arguments.
+  var group_id = request.arguments.groupId;
+
+  // Check for legal arguments.
+  if (!group_id) {
+    response.failed('Missing argument "groupId"');
+    return;
+  }
+
+  var cleared_break_points = [];
+  var new_script_break_points = [];
+  for (var i = 0; i < script_break_points.length; i++) {
+    var next_break_point = script_break_points[i];
+    if (next_break_point.groupId() == group_id) {
+      cleared_break_points.push(next_break_point.number());
+      next_break_point.clear();
+    } else {
+      new_script_break_points.push(next_break_point);
+    }
+  }
+  script_break_points = new_script_break_points;
+
+  // Add the cleared break point numbers to the response.
+  response.body = { breakpoints: cleared_break_points };
+}
+
+
+DebugCommandProcessor.prototype.clearBreakPointRequest_ = function(request, response) {
+  // Check for legal request.
+  if (!request.arguments) {
+    response.failed('Missing arguments');
+    return;
+  }
+
+  // Pull out arguments.
+  var break_point = %ToNumber(request.arguments.breakpoint);
+
+  // Check for legal arguments.
+  if (!break_point) {
+    response.failed('Missing argument "breakpoint"');
+    return;
+  }
+
+  // Clear break point.
+  Debug.clearBreakPoint(break_point);
+
+  // Add the cleared break point number to the response.
+  response.body = { breakpoint: break_point };
+}
+
+
+DebugCommandProcessor.prototype.backtraceRequest_ = function(request, response) {
+  // Get the number of frames.
+  var total_frames = this.exec_state_.frameCount();
+
+  // Create simple response if there are no frames.
+  if (total_frames == 0) {
+    response.body = {
+      totalFrames: total_frames
+    }
+    return;
+  }
+
+  // Default frame range to include in backtrace.
+  var from_index = 0;
+  var to_index = kDefaultBacktraceLength;
+
+  // Get the range from the arguments.
+  if (request.arguments) {
+    if (request.arguments.fromFrame) {
+      from_index = request.arguments.fromFrame;
+    }
+    if (request.arguments.toFrame) {
+      to_index = request.arguments.toFrame;
+    }
+    if (request.arguments.bottom) {
+      var tmp_index = total_frames - from_index;
+      from_index = total_frames - to_index;
+      to_index = tmp_index;
+    }
+    if (from_index < 0 || to_index < 0) {
+      return response.failed('Invalid frame number');
+    }
+  }
+
+  // Adjust the index.
+  to_index = Math.min(total_frames, to_index);
+
+  if (to_index <= from_index) {
+    var error = 'Invalid frame range';
+    return response.failed(error);
+  }
+
+  // Create the response body.
+  var frames = [];
+  for (var i = from_index; i < to_index; i++) {
+    frames.push(this.exec_state_.frame(i));
+  }
+  response.body = {
+    fromFrame: from_index,
+    toFrame: to_index,
+    totalFrames: total_frames,
+    frames: frames
+  }
+};
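+
+// Example 'backtrace' arguments handled above (illustrative):
+//   {"fromFrame":0,"toFrame":10}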
+
+
+DebugCommandProcessor.prototype.backtracec = function(cmd, args) {
+  return this.exec_state_.cframesValue();
+};
+
+
+DebugCommandProcessor.prototype.frameRequest_ = function(request, response) {
+  // No frames no source.
+  if (this.exec_state_.frameCount() == 0) {
+    return response.failed('No frames');
+  }
+
+  // With no arguments just keep the selected frame.
+  if (request.arguments) {
+    var index = request.arguments.number;
+    if (index < 0 || this.exec_state_.frameCount() <= index) {
+      return response.failed('Invalid frame number');
+    }
+
+    this.exec_state_.setSelectedFrame(request.arguments.number);
+  }
+  response.body = this.exec_state_.frame();
+};
+
+
+DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) {
+  // Get the frame for which the scope or scopes are requested. With no
+  // frameNumber argument use the currently selected frame.
+  if (request.arguments && !IS_UNDEFINED(request.arguments.frameNumber)) {
+    var frame_index = request.arguments.frameNumber;
+    if (frame_index < 0 || this.exec_state_.frameCount() <= frame_index) {
+      // No response object is in scope here, so signal the error by throwing;
+      // the caller's error handling reports it.
+      throw new Error('Invalid frame number');
+    }
+    return this.exec_state_.frame(frame_index);
+  } else {
+    return this.exec_state_.frame();
+  }
+}
+
+
+DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
+  // No frames no scopes.
+  if (this.exec_state_.frameCount() == 0) {
+    return response.failed('No scopes');
+  }
+
+  // Get the frame for which the scopes are requested.
+  var frame = this.frameForScopeRequest_(request);
+
+  // Fill all scopes for this frame.
+  var total_scopes = frame.scopeCount();
+  var scopes = [];
+  for (var i = 0; i < total_scopes; i++) {
+    scopes.push(frame.scope(i));
+  }
+  response.body = {
+    fromScope: 0,
+    toScope: total_scopes,
+    totalScopes: total_scopes,
+    scopes: scopes
+  }
+};
+
+
+DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) {
+  // No frames no scopes.
+  if (this.exec_state_.frameCount() == 0) {
+    return response.failed('No scopes');
+  }
+
+  // Get the frame for which the scope is requested.
+  var frame = this.frameForScopeRequest_(request);
+
+  // With no scope argument just return top scope.
+  var scope_index = 0;
+  if (request.arguments && !IS_UNDEFINED(request.arguments.number)) {
+    scope_index = %ToNumber(request.arguments.number);
+    if (scope_index < 0 || frame.scopeCount() <= scope_index) {
+      return response.failed('Invalid scope number');
+    }
+  }
+
+  response.body = frame.scope(scope_index);
+};
+
+
+DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
+  if (!request.arguments) {
+    return response.failed('Missing arguments');
+  }
+
+  // Pull out arguments.
+  var expression = request.arguments.expression;
+  var frame = request.arguments.frame;
+  var global = request.arguments.global;
+  var disable_break = request.arguments.disable_break;
+
+  // The expression argument could be an integer so we convert it to a
+  // string.
+  try {
+    expression = String(expression);
+  } catch(e) {
+    return response.failed('Failed to convert expression argument to string');
+  }
+
+  // Check for legal arguments.
+  if (!IS_UNDEFINED(frame) && global) {
+    return response.failed('Arguments "frame" and "global" are exclusive');
+  }
+
+  // Global evaluate.
+  if (global) {
+    // Evaluate in the global context.
+    response.body =
+        this.exec_state_.evaluateGlobal(expression, Boolean(disable_break));
+    return;
+  }
+
+  // Default value for disable_break is true.
+  if (IS_UNDEFINED(disable_break)) {
+    disable_break = true;
+  }
+
+  // No frames no evaluate in frame.
+  if (this.exec_state_.frameCount() == 0) {
+    return response.failed('No frames');
+  }
+
+  // Check whether a frame was specified.
+  if (!IS_UNDEFINED(frame)) {
+    var frame_number = %ToNumber(frame);
+    if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) {
+      return response.failed('Invalid frame "' + frame + '"');
+    }
+    // Evaluate in the specified frame.
+    response.body = this.exec_state_.frame(frame_number).evaluate(
+        expression, Boolean(disable_break));
+    return;
+  } else {
+    // Evaluate in the selected frame.
+    response.body = this.exec_state_.frame().evaluate(
+        expression, Boolean(disable_break));
+    return;
+  }
+};
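+
+// Example 'evaluate' arguments handled above (illustrative):
+//   {"expression":"1 + 2","frame":0,"disable_break":true}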
+
+
+DebugCommandProcessor.prototype.lookupRequest_ = function(request, response) {
+  if (!request.arguments) {
+    return response.failed('Missing arguments');
+  }
+
+  // Pull out arguments.
+  var handles = request.arguments.handles;
+
+  // Check for legal arguments.
+  if (IS_UNDEFINED(handles)) {
+    return response.failed('Argument "handles" missing');
+  }
+
+  // Set 'includeSource' option for script lookup.
+  if (!IS_UNDEFINED(request.arguments.includeSource)) {
+    var includeSource = %ToBoolean(request.arguments.includeSource);
+    response.setOption('includeSource', includeSource);
+  }
+
+  // Lookup handles.
+  var mirrors = {};
+  for (var i = 0; i < handles.length; i++) {
+    var handle = handles[i];
+    var mirror = LookupMirror(handle);
+    if (!mirror) {
+      return response.failed('Object #' + handle + '# not found');
+    }
+    mirrors[handle] = mirror;
+  }
+  response.body = mirrors;
+};
+
+
+DebugCommandProcessor.prototype.referencesRequest_ =
+    function(request, response) {
+  if (!request.arguments) {
+    return response.failed('Missing arguments');
+  }
+
+  // Pull out arguments.
+  var type = request.arguments.type;
+  var handle = request.arguments.handle;
+
+  // Check for legal arguments.
+  if (IS_UNDEFINED(type)) {
+    return response.failed('Argument "type" missing');
+  }
+  if (IS_UNDEFINED(handle)) {
+    return response.failed('Argument "handle" missing');
+  }
+  if (type != 'referencedBy' && type != 'constructedBy') {
+    return response.failed('Invalid type "' + type + '"');
+  }
+
+  // Look up the handle and return the objects which reference or were
+  // constructed by the object.
+  var mirror = LookupMirror(handle);
+  if (mirror) {
+    if (type == 'referencedBy') {
+      response.body = mirror.referencedBy();
+    } else {
+      response.body = mirror.constructedBy();
+    }
+  } else {
+    return response.failed('Object #' + handle + '# not found');
+  }
+};
+
+
+DebugCommandProcessor.prototype.sourceRequest_ = function(request, response) {
+  // No frames no source.
+  if (this.exec_state_.frameCount() == 0) {
+    return response.failed('No source');
+  }
+
+  var from_line;
+  var to_line;
+  var frame = this.exec_state_.frame();
+  if (request.arguments) {
+    // Pull out arguments.
+    from_line = request.arguments.fromLine;
+    to_line = request.arguments.toLine;
+
+    if (!IS_UNDEFINED(request.arguments.frame)) {
+      var frame_number = %ToNumber(request.arguments.frame);
+      if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) {
+        return response.failed(
+            'Invalid frame "' + request.arguments.frame + '"');
+      }
+      frame = this.exec_state_.frame(frame_number);
+    }
+  }
+
+  // Get the script selected.
+  var script = frame.func().script();
+  if (!script) {
+    return response.failed('No source');
+  }
+
+  // Get the source slice and fill it into the response.
+  var slice = script.sourceSlice(from_line, to_line);
+  if (!slice) {
+    return response.failed('Invalid line interval');
+  }
+  response.body = {};
+  response.body.source = slice.sourceText();
+  response.body.fromLine = slice.from_line;
+  response.body.toLine = slice.to_line;
+  response.body.fromPosition = slice.from_position;
+  response.body.toPosition = slice.to_position;
+  response.body.totalLines = script.lineCount();
+};
+
+
+DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) {
+  var types = ScriptTypeFlag(Debug.ScriptType.Normal);
+  var includeSource = false;
+  var idsToInclude = null;
+  if (request.arguments) {
+    // Pull out arguments.
+    if (!IS_UNDEFINED(request.arguments.types)) {
+      types = %ToNumber(request.arguments.types);
+      if (isNaN(types) || types < 0) {
+        return response.failed('Invalid types "' + request.arguments.types + '"');
+      }
+    }
+    
+    if (!IS_UNDEFINED(request.arguments.includeSource)) {
+      includeSource = %ToBoolean(request.arguments.includeSource);
+      response.setOption('includeSource', includeSource);
+    }
+    
+    if (IS_ARRAY(request.arguments.ids)) {
+      idsToInclude = {};
+      var ids = request.arguments.ids;
+      for (var i = 0; i < ids.length; i++) {
+        idsToInclude[ids[i]] = true;
+      }
+    }
+  }
+
+  // Collect all scripts in the heap.
+  var scripts = %DebugGetLoadedScripts();
+
+  response.body = [];
+
+  for (var i = 0; i < scripts.length; i++) {
+    if (idsToInclude && !idsToInclude[scripts[i].id]) {
+      continue;
+    }
+    if (types & ScriptTypeFlag(scripts[i].type)) {
+      response.body.push(MakeMirror(scripts[i]));
+    }
+  }
+};
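+
+// Example 'scripts' arguments handled above (illustrative):
+//   {"includeSource":true,"ids":[42]}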
+
+
+DebugCommandProcessor.prototype.threadsRequest_ = function(request, response) {
+  // Get the number of threads.
+  var total_threads = this.exec_state_.threadCount();
+
+  // Get information for all threads.
+  var threads = [];
+  for (var i = 0; i < total_threads; i++) {
+    var details = %GetThreadDetails(this.exec_state_.break_id, i);
+    var thread_info = { current: details[0],
+                        id: details[1]
+                      }
+    threads.push(thread_info);
+  }
+
+  // Create the response body.
+  response.body = {
+    totalThreads: total_threads,
+    threads: threads
+  }
+};
+
+
+// Check whether the previously processed command caused the VM to become
+// running.
+DebugCommandProcessor.prototype.isRunning = function() {
+  return this.running_;
+}
+
+
+DebugCommandProcessor.prototype.systemBreak = function(cmd, args) {
+  return %SystemBreak();
+};
+
+
+function NumberToHex8Str(n) {
+  var r = "";
+  for (var i = 0; i < 8; ++i) {
+    var c = hexCharArray[n & 0x0F];  // hexCharArray is defined in uri.js
+    r = c + r;
+    n = n >>> 4;
+  }
+  return r;
+};
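+
+// Example (illustrative): NumberToHex8Str(255) returns an 8 character string
+// built from hexCharArray, e.g. "000000FF".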
+
+DebugCommandProcessor.prototype.formatCFrames = function(cframes_value) {
+  var result = "";
+  if (cframes_value == null || cframes_value.length == 0) {
+    result += "(stack empty)";
+  } else {
+    for (var i = 0; i < cframes_value.length; ++i) {
+      if (i != 0) result += "\n";
+      result += this.formatCFrame(cframes_value[i]);
+    }
+  }
+  return result;
+};
+
+
+DebugCommandProcessor.prototype.formatCFrame = function(cframe_value) {
+  var result = "";
+  result += "0x" + NumberToHex8Str(cframe_value.address);
+  if (!IS_UNDEFINED(cframe_value.text)) {
+    result += " " + cframe_value.text;
+  }
+  return result;
+}
+
+
+/**
+ * Convert an Object to its debugger protocol representation. The
+ * representation may be serialized to a JSON string using JSON.stringify().
+ * This implementation simply runs through all string property names, converts
+ * each property value to a protocol value and adds the property to the result
+ * object. For type "object" the function will be called recursively. Note that
+ * circular structures will cause infinite recursion.
+ * @param {Object} object The object to format as protocol object.
+ * @param {MirrorSerializer} mirror_serializer The serializer to use if any
+ *     mirror objects are encountered.
+ * @return {Object} Protocol object value.
+ */
+function ObjectToProtocolObject_(object, mirror_serializer) {
+  var content = {};
+  for (var key in object) {
+    // Only consider string keys.
+    if (typeof key == 'string') {
+      // Format the value based on its type.
+      var property_value_json = ValueToProtocolValue_(object[key],
+                                                      mirror_serializer);
+      // Add the property if relevant.
+      if (!IS_UNDEFINED(property_value_json)) {
+        content[key] = property_value_json;
+      }
+    }
+  }
+  
+  return content;
+}
+
+
+/**
+ * Convert an array to its debugger protocol representation. It will convert
+ * each array element to a protocol value.
+ * @param {Array} array The array to format as protocol array.
+ * @param {MirrorSerializer} mirror_serializer The serializer to use if any
+ *     mirror objects are encountered.
+ * @return {Array} Protocol array value.
+ */
+function ArrayToProtocolArray_(array, mirror_serializer) {
+  var json = [];
+  for (var i = 0; i < array.length; i++) {
+    json.push(ValueToProtocolValue_(array[i], mirror_serializer));
+  }
+  return json;
+}
+
+
+/**
+ * Convert a value to its debugger protocol representation. 
+ * @param {*} value The value to format as protocol value.
+ * @param {MirrorSerializer} mirror_serializer The serializer to use if any
+ *     mirror objects are encountered.
+ * @return {*} Protocol value.
+ */
+function ValueToProtocolValue_(value, mirror_serializer) {
+  // Format the value based on its type.
+  var json;
+  switch (typeof value) {
+    case 'object':
+      if (value instanceof Mirror) {
+        json = mirror_serializer.serializeValue(value);
+      } else if (IS_ARRAY(value)){
+        json = ArrayToProtocolArray_(value, mirror_serializer);
+      } else {
+        json = ObjectToProtocolObject_(value, mirror_serializer);
+      }
+      break;
+
+    case 'boolean':
+    case 'string':
+    case 'number':
+      json = value;
+      break
+
+    default:
+      json = null;
+  }
+  return json;
+}
diff --git a/src/debug.cc b/src/debug.cc
new file mode 100644
index 0000000..ec658d6
--- /dev/null
+++ b/src/debug.cc
@@ -0,0 +1,2728 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "api.h"
+#include "arguments.h"
+#include "bootstrapper.h"
+#include "code-stubs.h"
+#include "compilation-cache.h"
+#include "compiler.h"
+#include "debug.h"
+#include "execution.h"
+#include "global-handles.h"
+#include "ic.h"
+#include "ic-inl.h"
+#include "natives.h"
+#include "stub-cache.h"
+#include "log.h"
+
+#include "../include/v8-debug.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+static void PrintLn(v8::Local<v8::Value> value) {
+  v8::Local<v8::String> s = value->ToString();
+  char* data = NewArray<char>(s->Length() + 1);
+  if (data == NULL) {
+    V8::FatalProcessOutOfMemory("PrintLn");
+    return;
+  }
+  s->WriteAscii(data);
+  PrintF("%s\n", data);
+  DeleteArray(data);
+}
+
+
+static Handle<Code> ComputeCallDebugBreak(int argc) {
+  CALL_HEAP_FUNCTION(StubCache::ComputeCallDebugBreak(argc), Code);
+}
+
+
+static Handle<Code> ComputeCallDebugPrepareStepIn(int argc) {
+  CALL_HEAP_FUNCTION(StubCache::ComputeCallDebugPrepareStepIn(argc), Code);
+}
+
+
+BreakLocationIterator::BreakLocationIterator(Handle<DebugInfo> debug_info,
+                                             BreakLocatorType type) {
+  debug_info_ = debug_info;
+  type_ = type;
+  // Get the stub early to avoid possible GC during iterations. We may need
+  // this stub to detect debugger calls generated from debugger statements.
+  debug_break_stub_ = RuntimeStub(Runtime::kDebugBreak, 0).GetCode();
+  reloc_iterator_ = NULL;
+  reloc_iterator_original_ = NULL;
+  Reset();  // Initialize the rest of the member variables.
+}
+
+
+BreakLocationIterator::~BreakLocationIterator() {
+  ASSERT(reloc_iterator_ != NULL);
+  ASSERT(reloc_iterator_original_ != NULL);
+  delete reloc_iterator_;
+  delete reloc_iterator_original_;
+}
+
+
+void BreakLocationIterator::Next() {
+  AssertNoAllocation nogc;
+  ASSERT(!RinfoDone());
+
+  // Iterate through reloc info for code and original code stopping at each
+  // breakable code target.
+  bool first = break_point_ == -1;
+  while (!RinfoDone()) {
+    if (!first) RinfoNext();
+    first = false;
+    if (RinfoDone()) return;
+
+    // Whenever a statement position or (plain) position is passed update the
+    // current value of these.
+    if (RelocInfo::IsPosition(rmode())) {
+      if (RelocInfo::IsStatementPosition(rmode())) {
+        statement_position_ =
+            rinfo()->data() - debug_info_->shared()->start_position();
+      }
+      // Always update the position as we don't want that to be before the
+      // statement position.
+      position_ = rinfo()->data() - debug_info_->shared()->start_position();
+      ASSERT(position_ >= 0);
+      ASSERT(statement_position_ >= 0);
+    }
+
+    // Check for breakable code target. Look in the original code as setting
+    // break points can cause the code targets in the running (debugged) code to
+    // be of a different kind than in the original code.
+    if (RelocInfo::IsCodeTarget(rmode())) {
+      Address target = original_rinfo()->target_address();
+      Code* code = Code::GetCodeFromTargetAddress(target);
+      if (code->is_inline_cache_stub() || RelocInfo::IsConstructCall(rmode())) {
+        break_point_++;
+        return;
+      }
+      if (code->kind() == Code::STUB) {
+        if (IsDebuggerStatement()) {
+          break_point_++;
+          return;
+        }
+        if (type_ == ALL_BREAK_LOCATIONS) {
+          if (Debug::IsBreakStub(code)) {
+            break_point_++;
+            return;
+          }
+        } else {
+          ASSERT(type_ == SOURCE_BREAK_LOCATIONS);
+          if (Debug::IsSourceBreakStub(code)) {
+            break_point_++;
+            return;
+          }
+        }
+      }
+    }
+
+    // Check for break at return.
+    if (RelocInfo::IsJSReturn(rmode())) {
+      // Set the positions to the end of the function.
+      if (debug_info_->shared()->HasSourceCode()) {
+        position_ = debug_info_->shared()->end_position() -
+                    debug_info_->shared()->start_position();
+      } else {
+        position_ = 0;
+      }
+      statement_position_ = position_;
+      break_point_++;
+      return;
+    }
+  }
+}
+
+
+void BreakLocationIterator::Next(int count) {
+  while (count > 0) {
+    Next();
+    count--;
+  }
+}
+
+
+// Find the break point closest to the supplied address.
+void BreakLocationIterator::FindBreakLocationFromAddress(Address pc) {
+  // Run through all break points to locate the one closest to the address.
+  int closest_break_point = 0;
+  int distance = kMaxInt;
+  while (!Done()) {
+    // Check if this break point is closer than what was previously found.
+    if (this->pc() < pc && pc - this->pc() < distance) {
+      closest_break_point = break_point();
+      distance = pc - this->pc();
+      // Check whether we can't get any closer.
+      if (distance == 0) break;
+    }
+    Next();
+  }
+
+  // Move to the break point found.
+  Reset();
+  Next(closest_break_point);
+}
+
+
+// Find the break point closest to the supplied source position.
+void BreakLocationIterator::FindBreakLocationFromPosition(int position) {
+  // Run through all break points to locate the one closest to the source
+  // position.
+  int closest_break_point = 0;
+  int distance = kMaxInt;
+  while (!Done()) {
+    // Check if this break point is closer than what was previously found.
+    if (position <= statement_position() &&
+        statement_position() - position < distance) {
+      closest_break_point = break_point();
+      distance = statement_position() - position;
+      // Check whether we can't get any closer.
+      if (distance == 0) break;
+    }
+    Next();
+  }
+
+  // Move to the break point found.
+  Reset();
+  Next(closest_break_point);
+}
+
+
+void BreakLocationIterator::Reset() {
+  // Create relocation iterators for the two code objects.
+  if (reloc_iterator_ != NULL) delete reloc_iterator_;
+  if (reloc_iterator_original_ != NULL) delete reloc_iterator_original_;
+  reloc_iterator_ = new RelocIterator(debug_info_->code());
+  reloc_iterator_original_ = new RelocIterator(debug_info_->original_code());
+
+  // Position at the first break point.
+  break_point_ = -1;
+  position_ = 1;
+  statement_position_ = 1;
+  Next();
+}
+
+
+bool BreakLocationIterator::Done() const {
+  return RinfoDone();
+}
+
+
+void BreakLocationIterator::SetBreakPoint(Handle<Object> break_point_object) {
+  // If there is not already a real break point here patch code with debug
+  // break.
+  if (!HasBreakPoint()) {
+    SetDebugBreak();
+  }
+  ASSERT(IsDebugBreak() || IsDebuggerStatement());
+  // Set the break point information.
+  DebugInfo::SetBreakPoint(debug_info_, code_position(),
+                           position(), statement_position(),
+                           break_point_object);
+}
+
+
+void BreakLocationIterator::ClearBreakPoint(Handle<Object> break_point_object) {
+  // Clear the break point information.
+  DebugInfo::ClearBreakPoint(debug_info_, code_position(), break_point_object);
+  // If there are no more break points here remove the debug break.
+  if (!HasBreakPoint()) {
+    ClearDebugBreak();
+    ASSERT(!IsDebugBreak());
+  }
+}
+
+
+void BreakLocationIterator::SetOneShot() {
+  // Debugger statement always calls debugger. No need to modify it.
+  if (IsDebuggerStatement()) {
+    return;
+  }
+
+  // If there is a real break point here no more to do.
+  if (HasBreakPoint()) {
+    ASSERT(IsDebugBreak());
+    return;
+  }
+
+  // Patch code with debug break.
+  SetDebugBreak();
+}
+
+
+void BreakLocationIterator::ClearOneShot() {
+  // Debugger statement always calls debugger. No need to modify it.
+  if (IsDebuggerStatement()) {
+    return;
+  }
+
+  // If there is a real break point here no more to do.
+  if (HasBreakPoint()) {
+    ASSERT(IsDebugBreak());
+    return;
+  }
+
+  // Patch code removing debug break.
+  ClearDebugBreak();
+  ASSERT(!IsDebugBreak());
+}
+
+
+void BreakLocationIterator::SetDebugBreak() {
+  // Debugger statement always calls debugger. No need to modify it.
+  if (IsDebuggerStatement()) {
+    return;
+  }
+
+  // If there is already a break point here just return. This might happen if
+  // the same code is flooded with break points twice. Flooding the same
+  // function twice might happen when stepping in a function with an exception
+  // handler, as the handler and the function are the same.
+  if (IsDebugBreak()) {
+    return;
+  }
+
+  if (RelocInfo::IsJSReturn(rmode())) {
+    // Patch the frame exit code with a break point.
+    SetDebugBreakAtReturn();
+  } else {
+    // Patch the IC call.
+    SetDebugBreakAtIC();
+  }
+  ASSERT(IsDebugBreak());
+}
+
+
+void BreakLocationIterator::ClearDebugBreak() {
+  // Debugger statement always calls debugger. No need to modify it.
+  if (IsDebuggerStatement()) {
+    return;
+  }
+
+  if (RelocInfo::IsJSReturn(rmode())) {
+    // Restore the frame exit code.
+    ClearDebugBreakAtReturn();
+  } else {
+    // Patch the IC call.
+    ClearDebugBreakAtIC();
+  }
+  ASSERT(!IsDebugBreak());
+}
+
+
+void BreakLocationIterator::PrepareStepIn() {
+  HandleScope scope;
+
+  // Step in can only be prepared if currently positioned on an IC call,
+  // construct call or CallFunction stub call.
+  Address target = rinfo()->target_address();
+  Handle<Code> code(Code::GetCodeFromTargetAddress(target));
+  if (code->is_call_stub()) {
+    // Step in through IC call is handled by the runtime system. Therefore make
+    // sure that any current IC is cleared and the runtime system is called.
+    // If the executing code has a debug break at the location, change the
+    // call in the original code, as it is the code there that will be
+    // executed in place of the debug break call.
+    Handle<Code> stub = ComputeCallDebugPrepareStepIn(code->arguments_count());
+    if (IsDebugBreak()) {
+      original_rinfo()->set_target_address(stub->entry());
+    } else {
+      rinfo()->set_target_address(stub->entry());
+    }
+  } else {
+#ifdef DEBUG
+    // All the following stuff is needed only for assertion checks so the code
+    // is wrapped in ifdef.
+    Handle<Code> maybe_call_function_stub = code;
+    if (IsDebugBreak()) {
+      Address original_target = original_rinfo()->target_address();
+      maybe_call_function_stub =
+          Handle<Code>(Code::GetCodeFromTargetAddress(original_target));
+    }
+    bool is_call_function_stub =
+        (maybe_call_function_stub->kind() == Code::STUB &&
+         maybe_call_function_stub->major_key() == CodeStub::CallFunction);
+
+    // Step in through construct call requires no changes to the running code.
+    // Step in through getters/setters should already be prepared as well
+    // because the caller of this function (Debug::PrepareStep) is expected to
+    // flood the top frame's function with one-shot breakpoints.
+    // Step in through CallFunction stub should also be prepared by the caller
+    // of this function (Debug::PrepareStep) which should flood the target
+    // function with breakpoints.
+    ASSERT(RelocInfo::IsConstructCall(rmode()) || code->is_inline_cache_stub()
+           || is_call_function_stub);
+#endif
+  }
+}
+
+
+// Check whether the break point is at a position which will exit the function.
+bool BreakLocationIterator::IsExit() const {
+  return (RelocInfo::IsJSReturn(rmode()));
+}
+
+
+bool BreakLocationIterator::HasBreakPoint() {
+  return debug_info_->HasBreakPoint(code_position());
+}
+
+
+// Check whether there is a debug break at the current position.
+bool BreakLocationIterator::IsDebugBreak() {
+  if (RelocInfo::IsJSReturn(rmode())) {
+    return IsDebugBreakAtReturn();
+  } else {
+    return Debug::IsDebugBreak(rinfo()->target_address());
+  }
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtIC() {
+  // Patch the original code with the current address as the current address
+  // might have been changed by inline caching since the code was copied.
+  original_rinfo()->set_target_address(rinfo()->target_address());
+
+  RelocInfo::Mode mode = rmode();
+  if (RelocInfo::IsCodeTarget(mode)) {
+    Address target = rinfo()->target_address();
+    Handle<Code> code(Code::GetCodeFromTargetAddress(target));
+
+    // Patch the code to invoke the builtin debug break function matching the
+    // calling convention used by the call site.
+    Handle<Code> dbgbrk_code(Debug::FindDebugBreak(code, mode));
+    rinfo()->set_target_address(dbgbrk_code->entry());
+
+    // For stubs that refer back to an inlined version clear the cached map for
+    // the inlined case to always go through the IC. As long as the break point
+    // is set the patching performed by the runtime system will take place in
+    // the code copy and will therefore have no effect on the running code
+    // keeping it from using the inlined code.
+    if (code->is_keyed_load_stub()) KeyedLoadIC::ClearInlinedVersion(pc());
+    if (code->is_keyed_store_stub()) KeyedStoreIC::ClearInlinedVersion(pc());
+  }
+}
+
+
+void BreakLocationIterator::ClearDebugBreakAtIC() {
+  // Patch the code to the original invoke.
+  rinfo()->set_target_address(original_rinfo()->target_address());
+
+  RelocInfo::Mode mode = rmode();
+  if (RelocInfo::IsCodeTarget(mode)) {
+    Address target = original_rinfo()->target_address();
+    Handle<Code> code(Code::GetCodeFromTargetAddress(target));
+
+    // Restore the inlined version of keyed stores to get back to the
+    // fast case.  We need to patch back the keyed store because no
+    // patching happens when running normally.  For keyed loads, the
+    // map check will get patched back when running normally after ICs
+    // have been cleared at GC.
+    if (code->is_keyed_store_stub()) KeyedStoreIC::RestoreInlinedVersion(pc());
+  }
+}
+
+
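+// Check whether the current break location is at an explicit 'debugger'
+// statement, i.e. a call to the debug break code stub.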
+bool BreakLocationIterator::IsDebuggerStatement() {
+  if (RelocInfo::IsCodeTarget(rmode())) {
+    Address target = original_rinfo()->target_address();
+    Code* code = Code::GetCodeFromTargetAddress(target);
+    if (code->kind() == Code::STUB) {
+      CodeStub::Major major_key = code->major_key();
+      if (major_key == CodeStub::Runtime) {
+        return (*debug_break_stub_ == code);
+      }
+    }
+  }
+  return false;
+}
+
+
+Object* BreakLocationIterator::BreakPointObjects() {
+  return debug_info_->GetBreakPointObjects(code_position());
+}
+
+
+// Clear out all the debug break code. This is ONLY supposed to be used when
+// shutting down the debugger as it will leave the break point information in
+// DebugInfo even though the code is patched back to the non break point state.
+void BreakLocationIterator::ClearAllDebugBreak() {
+  while (!Done()) {
+    ClearDebugBreak();
+    Next();
+  }
+}
+
+
+bool BreakLocationIterator::RinfoDone() const {
+  ASSERT(reloc_iterator_->done() == reloc_iterator_original_->done());
+  return reloc_iterator_->done();
+}
+
+
+void BreakLocationIterator::RinfoNext() {
+  reloc_iterator_->next();
+  reloc_iterator_original_->next();
+#ifdef DEBUG
+  ASSERT(reloc_iterator_->done() == reloc_iterator_original_->done());
+  if (!reloc_iterator_->done()) {
+    ASSERT(rmode() == original_rmode());
+  }
+#endif
+}
+
+
+bool Debug::has_break_points_ = false;
+ScriptCache* Debug::script_cache_ = NULL;
+DebugInfoListNode* Debug::debug_info_list_ = NULL;
+
+
+// Threading support.
+void Debug::ThreadInit() {
+  thread_local_.break_count_ = 0;
+  thread_local_.break_id_ = 0;
+  thread_local_.break_frame_id_ = StackFrame::NO_ID;
+  thread_local_.last_step_action_ = StepNone;
+  thread_local_.last_statement_position_ = RelocInfo::kNoPosition;
+  thread_local_.step_count_ = 0;
+  thread_local_.last_fp_ = 0;
+  thread_local_.step_into_fp_ = 0;
+  thread_local_.step_out_fp_ = 0;
+  thread_local_.after_break_target_ = 0;
+  thread_local_.debugger_entry_ = NULL;
+  thread_local_.pending_interrupts_ = 0;
+}
+
+
+JSCallerSavedBuffer Debug::registers_;
+Debug::ThreadLocal Debug::thread_local_;
+
+
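+// Archive the debugger state for the current thread into the supplied storage
+// and reset the state for this thread.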
+char* Debug::ArchiveDebug(char* storage) {
+  char* to = storage;
+  memcpy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
+  to += sizeof(ThreadLocal);
+  memcpy(to, reinterpret_cast<char*>(&registers_), sizeof(registers_));
+  ThreadInit();
+  ASSERT(to <= storage + ArchiveSpacePerThread());
+  return storage + ArchiveSpacePerThread();
+}
+
+
+char* Debug::RestoreDebug(char* storage) {
+  char* from = storage;
+  memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
+  from += sizeof(ThreadLocal);
+  memcpy(reinterpret_cast<char*>(&registers_), from, sizeof(registers_));
+  ASSERT(from <= storage + ArchiveSpacePerThread());
+  return storage + ArchiveSpacePerThread();
+}
+
+
+int Debug::ArchiveSpacePerThread() {
+  return sizeof(ThreadLocal) + sizeof(registers_);
+}
+
+
+// Default break enabled.
+bool Debug::disable_break_ = false;
+
+// Default call debugger on uncaught exception.
+bool Debug::break_on_exception_ = false;
+bool Debug::break_on_uncaught_exception_ = true;
+
+Handle<Context> Debug::debug_context_ = Handle<Context>();
+Code* Debug::debug_break_return_ = NULL;
+
+
+void ScriptCache::Add(Handle<Script> script) {
+  // Create an entry in the hash map for the script.
+  int id = Smi::cast(script->id())->value();
+  HashMap::Entry* entry =
+      HashMap::Lookup(reinterpret_cast<void*>(id), Hash(id), true);
+  if (entry->value != NULL) {
+    ASSERT(*script == *reinterpret_cast<Script**>(entry->value));
+    return;
+  }
+
+  // Globalize the script object, make it weak and use the location of the
+  // global handle as the value in the hash map.
+  Handle<Script> script_ =
+      Handle<Script>::cast((GlobalHandles::Create(*script)));
+  GlobalHandles::MakeWeak(reinterpret_cast<Object**>(script_.location()),
+                          this, ScriptCache::HandleWeakScript);
+  entry->value = script_.location();
+}
+
+
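+// Return an array holding all the scripts currently in the cache.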
+Handle<FixedArray> ScriptCache::GetScripts() {
+  Handle<FixedArray> instances = Factory::NewFixedArray(occupancy());
+  int count = 0;
+  for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
+    ASSERT(entry->value != NULL);
+    if (entry->value != NULL) {
+      instances->set(count, *reinterpret_cast<Script**>(entry->value));
+      count++;
+    }
+  }
+  return instances;
+}
+
+
+void ScriptCache::ProcessCollectedScripts() {
+  for (int i = 0; i < collected_scripts_.length(); i++) {
+    Debugger::OnScriptCollected(collected_scripts_[i]);
+  }
+  collected_scripts_.Clear();
+}
+
+
+void ScriptCache::Clear() {
+  // Iterate the script cache to get rid of all the weak handles.
+  for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
+    ASSERT(entry != NULL);
+    Object** location = reinterpret_cast<Object**>(entry->value);
+    ASSERT((*location)->IsScript());
+    GlobalHandles::ClearWeakness(location);
+    GlobalHandles::Destroy(location);
+  }
+  // Clear the content of the hash map.
+  HashMap::Clear();
+}
+
+
+void ScriptCache::HandleWeakScript(v8::Persistent<v8::Value> obj, void* data) {
+  ScriptCache* script_cache = reinterpret_cast<ScriptCache*>(data);
+  // Find the location of the global handle.
+  Script** location =
+      reinterpret_cast<Script**>(Utils::OpenHandle(*obj).location());
+  ASSERT((*location)->IsScript());
+
+  // Remove the entry from the cache.
+  int id = Smi::cast((*location)->id())->value();
+  script_cache->Remove(reinterpret_cast<void*>(id), Hash(id));
+  script_cache->collected_scripts_.Add(id);
+
+  // Clear the weak handle.
+  obj.Dispose();
+  obj.Clear();
+}
+
+
+void Debug::Setup(bool create_heap_objects) {
+  ThreadInit();
+  if (create_heap_objects) {
+    // Get code to handle debug break on return.
+    debug_break_return_ =
+        Builtins::builtin(Builtins::Return_DebugBreak);
+    ASSERT(debug_break_return_->IsCode());
+  }
+}
+
+
+void Debug::HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data) {
+  DebugInfoListNode* node = reinterpret_cast<DebugInfoListNode*>(data);
+  RemoveDebugInfo(node->debug_info());
+#ifdef DEBUG
+  node = Debug::debug_info_list_;
+  while (node != NULL) {
+    ASSERT(node != reinterpret_cast<DebugInfoListNode*>(data));
+    node = node->next();
+  }
+#endif
+}
+
+
+DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
+  // Globalize the requested debug info object and make it weak.
+  debug_info_ = Handle<DebugInfo>::cast((GlobalHandles::Create(debug_info)));
+  GlobalHandles::MakeWeak(reinterpret_cast<Object**>(debug_info_.location()),
+                          this, Debug::HandleWeakDebugInfo);
+}
+
+
+DebugInfoListNode::~DebugInfoListNode() {
+  GlobalHandles::Destroy(reinterpret_cast<Object**>(debug_info_.location()));
+}
+
+
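+// Compile one of the JavaScript natives used by the debugger, identified by
+// its index, in the current context.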
+bool Debug::CompileDebuggerScript(int index) {
+  HandleScope scope;
+
+  // Bail out if the index is invalid.
+  if (index == -1) {
+    return false;
+  }
+
+  // Find source and name for the requested script.
+  Handle<String> source_code = Bootstrapper::NativesSourceLookup(index);
+  Vector<const char> name = Natives::GetScriptName(index);
+  Handle<String> script_name = Factory::NewStringFromAscii(name);
+
+  // Compile the script.
+  bool allow_natives_syntax = FLAG_allow_natives_syntax;
+  FLAG_allow_natives_syntax = true;
+  Handle<JSFunction> boilerplate;
+  boilerplate = Compiler::Compile(source_code, script_name, 0, 0, NULL, NULL);
+  FLAG_allow_natives_syntax = allow_natives_syntax;
+
+  // Silently ignore stack overflows during compilation.
+  if (boilerplate.is_null()) {
+    ASSERT(Top::has_pending_exception());
+    Top::clear_pending_exception();
+    return false;
+  }
+
+  // Execute the boilerplate function in the debugger context.
+  Handle<Context> context = Top::global_context();
+  bool caught_exception = false;
+  Handle<JSFunction> function =
+      Factory::NewFunctionFromBoilerplate(boilerplate, context);
+  Handle<Object> result =
+      Execution::TryCall(function, Handle<Object>(context->global()),
+                         0, NULL, &caught_exception);
+
+  // Check for caught exceptions.
+  if (caught_exception) {
+    Handle<Object> message = MessageHandler::MakeMessageObject(
+        "error_loading_debugger", NULL, Vector<Handle<Object> >::empty(),
+        Handle<String>());
+    MessageHandler::ReportMessage(NULL, message);
+    return false;
+  }
+
+  // Mark this script as native and return successfully.
+  Handle<Script> script(Script::cast(function->shared()->script()));
+  script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
+  return true;
+}
+
+
+bool Debug::Load() {
+  // Return if debugger is already loaded.
+  if (IsLoaded()) return true;
+
+  // Bail out if we're already in the process of compiling the native
+  // JavaScript source code for the debugger.
+  if (Debugger::compiling_natives() || Debugger::is_loading_debugger())
+    return false;
+  Debugger::set_loading_debugger(true);
+
+  // Disable breakpoints and interrupts while compiling and running the
+  // debugger scripts including the context creation code.
+  DisableBreak disable(true);
+  PostponeInterruptsScope postpone;
+
+  // Create the debugger context.
+  HandleScope scope;
+  Handle<Context> context =
+      Bootstrapper::CreateEnvironment(Handle<Object>::null(),
+                                      v8::Handle<ObjectTemplate>(),
+                                      NULL);
+
+  // Use the debugger context.
+  SaveContext save;
+  Top::set_context(*context);
+
+  // Expose the builtins object in the debugger context.
+  Handle<String> key = Factory::LookupAsciiSymbol("builtins");
+  Handle<GlobalObject> global = Handle<GlobalObject>(context->global());
+  SetProperty(global, key, Handle<Object>(global->builtins()), NONE);
+
+  // Compile the JavaScript for the debugger in the debugger context.
+  Debugger::set_compiling_natives(true);
+  bool caught_exception =
+      !CompileDebuggerScript(Natives::GetIndex("mirror")) ||
+      !CompileDebuggerScript(Natives::GetIndex("debug"));
+  Debugger::set_compiling_natives(false);
+
+  // Make sure we mark the debugger as not loading before we might
+  // return.
+  Debugger::set_loading_debugger(false);
+
+  // Check for caught exceptions.
+  if (caught_exception) return false;
+
+  // Debugger loaded.
+  debug_context_ = Handle<Context>::cast(GlobalHandles::Create(*context));
+
+  return true;
+}
+
+
+void Debug::Unload() {
+  // Return if the debugger is not loaded.
+  if (!IsLoaded()) {
+    return;
+  }
+
+  // Clear the script cache.
+  DestroyScriptCache();
+
+  // Clear debugger context global handle.
+  GlobalHandles::Destroy(reinterpret_cast<Object**>(debug_context_.location()));
+  debug_context_ = Handle<Context>();
+}
+
+
+// Set the flag indicating that preemption happened during debugging.
+void Debug::PreemptionWhileInDebugger() {
+  ASSERT(InDebugger());
+  Debug::set_interrupts_pending(PREEMPT);
+}
+
+
+void Debug::Iterate(ObjectVisitor* v) {
+  v->VisitPointer(bit_cast<Object**, Code**>(&(debug_break_return_)));
+}
+
+
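+// Runtime entry point hit when a debug break or break point is reached.
+// Notifies the debugger or prepares any remaining step actions before
+// execution continues.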
+Object* Debug::Break(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 0);
+
+  // Get the top-most JavaScript frame.
+  JavaScriptFrameIterator it;
+  JavaScriptFrame* frame = it.frame();
+
+  // Just continue if breaks are disabled or debugger cannot be loaded.
+  if (disable_break() || !Load()) {
+    SetAfterBreakTarget(frame);
+    return Heap::undefined_value();
+  }
+
+  // Enter the debugger.
+  EnterDebugger debugger;
+  if (debugger.FailedToEnter()) {
+    return Heap::undefined_value();
+  }
+
+  // Postpone interrupt during breakpoint processing.
+  PostponeInterruptsScope postpone;
+
+  // Get the debug info (create it if it does not exist).
+  Handle<SharedFunctionInfo> shared =
+      Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
+  Handle<DebugInfo> debug_info = GetDebugInfo(shared);
+
+  // Find the break point where execution has stopped.
+  BreakLocationIterator break_location_iterator(debug_info,
+                                                ALL_BREAK_LOCATIONS);
+  break_location_iterator.FindBreakLocationFromAddress(frame->pc());
+
+  // Check whether step next reached a new statement.
+  if (!StepNextContinue(&break_location_iterator, frame)) {
+    // Decrease steps left if performing multiple steps.
+    if (thread_local_.step_count_ > 0) {
+      thread_local_.step_count_--;
+    }
+  }
+
+  // If there are one or more real break points, check whether any of these
+  // are triggered.
+  Handle<Object> break_points_hit(Heap::undefined_value());
+  if (break_location_iterator.HasBreakPoint()) {
+    Handle<Object> break_point_objects =
+        Handle<Object>(break_location_iterator.BreakPointObjects());
+    break_points_hit = CheckBreakPoints(break_point_objects);
+  }
+
+  // If step out is active skip everything until the frame where we need to
+  // step out to is reached, unless a real breakpoint is hit.
+  if (Debug::StepOutActive() && frame->fp() != Debug::step_out_fp() &&
+      break_points_hit->IsUndefined()) {
+      // Step count should always be 0 for StepOut.
+      ASSERT(thread_local_.step_count_ == 0);
+  } else if (!break_points_hit->IsUndefined() ||
+             (thread_local_.last_step_action_ != StepNone &&
+              thread_local_.step_count_ == 0)) {
+    // Notify debugger if a real break point is triggered or if performing
+    // single stepping with no more steps to perform. Otherwise do another step.
+
+    // Clear all current stepping setup.
+    ClearStepping();
+
+    // Notify the debug event listeners.
+    Debugger::OnDebugBreak(break_points_hit, false);
+  } else if (thread_local_.last_step_action_ != StepNone) {
+    // Hold on to last step action as it is cleared by the call to
+    // ClearStepping.
+    StepAction step_action = thread_local_.last_step_action_;
+    int step_count = thread_local_.step_count_;
+
+    // Clear all current stepping setup.
+    ClearStepping();
+
+    // Set up for the remaining steps.
+    PrepareStep(step_action, step_count);
+  }
+
+  // Install jump to the call address which was overwritten.
+  SetAfterBreakTarget(frame);
+
+  return Heap::undefined_value();
+}
+
+
+// Check the break point objects for whether one or more are actually
+// triggered. This function returns a JSArray with the break point objects
+// which are triggered.
+Handle<Object> Debug::CheckBreakPoints(Handle<Object> break_point_objects) {
+  int break_points_hit_count = 0;
+  Handle<JSArray> break_points_hit = Factory::NewJSArray(1);
+
+  // If there are multiple break points they are in a FixedArray.
+  ASSERT(!break_point_objects->IsUndefined());
+  if (break_point_objects->IsFixedArray()) {
+    Handle<FixedArray> array(FixedArray::cast(*break_point_objects));
+    for (int i = 0; i < array->length(); i++) {
+      Handle<Object> o(array->get(i));
+      if (CheckBreakPoint(o)) {
+        break_points_hit->SetElement(break_points_hit_count++, *o);
+      }
+    }
+  } else {
+    if (CheckBreakPoint(break_point_objects)) {
+      break_points_hit->SetElement(break_points_hit_count++,
+                                   *break_point_objects);
+    }
+  }
+
+  // Return undefined if no break points were triggered.
+  if (break_points_hit_count == 0) {
+    return Factory::undefined_value();
+  }
+  return break_points_hit;
+}
+
+
+// Check whether a single break point object is triggered.
+bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
+  HandleScope scope;
+
+  // Ignore check if break point object is not a JSObject.
+  if (!break_point_object->IsJSObject()) return true;
+
+  // Get the function IsBreakPointTriggered (defined in debug.js).
+  Handle<JSFunction> check_break_point =
+    Handle<JSFunction>(JSFunction::cast(
+      debug_context()->global()->GetProperty(
+          *Factory::LookupAsciiSymbol("IsBreakPointTriggered"))));
+
+  // Get the break id as an object.
+  Handle<Object> break_id = Factory::NewNumberFromInt(Debug::break_id());
+
+  // Call IsBreakPointTriggered.
+  bool caught_exception = false;
+  const int argc = 2;
+  Object** argv[argc] = {
+    break_id.location(),
+    reinterpret_cast<Object**>(break_point_object.location())
+  };
+  Handle<Object> result = Execution::TryCall(check_break_point,
+                                             Top::builtins(), argc, argv,
+                                             &caught_exception);
+
+  // If exception or non-boolean result, handle as not triggered.
+  if (caught_exception || !result->IsBoolean()) {
+    return false;
+  }
+
+  // Return whether the break point is triggered.
+  return *result == Heap::true_value();
+}
+
+
+// Check whether the function has debug information.
+bool Debug::HasDebugInfo(Handle<SharedFunctionInfo> shared) {
+  return !shared->debug_info()->IsUndefined();
+}
+
+
+// Return the debug info for this function. EnsureDebugInfo must be called
+// prior to ensure the debug info has been generated for shared.
+Handle<DebugInfo> Debug::GetDebugInfo(Handle<SharedFunctionInfo> shared) {
+  ASSERT(HasDebugInfo(shared));
+  return Handle<DebugInfo>(DebugInfo::cast(shared->debug_info()));
+}
+
+
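+// Set a break point for the function at the break location closest to the
+// supplied source position.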
+void Debug::SetBreakPoint(Handle<SharedFunctionInfo> shared,
+                          int source_position,
+                          Handle<Object> break_point_object) {
+  HandleScope scope;
+
+  if (!EnsureDebugInfo(shared)) {
+    // Return if retrieving debug info failed.
+    return;
+  }
+
+  Handle<DebugInfo> debug_info = GetDebugInfo(shared);
+  // Source positions start with zero.
+  ASSERT(source_position >= 0);
+
+  // Find the break point and change it.
+  BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
+  it.FindBreakLocationFromPosition(source_position);
+  it.SetBreakPoint(break_point_object);
+
+  // At least one active break point now.
+  ASSERT(debug_info->GetBreakPointCount() > 0);
+}
+
+
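+// Clear the break point associated with the supplied break point object.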
+void Debug::ClearBreakPoint(Handle<Object> break_point_object) {
+  HandleScope scope;
+
+  DebugInfoListNode* node = debug_info_list_;
+  while (node != NULL) {
+    Object* result = DebugInfo::FindBreakPointInfo(node->debug_info(),
+                                                   break_point_object);
+    if (!result->IsUndefined()) {
+      // Get information in the break point.
+      BreakPointInfo* break_point_info = BreakPointInfo::cast(result);
+      Handle<DebugInfo> debug_info = node->debug_info();
+      Handle<SharedFunctionInfo> shared(debug_info->shared());
+      int source_position = break_point_info->statement_position()->value();
+
+      // Source positions start with zero.
+      ASSERT(source_position >= 0);
+
+      // Find the break point and clear it.
+      BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
+      it.FindBreakLocationFromPosition(source_position);
+      it.ClearBreakPoint(break_point_object);
+
+      // If there are no more break points left remove the debug info for this
+      // function.
+      if (debug_info->GetBreakPointCount() == 0) {
+        RemoveDebugInfo(debug_info);
+      }
+
+      return;
+    }
+    node = node->next();
+  }
+}
+
+
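+// Clear all debug break code in the functions with debug info and remove the
+// debug info itself.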
+void Debug::ClearAllBreakPoints() {
+  DebugInfoListNode* node = debug_info_list_;
+  while (node != NULL) {
+    // Remove all debug break code.
+    BreakLocationIterator it(node->debug_info(), ALL_BREAK_LOCATIONS);
+    it.ClearAllDebugBreak();
+    node = node->next();
+  }
+
+  // Remove all debug info.
+  while (debug_info_list_ != NULL) {
+    RemoveDebugInfo(debug_info_list_->debug_info());
+  }
+}
+
+
+void Debug::FloodWithOneShot(Handle<SharedFunctionInfo> shared) {
+  // Make sure the function has set up the debug info.
+  if (!EnsureDebugInfo(shared)) {
+    // Return if we failed to retrieve the debug info.
+    return;
+  }
+
+  // Flood the function with break points.
+  BreakLocationIterator it(GetDebugInfo(shared), ALL_BREAK_LOCATIONS);
+  while (!it.Done()) {
+    it.SetOneShot();
+    it.Next();
+  }
+}
+
+
+void Debug::FloodHandlerWithOneShot() {
+  // Iterate through the JavaScript stack looking for handlers.
+  StackFrame::Id id = break_frame_id();
+  if (id == StackFrame::NO_ID) {
+    // If there is no JavaScript stack don't do anything.
+    return;
+  }
+  for (JavaScriptFrameIterator it(id); !it.done(); it.Advance()) {
+    JavaScriptFrame* frame = it.frame();
+    if (frame->HasHandler()) {
+      Handle<SharedFunctionInfo> shared =
+          Handle<SharedFunctionInfo>(
+              JSFunction::cast(frame->function())->shared());
+      // Flood the function containing the catch block with break points.
+      FloodWithOneShot(shared);
+      return;
+    }
+  }
+}
+
+
+void Debug::ChangeBreakOnException(ExceptionBreakType type, bool enable) {
+  if (type == BreakUncaughtException) {
+    break_on_uncaught_exception_ = enable;
+  } else {
+    break_on_exception_ = enable;
+  }
+}
+
+
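+// Prepare for the requested step action by flooding the relevant functions
+// with one-shot break points.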
+void Debug::PrepareStep(StepAction step_action, int step_count) {
+  HandleScope scope;
+  ASSERT(Debug::InDebugger());
+
+  // Remember this step action and count.
+  thread_local_.last_step_action_ = step_action;
+  if (step_action == StepOut) {
+    // For step out the target frame will be found on the stack, so there is
+    // no need to set a step counter for it. It's expected to always be 0 for
+    // StepOut.
+    thread_local_.step_count_ = 0;
+  } else {
+    thread_local_.step_count_ = step_count;
+  }
+
+  // Get the frame where the execution has stopped and skip the debug frame if
+  // any. The debug frame will only be present if execution was stopped due to
+  // hitting a break point. In other situations (e.g. unhandled exception) the
+  // debug frame is not present.
+  StackFrame::Id id = break_frame_id();
+  if (id == StackFrame::NO_ID) {
+    // If there is no JavaScript stack don't do anything.
+    return;
+  }
+  JavaScriptFrameIterator frames_it(id);
+  JavaScriptFrame* frame = frames_it.frame();
+
+  // First of all ensure there are one-shot break points in the top handler
+  // if any.
+  FloodHandlerWithOneShot();
+
+  // If the function on the top frame is unresolved perform step out. This will
+  // be the case when calling unknown functions and having the debugger stopped
+  // in an unhandled exception.
+  if (!frame->function()->IsJSFunction()) {
+    // Step out: Find the calling JavaScript frame and flood it with
+    // breakpoints.
+    frames_it.Advance();
+    // Fill the function to return to with one-shot break points.
+    JSFunction* function = JSFunction::cast(frames_it.frame()->function());
+    FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared()));
+    return;
+  }
+
+  // Get the debug info (create it if it does not exist).
+  Handle<SharedFunctionInfo> shared =
+      Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
+  if (!EnsureDebugInfo(shared)) {
+    // Return if ensuring debug info failed.
+    return;
+  }
+  Handle<DebugInfo> debug_info = GetDebugInfo(shared);
+
+  // Find the break location where execution has stopped.
+  BreakLocationIterator it(debug_info, ALL_BREAK_LOCATIONS);
+  it.FindBreakLocationFromAddress(frame->pc());
+
+  // Compute whether or not the target is a call target.
+  bool is_call_target = false;
+  bool is_load_or_store = false;
+  bool is_inline_cache_stub = false;
+  Handle<Code> call_function_stub;
+  if (RelocInfo::IsCodeTarget(it.rinfo()->rmode())) {
+    Address target = it.rinfo()->target_address();
+    Code* code = Code::GetCodeFromTargetAddress(target);
+    if (code->is_call_stub()) {
+      is_call_target = true;
+    }
+    if (code->is_inline_cache_stub()) {
+      is_inline_cache_stub = true;
+      is_load_or_store = !is_call_target;
+    }
+
+    // Check if target code is CallFunction stub.
+    Code* maybe_call_function_stub = code;
+    // If there is a breakpoint at this line look at the original code to
+    // check if it is a CallFunction stub.
+    if (it.IsDebugBreak()) {
+      Address original_target = it.original_rinfo()->target_address();
+      maybe_call_function_stub =
+          Code::GetCodeFromTargetAddress(original_target);
+    }
+    if (maybe_call_function_stub->kind() == Code::STUB &&
+        maybe_call_function_stub->major_key() == CodeStub::CallFunction) {
+      // Save reference to the code as we may need it to find out arguments
+      // count for 'step in' later.
+      call_function_stub = Handle<Code>(maybe_call_function_stub);
+    }
+  }
+
+  // If this is the last break code target step out is the only possibility.
+  if (it.IsExit() || step_action == StepOut) {
+    if (step_action == StepOut) {
+      // Skip step_count frames starting with the current one.
+      while (step_count-- > 0 && !frames_it.done()) {
+        frames_it.Advance();
+      }
+    } else {
+      ASSERT(it.IsExit());
+      frames_it.Advance();
+    }
+    // Skip builtin functions on the stack.
+    while (!frames_it.done() &&
+           JSFunction::cast(frames_it.frame()->function())->IsBuiltin()) {
+      frames_it.Advance();
+    }
+    // Step out: If there is a JavaScript caller frame, we need to
+    // flood it with breakpoints.
+    if (!frames_it.done()) {
+      // Fill the function to return to with one-shot break points.
+      JSFunction* function = JSFunction::cast(frames_it.frame()->function());
+      FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared()));
+      // Set target frame pointer.
+      ActivateStepOut(frames_it.frame());
+    }
+  } else if (!(is_inline_cache_stub || RelocInfo::IsConstructCall(it.rmode()) ||
+               !call_function_stub.is_null())
+             || step_action == StepNext || step_action == StepMin) {
+    // Step next or step min.
+
+    // Fill the current function with one-shot break points.
+    FloodWithOneShot(shared);
+
+    // Remember source position and frame to handle step next.
+    thread_local_.last_statement_position_ =
+        debug_info->code()->SourceStatementPosition(frame->pc());
+    thread_local_.last_fp_ = frame->fp();
+  } else {
+    // If it's a CallFunction stub, ensure the target function is compiled and
+    // flood it with one-shot breakpoints.
+    if (!call_function_stub.is_null()) {
+      // Find out number of arguments from the stub minor key.
+      // Reverse lookup required as the minor key cannot be retrieved
+      // from the code object.
+      Handle<Object> obj(
+          Heap::code_stubs()->SlowReverseLookup(*call_function_stub));
+      ASSERT(*obj != Heap::undefined_value());
+      ASSERT(obj->IsSmi());
+      // Get the STUB key and extract major and minor key.
+      uint32_t key = Smi::cast(*obj)->value();
+      // Argc in the stub is the number of arguments passed - not the
+      // expected arguments of the called function.
+      int call_function_arg_count = CodeStub::MinorKeyFromKey(key);
+      ASSERT(call_function_stub->major_key() ==
+             CodeStub::MajorKeyFromKey(key));
+
+      // Find target function on the expression stack.
+      // Expression stack looks like this (top to bottom):
+      // argN
+      // ...
+      // arg0
+      // Receiver
+      // Function to call
+      int expressions_count = frame->ComputeExpressionsCount();
+      ASSERT(expressions_count - 2 - call_function_arg_count >= 0);
+      Object* fun = frame->GetExpression(
+          expressions_count - 2 - call_function_arg_count);
+      if (fun->IsJSFunction()) {
+        Handle<JSFunction> js_function(JSFunction::cast(fun));
+        // Don't step into builtins.
+        if (!js_function->IsBuiltin()) {
+          // It will also compile target function if it's not compiled yet.
+          FloodWithOneShot(Handle<SharedFunctionInfo>(js_function->shared()));
+        }
+      }
+    }
+
+    // Fill the current function with one-shot break points even for step in on
+    // a call target as the function called might be a native function for
+    // which step in will not stop. It also prepares for stepping in
+    // getters/setters.
+    FloodWithOneShot(shared);
+
+    if (is_load_or_store) {
+      // Remember source position and frame to handle step in getter/setter. If
+      // there is a custom getter/setter it will be handled in
+      // Object::Get/SetPropertyWithCallback, otherwise the step action will be
+      // propagated on the next Debug::Break.
+      thread_local_.last_statement_position_ =
+          debug_info->code()->SourceStatementPosition(frame->pc());
+      thread_local_.last_fp_ = frame->fp();
+    }
+
+    // Step in or Step in min
+    it.PrepareStepIn();
+    ActivateStepIn(frame);
+  }
+}
+
+
+// Check whether the current debug break should be reported to the debugger. It
+// is used to have step next and step in only report break back to the debugger
+// if on a different frame or in a different statement. In some situations
+// there will be several break points in the same statement when the code is
+// flooded with one-shot break points. This function helps to perform several
+// steps before reporting break back to the debugger.
+bool Debug::StepNextContinue(BreakLocationIterator* break_location_iterator,
+                             JavaScriptFrame* frame) {
+  // If the last step action was step next or step in make sure that a new
+  // statement is hit.
+  if (thread_local_.last_step_action_ == StepNext ||
+      thread_local_.last_step_action_ == StepIn) {
+    // Never continue if returning from function.
+    if (break_location_iterator->IsExit()) return false;
+
+    // Continue if we are still on the same frame and in the same statement.
+    int current_statement_position =
+        break_location_iterator->code()->SourceStatementPosition(frame->pc());
+    return thread_local_.last_fp_ == frame->fp() &&
+        thread_local_.last_statement_position_ == current_statement_position;
+  }
+
+  // No step next action - don't continue.
+  return false;
+}
+
+
+// Check whether the code object at the specified address is a debug break code
+// object.
+bool Debug::IsDebugBreak(Address addr) {
+  Code* code = Code::GetCodeFromTargetAddress(addr);
+  return code->ic_state() == DEBUG_BREAK;
+}
+
+
+// Check whether a code stub with the specified major key is a possible break
+// point location when looking for source break locations.
+bool Debug::IsSourceBreakStub(Code* code) {
+  CodeStub::Major major_key = code->major_key();
+  return major_key == CodeStub::CallFunction;
+}
+
+
+// Check whether a code stub with the specified major key is a possible break
+// location.
+bool Debug::IsBreakStub(Code* code) {
+  CodeStub::Major major_key = code->major_key();
+  return major_key == CodeStub::CallFunction ||
+         major_key == CodeStub::StackCheck;
+}
+
+
+// Find the builtin to use for invoking the debug break.
+Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
+  // Find the builtin debug break function matching the calling convention
+  // used by the call site.
+  if (code->is_inline_cache_stub()) {
+    if (code->is_call_stub()) {
+      return ComputeCallDebugBreak(code->arguments_count());
+    }
+    if (code->is_load_stub()) {
+      return Handle<Code>(Builtins::builtin(Builtins::LoadIC_DebugBreak));
+    }
+    if (code->is_store_stub()) {
+      return Handle<Code>(Builtins::builtin(Builtins::StoreIC_DebugBreak));
+    }
+    if (code->is_keyed_load_stub()) {
+      Handle<Code> result =
+          Handle<Code>(Builtins::builtin(Builtins::KeyedLoadIC_DebugBreak));
+      return result;
+    }
+    if (code->is_keyed_store_stub()) {
+      Handle<Code> result =
+          Handle<Code>(Builtins::builtin(Builtins::KeyedStoreIC_DebugBreak));
+      return result;
+    }
+  }
+  if (RelocInfo::IsConstructCall(mode)) {
+    Handle<Code> result =
+        Handle<Code>(Builtins::builtin(Builtins::ConstructCall_DebugBreak));
+    return result;
+  }
+  if (code->kind() == Code::STUB) {
+    ASSERT(code->major_key() == CodeStub::CallFunction ||
+           code->major_key() == CodeStub::StackCheck);
+    Handle<Code> result =
+        Handle<Code>(Builtins::builtin(Builtins::StubNoRegisters_DebugBreak));
+    return result;
+  }
+
+  UNREACHABLE();
+  return Handle<Code>::null();
+}
+
+
+// Simple function for returning the source positions for active break points.
+Handle<Object> Debug::GetSourceBreakLocations(
+    Handle<SharedFunctionInfo> shared) {
+  if (!HasDebugInfo(shared)) return Handle<Object>(Heap::undefined_value());
+  Handle<DebugInfo> debug_info = GetDebugInfo(shared);
+  if (debug_info->GetBreakPointCount() == 0) {
+    return Handle<Object>(Heap::undefined_value());
+  }
+  Handle<FixedArray> locations =
+      Factory::NewFixedArray(debug_info->GetBreakPointCount());
+  int count = 0;
+  for (int i = 0; i < debug_info->break_points()->length(); i++) {
+    if (!debug_info->break_points()->get(i)->IsUndefined()) {
+      BreakPointInfo* break_point_info =
+          BreakPointInfo::cast(debug_info->break_points()->get(i));
+      if (break_point_info->GetBreakPointCount() > 0) {
+        locations->set(count++, break_point_info->statement_position());
+      }
+    }
+  }
+  return locations;
+}
+
+
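+// Register a new break by remembering the frame where the break occurred and
+// assigning a new break id.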
+void Debug::NewBreak(StackFrame::Id break_frame_id) {
+  thread_local_.break_frame_id_ = break_frame_id;
+  thread_local_.break_id_ = ++thread_local_.break_count_;
+}
+
+
+void Debug::SetBreak(StackFrame::Id break_frame_id, int break_id) {
+  thread_local_.break_frame_id_ = break_frame_id;
+  thread_local_.break_id_ = break_id;
+}
+
+
+// Handle stepping into a function.
+void Debug::HandleStepIn(Handle<JSFunction> function,
+                         Handle<Object> holder,
+                         Address fp,
+                         bool is_constructor) {
+  // If the frame pointer is not supplied by the caller find it.
+  if (fp == 0) {
+    StackFrameIterator it;
+    it.Advance();
+    // For constructor functions skip another frame.
+    if (is_constructor) {
+      ASSERT(it.frame()->is_construct());
+      it.Advance();
+    }
+    fp = it.frame()->fp();
+  }
+
+  // Flood the function with one-shot break points if it is called from where
+  // step into was requested.
+  if (fp == Debug::step_in_fp()) {
+    // Don't allow step into functions in the native context.
+    if (!function->IsBuiltin()) {
+      if (function->shared()->code() ==
+          Builtins::builtin(Builtins::FunctionApply) ||
+          function->shared()->code() ==
+          Builtins::builtin(Builtins::FunctionCall)) {
+        // Handle function.apply and function.call separately to flood the
+        // function to be called and not the code for Builtins::FunctionApply or
+        // Builtins::FunctionCall. The receiver of call/apply is the target
+        // function.
+        if (!holder.is_null() && holder->IsJSFunction() &&
+            !JSFunction::cast(*holder)->IsBuiltin()) {
+          Handle<SharedFunctionInfo> shared_info(
+              JSFunction::cast(*holder)->shared());
+          Debug::FloodWithOneShot(shared_info);
+        }
+      } else {
+        Debug::FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared()));
+      }
+    }
+  }
+}
+
+
+void Debug::ClearStepping() {
+  // Clear the various stepping setup.
+  ClearOneShot();
+  ClearStepIn();
+  ClearStepOut();
+  ClearStepNext();
+
+  // Clear multiple step counter.
+  thread_local_.step_count_ = 0;
+}
+
+
+// Clears all the one-shot break points that are currently set. Normally this
+// function is called each time a break point is hit as one shot break points
+// are used to support stepping.
+void Debug::ClearOneShot() {
+  // The current implementation just runs through all the breakpoints. When the
+  // last break point for a function is removed that function is automatically
+  // removed from the list.
+
+  DebugInfoListNode* node = debug_info_list_;
+  while (node != NULL) {
+    BreakLocationIterator it(node->debug_info(), ALL_BREAK_LOCATIONS);
+    while (!it.Done()) {
+      it.ClearOneShot();
+      it.Next();
+    }
+    node = node->next();
+  }
+}
+
+
+void Debug::ActivateStepIn(StackFrame* frame) {
+  ASSERT(!StepOutActive());
+  thread_local_.step_into_fp_ = frame->fp();
+}
+
+
+void Debug::ClearStepIn() {
+  thread_local_.step_into_fp_ = 0;
+}
+
+
+void Debug::ActivateStepOut(StackFrame* frame) {
+  ASSERT(!StepInActive());
+  thread_local_.step_out_fp_ = frame->fp();
+}
+
+
+void Debug::ClearStepOut() {
+  thread_local_.step_out_fp_ = 0;
+}
+
+
+void Debug::ClearStepNext() {
+  thread_local_.last_step_action_ = StepNone;
+  thread_local_.last_statement_position_ = RelocInfo::kNoPosition;
+  thread_local_.last_fp_ = 0;
+}
+
+
+bool Debug::EnsureCompiled(Handle<SharedFunctionInfo> shared) {
+  if (shared->is_compiled()) return true;
+  return CompileLazyShared(shared, CLEAR_EXCEPTION, 0);
+}
+
+
+// Ensures the debug information is present for shared.
+bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared) {
+  // Return if we already have the debug info for shared.
+  if (HasDebugInfo(shared)) return true;
+
+  // Ensure shared is compiled. Return false if this failed.
+  if (!EnsureCompiled(shared)) return false;
+
+  // Create the debug info object.
+  Handle<DebugInfo> debug_info = Factory::NewDebugInfo(shared);
+
+  // Add debug info to the list.
+  DebugInfoListNode* node = new DebugInfoListNode(*debug_info);
+  node->set_next(debug_info_list_);
+  debug_info_list_ = node;
+
+  // Now there is at least one break point.
+  has_break_points_ = true;
+
+  return true;
+}
+
+
+void Debug::RemoveDebugInfo(Handle<DebugInfo> debug_info) {
+  ASSERT(debug_info_list_ != NULL);
+  // Run through the debug info objects to find this one and remove it.
+  DebugInfoListNode* prev = NULL;
+  DebugInfoListNode* current = debug_info_list_;
+  while (current != NULL) {
+    if (*current->debug_info() == *debug_info) {
+      // Unlink from list. If prev is NULL we are looking at the first element.
+      if (prev == NULL) {
+        debug_info_list_ = current->next();
+      } else {
+        prev->set_next(current->next());
+      }
+      current->debug_info()->shared()->set_debug_info(Heap::undefined_value());
+      delete current;
+
+      // If there are no more debug info objects there are no more break
+      // points.
+      has_break_points_ = debug_info_list_ != NULL;
+
+      return;
+    }
+    // Move to next in list.
+    prev = current;
+    current = current->next();
+  }
+  UNREACHABLE();
+}
+
+
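+// Compute the address at which execution should continue after the current
+// break point has been processed.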
+void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
+  HandleScope scope;
+
+  // Get the executing function in which the debug break occurred.
+  Handle<SharedFunctionInfo> shared =
+      Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
+  if (!EnsureDebugInfo(shared)) {
+    // Return if we failed to retrieve the debug info.
+    return;
+  }
+  Handle<DebugInfo> debug_info = GetDebugInfo(shared);
+  Handle<Code> code(debug_info->code());
+  Handle<Code> original_code(debug_info->original_code());
+#ifdef DEBUG
+  // Get the code which is actually executing.
+  Handle<Code> frame_code(frame->code());
+  ASSERT(frame_code.is_identical_to(code));
+#endif
+
+  // Find the call address in the running code. This address holds the call to
+  // either a DebugBreakXXX or to the debug break return entry code if the
+  // break point is still active after processing the break point.
+  Address addr = frame->pc() - Assembler::kCallTargetAddressOffset;
+
+  // Check if the location is at JS exit.
+  bool at_js_return = false;
+  bool break_at_js_return_active = false;
+  RelocIterator it(debug_info->code());
+  while (!it.done()) {
+    if (RelocInfo::IsJSReturn(it.rinfo()->rmode())) {
+      at_js_return = (it.rinfo()->pc() ==
+          addr - Assembler::kPatchReturnSequenceAddressOffset);
+      break_at_js_return_active = it.rinfo()->IsCallInstruction();
+    }
+    it.next();
+  }
+
+  // Handle the jump to continue execution after break point depending on the
+  // break location.
+  if (at_js_return) {
+    // If the break point at return is still active jump to the corresponding
+    // place in the original code. If not the break point was removed during
+    // break point processing.
+    if (break_at_js_return_active) {
+      addr += original_code->instruction_start() - code->instruction_start();
+    }
+
+    // Move back to where the call instruction sequence started.
+    thread_local_.after_break_target_ =
+        addr - Assembler::kPatchReturnSequenceAddressOffset;
+  } else {
+    // Check if there still is a debug break call at the target address. If the
+    // break point has been removed it will have disappeared. If it has
+    // disappeared don't try to look in the original code as the running code
+    // will have the right address. This takes care of the case where the last
+    // break point is removed from the function and therefore no "original code"
+    // is available. If the debug break call is still there find the address in
+    // the original code.
+    if (IsDebugBreak(Assembler::target_address_at(addr))) {
+      // If the break point is still there find the call address which was
+      // overwritten in the original code by the call to DebugBreakXXX.
+
+      // Find the corresponding address in the original code.
+      addr += original_code->instruction_start() - code->instruction_start();
+    }
+
+    // Install jump to the call address in the original code. This will be the
+    // call which was overwritten by the call to DebugBreakXXX.
+    thread_local_.after_break_target_ = Assembler::target_address_at(addr);
+  }
+}
+
+
+bool Debug::IsDebugGlobal(GlobalObject* global) {
+  return IsLoaded() && global == Debug::debug_context()->global();
+}
+
+
+void Debug::ClearMirrorCache() {
+  HandleScope scope;
+  ASSERT(Top::context() == *Debug::debug_context());
+
+  // Clear the mirror cache.
+  Handle<String> function_name =
+      Factory::LookupSymbol(CStrVector("ClearMirrorCache"));
+  Handle<Object> fun(Top::global()->GetProperty(*function_name));
+  ASSERT(fun->IsJSFunction());
+  bool caught_exception;
+  Handle<Object> js_object = Execution::TryCall(
+      Handle<JSFunction>::cast(fun),
+      Handle<JSObject>(Debug::debug_context()->global()),
+      0, NULL, &caught_exception);
+}
+
+
+// If an object given is an external string, check that the underlying
+// resource is accessible. For other kinds of objects, always return true.
+static bool IsExternalStringValid(Object* str) {
+  if (!str->IsString() || !StringShape(String::cast(str)).IsExternal()) {
+    return true;
+  }
+  if (String::cast(str)->IsAsciiRepresentation()) {
+    return ExternalAsciiString::cast(str)->resource() != NULL;
+  } else if (String::cast(str)->IsTwoByteRepresentation()) {
+    return ExternalTwoByteString::cast(str)->resource() != NULL;
+  } else {
+    return true;
+  }
+}
+
+
+void Debug::CreateScriptCache() {
+  HandleScope scope;
+
+  // Perform two GCs to get rid of all unreferenced scripts. The first GC gets
+  // rid of all the cached script wrappers and the second gets rid of the
+  // scripts which are no longer referenced.
+  Heap::CollectAllGarbage(false);
+  Heap::CollectAllGarbage(false);
+
+  ASSERT(script_cache_ == NULL);
+  script_cache_ = new ScriptCache();
+
+  // Scan heap for Script objects.
+  int count = 0;
+  HeapIterator iterator;
+  while (iterator.has_next()) {
+    HeapObject* obj = iterator.next();
+    ASSERT(obj != NULL);
+    if (obj->IsScript() && IsExternalStringValid(Script::cast(obj)->source())) {
+      script_cache_->Add(Handle<Script>(Script::cast(obj)));
+      count++;
+    }
+  }
+}
+
+
+void Debug::DestroyScriptCache() {
+  // Get rid of the script cache if it was created.
+  if (script_cache_ != NULL) {
+    delete script_cache_;
+    script_cache_ = NULL;
+  }
+}
+
+
+void Debug::AddScriptToScriptCache(Handle<Script> script) {
+  if (script_cache_ != NULL) {
+    script_cache_->Add(script);
+  }
+}
+
+
+Handle<FixedArray> Debug::GetLoadedScripts() {
+  // Create and fill the script cache when the loaded scripts are requested for
+  // the first time.
+  if (script_cache_ == NULL) {
+    CreateScriptCache();
+  }
+
+  // If the script cache is not active just return an empty array.
+  ASSERT(script_cache_ != NULL);
+  if (script_cache_ == NULL) {
+    return Factory::NewFixedArray(0);
+  }
+
+  // Perform GC to get unreferenced scripts evicted from the cache before
+  // returning the content.
+  Heap::CollectAllGarbage(false);
+
+  // Get the scripts from the cache.
+  return script_cache_->GetScripts();
+}
+
+
+void Debug::AfterGarbageCollection() {
+  // Generate events for collected scripts.
+  if (script_cache_ != NULL) {
+    script_cache_->ProcessCollectedScripts();
+  }
+}
+
+
+Mutex* Debugger::debugger_access_ = OS::CreateMutex();
+Handle<Object> Debugger::event_listener_ = Handle<Object>();
+Handle<Object> Debugger::event_listener_data_ = Handle<Object>();
+bool Debugger::compiling_natives_ = false;
+bool Debugger::is_loading_debugger_ = false;
+bool Debugger::never_unload_debugger_ = false;
+v8::Debug::MessageHandler2 Debugger::message_handler_ = NULL;
+bool Debugger::debugger_unload_pending_ = false;
+v8::Debug::HostDispatchHandler Debugger::host_dispatch_handler_ = NULL;
+int Debugger::host_dispatch_micros_ = 100 * 1000;
+DebuggerAgent* Debugger::agent_ = NULL;
+LockingCommandMessageQueue Debugger::command_queue_(kQueueInitialSize);
+Semaphore* Debugger::command_received_ = OS::CreateSemaphore(0);
+
+
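+// Create one of the debugger event objects by calling its constructor
+// function in the debugger context.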
+Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
+                                      int argc, Object*** argv,
+                                      bool* caught_exception) {
+  ASSERT(Top::context() == *Debug::debug_context());
+
+  // Create the JS object by calling the named constructor function.
+  Handle<String> constructor_str = Factory::LookupSymbol(constructor_name);
+  Handle<Object> constructor(Top::global()->GetProperty(*constructor_str));
+  ASSERT(constructor->IsJSFunction());
+  if (!constructor->IsJSFunction()) {
+    *caught_exception = true;
+    return Factory::undefined_value();
+  }
+  Handle<Object> js_object = Execution::TryCall(
+      Handle<JSFunction>::cast(constructor),
+      Handle<JSObject>(Debug::debug_context()->global()), argc, argv,
+      caught_exception);
+  return js_object;
+}
+
+
+Handle<Object> Debugger::MakeExecutionState(bool* caught_exception) {
+  // Create the execution state object.
+  Handle<Object> break_id = Factory::NewNumberFromInt(Debug::break_id());
+  const int argc = 1;
+  Object** argv[argc] = { break_id.location() };
+  return MakeJSObject(CStrVector("MakeExecutionState"),
+                      argc, argv, caught_exception);
+}
+
+
+Handle<Object> Debugger::MakeBreakEvent(Handle<Object> exec_state,
+                                        Handle<Object> break_points_hit,
+                                        bool* caught_exception) {
+  // Create the new break event object.
+  const int argc = 2;
+  Object** argv[argc] = { exec_state.location(),
+                          break_points_hit.location() };
+  return MakeJSObject(CStrVector("MakeBreakEvent"),
+                      argc,
+                      argv,
+                      caught_exception);
+}
+
+
+Handle<Object> Debugger::MakeExceptionEvent(Handle<Object> exec_state,
+                                            Handle<Object> exception,
+                                            bool uncaught,
+                                            bool* caught_exception) {
+  // Create the new exception event object.
+  const int argc = 3;
+  Object** argv[argc] = { exec_state.location(),
+                          exception.location(),
+                          uncaught ? Factory::true_value().location() :
+                                     Factory::false_value().location()};
+  return MakeJSObject(CStrVector("MakeExceptionEvent"),
+                      argc, argv, caught_exception);
+}
+
+
+Handle<Object> Debugger::MakeNewFunctionEvent(Handle<Object> function,
+                                              bool* caught_exception) {
+  // Create the new function event object.
+  const int argc = 1;
+  Object** argv[argc] = { function.location() };
+  return MakeJSObject(CStrVector("MakeNewFunctionEvent"),
+                      argc, argv, caught_exception);
+}
+
+
+Handle<Object> Debugger::MakeCompileEvent(Handle<Script> script,
+                                          bool before,
+                                          bool* caught_exception) {
+  // Create the compile event object.
+  Handle<Object> exec_state = MakeExecutionState(caught_exception);
+  Handle<Object> script_wrapper = GetScriptWrapper(script);
+  const int argc = 3;
+  Object** argv[argc] = { exec_state.location(),
+                          script_wrapper.location(),
+                          before ? Factory::true_value().location() :
+                                   Factory::false_value().location() };
+
+  return MakeJSObject(CStrVector("MakeCompileEvent"),
+                      argc,
+                      argv,
+                      caught_exception);
+}
+
+
+Handle<Object> Debugger::MakeScriptCollectedEvent(int id,
+                                                  bool* caught_exception) {
+  // Create the script collected event object.
+  Handle<Object> exec_state = MakeExecutionState(caught_exception);
+  Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id));
+  const int argc = 2;
+  Object** argv[argc] = { exec_state.location(), id_object.location() };
+
+  return MakeJSObject(CStrVector("MakeScriptCollectedEvent"),
+                      argc,
+                      argv,
+                      caught_exception);
+}
+
+
+void Debugger::OnException(Handle<Object> exception, bool uncaught) {
+  HandleScope scope;
+
+  // Bail out based on state or if there is no listener for this event
+  if (Debug::InDebugger()) return;
+  if (!Debugger::EventActive(v8::Exception)) return;
+
+  // Bail out if exception breaks are not active
+  if (uncaught) {
+    // Uncaught exceptions are reported if either flag is set.
+    if (!(Debug::break_on_uncaught_exception() ||
+          Debug::break_on_exception())) return;
+  } else {
+    // Caught exceptions are only reported if break on exceptions is activated.
+    if (!Debug::break_on_exception()) return;
+  }
+
+  // Enter the debugger.
+  EnterDebugger debugger;
+  if (debugger.FailedToEnter()) return;
+
+  // Clear all current stepping setup.
+  Debug::ClearStepping();
+  // Create the event data object.
+  bool caught_exception = false;
+  Handle<Object> exec_state = MakeExecutionState(&caught_exception);
+  Handle<Object> event_data;
+  if (!caught_exception) {
+    event_data = MakeExceptionEvent(exec_state, exception, uncaught,
+                                    &caught_exception);
+  }
+  // Bail out and don't call debugger if exception.
+  if (caught_exception) {
+    return;
+  }
+
+  // Process debug event.
+  ProcessDebugEvent(v8::Exception, Handle<JSObject>::cast(event_data), false);
+  // Return to continue execution from where the exception was thrown.
+}
+
+
+void Debugger::OnDebugBreak(Handle<Object> break_points_hit,
+                            bool auto_continue) {
+  HandleScope scope;
+
+  // Debugger has already been entered by caller.
+  ASSERT(Top::context() == *Debug::debug_context());
+
+  // Bail out if there is no listener for this event
+  if (!Debugger::EventActive(v8::Break)) return;
+
+  // Debugger must be entered in advance.
+  ASSERT(Top::context() == *Debug::debug_context());
+
+  // Create the event data object.
+  bool caught_exception = false;
+  Handle<Object> exec_state = MakeExecutionState(&caught_exception);
+  Handle<Object> event_data;
+  if (!caught_exception) {
+    event_data = MakeBreakEvent(exec_state, break_points_hit,
+                                &caught_exception);
+  }
+  // Bail out and don't call debugger if exception.
+  if (caught_exception) {
+    return;
+  }
+
+  // Process debug event.
+  ProcessDebugEvent(v8::Break,
+                    Handle<JSObject>::cast(event_data),
+                    auto_continue);
+}
+
+
+void Debugger::OnBeforeCompile(Handle<Script> script) {
+  HandleScope scope;
+
+  // Bail out based on state or if there is no listener for this event
+  if (Debug::InDebugger()) return;
+  if (compiling_natives()) return;
+  if (!EventActive(v8::BeforeCompile)) return;
+
+  // Enter the debugger.
+  EnterDebugger debugger;
+  if (debugger.FailedToEnter()) return;
+
+  // Create the event data object.
+  bool caught_exception = false;
+  Handle<Object> event_data = MakeCompileEvent(script, true, &caught_exception);
+  // Bail out and don't call debugger if exception.
+  if (caught_exception) {
+    return;
+  }
+
+  // Process debug event.
+  ProcessDebugEvent(v8::BeforeCompile,
+                    Handle<JSObject>::cast(event_data),
+                    true);
+}
+
+
+// Handle debugger actions when a new script is compiled.
+void Debugger::OnAfterCompile(Handle<Script> script, Handle<JSFunction> fun) {
+  HandleScope scope;
+
+  // Add the newly compiled script to the script cache.
+  Debug::AddScriptToScriptCache(script);
+
+  // No more to do if not debugging.
+  if (!IsDebuggerActive()) return;
+
+  // No compile events while compiling natives.
+  if (compiling_natives()) return;
+
+  // Store whether in debugger before entering debugger.
+  bool in_debugger = Debug::InDebugger();
+
+  // Enter the debugger.
+  EnterDebugger debugger;
+  if (debugger.FailedToEnter()) return;
+
+  // If debugging, there might be script break points registered for this
+  // script. Make sure that these break points are set.
+
+  // Get the function UpdateScriptBreakPoints (defined in debug-delay.js).
+  Handle<Object> update_script_break_points =
+      Handle<Object>(Debug::debug_context()->global()->GetProperty(
+          *Factory::LookupAsciiSymbol("UpdateScriptBreakPoints")));
+  if (!update_script_break_points->IsJSFunction()) {
+    return;
+  }
+  ASSERT(update_script_break_points->IsJSFunction());
+
+  // Wrap the script object in a proper JS object before passing it
+  // to JavaScript.
+  Handle<JSValue> wrapper = GetScriptWrapper(script);
+
+  // Call UpdateScriptBreakPoints, expecting no exceptions.
+  bool caught_exception = false;
+  const int argc = 1;
+  Object** argv[argc] = { reinterpret_cast<Object**>(wrapper.location()) };
+  Handle<Object> result = Execution::TryCall(
+      Handle<JSFunction>::cast(update_script_break_points),
+      Top::builtins(), argc, argv,
+      &caught_exception);
+  if (caught_exception) {
+    return;
+  }
+  // Bail out based on state or if there is no listener for this event
+  if (in_debugger) return;
+  if (!Debugger::EventActive(v8::AfterCompile)) return;
+
+  // Create the compile event object.
+  Handle<Object> event_data = MakeCompileEvent(script,
+                                               false,
+                                               &caught_exception);
+  // Bail out and don't call debugger if exception.
+  if (caught_exception) {
+    return;
+  }
+  // Process debug event.
+  ProcessDebugEvent(v8::AfterCompile,
+                    Handle<JSObject>::cast(event_data),
+                    true);
+}
+
+
+void Debugger::OnNewFunction(Handle<JSFunction> function) {
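+  // The new function event is currently not generated; bail out immediately.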
+  return;
+  HandleScope scope;
+
+  // Bail out based on state or if there is no listener for this event
+  if (Debug::InDebugger()) return;
+  if (compiling_natives()) return;
+  if (!Debugger::EventActive(v8::NewFunction)) return;
+
+  // Enter the debugger.
+  EnterDebugger debugger;
+  if (debugger.FailedToEnter()) return;
+
+  // Create the event object.
+  bool caught_exception = false;
+  Handle<Object> event_data = MakeNewFunctionEvent(function, &caught_exception);
+  // Bail out and don't call debugger if exception.
+  if (caught_exception) {
+    return;
+  }
+  // Process debug event.
+  ProcessDebugEvent(v8::NewFunction, Handle<JSObject>::cast(event_data), true);
+}
+
+
+void Debugger::OnScriptCollected(int id) {
+  HandleScope scope;
+
+  // No more to do if not debugging.
+  if (!IsDebuggerActive()) return;
+  if (!Debugger::EventActive(v8::ScriptCollected)) return;
+
+  // Enter the debugger.
+  EnterDebugger debugger;
+  if (debugger.FailedToEnter()) return;
+
+  // Create the script collected event object.
+  bool caught_exception = false;
+  Handle<Object> event_data = MakeScriptCollectedEvent(id,
+                                                       &caught_exception);
+  // Bail out and don't call debugger if exception.
+  if (caught_exception) {
+    return;
+  }
+
+  // Process debug event.
+  ProcessDebugEvent(v8::ScriptCollected,
+                    Handle<JSObject>::cast(event_data),
+                    true);
+}
+
+
+void Debugger::ProcessDebugEvent(v8::DebugEvent event,
+                                 Handle<JSObject> event_data,
+                                 bool auto_continue) {
+  HandleScope scope;
+
+  // Clear any pending debug break if this is a real break.
+  if (!auto_continue) {
+    Debug::clear_interrupt_pending(DEBUGBREAK);
+  }
+
+  // Create the execution state.
+  bool caught_exception = false;
+  Handle<Object> exec_state = MakeExecutionState(&caught_exception);
+  if (caught_exception) {
+    return;
+  }
+  // First notify the message handler if any.
+  if (message_handler_ != NULL) {
+    NotifyMessageHandler(event,
+                         Handle<JSObject>::cast(exec_state),
+                         event_data,
+                         auto_continue);
+  }
+  // Notify registered debug event listener. This can be either a C or a
+  // JavaScript function.
+  if (!event_listener_.is_null()) {
+    if (event_listener_->IsProxy()) {
+      // C debug event listener.
+      Handle<Proxy> callback_obj(Handle<Proxy>::cast(event_listener_));
+      v8::Debug::EventCallback callback =
+            FUNCTION_CAST<v8::Debug::EventCallback>(callback_obj->proxy());
+      callback(event,
+               v8::Utils::ToLocal(Handle<JSObject>::cast(exec_state)),
+               v8::Utils::ToLocal(event_data),
+               v8::Utils::ToLocal(Handle<Object>::cast(event_listener_data_)));
+    } else {
+      // JavaScript debug event listener.
+      ASSERT(event_listener_->IsJSFunction());
+      Handle<JSFunction> fun(Handle<JSFunction>::cast(event_listener_));
+
+      // Invoke the JavaScript debug event listener.
+      const int argc = 4;
+      Object** argv[argc] = { Handle<Object>(Smi::FromInt(event)).location(),
+                              exec_state.location(),
+                              Handle<Object>::cast(event_data).location(),
+                              event_listener_data_.location() };
+      Handle<Object> result = Execution::TryCall(fun, Top::global(),
+                                                 argc, argv, &caught_exception);
+      // Silently ignore exceptions from debug event listeners.
+    }
+  }
+}
+
+
+void Debugger::UnloadDebugger() {
+  // Make sure that there are no breakpoints left.
+  Debug::ClearAllBreakPoints();
+
+  // Unload the debugger if feasible.
+  if (!never_unload_debugger_) {
+    Debug::Unload();
+  }
+
+  // Clear the flag indicating that the debugger should be unloaded.
+  debugger_unload_pending_ = false;
+}
+
+
+void Debugger::NotifyMessageHandler(v8::DebugEvent event,
+                                    Handle<JSObject> exec_state,
+                                    Handle<JSObject> event_data,
+                                    bool auto_continue) {
+  HandleScope scope;
+
+  if (!Debug::Load()) return;
+
+  // Process the individual events.
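+  // Decide whether an event message should be sent to the message handler.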
+  bool sendEventMessage = false;
+  switch (event) {
+    case v8::Break:
+      sendEventMessage = !auto_continue;
+      break;
+    case v8::Exception:
+      sendEventMessage = true;
+      break;
+    case v8::BeforeCompile:
+      break;
+    case v8::AfterCompile:
+      sendEventMessage = true;
+      break;
+    case v8::ScriptCollected:
+      sendEventMessage = true;
+      break;
+    case v8::NewFunction:
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  // The debug command interrupt flag might have been set when the command was
+  // added. It should be enough to clear the flag only once while we are in the
+  // debugger.
+  ASSERT(Debug::InDebugger());
+  StackGuard::Continue(DEBUGCOMMAND);
+
+  // Notify the debugger that a debug event has occurred unless auto continue
+  // is active, in which case no event is sent.
+  if (sendEventMessage) {
+    MessageImpl message = MessageImpl::NewEvent(
+        event,
+        auto_continue,
+        Handle<JSObject>::cast(exec_state),
+        Handle<JSObject>::cast(event_data));
+    InvokeMessageHandler(message);
+  }
+
+  // If auto continue is active, don't make the event cause a break, but
+  // process messages in the queue if any. For script collected events don't
+  // even process messages in the queue as the execution state might not be
+  // what is expected by the client.
+  if ((auto_continue && !HasCommands()) || event == v8::ScriptCollected) {
+    return;
+  }
+
+  // Get the DebugCommandProcessor.
+  v8::Local<v8::Object> api_exec_state =
+      v8::Utils::ToLocal(Handle<JSObject>::cast(exec_state));
+  v8::Local<v8::String> fun_name =
+      v8::String::New("debugCommandProcessor");
+  v8::Local<v8::Function> fun =
+      v8::Function::Cast(*api_exec_state->Get(fun_name));
+  v8::TryCatch try_catch;
+  v8::Local<v8::Object> cmd_processor =
+      v8::Object::Cast(*fun->Call(api_exec_state, 0, NULL));
+  if (try_catch.HasCaught()) {
+    PrintLn(try_catch.Exception());
+    return;
+  }
+
+  // Process requests from the debugger.
+  while (true) {
+    // Wait for new command in the queue.
+    if (Debugger::host_dispatch_handler_) {
+      // In case there is a host dispatch - do periodic dispatches.
+      if (!command_received_->Wait(host_dispatch_micros_)) {
+        // Timeout expired, do the dispatch.
+        Debugger::host_dispatch_handler_();
+        continue;
+      }
+    } else {
+      // In case there is no host dispatch - just wait.
+      command_received_->Wait();
+    }
+
+    // Get the command from the queue.
+    CommandMessage command = command_queue_.Get();
+    Logger::DebugTag("Got request from command queue, in interactive loop.");
+    if (!Debugger::IsDebuggerActive()) {
+      // Delete command text and user data.
+      command.Dispose();
+      return;
+    }
+
+    // Invoke JavaScript to process the debug request.
+    v8::Local<v8::String> fun_name;
+    v8::Local<v8::Function> fun;
+    v8::Local<v8::Value> request;
+    v8::TryCatch try_catch;
+    fun_name = v8::String::New("processDebugRequest");
+    fun = v8::Function::Cast(*cmd_processor->Get(fun_name));
+
+    request = v8::String::New(command.text().start(),
+                              command.text().length());
+    static const int kArgc = 1;
+    v8::Handle<Value> argv[kArgc] = { request };
+    v8::Local<v8::Value> response_val = fun->Call(cmd_processor, kArgc, argv);
+
+    // Get the response.
+    v8::Local<v8::String> response;
+    bool running = false;
+    if (!try_catch.HasCaught()) {
+      // Get response string.
+      if (!response_val->IsUndefined()) {
+        response = v8::String::Cast(*response_val);
+      } else {
+        response = v8::String::New("");
+      }
+
+      // Log the JSON request/response.
+      if (FLAG_trace_debug_json) {
+        PrintLn(request);
+        PrintLn(response);
+      }
+
+      // Get the running state.
+      fun_name = v8::String::New("isRunning");
+      fun = v8::Function::Cast(*cmd_processor->Get(fun_name));
+      static const int kArgc = 1;
+      v8::Handle<Value> argv[kArgc] = { response };
+      v8::Local<v8::Value> running_val = fun->Call(cmd_processor, kArgc, argv);
+      if (!try_catch.HasCaught()) {
+        running = running_val->ToBoolean()->Value();
+      }
+    } else {
+      // In case of failure the result text is the exception text.
+      response = try_catch.Exception()->ToString();
+    }
+
+    // Return the result.
+    MessageImpl message = MessageImpl::NewResponse(
+        event,
+        running,
+        Handle<JSObject>::cast(exec_state),
+        Handle<JSObject>::cast(event_data),
+        Handle<String>(Utils::OpenHandle(*response)),
+        command.client_data());
+    InvokeMessageHandler(message);
+    command.Dispose();
+
+    // Return from debug event processing if either the VM is put into the
+    // running state (through a continue command) or auto continue is active
+    // and there are no more commands queued.
+    if (running || (auto_continue && !HasCommands())) {
+      return;
+    }
+  }
+}
+
+
+void Debugger::SetEventListener(Handle<Object> callback,
+                                Handle<Object> data) {
+  HandleScope scope;
+
+  // Clear the global handles for the event listener and the event listener data
+  // object.
+  if (!event_listener_.is_null()) {
+    GlobalHandles::Destroy(
+        reinterpret_cast<Object**>(event_listener_.location()));
+    event_listener_ = Handle<Object>();
+  }
+  if (!event_listener_data_.is_null()) {
+    GlobalHandles::Destroy(
+        reinterpret_cast<Object**>(event_listener_data_.location()));
+    event_listener_data_ = Handle<Object>();
+  }
+
+  // If there is a new debug event listener, register it together with its
+  // data object.
+  if (!callback->IsUndefined() && !callback->IsNull()) {
+    event_listener_ = Handle<Object>::cast(GlobalHandles::Create(*callback));
+    if (data.is_null()) {
+      data = Factory::undefined_value();
+    }
+    event_listener_data_ = Handle<Object>::cast(GlobalHandles::Create(*data));
+  }
+
+  ListenersChanged();
+}
+
+
+void Debugger::SetMessageHandler(v8::Debug::MessageHandler2 handler) {
+  ScopedLock with(debugger_access_);
+
+  message_handler_ = handler;
+  ListenersChanged();
+  if (handler == NULL) {
+    // Send an empty command to the debugger if in a break to make JavaScript
+    // run again if the debugger is closed.
+    if (Debug::InDebugger()) {
+      ProcessCommand(Vector<const uint16_t>::empty());
+    }
+  }
+}
+
+
+void Debugger::ListenersChanged() {
+  if (IsDebuggerActive()) {
+    // Disable the compilation cache when the debugger is active.
+    CompilationCache::Disable();
+  } else {
+    CompilationCache::Enable();
+
+    // Unload the debugger if event listener and message handler cleared.
+    if (Debug::InDebugger()) {
+      // If we are in the debugger, set the flag to unload the debugger when
+      // the last EnterDebugger on the current stack is destroyed.
+      debugger_unload_pending_ = true;
+    } else {
+      UnloadDebugger();
+    }
+  }
+}
+
+
+void Debugger::SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
+                                      int period) {
+  host_dispatch_handler_ = handler;
+  host_dispatch_micros_ = period * 1000;
+}
+
+
+// Calls the registered debug message handler. This callback is part of the
+// public API.
+void Debugger::InvokeMessageHandler(MessageImpl message) {
+  ScopedLock with(debugger_access_);
+
+  if (message_handler_ != NULL) {
+    message_handler_(message);
+  }
+}
+
+
+// Puts a command coming from the public API on the queue.  Creates
+// a copy of the command string managed by the debugger.  Up to this
+// point, the command data was managed by the API client.  Called
+// by the API client thread.
+void Debugger::ProcessCommand(Vector<const uint16_t> command,
+                              v8::Debug::ClientData* client_data) {
+  // Need to cast away const.
+  CommandMessage message = CommandMessage::New(
+      Vector<uint16_t>(const_cast<uint16_t*>(command.start()),
+                       command.length()),
+      client_data);
+  Logger::DebugTag("Put command on command_queue.");
+  command_queue_.Put(message);
+  command_received_->Signal();
+
+  // Set the debug command break flag to have the command processed.
+  if (!Debug::InDebugger()) {
+    StackGuard::DebugCommand();
+  }
+}
+
+
+bool Debugger::HasCommands() {
+  return !command_queue_.IsEmpty();
+}
+
+
+bool Debugger::IsDebuggerActive() {
+  ScopedLock with(debugger_access_);
+
+  return message_handler_ != NULL || !event_listener_.is_null();
+}
+
+
+Handle<Object> Debugger::Call(Handle<JSFunction> fun,
+                              Handle<Object> data,
+                              bool* pending_exception) {
+  // When calling functions in the debugger prevent it from being unloaded.
+  Debugger::never_unload_debugger_ = true;
+
+  // Enter the debugger.
+  EnterDebugger debugger;
+  if (debugger.FailedToEnter() || !debugger.HasJavaScriptFrames()) {
+    return Factory::undefined_value();
+  }
+
+  // Create the execution state.
+  bool caught_exception = false;
+  Handle<Object> exec_state = MakeExecutionState(&caught_exception);
+  if (caught_exception) {
+    return Factory::undefined_value();
+  }
+
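+  // Invoke the function with the execution state and the supplied data.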
+  static const int kArgc = 2;
+  Object** argv[kArgc] = { exec_state.location(), data.location() };
+  Handle<Object> result = Execution::Call(fun, Factory::undefined_value(),
+                                          kArgc, argv, pending_exception);
+  return result;
+}
+
+
+bool Debugger::StartAgent(const char* name, int port) {
+  if (Socket::Setup()) {
+    agent_ = new DebuggerAgent(name, port);
+    agent_->Start();
+    return true;
+  }
+
+  return false;
+}
+
+
+void Debugger::StopAgent() {
+  if (agent_ != NULL) {
+    agent_->Shutdown();
+    agent_->Join();
+    delete agent_;
+    agent_ = NULL;
+  }
+}
+
+
+void Debugger::WaitForAgent() {
+  if (agent_ != NULL)
+    agent_->WaitUntilListening();
+}
+
+MessageImpl MessageImpl::NewEvent(DebugEvent event,
+                                  bool running,
+                                  Handle<JSObject> exec_state,
+                                  Handle<JSObject> event_data) {
+  MessageImpl message(true, event, running,
+                      exec_state, event_data, Handle<String>(), NULL);
+  return message;
+}
+
+
+MessageImpl MessageImpl::NewResponse(DebugEvent event,
+                                     bool running,
+                                     Handle<JSObject> exec_state,
+                                     Handle<JSObject> event_data,
+                                     Handle<String> response_json,
+                                     v8::Debug::ClientData* client_data) {
+  MessageImpl message(false, event, running,
+                      exec_state, event_data, response_json, client_data);
+  return message;
+}
+
+
+MessageImpl::MessageImpl(bool is_event,
+                         DebugEvent event,
+                         bool running,
+                         Handle<JSObject> exec_state,
+                         Handle<JSObject> event_data,
+                         Handle<String> response_json,
+                         v8::Debug::ClientData* client_data)
+    : is_event_(is_event),
+      event_(event),
+      running_(running),
+      exec_state_(exec_state),
+      event_data_(event_data),
+      response_json_(response_json),
+      client_data_(client_data) {}
+
+
+bool MessageImpl::IsEvent() const {
+  return is_event_;
+}
+
+
+bool MessageImpl::IsResponse() const {
+  return !is_event_;
+}
+
+
+DebugEvent MessageImpl::GetEvent() const {
+  return event_;
+}
+
+
+bool MessageImpl::WillStartRunning() const {
+  return running_;
+}
+
+
+v8::Handle<v8::Object> MessageImpl::GetExecutionState() const {
+  return v8::Utils::ToLocal(exec_state_);
+}
+
+
+v8::Handle<v8::Object> MessageImpl::GetEventData() const {
+  return v8::Utils::ToLocal(event_data_);
+}
+
+
+v8::Handle<v8::String> MessageImpl::GetJSON() const {
+  v8::HandleScope scope;
+
+  if (IsEvent()) {
+    // Call toJSONProtocol on the debug event object.
+    Handle<Object> fun = GetProperty(event_data_, "toJSONProtocol");
+    if (!fun->IsJSFunction()) {
+      return v8::Handle<v8::String>();
+    }
+    bool caught_exception;
+    Handle<Object> json = Execution::TryCall(Handle<JSFunction>::cast(fun),
+                                             event_data_,
+                                             0, NULL, &caught_exception);
+    if (caught_exception || !json->IsString()) {
+      return v8::Handle<v8::String>();
+    }
+    return scope.Close(v8::Utils::ToLocal(Handle<String>::cast(json)));
+  } else {
+    return v8::Utils::ToLocal(response_json_);
+  }
+}
+
+
+v8::Handle<v8::Context> MessageImpl::GetEventContext() const {
+  Handle<Context> context = Debug::debugger_entry()->GetContext();
+  // Top::context() may have been NULL when "script collected" event occurred.
+  if (*context == NULL) {
+    ASSERT(event_ == v8::ScriptCollected);
+    return v8::Local<v8::Context>();
+  }
+  Handle<Context> global_context(context->global_context());
+  return v8::Utils::ToLocal(global_context);
+}
+
+
+v8::Debug::ClientData* MessageImpl::GetClientData() const {
+  return client_data_;
+}
+
+
+CommandMessage::CommandMessage() : text_(Vector<uint16_t>::empty()),
+                                   client_data_(NULL) {
+}
+
+
+CommandMessage::CommandMessage(const Vector<uint16_t>& text,
+                               v8::Debug::ClientData* data)
+    : text_(text),
+      client_data_(data) {
+}
+
+
+CommandMessage::~CommandMessage() {
+}
+
+
+void CommandMessage::Dispose() {
+  text_.Dispose();
+  delete client_data_;
+  client_data_ = NULL;
+}
+
+
+CommandMessage CommandMessage::New(const Vector<uint16_t>& command,
+                                   v8::Debug::ClientData* data) {
+  return CommandMessage(command.Clone(), data);
+}
+
+
+CommandMessageQueue::CommandMessageQueue(int size) : start_(0), end_(0),
+                                                     size_(size) {
+  messages_ = NewArray<CommandMessage>(size);
+}
+
+
+CommandMessageQueue::~CommandMessageQueue() {
+  while (!IsEmpty()) {
+    CommandMessage m = Get();
+    m.Dispose();
+  }
+  DeleteArray(messages_);
+}
+
+
+CommandMessage CommandMessageQueue::Get() {
+  ASSERT(!IsEmpty());
+  int result = start_;
+  start_ = (start_ + 1) % size_;
+  return messages_[result];
+}
+
+
+void CommandMessageQueue::Put(const CommandMessage& message) {
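+  // Grow the circular buffer when it is full (one slot is always kept free).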
+  if ((end_ + 1) % size_ == start_) {
+    Expand();
+  }
+  messages_[end_] = message;
+  end_ = (end_ + 1) % size_;
+}
+
+
+void CommandMessageQueue::Expand() {
+  CommandMessageQueue new_queue(size_ * 2);
+  while (!IsEmpty()) {
+    new_queue.Put(Get());
+  }
+  CommandMessage* array_to_free = messages_;
+  *this = new_queue;
+  new_queue.messages_ = array_to_free;
+  // Make the new_queue empty so that it doesn't call Dispose on any messages.
+  new_queue.start_ = new_queue.end_;
+  // Automatic destructor called on new_queue, freeing array_to_free.
+}
+
+
+LockingCommandMessageQueue::LockingCommandMessageQueue(int size)
+    : queue_(size) {
+  lock_ = OS::CreateMutex();
+}
+
+
+LockingCommandMessageQueue::~LockingCommandMessageQueue() {
+  delete lock_;
+}
+
+
+bool LockingCommandMessageQueue::IsEmpty() const {
+  ScopedLock sl(lock_);
+  return queue_.IsEmpty();
+}
+
+
+CommandMessage LockingCommandMessageQueue::Get() {
+  ScopedLock sl(lock_);
+  CommandMessage result = queue_.Get();
+  Logger::DebugEvent("Get", result.text());
+  return result;
+}
+
+
+void LockingCommandMessageQueue::Put(const CommandMessage& message) {
+  ScopedLock sl(lock_);
+  queue_.Put(message);
+  Logger::DebugEvent("Put", message.text());
+}
+
+
+void LockingCommandMessageQueue::Clear() {
+  ScopedLock sl(lock_);
+  queue_.Clear();
+}
+
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
+} }  // namespace v8::internal
diff --git a/src/debug.h b/src/debug.h
new file mode 100644
index 0000000..29c2bc2
--- /dev/null
+++ b/src/debug.h
@@ -0,0 +1,869 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_DEBUG_H_
+#define V8_DEBUG_H_
+
+#include "assembler.h"
+#include "code-stubs.h"
+#include "debug-agent.h"
+#include "execution.h"
+#include "factory.h"
+#include "hashmap.h"
+#include "platform.h"
+#include "string-stream.h"
+#include "v8threads.h"
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+#include "../include/v8-debug.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Forward declarations.
+class EnterDebugger;
+
+
+// Step actions. NOTE: These values are in macros.py as well.
+enum StepAction {
+  StepNone = -1,  // Stepping not prepared.
+  StepOut = 0,   // Step out of the current function.
+  StepNext = 1,  // Step to the next statement in the current function.
+  StepIn = 2,    // Step into new functions invoked or the next statement
+                 // in the current function.
+  StepMin = 3,   // Perform a minimum step in the current function.
+  StepInMin = 4  // Step into new functions invoked or perform a minimum step
+                 // in the current function.
+};
+
+
+// Type of exception break. NOTE: These values are in macros.py as well.
+enum ExceptionBreakType {
+  BreakException = 0,
+  BreakUncaughtException = 1
+};
+
+
+// Type of break location searched by the BreakLocationIterator.
+enum BreakLocatorType {
+  ALL_BREAK_LOCATIONS = 0,
+  SOURCE_BREAK_LOCATIONS = 1
+};
+
+
+// Class for iterating through the break points in a function and changing
+// them.
+class BreakLocationIterator {
+ public:
+  explicit BreakLocationIterator(Handle<DebugInfo> debug_info,
+                                 BreakLocatorType type);
+  virtual ~BreakLocationIterator();
+
+  void Next();
+  void Next(int count);
+  void FindBreakLocationFromAddress(Address pc);
+  void FindBreakLocationFromPosition(int position);
+  void Reset();
+  bool Done() const;
+  void SetBreakPoint(Handle<Object> break_point_object);
+  void ClearBreakPoint(Handle<Object> break_point_object);
+  void SetOneShot();
+  void ClearOneShot();
+  void PrepareStepIn();
+  bool IsExit() const;
+  bool HasBreakPoint();
+  bool IsDebugBreak();
+  Object* BreakPointObjects();
+  void ClearAllDebugBreak();
+
+
+  inline int code_position() { return pc() - debug_info_->code()->entry(); }
+  inline int break_point() { return break_point_; }
+  inline int position() { return position_; }
+  inline int statement_position() { return statement_position_; }
+  inline Address pc() { return reloc_iterator_->rinfo()->pc(); }
+  inline Code* code() { return debug_info_->code(); }
+  inline RelocInfo* rinfo() { return reloc_iterator_->rinfo(); }
+  inline RelocInfo::Mode rmode() const {
+    return reloc_iterator_->rinfo()->rmode();
+  }
+  inline RelocInfo* original_rinfo() {
+    return reloc_iterator_original_->rinfo();
+  }
+  inline RelocInfo::Mode original_rmode() const {
+    return reloc_iterator_original_->rinfo()->rmode();
+  }
+
+  bool IsDebuggerStatement();
+
+ protected:
+  bool RinfoDone() const;
+  void RinfoNext();
+
+  BreakLocatorType type_;
+  int break_point_;
+  int position_;
+  int statement_position_;
+  Handle<DebugInfo> debug_info_;
+  Handle<Code> debug_break_stub_;
+  RelocIterator* reloc_iterator_;
+  RelocIterator* reloc_iterator_original_;
+
+ private:
+  void SetDebugBreak();
+  void ClearDebugBreak();
+
+  void SetDebugBreakAtIC();
+  void ClearDebugBreakAtIC();
+
+  bool IsDebugBreakAtReturn();
+  void SetDebugBreakAtReturn();
+  void ClearDebugBreakAtReturn();
+
+  DISALLOW_COPY_AND_ASSIGN(BreakLocationIterator);
+};
+
+
+// Cache of all script objects in the heap. When a script is added, a weak
+// handle to it is created and that weak handle is stored in the cache. The
+// weak handle callback takes care of removing the script from the cache. The
+// key used in the cache is the script id.
+class ScriptCache : private HashMap {
+ public:
+  ScriptCache() : HashMap(ScriptMatch), collected_scripts_(10) {}
+  virtual ~ScriptCache() { Clear(); }
+
+  // Add script to the cache.
+  void Add(Handle<Script> script);
+
+  // Return the scripts in the cache.
+  Handle<FixedArray> GetScripts();
+
+  // Generate debugger events for collected scripts.
+  void ProcessCollectedScripts();
+
+ private:
+  // Calculate the hash value from the key (script id).
+  static uint32_t Hash(int key) { return ComputeIntegerHash(key); }
+
+  // Scripts match if their keys (script id) match.
+  static bool ScriptMatch(void* key1, void* key2) { return key1 == key2; }
+
+  // Clear the cache releasing all the weak handles.
+  void Clear();
+
+  // Weak handle callback for scripts in the cache.
+  static void HandleWeakScript(v8::Persistent<v8::Value> obj, void* data);
+
+  // List used during GC to temporarily store ids of collected scripts.
+  List<int> collected_scripts_;
+};
+
+
+// Linked list holding debug info objects. The debug info objects are kept as
+// weak handles to avoid a debug info object keeping a function alive.
+class DebugInfoListNode {
+ public:
+  explicit DebugInfoListNode(DebugInfo* debug_info);
+  virtual ~DebugInfoListNode();
+
+  DebugInfoListNode* next() { return next_; }
+  void set_next(DebugInfoListNode* next) { next_ = next; }
+  Handle<DebugInfo> debug_info() { return debug_info_; }
+
+ private:
+  // Global (weak) handle to the debug info object.
+  Handle<DebugInfo> debug_info_;
+
+  // Next pointer for linked list.
+  DebugInfoListNode* next_;
+};
+
+
+// This class contains the debugger support. The main purpose is to handle
+// setting break points in the code.
+//
+// This class controls the debug info for all functions which currently have
+// active breakpoints in them. This debug info is held in the heap root object
+// debug_info which is a FixedArray. Each entry in this list is of class
+// DebugInfo.
+class Debug {
+ public:
+  static void Setup(bool create_heap_objects);
+  static bool Load();
+  static void Unload();
+  static bool IsLoaded() { return !debug_context_.is_null(); }
+  static bool InDebugger() { return thread_local_.debugger_entry_ != NULL; }
+  static void PreemptionWhileInDebugger();
+  static void Iterate(ObjectVisitor* v);
+
+  static Object* Break(Arguments args);
+  static void SetBreakPoint(Handle<SharedFunctionInfo> shared,
+                            int source_position,
+                            Handle<Object> break_point_object);
+  static void ClearBreakPoint(Handle<Object> break_point_object);
+  static void ClearAllBreakPoints();
+  static void FloodWithOneShot(Handle<SharedFunctionInfo> shared);
+  static void FloodHandlerWithOneShot();
+  static void ChangeBreakOnException(ExceptionBreakType type, bool enable);
+  static void PrepareStep(StepAction step_action, int step_count);
+  static void ClearStepping();
+  static bool StepNextContinue(BreakLocationIterator* break_location_iterator,
+                               JavaScriptFrame* frame);
+  static Handle<DebugInfo> GetDebugInfo(Handle<SharedFunctionInfo> shared);
+  static bool HasDebugInfo(Handle<SharedFunctionInfo> shared);
+
+  // Returns whether the operation succeeded.
+  static bool EnsureDebugInfo(Handle<SharedFunctionInfo> shared);
+
+  // Returns true if the current stub call is patched to call the debugger.
+  static bool IsDebugBreak(Address addr);
+  // Returns true if the current return statement has been patched to be
+  // a debugger breakpoint.
+  static bool IsDebugBreakAtReturn(RelocInfo* rinfo);
+
+  // Check whether a code stub with the specified major key is a possible break
+  // point location.
+  static bool IsSourceBreakStub(Code* code);
+  static bool IsBreakStub(Code* code);
+
+  // Find the builtin to use for invoking the debug break
+  static Handle<Code> FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode);
+
+  static Handle<Object> GetSourceBreakLocations(
+      Handle<SharedFunctionInfo> shared);
+
+  // Getter for the debug_context.
+  inline static Handle<Context> debug_context() { return debug_context_; }
+
+  // Check whether a global object is the debug global object.
+  static bool IsDebugGlobal(GlobalObject* global);
+
+  // Fast check to see if any break points are active.
+  inline static bool has_break_points() { return has_break_points_; }
+
+  static void NewBreak(StackFrame::Id break_frame_id);
+  static void SetBreak(StackFrame::Id break_frame_id, int break_id);
+  static StackFrame::Id break_frame_id() {
+    return thread_local_.break_frame_id_;
+  }
+  static int break_id() { return thread_local_.break_id_; }
+
+  static bool StepInActive() { return thread_local_.step_into_fp_ != 0; }
+  static void HandleStepIn(Handle<JSFunction> function,
+                           Handle<Object> holder,
+                           Address fp,
+                           bool is_constructor);
+  static Address step_in_fp() { return thread_local_.step_into_fp_; }
+  static Address* step_in_fp_addr() { return &thread_local_.step_into_fp_; }
+
+  static bool StepOutActive() { return thread_local_.step_out_fp_ != 0; }
+  static Address step_out_fp() { return thread_local_.step_out_fp_; }
+
+  static EnterDebugger* debugger_entry() {
+    return thread_local_.debugger_entry_;
+  }
+  static void set_debugger_entry(EnterDebugger* entry) {
+    thread_local_.debugger_entry_ = entry;
+  }
+
+  // Check whether any of the specified interrupts are pending.
+  static bool is_interrupt_pending(InterruptFlag what) {
+    return (thread_local_.pending_interrupts_ & what) != 0;
+  }
+
+  // Set specified interrupts as pending.
+  static void set_interrupts_pending(InterruptFlag what) {
+    thread_local_.pending_interrupts_ |= what;
+  }
+
+  // Clear specified interrupts from pending.
+  static void clear_interrupt_pending(InterruptFlag what) {
+    thread_local_.pending_interrupts_ &= ~static_cast<int>(what);
+  }
+
+  // Getter and setter for the disable break state.
+  static bool disable_break() { return disable_break_; }
+  static void set_disable_break(bool disable_break) {
+    disable_break_ = disable_break;
+  }
+
+  // Getters for the current exception break state.
+  static bool break_on_exception() { return break_on_exception_; }
+  static bool break_on_uncaught_exception() {
+    return break_on_uncaught_exception_;
+  }
+
+  enum AddressId {
+    k_after_break_target_address,
+    k_debug_break_return_address,
+    k_register_address
+  };
+
+  // Support for setting the address to jump to when returning from break point.
+  static Address* after_break_target_address() {
+    return reinterpret_cast<Address*>(&thread_local_.after_break_target_);
+  }
+
+  // Support for saving/restoring registers when handling debug break calls.
+  static Object** register_address(int r) {
+    return &registers_[r];
+  }
+
+  // Access to the debug break on return code.
+  static Code* debug_break_return() { return debug_break_return_; }
+  static Code** debug_break_return_address() {
+    return &debug_break_return_;
+  }
+
+  static const int kEstimatedNofDebugInfoEntries = 16;
+  static const int kEstimatedNofBreakPointsInFunction = 16;
+
+  static void HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data);
+
+  friend class Debugger;
+  friend Handle<FixedArray> GetDebuggedFunctions();  // In test-debug.cc
+  friend void CheckDebuggerUnloaded(bool check_functions);  // In test-debug.cc
+
+  // Threading support.
+  static char* ArchiveDebug(char* to);
+  static char* RestoreDebug(char* from);
+  static int ArchiveSpacePerThread();
+  static void FreeThreadResources() { }
+
+  // Mirror cache handling.
+  static void ClearMirrorCache();
+
+  // Script cache handling.
+  static void CreateScriptCache();
+  static void DestroyScriptCache();
+  static void AddScriptToScriptCache(Handle<Script> script);
+  static Handle<FixedArray> GetLoadedScripts();
+
+  // Garbage collection notifications.
+  static void AfterGarbageCollection();
+
+  // Code generation assumptions.
+  static const int kIa32CallInstructionLength = 5;
+  static const int kIa32JSReturnSequenceLength = 6;
+
+  // The x64 JS return sequence is padded with int3 to make it large
+  // enough to hold a call instruction when the debugger patches it.
+  static const int kX64CallInstructionLength = 13;
+  static const int kX64JSReturnSequenceLength = 13;
+
+  // Code generator routines.
+  static void GenerateLoadICDebugBreak(MacroAssembler* masm);
+  static void GenerateStoreICDebugBreak(MacroAssembler* masm);
+  static void GenerateKeyedLoadICDebugBreak(MacroAssembler* masm);
+  static void GenerateKeyedStoreICDebugBreak(MacroAssembler* masm);
+  static void GenerateConstructCallDebugBreak(MacroAssembler* masm);
+  static void GenerateReturnDebugBreak(MacroAssembler* masm);
+  static void GenerateStubNoRegistersDebugBreak(MacroAssembler* masm);
+
+  // Called from stub-cache.cc.
+  static void GenerateCallICDebugBreak(MacroAssembler* masm);
+
+ private:
+  static bool CompileDebuggerScript(int index);
+  static void ClearOneShot();
+  static void ActivateStepIn(StackFrame* frame);
+  static void ClearStepIn();
+  static void ActivateStepOut(StackFrame* frame);
+  static void ClearStepOut();
+  static void ClearStepNext();
+  // Returns whether the compile succeeded.
+  static bool EnsureCompiled(Handle<SharedFunctionInfo> shared);
+  static void RemoveDebugInfo(Handle<DebugInfo> debug_info);
+  static void SetAfterBreakTarget(JavaScriptFrame* frame);
+  static Handle<Object> CheckBreakPoints(Handle<Object> break_point);
+  static bool CheckBreakPoint(Handle<Object> break_point_object);
+
+  // Global handle to debug context where all the debugger JavaScript code is
+  // loaded.
+  static Handle<Context> debug_context_;
+
+  // Boolean state indicating whether any break points are set.
+  static bool has_break_points_;
+
+  // Cache of all scripts in the heap.
+  static ScriptCache* script_cache_;
+
+  // List of active debug info objects.
+  static DebugInfoListNode* debug_info_list_;
+
+  static bool disable_break_;
+  static bool break_on_exception_;
+  static bool break_on_uncaught_exception_;
+
+  // Per-thread data.
+  class ThreadLocal {
+   public:
+    // Counter for generating next break id.
+    int break_count_;
+
+    // Current break id.
+    int break_id_;
+
+    // Frame id for the frame of the current break.
+    StackFrame::Id break_frame_id_;
+
+    // Step action for last step performed.
+    StepAction last_step_action_;
+
+    // Source statement position from last step next action.
+    int last_statement_position_;
+
+    // Number of steps left to perform before debug event.
+    int step_count_;
+
+    // Frame pointer from last step next action.
+    Address last_fp_;
+
+    // Frame pointer for frame from which step in was performed.
+    Address step_into_fp_;
+
+    // Frame pointer for the frame where debugger should be called when current
+    // step out action is completed.
+    Address step_out_fp_;
+
+    // Storage location for jump when exiting debug break calls.
+    Address after_break_target_;
+
+    // Top debugger entry.
+    EnterDebugger* debugger_entry_;
+
+    // Pending interrupts scheduled while debugging.
+    int pending_interrupts_;
+  };
+
+  // Storage location for registers when handling debug break calls
+  static JSCallerSavedBuffer registers_;
+  static ThreadLocal thread_local_;
+  static void ThreadInit();
+
+  // Code to call for handling debug break on return.
+  static Code* debug_break_return_;
+
+  DISALLOW_COPY_AND_ASSIGN(Debug);
+};
+
+
+// Message delivered to the message handler callback. This is either a debugger
+// event or the response to a command.
+class MessageImpl: public v8::Debug::Message {
+ public:
+  // Create a message object for a debug event.
+  static MessageImpl NewEvent(DebugEvent event,
+                              bool running,
+                              Handle<JSObject> exec_state,
+                              Handle<JSObject> event_data);
+
+  // Create a message object for the response to a debug command.
+  static MessageImpl NewResponse(DebugEvent event,
+                                 bool running,
+                                 Handle<JSObject> exec_state,
+                                 Handle<JSObject> event_data,
+                                 Handle<String> response_json,
+                                 v8::Debug::ClientData* client_data);
+
+  // Implementation of interface v8::Debug::Message.
+  virtual bool IsEvent() const;
+  virtual bool IsResponse() const;
+  virtual DebugEvent GetEvent() const;
+  virtual bool WillStartRunning() const;
+  virtual v8::Handle<v8::Object> GetExecutionState() const;
+  virtual v8::Handle<v8::Object> GetEventData() const;
+  virtual v8::Handle<v8::String> GetJSON() const;
+  virtual v8::Handle<v8::Context> GetEventContext() const;
+  virtual v8::Debug::ClientData* GetClientData() const;
+
+ private:
+  MessageImpl(bool is_event,
+              DebugEvent event,
+              bool running,
+              Handle<JSObject> exec_state,
+              Handle<JSObject> event_data,
+              Handle<String> response_json,
+              v8::Debug::ClientData* client_data);
+
+  bool is_event_;  // Does this message represent a debug event?
+  DebugEvent event_;  // Debug event causing the break.
+  bool running_;  // Will the VM start running after this event?
+  Handle<JSObject> exec_state_;  // Current execution state.
+  Handle<JSObject> event_data_;  // Data associated with the event.
+  Handle<String> response_json_;  // Response JSON if message holds a response.
+  v8::Debug::ClientData* client_data_;  // Client data passed with the request.
+};
+
+
+// Message sent by the user to the v8 debugger or a debugger output message.
+// In addition to the command text it may contain a pointer to some user data
+// which is expected to be passed along with the command response to the
+// message handler.
+class CommandMessage {
+ public:
+  static CommandMessage New(const Vector<uint16_t>& command,
+                            v8::Debug::ClientData* data);
+  CommandMessage();
+  ~CommandMessage();
+
+  // Deletes user data and disposes of the text.
+  void Dispose();
+  Vector<uint16_t> text() const { return text_; }
+  v8::Debug::ClientData* client_data() const { return client_data_; }
+ private:
+  CommandMessage(const Vector<uint16_t>& text,
+                 v8::Debug::ClientData* data);
+
+  Vector<uint16_t> text_;
+  v8::Debug::ClientData* client_data_;
+};
+
+// A Queue of CommandMessage objects.  A thread-safe version is
+// LockingCommandMessageQueue, based on this class.
+class CommandMessageQueue BASE_EMBEDDED {
+ public:
+  explicit CommandMessageQueue(int size);
+  ~CommandMessageQueue();
+  bool IsEmpty() const { return start_ == end_; }
+  CommandMessage Get();
+  void Put(const CommandMessage& message);
+  void Clear() { start_ = end_ = 0; }  // Queue is empty after Clear().
+ private:
+  // Doubles the size of the message queue, and copies the messages.
+  void Expand();
+
+  CommandMessage* messages_;
+  int start_;
+  int end_;
+  int size_;  // The size of the queue buffer.  Queue can hold size-1 messages.
+};
+
+
+// LockingCommandMessageQueue is a thread-safe circular buffer of CommandMessage
+// messages.  The message data is not managed by LockingCommandMessageQueue.
+// Pointers to the data are passed in and out. Implemented by adding a
+// Mutex to CommandMessageQueue.  Includes logging of all puts and gets.
+class LockingCommandMessageQueue BASE_EMBEDDED {
+ public:
+  explicit LockingCommandMessageQueue(int size);
+  ~LockingCommandMessageQueue();
+  bool IsEmpty() const;
+  CommandMessage Get();
+  void Put(const CommandMessage& message);
+  void Clear();
+ private:
+  CommandMessageQueue queue_;
+  Mutex* lock_;
+  DISALLOW_COPY_AND_ASSIGN(LockingCommandMessageQueue);
+};
+
+
+class Debugger {
+ public:
+  static void DebugRequest(const uint16_t* json_request, int length);
+
+  static Handle<Object> MakeJSObject(Vector<const char> constructor_name,
+                                     int argc, Object*** argv,
+                                     bool* caught_exception);
+  static Handle<Object> MakeExecutionState(bool* caught_exception);
+  static Handle<Object> MakeBreakEvent(Handle<Object> exec_state,
+                                       Handle<Object> break_points_hit,
+                                       bool* caught_exception);
+  static Handle<Object> MakeExceptionEvent(Handle<Object> exec_state,
+                                           Handle<Object> exception,
+                                           bool uncaught,
+                                           bool* caught_exception);
+  static Handle<Object> MakeNewFunctionEvent(Handle<Object> func,
+                                             bool* caught_exception);
+  static Handle<Object> MakeCompileEvent(Handle<Script> script,
+                                         bool before,
+                                         bool* caught_exception);
+  static Handle<Object> MakeScriptCollectedEvent(int id,
+                                                 bool* caught_exception);
+  static void OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue);
+  static void OnException(Handle<Object> exception, bool uncaught);
+  static void OnBeforeCompile(Handle<Script> script);
+  static void OnAfterCompile(Handle<Script> script,
+                           Handle<JSFunction> fun);
+  static void OnNewFunction(Handle<JSFunction> fun);
+  static void OnScriptCollected(int id);
+  static void ProcessDebugEvent(v8::DebugEvent event,
+                                Handle<JSObject> event_data,
+                                bool auto_continue);
+  static void NotifyMessageHandler(v8::DebugEvent event,
+                                   Handle<JSObject> exec_state,
+                                   Handle<JSObject> event_data,
+                                   bool auto_continue);
+  static void SetEventListener(Handle<Object> callback, Handle<Object> data);
+  static void SetMessageHandler(v8::Debug::MessageHandler2 handler);
+  static void SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
+                                     int period);
+
+  // Invoke the message handler function.
+  static void InvokeMessageHandler(MessageImpl message);
+
+  // Add a debugger command to the command queue.
+  static void ProcessCommand(Vector<const uint16_t> command,
+                             v8::Debug::ClientData* client_data = NULL);
+
+  // Check whether there are commands in the command queue.
+  static bool HasCommands();
+
+  static Handle<Object> Call(Handle<JSFunction> fun,
+                             Handle<Object> data,
+                             bool* pending_exception);
+
+  // Start the debugger agent listening on the provided port.
+  static bool StartAgent(const char* name, int port);
+
+  // Stop the debugger agent.
+  static void StopAgent();
+
+  // Blocks until the agent has started listening for connections
+  static void WaitForAgent();
+
+  // Unload the debugger if possible. Only called when no debugger is currently
+  // active.
+  static void UnloadDebugger();
+
+  inline static bool EventActive(v8::DebugEvent event) {
+    ScopedLock with(debugger_access_);
+
+    // Check whether the message handler has been cleared.
+    if (debugger_unload_pending_) {
+      UnloadDebugger();
+    }
+
+    // Currently the event argument is not used.
+    return !compiling_natives_ && Debugger::IsDebuggerActive();
+  }
+
+  static void set_compiling_natives(bool compiling_natives) {
+    Debugger::compiling_natives_ = compiling_natives;
+  }
+  static bool compiling_natives() { return Debugger::compiling_natives_; }
+  static void set_loading_debugger(bool v) { is_loading_debugger_ = v; }
+  static bool is_loading_debugger() { return Debugger::is_loading_debugger_; }
+
+ private:
+  static bool IsDebuggerActive();
+  static void ListenersChanged();
+
+  static Mutex* debugger_access_;  // Mutex guarding debugger variables.
+  static Handle<Object> event_listener_;  // Global handle to listener.
+  static Handle<Object> event_listener_data_;
+  static bool compiling_natives_;  // Are we compiling natives?
+  static bool is_loading_debugger_;  // Are we loading the debugger?
+  static bool never_unload_debugger_;  // Can we unload the debugger?
+  static v8::Debug::MessageHandler2 message_handler_;
+  static bool debugger_unload_pending_;  // Was message handler cleared?
+  static v8::Debug::HostDispatchHandler host_dispatch_handler_;
+  static int host_dispatch_micros_;
+
+  static DebuggerAgent* agent_;
+
+  static const int kQueueInitialSize = 4;
+  static LockingCommandMessageQueue command_queue_;
+  static Semaphore* command_received_;  // Signaled for each command received.
+
+  friend class EnterDebugger;
+};
+
+
+// This class is used for entering the debugger. Create an instance on the
+// stack to enter the debugger. This will set the current break state, make
+// sure the debugger is loaded and switch to the debugger context. If the
+// debugger for some reason could not be entered, FailedToEnter will return
+// true.
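+//
+// Typical usage (see e.g. Debugger::OnException in debug.cc):
+//   EnterDebugger debugger;
+//   if (debugger.FailedToEnter()) return;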
+class EnterDebugger BASE_EMBEDDED {
+ public:
+  EnterDebugger()
+      : prev_(Debug::debugger_entry()),
+        has_js_frames_(!it_.done()) {
+    ASSERT(prev_ != NULL || !Debug::is_interrupt_pending(PREEMPT));
+    ASSERT(prev_ != NULL || !Debug::is_interrupt_pending(DEBUGBREAK));
+
+    // Link recursive debugger entry.
+    Debug::set_debugger_entry(this);
+
+    // Store the previous break id and frame id.
+    break_id_ = Debug::break_id();
+    break_frame_id_ = Debug::break_frame_id();
+
+    // Create the new break info. If there are no JavaScript frames there is
+    // no break frame id.
+    if (has_js_frames_) {
+      Debug::NewBreak(it_.frame()->id());
+    } else {
+      Debug::NewBreak(StackFrame::NO_ID);
+    }
+
+    // Make sure that debugger is loaded and enter the debugger context.
+    load_failed_ = !Debug::Load();
+    if (!load_failed_) {
+      // NOTE: the member variable save_ saves the previous context before
+      // this change.
+      Top::set_context(*Debug::debug_context());
+    }
+  }
+
+  ~EnterDebugger() {
+    // Restore to the previous break state.
+    Debug::SetBreak(break_frame_id_, break_id_);
+
+    // Check for leaving the debugger.
+    if (prev_ == NULL) {
+      // Clear mirror cache when leaving the debugger. Skip this if there is a
+      // pending exception as clearing the mirror cache calls back into
+      // JavaScript. This can happen if the v8::Debug::Call is used in which
+      // case the exception should end up in the calling code.
+      if (!Top::has_pending_exception()) {
+        // Try to avoid any pending debug break breaking in the clear mirror
+        // cache JavaScript code.
+        if (StackGuard::IsDebugBreak()) {
+          Debug::set_interrupts_pending(DEBUGBREAK);
+          StackGuard::Continue(DEBUGBREAK);
+        }
+        Debug::ClearMirrorCache();
+      }
+
+      // Request preemption and debug break when leaving the last debugger
+      // entry if any of these were recorded while debugging.
+      if (Debug::is_interrupt_pending(PREEMPT)) {
+        // This re-scheduling of preemption is to avoid starvation in some
+        // debugging scenarios.
+        Debug::clear_interrupt_pending(PREEMPT);
+        StackGuard::Preempt();
+      }
+      if (Debug::is_interrupt_pending(DEBUGBREAK)) {
+        Debug::clear_interrupt_pending(DEBUGBREAK);
+        StackGuard::DebugBreak();
+      }
+
+      // If there are commands in the queue when leaving the debugger, request
+      // that these commands are processed.
+      if (Debugger::HasCommands()) {
+        StackGuard::DebugCommand();
+      }
+
+      // If leaving the debugger with the debugger no longer active, unload it.
+      if (!Debugger::IsDebuggerActive()) {
+        Debugger::UnloadDebugger();
+      }
+    }
+
+    // Leaving this debugger entry.
+    Debug::set_debugger_entry(prev_);
+  }
+
+  // Check whether the debugger could be entered.
+  inline bool FailedToEnter() { return load_failed_; }
+
+  // Check whether there are any JavaScript frames on the stack.
+  inline bool HasJavaScriptFrames() { return has_js_frames_; }
+
+  // Get the active context from before entering the debugger.
+  inline Handle<Context> GetContext() { return save_.context(); }
+
+ private:
+  EnterDebugger* prev_;  // Previous debugger entry if entered recursively.
+  JavaScriptFrameIterator it_;
+  const bool has_js_frames_;  // Were there any JavaScript frames?
+  StackFrame::Id break_frame_id_;  // Previous break frame id.
+  int break_id_;  // Previous break id.
+  bool load_failed_;  // Did the debugger fail to load?
+  SaveContext save_;  // Saves previous context.
+};
+
+
+// Stack allocated class for disabling break.
+class DisableBreak BASE_EMBEDDED {
+ public:
+  // Set the disable break state, remembering the previous state so that it
+  // can be restored when this object goes out of scope.
+  explicit DisableBreak(bool disable_break) {
+    prev_disable_break_ = Debug::disable_break();
+    Debug::set_disable_break(disable_break);
+  }
+  ~DisableBreak() {
+    Debug::set_disable_break(prev_disable_break_);
+  }
+
+ private:
+  // The previous disable break state, used to restore the value when this
+  // object is destructed.
+  bool prev_disable_break_;
+};
+
+
+// Debug_Address encapsulates the Address pointers used in generating debug
+// code.
+class Debug_Address {
+ public:
+  Debug_Address(Debug::AddressId id, int reg = 0)
+    : id_(id), reg_(reg) {
+    ASSERT(reg == 0 || id == Debug::k_register_address);
+  }
+
+  static Debug_Address AfterBreakTarget() {
+    return Debug_Address(Debug::k_after_break_target_address);
+  }
+
+  static Debug_Address DebugBreakReturn() {
+    return Debug_Address(Debug::k_debug_break_return_address);
+  }
+
+  static Debug_Address Register(int reg) {
+    return Debug_Address(Debug::k_register_address, reg);
+  }
+
+  Address address() const {
+    switch (id_) {
+      case Debug::k_after_break_target_address:
+        return reinterpret_cast<Address>(Debug::after_break_target_address());
+      case Debug::k_debug_break_return_address:
+        return reinterpret_cast<Address>(Debug::debug_break_return_address());
+      case Debug::k_register_address:
+        return reinterpret_cast<Address>(Debug::register_address(reg_));
+      default:
+        UNREACHABLE();
+        return NULL;
+    }
+  }
+ private:
+  Debug::AddressId id_;
+  int reg_;
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
+#endif  // V8_DEBUG_H_
diff --git a/src/disasm.h b/src/disasm.h
new file mode 100644
index 0000000..6ecd1c8
--- /dev/null
+++ b/src/disasm.h
@@ -0,0 +1,77 @@
+// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_DISASM_H_
+#define V8_DISASM_H_
+
+namespace disasm {
+
+typedef unsigned char byte;
+
+// Interface and default implementation for converting addresses and
+// register-numbers to text.  The default implementation is machine
+// specific.
+class NameConverter {
+ public:
+  virtual ~NameConverter() {}
+  virtual const char* NameOfCPURegister(int reg) const;
+  virtual const char* NameOfByteCPURegister(int reg) const;
+  virtual const char* NameOfXMMRegister(int reg) const;
+  virtual const char* NameOfAddress(byte* addr) const;
+  virtual const char* NameOfConstant(byte* addr) const;
+  virtual const char* NameInCode(byte* addr) const;
+};
+
+
+// A generic Disassembler interface
+class Disassembler {
+ public:
+  // Caller deallocates converter.
+  explicit Disassembler(const NameConverter& converter);
+
+  virtual ~Disassembler();
+
+  // Writes one disassembled instruction into 'buffer' (0-terminated).
+  // Returns the length of the disassembled machine instruction in bytes.
+  int InstructionDecode(v8::internal::Vector<char> buffer, byte* instruction);
+
+  // Returns -1 if instruction does not mark the beginning of a constant pool,
+  // or the number of entries in the constant pool beginning here.
+  int ConstantPoolSizeAt(byte* instruction);
+
+  // Write disassembly into specified file 'f' using specified NameConverter
+  // (see constructor).
+  static void Disassemble(FILE* f, byte* begin, byte* end);
+ private:
+  const NameConverter& converter_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Disassembler);
+};
+
+}  // namespace disasm
+
+#endif  // V8_DISASM_H_
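
As a rough usage sketch of the interface above (not part of this patch, and mirroring what src/disassembler.cc below does internally), a caller that owns a range of instruction bytes could drive the two classes like this, assuming begin/end delimit valid code:

  disasm::NameConverter converter;              // default machine-specific names
  disasm::Disassembler d(converter);            // caller keeps 'converter' alive
  v8::internal::EmbeddedVector<char, 128> buffer;
  for (disasm::byte* pc = begin; pc < end; ) {
    int length = d.InstructionDecode(buffer, pc);  // one 0-terminated line
    fprintf(stdout, "%p  %s\n", pc, buffer.start());
    pc += length;                                // advance by instruction length
  }
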
diff --git a/src/disassembler.cc b/src/disassembler.cc
new file mode 100644
index 0000000..e2f908d
--- /dev/null
+++ b/src/disassembler.cc
@@ -0,0 +1,318 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "code-stubs.h"
+#include "codegen-inl.h"
+#include "debug.h"
+#include "disasm.h"
+#include "disassembler.h"
+#include "macro-assembler.h"
+#include "serialize.h"
+#include "string-stream.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef ENABLE_DISASSEMBLER
+
+void Disassembler::Dump(FILE* f, byte* begin, byte* end) {
+  for (byte* pc = begin; pc < end; pc++) {
+    if (f == NULL) {
+      PrintF("%" V8PRIxPTR "  %4" V8PRIdPTR "  %02x\n", pc, pc - begin, *pc);
+    } else {
+      fprintf(f, "%" V8PRIxPTR "  %4" V8PRIdPTR "  %02x\n",
+              reinterpret_cast<uintptr_t>(pc), pc - begin, *pc);
+    }
+  }
+}
+
+
+class V8NameConverter: public disasm::NameConverter {
+ public:
+  explicit V8NameConverter(Code* code) : code_(code) {}
+  virtual const char* NameOfAddress(byte* pc) const;
+  virtual const char* NameInCode(byte* addr) const;
+  Code* code() const { return code_; }
+ private:
+  Code* code_;
+};
+
+
+const char* V8NameConverter::NameOfAddress(byte* pc) const {
+  static v8::internal::EmbeddedVector<char, 128> buffer;
+
+  const char* name = Builtins::Lookup(pc);
+  if (name != NULL) {
+    OS::SNPrintF(buffer, "%s  (%p)", name, pc);
+    return buffer.start();
+  }
+
+  if (code_ != NULL) {
+    int offs = pc - code_->instruction_start();
+    // Print as code offset, if it seems reasonable.
+    if (0 <= offs && offs < code_->instruction_size()) {
+      OS::SNPrintF(buffer, "%d  (%p)", offs, pc);
+      return buffer.start();
+    }
+  }
+
+  return disasm::NameConverter::NameOfAddress(pc);
+}
+
+
+const char* V8NameConverter::NameInCode(byte* addr) const {
+  // The V8NameConverter is used for well known code, so we can "safely"
+  // dereference pointers in generated code.
+  return (code_ != NULL) ? reinterpret_cast<const char*>(addr) : "";
+}
+
+
+static void DumpBuffer(FILE* f, char* buff) {
+  if (f == NULL) {
+    PrintF("%s", buff);
+  } else {
+    fprintf(f, "%s", buff);
+  }
+}
+
+static const int kOutBufferSize = 2048 + String::kMaxShortPrintLength;
+static const int kRelocInfoPosition = 57;
+
+static int DecodeIt(FILE* f,
+                    const V8NameConverter& converter,
+                    byte* begin,
+                    byte* end) {
+  NoHandleAllocation ha;
+  AssertNoAllocation no_alloc;
+  ExternalReferenceEncoder ref_encoder;
+
+  v8::internal::EmbeddedVector<char, 128> decode_buffer;
+  v8::internal::EmbeddedVector<char, kOutBufferSize> out_buffer;
+  byte* pc = begin;
+  disasm::Disassembler d(converter);
+  RelocIterator* it = NULL;
+  if (converter.code() != NULL) {
+    it = new RelocIterator(converter.code());
+  } else {
+    // No relocation information when printing code stubs.
+  }
+  int constants = -1;  // no constants being decoded at the start
+
+  while (pc < end) {
+    // First decode instruction so that we know its length.
+    byte* prev_pc = pc;
+    if (constants > 0) {
+      OS::SNPrintF(decode_buffer,
+                   "%08x       constant",
+                   *reinterpret_cast<int32_t*>(pc));
+      constants--;
+      pc += 4;
+    } else {
+      int num_const = d.ConstantPoolSizeAt(pc);
+      if (num_const >= 0) {
+        OS::SNPrintF(decode_buffer,
+                     "%08x       constant pool begin",
+                     *reinterpret_cast<int32_t*>(pc));
+        constants = num_const;
+        pc += 4;
+      } else if (it != NULL && !it->done() && it->rinfo()->pc() == pc &&
+          it->rinfo()->rmode() == RelocInfo::INTERNAL_REFERENCE) {
+        // raw pointer embedded in code stream, e.g., jump table
+        byte* ptr = *reinterpret_cast<byte**>(pc);
+        OS::SNPrintF(decode_buffer,
+                     "%08" V8PRIxPTR "      jump table entry %4" V8PRIdPTR,
+                     ptr,
+                     ptr - begin);
+        pc += 4;
+      } else {
+        decode_buffer[0] = '\0';
+        pc += d.InstructionDecode(decode_buffer, pc);
+      }
+    }
+
+    // Collect RelocInfo for this instruction (prev_pc .. pc-1)
+    List<const char*> comments(4);
+    List<byte*> pcs(1);
+    List<RelocInfo::Mode> rmodes(1);
+    List<intptr_t> datas(1);
+    if (it != NULL) {
+      while (!it->done() && it->rinfo()->pc() < pc) {
+        if (RelocInfo::IsComment(it->rinfo()->rmode())) {
+          // For comments just collect the text.
+          comments.Add(reinterpret_cast<const char*>(it->rinfo()->data()));
+        } else {
+          // For other reloc info collect all data.
+          pcs.Add(it->rinfo()->pc());
+          rmodes.Add(it->rinfo()->rmode());
+          datas.Add(it->rinfo()->data());
+        }
+        it->next();
+      }
+    }
+
+    StringBuilder out(out_buffer.start(), out_buffer.length());
+
+    // Comments.
+    for (int i = 0; i < comments.length(); i++) {
+      out.AddFormatted("                  %s\n", comments[i]);
+    }
+
+    // Write out the comments, then reset the builder for the next line.
+    DumpBuffer(f, out.Finalize());
+    out.Reset();
+
+    // Instruction address and instruction offset.
+    out.AddFormatted("%p  %4d  ", prev_pc, prev_pc - begin);
+
+    // Instruction.
+    out.AddFormatted("%s", decode_buffer.start());
+
+    // Print all the non-comment reloc info collected for this instruction.
+    for (int i = 0; i < pcs.length(); i++) {
+      // Put together the reloc info
+      RelocInfo relocinfo(pcs[i], rmodes[i], datas[i]);
+
+      // Indent the printing of the reloc info.
+      if (i == 0) {
+        // The first reloc info is printed after the disassembled instruction.
+        out.AddPadding(' ', kRelocInfoPosition - out.position());
+      } else {
+        // Additional reloc infos are printed on separate lines.
+        out.AddFormatted("\n");
+        out.AddPadding(' ', kRelocInfoPosition);
+      }
+
+      RelocInfo::Mode rmode = relocinfo.rmode();
+      if (RelocInfo::IsPosition(rmode)) {
+        if (RelocInfo::IsStatementPosition(rmode)) {
+          out.AddFormatted("    ;; debug: statement %d", relocinfo.data());
+        } else {
+          out.AddFormatted("    ;; debug: position %d", relocinfo.data());
+        }
+      } else if (rmode == RelocInfo::EMBEDDED_OBJECT) {
+        HeapStringAllocator allocator;
+        StringStream accumulator(&allocator);
+        relocinfo.target_object()->ShortPrint(&accumulator);
+        SmartPointer<const char> obj_name = accumulator.ToCString();
+        out.AddFormatted("    ;; object: %s", *obj_name);
+      } else if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+        const char* reference_name =
+            ref_encoder.NameOfAddress(*relocinfo.target_reference_address());
+        out.AddFormatted("    ;; external reference (%s)", reference_name);
+      } else if (RelocInfo::IsCodeTarget(rmode)) {
+        out.AddFormatted("    ;; code:");
+        if (rmode == RelocInfo::CONSTRUCT_CALL) {
+          out.AddFormatted(" constructor,");
+        }
+        Code* code = Code::GetCodeFromTargetAddress(relocinfo.target_address());
+        Code::Kind kind = code->kind();
+        if (code->is_inline_cache_stub()) {
+          if (rmode == RelocInfo::CODE_TARGET_CONTEXT) {
+            out.AddFormatted(" contextual,");
+          }
+          InlineCacheState ic_state = code->ic_state();
+          out.AddFormatted(" %s, %s", Code::Kind2String(kind),
+              Code::ICState2String(ic_state));
+          if (ic_state == MONOMORPHIC) {
+            PropertyType type = code->type();
+            out.AddFormatted(", %s", Code::PropertyType2String(type));
+          }
+          if (code->ic_in_loop() == IN_LOOP) {
+            out.AddFormatted(", in_loop");
+          }
+          if (kind == Code::CALL_IC) {
+            out.AddFormatted(", argc = %d", code->arguments_count());
+          }
+        } else if (kind == Code::STUB) {
+          // Reverse lookup required as the minor key cannot be retrieved
+          // from the code object.
+          Object* obj = Heap::code_stubs()->SlowReverseLookup(code);
+          if (obj != Heap::undefined_value()) {
+            ASSERT(obj->IsSmi());
+            // Get the STUB key and extract major and minor key.
+            uint32_t key = Smi::cast(obj)->value();
+            uint32_t minor_key = CodeStub::MinorKeyFromKey(key);
+            ASSERT(code->major_key() == CodeStub::MajorKeyFromKey(key));
+            out.AddFormatted(" %s, %s, ",
+                             Code::Kind2String(kind),
+                             CodeStub::MajorName(code->major_key()));
+            switch (code->major_key()) {
+              case CodeStub::CallFunction:
+                out.AddFormatted("argc = %d", minor_key);
+                break;
+              case CodeStub::Runtime: {
+                const char* name =
+                    RuntimeStub::GetNameFromMinorKey(minor_key);
+                out.AddFormatted("%s", name);
+                break;
+              }
+              default:
+                out.AddFormatted("minor: %d", minor_key);
+            }
+          }
+        } else {
+          out.AddFormatted(" %s", Code::Kind2String(kind));
+        }
+      } else {
+        out.AddFormatted("    ;; %s", RelocInfo::RelocModeName(rmode));
+      }
+    }
+    out.AddString("\n");
+    DumpBuffer(f, out.Finalize());
+    out.Reset();
+  }
+
+  delete it;
+  return pc - begin;
+}
+
+
+int Disassembler::Decode(FILE* f, byte* begin, byte* end) {
+  V8NameConverter defaultConverter(NULL);
+  return DecodeIt(f, defaultConverter, begin, end);
+}
+
+
+// Called by Code::CodePrint.
+void Disassembler::Decode(FILE* f, Code* code) {
+  byte* begin = Code::cast(code)->instruction_start();
+  byte* end = begin + Code::cast(code)->instruction_size();
+  V8NameConverter v8NameConverter(code);
+  DecodeIt(f, v8NameConverter, begin, end);
+}
+
+#else  // ENABLE_DISASSEMBLER
+
+void Disassembler::Dump(FILE* f, byte* begin, byte* end) {}
+int Disassembler::Decode(FILE* f, byte* begin, byte* end) { return 0; }
+void Disassembler::Decode(FILE* f, Code* code) {}
+
+#endif  // ENABLE_DISASSEMBLER
+
+} }  // namespace v8::internal
diff --git a/src/disassembler.h b/src/disassembler.h
new file mode 100644
index 0000000..68a338d
--- /dev/null
+++ b/src/disassembler.h
@@ -0,0 +1,56 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_DISASSEMBLER_H_
+#define V8_DISASSEMBLER_H_
+
+namespace v8 {
+namespace internal {
+
+class Disassembler : public AllStatic {
+ public:
+  // Print the bytes in the interval [begin, end) into f.
+  static void Dump(FILE* f, byte* begin, byte* end);
+
+  // Decode instructions in the interval [begin, end) and print the
+  // code into f. Returns the number of bytes disassembled or 1 if no
+  // instruction could be decoded.
+  static int Decode(FILE* f, byte* begin, byte* end);
+
+  // Decode instructions in code.
+  static void Decode(FILE* f, Code* code);
+ private:
+  // Decode instruction at pc and print disassembled instruction into f.
+  // Returns the instruction length in bytes, or 1 if the instruction could
+  // not be decoded.  The number of characters written is written into
+  // the out parameter char_count.
+  static int Decode(FILE* f, byte* pc, int* char_count);
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_DISASSEMBLER_H_
diff --git a/src/dtoa-config.c b/src/dtoa-config.c
new file mode 100644
index 0000000..bc0a58a
--- /dev/null
+++ b/src/dtoa-config.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2007-2008 the V8 project authors. All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Dtoa needs to have a particular environment set up for it, so
+ * instead of using it directly you should use this file.
+ *
+ * The way it works is that when you link with it, its definitions
+ * of dtoa, strtod etc. override the default ones.  So if you fail
+ * to link with this library everything will still work; it is just
+ * subtly wrong.
+ */
+
+#if !(defined(__APPLE__) && defined(__MACH__)) && \
+    !defined(WIN32) && !defined(__FreeBSD__)
+#include <endian.h>
+#endif
+#include <math.h>
+#include <float.h>
+
+/* The floating point word order on ARM is big endian when floating point
+ * emulation is used, even if the byte order is little endian */
+#if !(defined(__APPLE__) && defined(__MACH__)) && !defined(WIN32) && \
+    !defined(__FreeBSD__) && __FLOAT_WORD_ORDER == __BIG_ENDIAN
+#define  IEEE_MC68k
+#else
+#define  IEEE_8087
+#endif
+
+#define __MATH_H__
+#if defined(__APPLE__) && defined(__MACH__) || defined(__FreeBSD__)
+/* stdlib.h on FreeBSD and Apple's 10.5 and later SDKs will mangle the
+ * name of strtod.  If it's included after strtod is redefined as
+ * gay_strtod, it will mangle the name of gay_strtod, which is
+ * unwanted. */
+#include <stdlib.h>
+
+#endif
+/* stdlib.h on Windows adds __declspec(dllimport) to all functions when using
+ * the DLL version of the CRT (compiling with /MD or /MDd). If stdlib.h is
+ * included after strtod is redefined as gay_strtod, it will add
+ * __declspec(dllimport) to gay_strtod, which causes the compilation of
+ * gay_strtod in dtoa.c to fail.
+*/
+#if defined(WIN32) && defined(_DLL)
+#include "stdlib.h"
+#endif
+
+/* For MinGW, turn on __NO_ISOCEXT so that its strtod doesn't get added */
+#ifdef __MINGW32__
+#define __NO_ISOCEXT
+#endif  /* __MINGW32__ */
+
+/* On 64-bit systems, we need to make sure that a Long is only 32 bits. */
+#ifdef V8_TARGET_ARCH_X64
+#define Long int
+#endif /* V8_TARGET_ARCH_X64 */
+
+/* Make sure we use the David M. Gay version of strtod(). On Linux, we
+ * cannot use the same name (maybe the function does not have weak
+ * linkage?). */
+#define strtod gay_strtod
+#include "third_party/dtoa/dtoa.c"
diff --git a/src/execution.cc b/src/execution.cc
new file mode 100644
index 0000000..8bc6b74
--- /dev/null
+++ b/src/execution.cc
@@ -0,0 +1,698 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "api.h"
+#include "codegen-inl.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/simulator-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/simulator-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/simulator-arm.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+#include "debug.h"
+#include "v8threads.h"
+
+namespace v8 {
+namespace internal {
+
+
+static Handle<Object> Invoke(bool construct,
+                             Handle<JSFunction> func,
+                             Handle<Object> receiver,
+                             int argc,
+                             Object*** args,
+                             bool* has_pending_exception) {
+  // Make sure we have a real function, not a boilerplate function.
+  ASSERT(!func->IsBoilerplate());
+
+  // Entering JavaScript.
+  VMState state(JS);
+
+  // Placeholder for return value.
+  Object* value = reinterpret_cast<Object*>(kZapValue);
+
+  typedef Object* (*JSEntryFunction)(
+    byte* entry,
+    Object* function,
+    Object* receiver,
+    int argc,
+    Object*** args);
+
+  Handle<Code> code;
+  if (construct) {
+    JSConstructEntryStub stub;
+    code = stub.GetCode();
+  } else {
+    JSEntryStub stub;
+    code = stub.GetCode();
+  }
+
+  // Convert calls on global objects to be calls on the global
+  // receiver instead to avoid having a 'this' pointer which refers
+  // directly to a global object.
+  if (receiver->IsGlobalObject()) {
+    Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
+    receiver = Handle<JSObject>(global->global_receiver());
+  }
+
+  {
+    // Save and restore context around invocation and block the
+    // allocation of handles without explicit handle scopes.
+    SaveContext save;
+    NoHandleAllocation na;
+    JSEntryFunction entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
+
+    // Call the function through the right JS entry stub.
+    value = CALL_GENERATED_CODE(entry, func->code()->entry(), *func,
+                                *receiver, argc, args);
+  }
+
+#ifdef DEBUG
+  value->Verify();
+#endif
+
+  // Update the pending exception flag and return the value.
+  *has_pending_exception = value->IsException();
+  ASSERT(*has_pending_exception == Top::has_pending_exception());
+  if (*has_pending_exception) {
+    Top::ReportPendingMessages();
+    return Handle<Object>();
+  } else {
+    Top::clear_pending_message();
+  }
+
+  return Handle<Object>(value);
+}
+
+
+Handle<Object> Execution::Call(Handle<JSFunction> func,
+                               Handle<Object> receiver,
+                               int argc,
+                               Object*** args,
+                               bool* pending_exception) {
+  return Invoke(false, func, receiver, argc, args, pending_exception);
+}
+
+
+Handle<Object> Execution::New(Handle<JSFunction> func, int argc,
+                              Object*** args, bool* pending_exception) {
+  return Invoke(true, func, Top::global(), argc, args, pending_exception);
+}
+
+
+Handle<Object> Execution::TryCall(Handle<JSFunction> func,
+                                  Handle<Object> receiver,
+                                  int argc,
+                                  Object*** args,
+                                  bool* caught_exception) {
+  // Enter a try-block while executing the JavaScript code. To avoid
+  // duplicate error printing it must be non-verbose.  Also, to avoid
+  // creating message objects during stack overflow we shouldn't
+  // capture messages.
+  v8::TryCatch catcher;
+  catcher.SetVerbose(false);
+  catcher.SetCaptureMessage(false);
+
+  Handle<Object> result = Invoke(false, func, receiver, argc, args,
+                                 caught_exception);
+
+  if (*caught_exception) {
+    ASSERT(catcher.HasCaught());
+    ASSERT(Top::has_pending_exception());
+    ASSERT(Top::external_caught_exception());
+    if (Top::pending_exception() == Heap::termination_exception()) {
+      result = Factory::termination_exception();
+    } else {
+      result = v8::Utils::OpenHandle(*catcher.Exception());
+    }
+    Top::OptionalRescheduleException(true);
+  }
+
+  ASSERT(!Top::has_pending_exception());
+  ASSERT(!Top::external_caught_exception());
+  return result;
+}
+
+
+Handle<Object> Execution::GetFunctionDelegate(Handle<Object> object) {
+  ASSERT(!object->IsJSFunction());
+
+  // If you return a function from here, it will be called when an
+  // attempt is made to call the given object as a function.
+
+  // Regular expressions can be called as functions in both Firefox
+  // and Safari so we allow it too.
+  if (object->IsJSRegExp()) {
+    Handle<String> exec = Factory::exec_symbol();
+    return Handle<Object>(object->GetProperty(*exec));
+  }
+
+  // Objects created through the API can have an instance-call handler
+  // that should be used when calling the object as a function.
+  if (object->IsHeapObject() &&
+      HeapObject::cast(*object)->map()->has_instance_call_handler()) {
+    return Handle<JSFunction>(
+        Top::global_context()->call_as_function_delegate());
+  }
+
+  return Factory::undefined_value();
+}
+
+
+Handle<Object> Execution::GetConstructorDelegate(Handle<Object> object) {
+  ASSERT(!object->IsJSFunction());
+
+  // If you return a function from here, it will be called when an
+  // attempt is made to call the given object as a constructor.
+
+  // Objects created through the API can have an instance-call handler
+  // that should be used when calling the object as a function.
+  if (object->IsHeapObject() &&
+      HeapObject::cast(*object)->map()->has_instance_call_handler()) {
+    return Handle<JSFunction>(
+        Top::global_context()->call_as_constructor_delegate());
+  }
+
+  return Factory::undefined_value();
+}
+
+
+// Static state for stack guards.
+StackGuard::ThreadLocal StackGuard::thread_local_;
+
+
+bool StackGuard::IsStackOverflow() {
+  ExecutionAccess access;
+  return (thread_local_.jslimit_ != kInterruptLimit &&
+          thread_local_.climit_ != kInterruptLimit);
+}
+
+
+void StackGuard::EnableInterrupts() {
+  ExecutionAccess access;
+  if (IsSet(access)) {
+    set_limits(kInterruptLimit, access);
+  }
+}
+
+
+void StackGuard::SetStackLimit(uintptr_t limit) {
+  ExecutionAccess access;
+  // If the current limits are special (e.g. due to a pending interrupt) then
+  // leave them alone.
+  uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(limit);
+  if (thread_local_.jslimit_ == thread_local_.initial_jslimit_) {
+    thread_local_.jslimit_ = jslimit;
+    Heap::SetStackLimit(jslimit);
+  }
+  if (thread_local_.climit_ == thread_local_.initial_climit_) {
+    thread_local_.climit_ = limit;
+  }
+  thread_local_.initial_climit_ = limit;
+  thread_local_.initial_jslimit_ = jslimit;
+}
+
+
+void StackGuard::DisableInterrupts() {
+  ExecutionAccess access;
+  reset_limits(access);
+}
+
+
+bool StackGuard::IsSet(const ExecutionAccess& lock) {
+  return thread_local_.interrupt_flags_ != 0;
+}
+
+
+bool StackGuard::IsInterrupted() {
+  ExecutionAccess access;
+  return thread_local_.interrupt_flags_ & INTERRUPT;
+}
+
+
+void StackGuard::Interrupt() {
+  ExecutionAccess access;
+  thread_local_.interrupt_flags_ |= INTERRUPT;
+  set_limits(kInterruptLimit, access);
+}
+
+
+bool StackGuard::IsPreempted() {
+  ExecutionAccess access;
+  return thread_local_.interrupt_flags_ & PREEMPT;
+}
+
+
+void StackGuard::Preempt() {
+  ExecutionAccess access;
+  thread_local_.interrupt_flags_ |= PREEMPT;
+  set_limits(kInterruptLimit, access);
+}
+
+
+bool StackGuard::IsTerminateExecution() {
+  ExecutionAccess access;
+  return thread_local_.interrupt_flags_ & TERMINATE;
+}
+
+
+void StackGuard::TerminateExecution() {
+  ExecutionAccess access;
+  thread_local_.interrupt_flags_ |= TERMINATE;
+  set_limits(kInterruptLimit, access);
+}
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+bool StackGuard::IsDebugBreak() {
+  ExecutionAccess access;
+  return thread_local_.interrupt_flags_ & DEBUGBREAK;
+}
+
+
+void StackGuard::DebugBreak() {
+  ExecutionAccess access;
+  thread_local_.interrupt_flags_ |= DEBUGBREAK;
+  set_limits(kInterruptLimit, access);
+}
+
+
+bool StackGuard::IsDebugCommand() {
+  ExecutionAccess access;
+  return thread_local_.interrupt_flags_ & DEBUGCOMMAND;
+}
+
+
+void StackGuard::DebugCommand() {
+  if (FLAG_debugger_auto_break) {
+    ExecutionAccess access;
+    thread_local_.interrupt_flags_ |= DEBUGCOMMAND;
+    set_limits(kInterruptLimit, access);
+  }
+}
+#endif
+
+void StackGuard::Continue(InterruptFlag after_what) {
+  ExecutionAccess access;
+  thread_local_.interrupt_flags_ &= ~static_cast<int>(after_what);
+  if (thread_local_.interrupt_flags_ == 0) {
+    reset_limits(access);
+  }
+}
+
+
+int StackGuard::ArchiveSpacePerThread() {
+  return sizeof(ThreadLocal);
+}
+
+
+char* StackGuard::ArchiveStackGuard(char* to) {
+  ExecutionAccess access;
+  memcpy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
+  ThreadLocal blank;
+  thread_local_ = blank;
+  return to + sizeof(ThreadLocal);
+}
+
+
+char* StackGuard::RestoreStackGuard(char* from) {
+  ExecutionAccess access;
+  memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
+  Heap::SetStackLimit(thread_local_.jslimit_);
+  return from + sizeof(ThreadLocal);
+}
+
+
+static internal::Thread::LocalStorageKey stack_limit_key =
+    internal::Thread::CreateThreadLocalKey();
+
+
+void StackGuard::FreeThreadResources() {
+  Thread::SetThreadLocal(
+      stack_limit_key,
+      reinterpret_cast<void*>(thread_local_.initial_climit_));
+}
+
+
+void StackGuard::ThreadLocal::Clear() {
+  initial_jslimit_ = kIllegalLimit;
+  jslimit_ = kIllegalLimit;
+  initial_climit_ = kIllegalLimit;
+  climit_ = kIllegalLimit;
+  nesting_ = 0;
+  postpone_interrupts_nesting_ = 0;
+  interrupt_flags_ = 0;
+  Heap::SetStackLimit(kIllegalLimit);
+}
+
+
+void StackGuard::ThreadLocal::Initialize() {
+  if (initial_climit_ == kIllegalLimit) {
+    // Takes the address of the limit variable in order to find out where
+    // the top of the stack is right now.
+    intptr_t limit = reinterpret_cast<intptr_t>(&limit) - kLimitSize;
+    initial_jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
+    jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
+    initial_climit_ = limit;
+    climit_ = limit;
+    Heap::SetStackLimit(SimulatorStack::JsLimitFromCLimit(limit));
+  }
+  nesting_ = 0;
+  postpone_interrupts_nesting_ = 0;
+  interrupt_flags_ = 0;
+}
+
+
+void StackGuard::ClearThread(const ExecutionAccess& lock) {
+  thread_local_.Clear();
+}
+
+
+void StackGuard::InitThread(const ExecutionAccess& lock) {
+  thread_local_.Initialize();
+  void* stored_limit = Thread::GetThreadLocal(stack_limit_key);
+  // You should hold the ExecutionAccess lock when you call this.
+  if (stored_limit != NULL) {
+    StackGuard::SetStackLimit(reinterpret_cast<intptr_t>(stored_limit));
+  }
+}
+
+
+// --- C a l l s   t o   n a t i v e s ---
+
+#define RETURN_NATIVE_CALL(name, argc, argv, has_pending_exception) \
+  do {                                                              \
+    Object** args[argc] = argv;                                     \
+    ASSERT(has_pending_exception != NULL);                          \
+    return Call(Top::name##_fun(), Top::builtins(), argc, args,     \
+                has_pending_exception);                             \
+  } while (false)
+
+
+Handle<Object> Execution::ToBoolean(Handle<Object> obj) {
+  // See the similar code in runtime.js:ToBoolean.
+  if (obj->IsBoolean()) return obj;
+  bool result = true;
+  if (obj->IsString()) {
+    result = Handle<String>::cast(obj)->length() != 0;
+  } else if (obj->IsNull() || obj->IsUndefined()) {
+    result = false;
+  } else if (obj->IsNumber()) {
+    double value = obj->Number();
+    result = !((value == 0) || isnan(value));
+  }
+  return Handle<Object>(Heap::ToBoolean(result));
+}
+
+
+Handle<Object> Execution::ToNumber(Handle<Object> obj, bool* exc) {
+  RETURN_NATIVE_CALL(to_number, 1, { obj.location() }, exc);
+}
+
+
+Handle<Object> Execution::ToString(Handle<Object> obj, bool* exc) {
+  RETURN_NATIVE_CALL(to_string, 1, { obj.location() }, exc);
+}
+
+
+Handle<Object> Execution::ToDetailString(Handle<Object> obj, bool* exc) {
+  RETURN_NATIVE_CALL(to_detail_string, 1, { obj.location() }, exc);
+}
+
+
+Handle<Object> Execution::ToObject(Handle<Object> obj, bool* exc) {
+  if (obj->IsJSObject()) return obj;
+  RETURN_NATIVE_CALL(to_object, 1, { obj.location() }, exc);
+}
+
+
+Handle<Object> Execution::ToInteger(Handle<Object> obj, bool* exc) {
+  RETURN_NATIVE_CALL(to_integer, 1, { obj.location() }, exc);
+}
+
+
+Handle<Object> Execution::ToUint32(Handle<Object> obj, bool* exc) {
+  RETURN_NATIVE_CALL(to_uint32, 1, { obj.location() }, exc);
+}
+
+
+Handle<Object> Execution::ToInt32(Handle<Object> obj, bool* exc) {
+  RETURN_NATIVE_CALL(to_int32, 1, { obj.location() }, exc);
+}
+
+
+Handle<Object> Execution::NewDate(double time, bool* exc) {
+  Handle<Object> time_obj = Factory::NewNumber(time);
+  RETURN_NATIVE_CALL(create_date, 1, { time_obj.location() }, exc);
+}
+
+
+#undef RETURN_NATIVE_CALL
+
+
+Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) {
+  int int_index = static_cast<int>(index);
+  if (int_index < 0 || int_index >= string->length()) {
+    return Factory::undefined_value();
+  }
+
+  Handle<Object> char_at =
+      GetProperty(Top::builtins(), Factory::char_at_symbol());
+  if (!char_at->IsJSFunction()) {
+    return Factory::undefined_value();
+  }
+
+  bool caught_exception;
+  Handle<Object> index_object = Factory::NewNumberFromInt(int_index);
+  Object** index_arg[] = { index_object.location() };
+  Handle<Object> result = TryCall(Handle<JSFunction>::cast(char_at),
+                                  string,
+                                  ARRAY_SIZE(index_arg),
+                                  index_arg,
+                                  &caught_exception);
+  if (caught_exception) {
+    return Factory::undefined_value();
+  }
+  return result;
+}
+
+
+Handle<JSFunction> Execution::InstantiateFunction(
+    Handle<FunctionTemplateInfo> data, bool* exc) {
+  // Fast case: see if the function has already been instantiated
+  int serial_number = Smi::cast(data->serial_number())->value();
+  Object* elm =
+      Top::global_context()->function_cache()->GetElement(serial_number);
+  if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
+  // The function has not yet been instantiated in this context; do it.
+  Object** args[1] = { Handle<Object>::cast(data).location() };
+  Handle<Object> result =
+      Call(Top::instantiate_fun(), Top::builtins(), 1, args, exc);
+  if (*exc) return Handle<JSFunction>::null();
+  return Handle<JSFunction>::cast(result);
+}
+
+
+Handle<JSObject> Execution::InstantiateObject(Handle<ObjectTemplateInfo> data,
+                                              bool* exc) {
+  if (data->property_list()->IsUndefined() &&
+      !data->constructor()->IsUndefined()) {
+    // Initialization to make gcc happy.
+    Object* result = NULL;
+    {
+      HandleScope scope;
+      Handle<FunctionTemplateInfo> cons_template =
+          Handle<FunctionTemplateInfo>(
+              FunctionTemplateInfo::cast(data->constructor()));
+      Handle<JSFunction> cons = InstantiateFunction(cons_template, exc);
+      if (*exc) return Handle<JSObject>::null();
+      Handle<Object> value = New(cons, 0, NULL, exc);
+      if (*exc) return Handle<JSObject>::null();
+      result = *value;
+    }
+    ASSERT(!*exc);
+    return Handle<JSObject>(JSObject::cast(result));
+  } else {
+    Object** args[1] = { Handle<Object>::cast(data).location() };
+    Handle<Object> result =
+        Call(Top::instantiate_fun(), Top::builtins(), 1, args, exc);
+    if (*exc) return Handle<JSObject>::null();
+    return Handle<JSObject>::cast(result);
+  }
+}
+
+
+void Execution::ConfigureInstance(Handle<Object> instance,
+                                  Handle<Object> instance_template,
+                                  bool* exc) {
+  Object** args[2] = { instance.location(), instance_template.location() };
+  Execution::Call(Top::configure_instance_fun(), Top::builtins(), 2, args, exc);
+}
+
+
+Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
+                                            Handle<JSFunction> fun,
+                                            Handle<Object> pos,
+                                            Handle<Object> is_global) {
+  const int argc = 4;
+  Object** args[argc] = { recv.location(),
+                          Handle<Object>::cast(fun).location(),
+                          pos.location(),
+                          is_global.location() };
+  bool caught_exception = false;
+  Handle<Object> result = TryCall(Top::get_stack_trace_line_fun(),
+                                  Top::builtins(), argc, args,
+                                  &caught_exception);
+  if (caught_exception || !result->IsString()) return Factory::empty_symbol();
+  return Handle<String>::cast(result);
+}
+
+
+static Object* RuntimePreempt() {
+  // Clear the preempt request flag.
+  StackGuard::Continue(PREEMPT);
+
+  ContextSwitcher::PreemptionReceived();
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  if (Debug::InDebugger()) {
+    // If currently in the debugger, don't do any actual preemption but record
+    // that preemption occurred while in the debugger.
+    Debug::PreemptionWhileInDebugger();
+  } else {
+    // Perform preemption.
+    v8::Unlocker unlocker;
+    Thread::YieldCPU();
+  }
+#else
+  // Perform preemption.
+  v8::Unlocker unlocker;
+  Thread::YieldCPU();
+#endif
+
+  return Heap::undefined_value();
+}
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+Object* Execution::DebugBreakHelper() {
+  // Just continue if breaks are disabled.
+  if (Debug::disable_break()) {
+    return Heap::undefined_value();
+  }
+
+  {
+    JavaScriptFrameIterator it;
+    ASSERT(!it.done());
+    Object* fun = it.frame()->function();
+    if (fun && fun->IsJSFunction()) {
+      // Don't stop in builtin functions.
+      if (JSFunction::cast(fun)->IsBuiltin()) {
+        return Heap::undefined_value();
+      }
+      GlobalObject* global = JSFunction::cast(fun)->context()->global();
+      // Don't stop in debugger functions.
+      if (Debug::IsDebugGlobal(global)) {
+        return Heap::undefined_value();
+      }
+    }
+  }
+
+  // Collect the break state before clearing the flags.
+  bool debug_command_only =
+      StackGuard::IsDebugCommand() && !StackGuard::IsDebugBreak();
+
+  // Clear the debug request flags.
+  StackGuard::Continue(DEBUGBREAK);
+  StackGuard::Continue(DEBUGCOMMAND);
+
+  HandleScope scope;
+  // Enter the debugger. Just continue if we fail to enter the debugger.
+  EnterDebugger debugger;
+  if (debugger.FailedToEnter()) {
+    return Heap::undefined_value();
+  }
+
+  // Notify the debug event listeners. Indicate auto continue if the break was
+  // a debug command break.
+  Debugger::OnDebugBreak(Factory::undefined_value(), debug_command_only);
+
+  // Return to continue execution.
+  return Heap::undefined_value();
+}
+#endif
+
+Object* Execution::HandleStackGuardInterrupt() {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  if (StackGuard::IsDebugBreak() || StackGuard::IsDebugCommand()) {
+    DebugBreakHelper();
+  }
+#endif
+  if (StackGuard::IsPreempted()) RuntimePreempt();
+  if (StackGuard::IsTerminateExecution()) {
+    StackGuard::Continue(TERMINATE);
+    return Top::TerminateExecution();
+  }
+  if (StackGuard::IsInterrupted()) {
+    // interrupt
+    StackGuard::Continue(INTERRUPT);
+    return Top::StackOverflow();
+  }
+  return Heap::undefined_value();
+}
+
+// --- G C   E x t e n s i o n ---
+
+const char* GCExtension::kSource = "native function gc();";
+
+
+v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
+    v8::Handle<v8::String> str) {
+  return v8::FunctionTemplate::New(GCExtension::GC);
+}
+
+
+v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
+  // All allocation spaces other than NEW_SPACE have the same effect.
+  Heap::CollectAllGarbage(false);
+  return v8::Undefined();
+}
+
+
+static GCExtension kGCExtension;
+v8::DeclareExtension kGCExtensionDeclaration(&kGCExtension);
+
+} }  // namespace v8::internal
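
The GCExtension defined above only registers the source "native function gc();" and its native callback; an embedder still has to request the extension per context. A minimal sketch of that embedder side, assuming the public v8::ExtensionConfiguration and v8::Context::New API of this V8 vintage (the surrounding names are not part of this patch):

  const char* names[] = { "v8/gc" };            // must match the Extension name
  v8::ExtensionConfiguration extensions(1, names);
  v8::Persistent<v8::Context> context = v8::Context::New(&extensions);
  // Scripts compiled in 'context' can now call gc() to force a collection.
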
diff --git a/src/execution.h b/src/execution.h
new file mode 100644
index 0000000..55307f7
--- /dev/null
+++ b/src/execution.h
@@ -0,0 +1,296 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXECUTION_H_
+#define V8_EXECUTION_H_
+
+namespace v8 {
+namespace internal {
+
+
+// Flag used to set the interrupt causes.
+enum InterruptFlag {
+  INTERRUPT = 1 << 0,
+  DEBUGBREAK = 1 << 1,
+  DEBUGCOMMAND = 1 << 2,
+  PREEMPT = 1 << 3,
+  TERMINATE = 1 << 4
+};
+
+class Execution : public AllStatic {
+ public:
+  // Call a function; the caller supplies a receiver and an array
+  // of arguments. Arguments are of Object* type. After the function returns,
+  // pointers in 'args' might be invalid.
+  //
+  // *pending_exception tells whether the invoke resulted in
+  // a pending exception.
+  //
+  static Handle<Object> Call(Handle<JSFunction> func,
+                             Handle<Object> receiver,
+                             int argc,
+                             Object*** args,
+                             bool* pending_exception);
+
+  // Construct an object from a function; the caller supplies an array of
+  // arguments. Arguments are of Object* type. After the function returns,
+  // pointers in 'args' might be invalid.
+  //
+  // *pending_exception tells whether the invoke resulted in
+  // a pending exception.
+  //
+  static Handle<Object> New(Handle<JSFunction> func,
+                            int argc,
+                            Object*** args,
+                            bool* pending_exception);
+
+  // Call a function, just like Call(), but make sure to silently catch
+  // any thrown exceptions. The return value is either the result of
+  // calling the function (if caught exception is false) or the exception
+  // that occurred (if caught exception is true).
+  static Handle<Object> TryCall(Handle<JSFunction> func,
+                                Handle<Object> receiver,
+                                int argc,
+                                Object*** args,
+                                bool* caught_exception);
+
+  // ECMA-262 9.2
+  static Handle<Object> ToBoolean(Handle<Object> obj);
+
+  // ECMA-262 9.3
+  static Handle<Object> ToNumber(Handle<Object> obj, bool* exc);
+
+  // ECMA-262 9.4
+  static Handle<Object> ToInteger(Handle<Object> obj, bool* exc);
+
+  // ECMA-262 9.5
+  static Handle<Object> ToInt32(Handle<Object> obj, bool* exc);
+
+  // ECMA-262 9.6
+  static Handle<Object> ToUint32(Handle<Object> obj, bool* exc);
+
+  // ECMA-262 9.8
+  static Handle<Object> ToString(Handle<Object> obj, bool* exc);
+
+  // ECMA-262 9.8
+  static Handle<Object> ToDetailString(Handle<Object> obj, bool* exc);
+
+  // ECMA-262 9.9
+  static Handle<Object> ToObject(Handle<Object> obj, bool* exc);
+
+  // Create a new date object from 'time'.
+  static Handle<Object> NewDate(double time, bool* exc);
+
+  // Used to implement [] notation on strings (calls JS code)
+  static Handle<Object> CharAt(Handle<String> str, uint32_t index);
+
+  static Handle<Object> GetFunctionFor();
+  static Handle<JSFunction> InstantiateFunction(
+      Handle<FunctionTemplateInfo> data, bool* exc);
+  static Handle<JSObject> InstantiateObject(Handle<ObjectTemplateInfo> data,
+                                            bool* exc);
+  static void ConfigureInstance(Handle<Object> instance,
+                                Handle<Object> data,
+                                bool* exc);
+  static Handle<String> GetStackTraceLine(Handle<Object> recv,
+                                          Handle<JSFunction> fun,
+                                          Handle<Object> pos,
+                                          Handle<Object> is_global);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  static Object* DebugBreakHelper();
+#endif
+
+  // If the stack guard is triggered, but it is not an actual
+  // stack overflow, then handle the interruption accordingly.
+  static Object* HandleStackGuardInterrupt();
+
+  // Get a function delegate (or undefined) for the given non-function
+  // object. Used to support calling objects as functions.
+  static Handle<Object> GetFunctionDelegate(Handle<Object> object);
+
+  // Get a constructor delegate (or undefined) for the given non-function
+  // object. Used to support calling objects as constructors.
+  static Handle<Object> GetConstructorDelegate(Handle<Object> object);
+};
+
+
+class ExecutionAccess;
+
+
+// StackGuard contains the handling of the limits that are used to limit the
+// number of nested invocations of JavaScript and the stack size used in each
+// invocation.
+class StackGuard : public AllStatic {
+ public:
+  // Pass the address beyond which the stack should not grow.  The stack
+  // is assumed to grow downwards.
+  static void SetStackLimit(uintptr_t limit);
+
+  static Address address_of_jslimit() {
+    return reinterpret_cast<Address>(&thread_local_.jslimit_);
+  }
+
+  // Threading support.
+  static char* ArchiveStackGuard(char* to);
+  static char* RestoreStackGuard(char* from);
+  static int ArchiveSpacePerThread();
+  static void FreeThreadResources();
+  // Sets up the default stack guard for this thread if it has not
+  // already been set up.
+  static void InitThread(const ExecutionAccess& lock);
+  // Clears the stack guard for this thread so it does not look as if
+  // it has been set up.
+  static void ClearThread(const ExecutionAccess& lock);
+
+  static bool IsStackOverflow();
+  static bool IsPreempted();
+  static void Preempt();
+  static bool IsInterrupted();
+  static void Interrupt();
+  static bool IsTerminateExecution();
+  static void TerminateExecution();
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  static bool IsDebugBreak();
+  static void DebugBreak();
+  static bool IsDebugCommand();
+  static void DebugCommand();
+#endif
+  static void Continue(InterruptFlag after_what);
+
+  // This provides an asynchronous read of the stack limit for the current
+  // thread.  There are no locks protecting this, but it is assumed that you
+  // have the global V8 lock if you are using multiple V8 threads.
+  static uintptr_t climit() {
+    return thread_local_.climit_;
+  }
+
+  static uintptr_t jslimit() {
+    return thread_local_.jslimit_;
+  }
+
+ private:
+  // You should hold the ExecutionAccess lock when calling this method.
+  static bool IsSet(const ExecutionAccess& lock);
+
+  // You should hold the ExecutionAccess lock when calling this method.
+  static void set_limits(uintptr_t value, const ExecutionAccess& lock) {
+    Heap::SetStackLimit(value);
+    thread_local_.jslimit_ = value;
+    thread_local_.climit_ = value;
+  }
+
+  // Reset limits to initial values, for example after handling an interrupt.
+  // You should hold the ExecutionAccess lock when calling this method.
+  static void reset_limits(const ExecutionAccess& lock) {
+    thread_local_.jslimit_ = thread_local_.initial_jslimit_;
+    Heap::SetStackLimit(thread_local_.jslimit_);
+    thread_local_.climit_ = thread_local_.initial_climit_;
+  }
+
+  // Enable or disable interrupts.
+  static void EnableInterrupts();
+  static void DisableInterrupts();
+
+  static const uintptr_t kLimitSize = kPointerSize * 128 * KB;
+#ifdef V8_TARGET_ARCH_X64
+  static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
+  static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8);
+#else
+  static const uintptr_t kInterruptLimit = 0xfffffffe;
+  static const uintptr_t kIllegalLimit = 0xfffffff8;
+#endif
+
+  class ThreadLocal {
+   public:
+    ThreadLocal() { Clear(); }
+    // You should hold the ExecutionAccess lock when you call Initialize or
+    // Clear.
+    void Initialize();
+    void Clear();
+    uintptr_t initial_jslimit_;
+    uintptr_t jslimit_;
+    uintptr_t initial_climit_;
+    uintptr_t climit_;
+    int nesting_;
+    int postpone_interrupts_nesting_;
+    int interrupt_flags_;
+  };
+
+  static ThreadLocal thread_local_;
+
+  friend class StackLimitCheck;
+  friend class PostponeInterruptsScope;
+};
+
+
+// Support for checking for stack-overflows in C++ code.
+class StackLimitCheck BASE_EMBEDDED {
+ public:
+  bool HasOverflowed() const {
+    // The stack has overflowed in C++ code only if the stack pointer exceeds
+    // the C++ stack guard and the limits are not set to interrupt values.
+    // TODO(214): Stack overflows are ignored if an interrupt is pending. This
+    // code should probably always use the initial C++ limit.
+    return (reinterpret_cast<uintptr_t>(this) < StackGuard::climit()) &&
+           StackGuard::IsStackOverflow();
+  }
+};
+
+
+// Support for temporarily postponing interrupts. When the outermost
+// postpone scope is left the interrupts will be re-enabled and any
+// interrupts that occurred while in the scope will be taken into
+// account.
+class PostponeInterruptsScope BASE_EMBEDDED {
+ public:
+  PostponeInterruptsScope() {
+    StackGuard::thread_local_.postpone_interrupts_nesting_++;
+    StackGuard::DisableInterrupts();
+  }
+
+  ~PostponeInterruptsScope() {
+    if (--StackGuard::thread_local_.postpone_interrupts_nesting_ == 0) {
+      StackGuard::EnableInterrupts();
+    }
+  }
+};
+
+
+class GCExtension : public v8::Extension {
+ public:
+  GCExtension() : v8::Extension("v8/gc", kSource) {}
+  virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+      v8::Handle<v8::String> name);
+  static v8::Handle<v8::Value> GC(const v8::Arguments& args);
+ private:
+  static const char* kSource;
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_EXECUTION_H_
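
The two stack-allocated helpers at the end of this header are intended to be used together from C++ runtime code; a hypothetical caller inside v8::internal (not taken from this patch) might combine them as follows:

  static Object* SomeRecursiveRuntimeHelper() {
    StackLimitCheck check;
    if (check.HasOverflowed()) {
      return Top::StackOverflow();     // bail out before exhausting the C++ stack
    }
    PostponeInterruptsScope postpone;  // interrupts are deferred until scope exit
    // ... work that must not be interrupted goes here ...
    return Heap::undefined_value();
  }
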
diff --git a/src/factory.cc b/src/factory.cc
new file mode 100644
index 0000000..622055c
--- /dev/null
+++ b/src/factory.cc
@@ -0,0 +1,943 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "api.h"
+#include "debug.h"
+#include "execution.h"
+#include "factory.h"
+#include "macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+
+Handle<FixedArray> Factory::NewFixedArray(int size, PretenureFlag pretenure) {
+  ASSERT(0 <= size);
+  CALL_HEAP_FUNCTION(Heap::AllocateFixedArray(size, pretenure), FixedArray);
+}
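+
+// Note on the pattern used throughout this file: CALL_HEAP_FUNCTION (defined
+// elsewhere in the V8 sources) performs the raw Heap:: allocation, roughly
+// retrying it after a garbage collection when it fails, and returns the
+// result wrapped in a Handle of the named type.  Illustrative caller:
+//
+//   Handle<FixedArray> array = Factory::NewFixedArray(10);
+//   array->set(0, Smi::FromInt(42));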
+
+
+Handle<FixedArray> Factory::NewFixedArrayWithHoles(int size) {
+  ASSERT(0 <= size);
+  CALL_HEAP_FUNCTION(Heap::AllocateFixedArrayWithHoles(size), FixedArray);
+}
+
+
+Handle<StringDictionary> Factory::NewStringDictionary(int at_least_space_for) {
+  ASSERT(0 <= at_least_space_for);
+  CALL_HEAP_FUNCTION(StringDictionary::Allocate(at_least_space_for),
+                     StringDictionary);
+}
+
+
+Handle<NumberDictionary> Factory::NewNumberDictionary(int at_least_space_for) {
+  ASSERT(0 <= at_least_space_for);
+  CALL_HEAP_FUNCTION(NumberDictionary::Allocate(at_least_space_for),
+                     NumberDictionary);
+}
+
+
+Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors) {
+  ASSERT(0 <= number_of_descriptors);
+  CALL_HEAP_FUNCTION(DescriptorArray::Allocate(number_of_descriptors),
+                     DescriptorArray);
+}
+
+
+// Symbols are created in the old generation (data space).
+Handle<String> Factory::LookupSymbol(Vector<const char> string) {
+  CALL_HEAP_FUNCTION(Heap::LookupSymbol(string), String);
+}
+
+
+Handle<String> Factory::NewStringFromAscii(Vector<const char> string,
+                                           PretenureFlag pretenure) {
+  CALL_HEAP_FUNCTION(Heap::AllocateStringFromAscii(string, pretenure), String);
+}
+
+Handle<String> Factory::NewStringFromUtf8(Vector<const char> string,
+                                          PretenureFlag pretenure) {
+  CALL_HEAP_FUNCTION(Heap::AllocateStringFromUtf8(string, pretenure), String);
+}
+
+
+Handle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string,
+                                             PretenureFlag pretenure) {
+  CALL_HEAP_FUNCTION(Heap::AllocateStringFromTwoByte(string, pretenure),
+                     String);
+}
+
+
+Handle<String> Factory::NewRawTwoByteString(int length,
+                                            PretenureFlag pretenure) {
+  CALL_HEAP_FUNCTION(Heap::AllocateRawTwoByteString(length, pretenure), String);
+}
+
+
+Handle<String> Factory::NewConsString(Handle<String> first,
+                                      Handle<String> second) {
+  CALL_HEAP_FUNCTION(Heap::AllocateConsString(*first, *second), String);
+}
+
+
+Handle<String> Factory::NewStringSlice(Handle<String> str,
+                                       int begin,
+                                       int end) {
+  CALL_HEAP_FUNCTION(str->Slice(begin, end), String);
+}
+
+
+Handle<String> Factory::NewExternalStringFromAscii(
+    ExternalAsciiString::Resource* resource) {
+  CALL_HEAP_FUNCTION(Heap::AllocateExternalStringFromAscii(resource), String);
+}
+
+
+Handle<String> Factory::NewExternalStringFromTwoByte(
+    ExternalTwoByteString::Resource* resource) {
+  CALL_HEAP_FUNCTION(Heap::AllocateExternalStringFromTwoByte(resource), String);
+}
+
+
+Handle<Context> Factory::NewGlobalContext() {
+  CALL_HEAP_FUNCTION(Heap::AllocateGlobalContext(), Context);
+}
+
+
+Handle<Context> Factory::NewFunctionContext(int length,
+                                            Handle<JSFunction> closure) {
+  CALL_HEAP_FUNCTION(Heap::AllocateFunctionContext(length, *closure), Context);
+}
+
+
+Handle<Context> Factory::NewWithContext(Handle<Context> previous,
+                                        Handle<JSObject> extension,
+                                        bool is_catch_context) {
+  CALL_HEAP_FUNCTION(Heap::AllocateWithContext(*previous,
+                                               *extension,
+                                               is_catch_context),
+                     Context);
+}
+
+
+Handle<Struct> Factory::NewStruct(InstanceType type) {
+  CALL_HEAP_FUNCTION(Heap::AllocateStruct(type), Struct);
+}
+
+
+Handle<AccessorInfo> Factory::NewAccessorInfo() {
+  Handle<AccessorInfo> info =
+      Handle<AccessorInfo>::cast(NewStruct(ACCESSOR_INFO_TYPE));
+  info->set_flag(0);  // Must clear the flag, it was initialized as undefined.
+  return info;
+}
+
+
+Handle<Script> Factory::NewScript(Handle<String> source) {
+  // Generate id for this script.
+  int id;
+  if (Heap::last_script_id()->IsUndefined()) {
+    // Script ids start from one.
+    id = 1;
+  } else {
+    // Increment id, wrap when positive smi is exhausted.
+    id = Smi::cast(Heap::last_script_id())->value();
+    id++;
+    if (!Smi::IsValid(id)) {
+      id = 0;
+    }
+  }
+  Heap::SetLastScriptId(Smi::FromInt(id));
+
+  // Create and initialize script object.
+  Handle<Proxy> wrapper = Factory::NewProxy(0, TENURED);
+  Handle<Script> script = Handle<Script>::cast(NewStruct(SCRIPT_TYPE));
+  script->set_source(*source);
+  script->set_name(Heap::undefined_value());
+  script->set_id(Heap::last_script_id());
+  script->set_line_offset(Smi::FromInt(0));
+  script->set_column_offset(Smi::FromInt(0));
+  script->set_data(Heap::undefined_value());
+  script->set_context_data(Heap::undefined_value());
+  script->set_type(Smi::FromInt(Script::TYPE_NORMAL));
+  script->set_compilation_type(Smi::FromInt(Script::COMPILATION_TYPE_HOST));
+  script->set_wrapper(*wrapper);
+  script->set_line_ends(Heap::undefined_value());
+  script->set_eval_from_function(Heap::undefined_value());
+  script->set_eval_from_instructions_offset(Smi::FromInt(0));
+
+  return script;
+}
+
+
+Handle<Proxy> Factory::NewProxy(Address addr, PretenureFlag pretenure) {
+  CALL_HEAP_FUNCTION(Heap::AllocateProxy(addr, pretenure), Proxy);
+}
+
+
+Handle<Proxy> Factory::NewProxy(const AccessorDescriptor* desc) {
+  return NewProxy((Address) desc, TENURED);
+}
+
+
+Handle<ByteArray> Factory::NewByteArray(int length, PretenureFlag pretenure) {
+  ASSERT(0 <= length);
+  CALL_HEAP_FUNCTION(Heap::AllocateByteArray(length, pretenure), ByteArray);
+}
+
+
+Handle<PixelArray> Factory::NewPixelArray(int length,
+                                          uint8_t* external_pointer,
+                                          PretenureFlag pretenure) {
+  ASSERT(0 <= length);
+  CALL_HEAP_FUNCTION(Heap::AllocatePixelArray(length,
+                                              external_pointer,
+                                              pretenure), PixelArray);
+}
+
+
+Handle<Map> Factory::NewMap(InstanceType type, int instance_size) {
+  CALL_HEAP_FUNCTION(Heap::AllocateMap(type, instance_size), Map);
+}
+
+
+Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
+  CALL_HEAP_FUNCTION(Heap::AllocateFunctionPrototype(*function), JSObject);
+}
+
+
+Handle<Map> Factory::CopyMapDropDescriptors(Handle<Map> src) {
+  CALL_HEAP_FUNCTION(src->CopyDropDescriptors(), Map);
+}
+
+
+Handle<Map> Factory::CopyMap(Handle<Map> src,
+                             int extra_inobject_properties) {
+  Handle<Map> copy = CopyMapDropDescriptors(src);
+  // Check that we do not overflow the instance size when adding the
+  // extra inobject properties.
+  int instance_size_delta = extra_inobject_properties * kPointerSize;
+  int max_instance_size_delta =
+      JSObject::kMaxInstanceSize - copy->instance_size();
+  if (instance_size_delta > max_instance_size_delta) {
+    // If the instance size overflows, we allocate as many properties
+    // as we can as inobject properties.
+    instance_size_delta = max_instance_size_delta;
+    extra_inobject_properties = max_instance_size_delta >> kPointerSizeLog2;
+  }
+  // Adjust the map with the extra inobject properties.
+  int inobject_properties =
+      copy->inobject_properties() + extra_inobject_properties;
+  copy->set_inobject_properties(inobject_properties);
+  copy->set_unused_property_fields(inobject_properties);
+  copy->set_instance_size(copy->instance_size() + instance_size_delta);
+  return copy;
+}
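+
+// Worked example (assuming 32-bit pointers, kPointerSize == 4 and
+// kPointerSizeLog2 == 2): asking for 8 extra inobject properties requests a
+// 32 byte delta; if only 20 bytes remain below JSObject::kMaxInstanceSize,
+// the delta is capped at 20 and the extra property count becomes 20 >> 2 == 5.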
+
+Handle<Map> Factory::CopyMapDropTransitions(Handle<Map> src) {
+  CALL_HEAP_FUNCTION(src->CopyDropTransitions(), Map);
+}
+
+
+Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
+  CALL_HEAP_FUNCTION(array->Copy(), FixedArray);
+}
+
+
+Handle<JSFunction> Factory::BaseNewFunctionFromBoilerplate(
+    Handle<JSFunction> boilerplate,
+    Handle<Map> function_map) {
+  ASSERT(boilerplate->IsBoilerplate());
+  ASSERT(!boilerplate->has_initial_map());
+  ASSERT(!boilerplate->has_prototype());
+  ASSERT(boilerplate->properties() == Heap::empty_fixed_array());
+  ASSERT(boilerplate->elements() == Heap::empty_fixed_array());
+  CALL_HEAP_FUNCTION(Heap::AllocateFunction(*function_map,
+                                            boilerplate->shared(),
+                                            Heap::the_hole_value()),
+                     JSFunction);
+}
+
+
+Handle<JSFunction> Factory::NewFunctionFromBoilerplate(
+    Handle<JSFunction> boilerplate,
+    Handle<Context> context) {
+  Handle<JSFunction> result =
+      BaseNewFunctionFromBoilerplate(boilerplate, Top::function_map());
+  result->set_context(*context);
+  int number_of_literals = boilerplate->NumberOfLiterals();
+  Handle<FixedArray> literals =
+      Factory::NewFixedArray(number_of_literals, TENURED);
+  if (number_of_literals > 0) {
+    // Store the object, regexp and array functions in the literals
+    // array prefix.  These functions will be used when creating
+    // object, regexp and array literals in this function.
+    literals->set(JSFunction::kLiteralGlobalContextIndex,
+                  context->global_context());
+  }
+  result->set_literals(*literals);
+  ASSERT(!result->IsBoilerplate());
+  return result;
+}
+
+
+Handle<Object> Factory::NewNumber(double value,
+                                  PretenureFlag pretenure) {
+  CALL_HEAP_FUNCTION(Heap::NumberFromDouble(value, pretenure), Object);
+}
+
+
+Handle<Object> Factory::NewNumberFromInt(int value) {
+  CALL_HEAP_FUNCTION(Heap::NumberFromInt32(value), Object);
+}
+
+
+Handle<Object> Factory::NewNumberFromUint(uint32_t value) {
+  CALL_HEAP_FUNCTION(Heap::NumberFromUint32(value), Object);
+}
+
+
+Handle<JSObject> Factory::NewNeanderObject() {
+  CALL_HEAP_FUNCTION(Heap::AllocateJSObjectFromMap(Heap::neander_map()),
+                     JSObject);
+}
+
+
+Handle<Object> Factory::NewTypeError(const char* type,
+                                     Vector< Handle<Object> > args) {
+  return NewError("MakeTypeError", type, args);
+}
+
+
+Handle<Object> Factory::NewTypeError(Handle<String> message) {
+  return NewError("$TypeError", message);
+}
+
+
+Handle<Object> Factory::NewRangeError(const char* type,
+                                      Vector< Handle<Object> > args) {
+  return NewError("MakeRangeError", type, args);
+}
+
+
+Handle<Object> Factory::NewRangeError(Handle<String> message) {
+  return NewError("$RangeError", message);
+}
+
+
+Handle<Object> Factory::NewSyntaxError(const char* type, Handle<JSArray> args) {
+  return NewError("MakeSyntaxError", type, args);
+}
+
+
+Handle<Object> Factory::NewSyntaxError(Handle<String> message) {
+  return NewError("$SyntaxError", message);
+}
+
+
+Handle<Object> Factory::NewReferenceError(const char* type,
+                                          Vector< Handle<Object> > args) {
+  return NewError("MakeReferenceError", type, args);
+}
+
+
+Handle<Object> Factory::NewReferenceError(Handle<String> message) {
+  return NewError("$ReferenceError", message);
+}
+
+
+Handle<Object> Factory::NewError(const char* maker, const char* type,
+    Vector< Handle<Object> > args) {
+  v8::HandleScope scope;  // Instantiate a closeable HandleScope for EscapeFrom.
+  Handle<FixedArray> array = Factory::NewFixedArray(args.length());
+  for (int i = 0; i < args.length(); i++) {
+    array->set(i, *args[i]);
+  }
+  Handle<JSArray> object = Factory::NewJSArrayWithElements(array);
+  Handle<Object> result = NewError(maker, type, object);
+  return result.EscapeFrom(&scope);
+}
+
+
+Handle<Object> Factory::NewEvalError(const char* type,
+                                     Vector< Handle<Object> > args) {
+  return NewError("MakeEvalError", type, args);
+}
+
+
+Handle<Object> Factory::NewError(const char* type,
+                                 Vector< Handle<Object> > args) {
+  return NewError("MakeError", type, args);
+}
+
+
+Handle<Object> Factory::NewError(const char* maker,
+                                 const char* type,
+                                 Handle<JSArray> args) {
+  Handle<String> make_str = Factory::LookupAsciiSymbol(maker);
+  Handle<Object> fun_obj(Top::builtins()->GetProperty(*make_str));
+  // If the builtins haven't been properly configured yet this error
+  // constructor may not have been defined.  Bail out.
+  if (!fun_obj->IsJSFunction())
+    return Factory::undefined_value();
+  Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
+  Handle<Object> type_obj = Factory::LookupAsciiSymbol(type);
+  Object** argv[2] = { type_obj.location(),
+                       Handle<Object>::cast(args).location() };
+
+  // Invoke the JavaScript factory method. If an exception is thrown while
+  // running the factory method, use the exception as the result.
+  bool caught_exception;
+  Handle<Object> result = Execution::TryCall(fun,
+                                             Top::builtins(),
+                                             2,
+                                             argv,
+                                             &caught_exception);
+  return result;
+}
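+
+// For example, NewTypeError(type, args) above reaches this point with
+// maker == "MakeTypeError": the maker is looked up on the builtins object
+// and invoked as a JavaScript function via Execution::TryCall, so the error
+// object itself is constructed by JavaScript code.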
+
+
+Handle<Object> Factory::NewError(Handle<String> message) {
+  return NewError("$Error", message);
+}
+
+
+Handle<Object> Factory::NewError(const char* constructor,
+                                 Handle<String> message) {
+  Handle<String> constr = Factory::LookupAsciiSymbol(constructor);
+  Handle<JSFunction> fun =
+      Handle<JSFunction>(
+          JSFunction::cast(
+              Top::builtins()->GetProperty(*constr)));
+  Object** argv[1] = { Handle<Object>::cast(message).location() };
+
+  // Invoke the JavaScript factory method. If an exception is thrown while
+  // running the factory method, use the exception as the result.
+  bool caught_exception;
+  Handle<Object> result = Execution::TryCall(fun,
+                                             Top::builtins(),
+                                             1,
+                                             argv,
+                                             &caught_exception);
+  return result;
+}
+
+
+Handle<JSFunction> Factory::NewFunction(Handle<String> name,
+                                        InstanceType type,
+                                        int instance_size,
+                                        Handle<Code> code,
+                                        bool force_initial_map) {
+  // Allocate the function
+  Handle<JSFunction> function = NewFunction(name, the_hole_value());
+  function->set_code(*code);
+
+  if (force_initial_map ||
+      type != JS_OBJECT_TYPE ||
+      instance_size != JSObject::kHeaderSize) {
+    Handle<Map> initial_map = NewMap(type, instance_size);
+    Handle<JSObject> prototype = NewFunctionPrototype(function);
+    initial_map->set_prototype(*prototype);
+    function->set_initial_map(*initial_map);
+    initial_map->set_constructor(*function);
+  } else {
+    ASSERT(!function->has_initial_map());
+    ASSERT(!function->has_prototype());
+  }
+
+  return function;
+}
+
+
+Handle<JSFunction> Factory::NewFunctionBoilerplate(Handle<String> name,
+                                                   int number_of_literals,
+                                                   bool contains_array_literal,
+                                                   Handle<Code> code) {
+  Handle<JSFunction> function = NewFunctionBoilerplate(name);
+  function->set_code(*code);
+  int literals_array_size = number_of_literals;
+  // If the function contains object, regexp or array literals,
+  // allocate extra space for a literals array prefix containing the
+  // object, regexp and array constructor functions.
+  if (number_of_literals > 0 || contains_array_literal) {
+    literals_array_size += JSFunction::kLiteralsPrefixSize;
+  }
+  Handle<FixedArray> literals =
+      Factory::NewFixedArray(literals_array_size, TENURED);
+  function->set_literals(*literals);
+  ASSERT(!function->has_initial_map());
+  ASSERT(!function->has_prototype());
+  return function;
+}
+
+
+Handle<JSFunction> Factory::NewFunctionBoilerplate(Handle<String> name) {
+  Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name);
+  CALL_HEAP_FUNCTION(Heap::AllocateFunction(Heap::boilerplate_function_map(),
+                                            *shared,
+                                            Heap::the_hole_value()),
+                     JSFunction);
+}
+
+
+Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
+                                                     InstanceType type,
+                                                     int instance_size,
+                                                     Handle<JSObject> prototype,
+                                                     Handle<Code> code,
+                                                     bool force_initial_map) {
+  // Allocate the function
+  Handle<JSFunction> function = NewFunction(name, prototype);
+
+  function->set_code(*code);
+
+  if (force_initial_map ||
+      type != JS_OBJECT_TYPE ||
+      instance_size != JSObject::kHeaderSize) {
+    Handle<Map> initial_map = NewMap(type, instance_size);
+    function->set_initial_map(*initial_map);
+    initial_map->set_constructor(*function);
+  }
+
+  // Set function.prototype and give the prototype a constructor
+  // property that refers to the function.
+  SetPrototypeProperty(function, prototype);
+  SetProperty(prototype, Factory::constructor_symbol(), function, DONT_ENUM);
+  return function;
+}
+
+
+Handle<Code> Factory::NewCode(const CodeDesc& desc,
+                              ZoneScopeInfo* sinfo,
+                              Code::Flags flags,
+                              Handle<Object> self_ref) {
+  CALL_HEAP_FUNCTION(Heap::CreateCode(desc, sinfo, flags, self_ref), Code);
+}
+
+
+Handle<Code> Factory::CopyCode(Handle<Code> code) {
+  CALL_HEAP_FUNCTION(Heap::CopyCode(*code), Code);
+}
+
+
+static inline Object* DoCopyInsert(DescriptorArray* array,
+                                   String* key,
+                                   Object* value,
+                                   PropertyAttributes attributes) {
+  CallbacksDescriptor desc(key, value, attributes);
+  Object* obj = array->CopyInsert(&desc, REMOVE_TRANSITIONS);
+  return obj;
+}
+
+
+// Copy the descriptor array, appending a callbacks descriptor for the proxy.
+Handle<DescriptorArray> Factory::CopyAppendProxyDescriptor(
+    Handle<DescriptorArray> array,
+    Handle<String> key,
+    Handle<Object> value,
+    PropertyAttributes attributes) {
+  CALL_HEAP_FUNCTION(DoCopyInsert(*array, *key, *value, attributes),
+                     DescriptorArray);
+}
+
+
+Handle<String> Factory::SymbolFromString(Handle<String> value) {
+  CALL_HEAP_FUNCTION(Heap::LookupSymbol(*value), String);
+}
+
+
+Handle<DescriptorArray> Factory::CopyAppendCallbackDescriptors(
+    Handle<DescriptorArray> array,
+    Handle<Object> descriptors) {
+  v8::NeanderArray callbacks(descriptors);
+  int nof_callbacks = callbacks.length();
+  Handle<DescriptorArray> result =
+      NewDescriptorArray(array->number_of_descriptors() + nof_callbacks);
+
+  // Number of descriptors added to the result so far.
+  int descriptor_count = 0;
+
+  // Copy the descriptors from the array.
+  for (int i = 0; i < array->number_of_descriptors(); i++) {
+    if (array->GetType(i) != NULL_DESCRIPTOR) {
+      result->CopyFrom(descriptor_count++, *array, i);
+    }
+  }
+
+  // Number of duplicates detected.
+  int duplicates = 0;
+
+  // Fill in new callback descriptors.  Process the callbacks from
+  // back to front so that the last callback with a given name takes
+  // precedence over previously added callbacks with that name.
+  for (int i = nof_callbacks - 1; i >= 0; i--) {
+    Handle<AccessorInfo> entry =
+        Handle<AccessorInfo>(AccessorInfo::cast(callbacks.get(i)));
+    // Ensure the key is a symbol before writing into the instance descriptor.
+    Handle<String> key =
+        SymbolFromString(Handle<String>(String::cast(entry->name())));
+    // Check if a descriptor with this name already exists before writing.
+    if (result->LinearSearch(*key, descriptor_count) ==
+        DescriptorArray::kNotFound) {
+      CallbacksDescriptor desc(*key, *entry, entry->property_attributes());
+      result->Set(descriptor_count, &desc);
+      descriptor_count++;
+    } else {
+      duplicates++;
+    }
+  }
+
+  // If duplicates were detected, allocate a result of the right size
+  // and transfer the elements.
+  if (duplicates > 0) {
+    int number_of_descriptors = result->number_of_descriptors() - duplicates;
+    Handle<DescriptorArray> new_result =
+        NewDescriptorArray(number_of_descriptors);
+    for (int i = 0; i < number_of_descriptors; i++) {
+      new_result->CopyFrom(i, *result, i);
+    }
+    result = new_result;
+  }
+
+  // Sort the result before returning.
+  result->Sort();
+  return result;
+}
+
+
+Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
+                                      PretenureFlag pretenure) {
+  CALL_HEAP_FUNCTION(Heap::AllocateJSObject(*constructor, pretenure), JSObject);
+}
+
+
+Handle<GlobalObject> Factory::NewGlobalObject(
+    Handle<JSFunction> constructor) {
+  CALL_HEAP_FUNCTION(Heap::AllocateGlobalObject(*constructor),
+                     GlobalObject);
+}
+
+
+Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map) {
+  CALL_HEAP_FUNCTION(Heap::AllocateJSObjectFromMap(*map, NOT_TENURED),
+                     JSObject);
+}
+
+
+Handle<JSArray> Factory::NewJSArray(int length,
+                                    PretenureFlag pretenure) {
+  Handle<JSObject> obj = NewJSObject(Top::array_function(), pretenure);
+  CALL_HEAP_FUNCTION(Handle<JSArray>::cast(obj)->Initialize(length), JSArray);
+}
+
+
+Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArray> elements,
+                                                PretenureFlag pretenure) {
+  Handle<JSArray> result =
+      Handle<JSArray>::cast(NewJSObject(Top::array_function(), pretenure));
+  result->SetContent(*elements);
+  return result;
+}
+
+
+Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(Handle<String> name) {
+  CALL_HEAP_FUNCTION(Heap::AllocateSharedFunctionInfo(*name),
+                     SharedFunctionInfo);
+}
+
+
+Handle<String> Factory::NumberToString(Handle<Object> number) {
+  CALL_HEAP_FUNCTION(Heap::NumberToString(*number), String);
+}
+
+
+Handle<NumberDictionary> Factory::DictionaryAtNumberPut(
+    Handle<NumberDictionary> dictionary,
+    uint32_t key,
+    Handle<Object> value) {
+  CALL_HEAP_FUNCTION(dictionary->AtNumberPut(key, *value), NumberDictionary);
+}
+
+
+Handle<JSFunction> Factory::NewFunctionHelper(Handle<String> name,
+                                              Handle<Object> prototype) {
+  Handle<SharedFunctionInfo> function_share = NewSharedFunctionInfo(name);
+  CALL_HEAP_FUNCTION(Heap::AllocateFunction(*Top::function_map(),
+                                            *function_share,
+                                            *prototype),
+                     JSFunction);
+}
+
+
+Handle<JSFunction> Factory::NewFunction(Handle<String> name,
+                                        Handle<Object> prototype) {
+  Handle<JSFunction> fun = NewFunctionHelper(name, prototype);
+  fun->set_context(Top::context()->global_context());
+  return fun;
+}
+
+
+Handle<Object> Factory::ToObject(Handle<Object> object,
+                                 Handle<Context> global_context) {
+  CALL_HEAP_FUNCTION(object->ToObject(*global_context), Object);
+}
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
+  // Get the original code of the function.
+  Handle<Code> code(shared->code());
+
+  // Create a copy of the code before allocating the debug info object to avoid
+  // allocation while setting up the debug info object.
+  Handle<Code> original_code(*Factory::CopyCode(code));
+
+  // Allocate initial fixed array for active break points before allocating the
+  // debug info object to avoid allocation while setting up the debug info
+  // object.
+  Handle<FixedArray> break_points(
+      Factory::NewFixedArray(Debug::kEstimatedNofBreakPointsInFunction));
+
+  // Create and set up the debug info object. Debug info contains function, a
+  // copy of the original code, the executing code and initial fixed array for
+  // active break points.
+  Handle<DebugInfo> debug_info =
+      Handle<DebugInfo>::cast(Factory::NewStruct(DEBUG_INFO_TYPE));
+  debug_info->set_shared(*shared);
+  debug_info->set_original_code(*original_code);
+  debug_info->set_code(*code);
+  debug_info->set_break_points(*break_points);
+
+  // Link debug info to function.
+  shared->set_debug_info(*debug_info);
+
+  return debug_info;
+}
+#endif
+
+
+Handle<JSObject> Factory::NewArgumentsObject(Handle<Object> callee,
+                                             int length) {
+  CALL_HEAP_FUNCTION(Heap::AllocateArgumentsObject(*callee, length), JSObject);
+}
+
+
+Handle<JSFunction> Factory::CreateApiFunction(
+    Handle<FunctionTemplateInfo> obj, ApiInstanceType instance_type) {
+  Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::HandleApiCall));
+
+  int internal_field_count = 0;
+  if (!obj->instance_template()->IsUndefined()) {
+    Handle<ObjectTemplateInfo> instance_template =
+        Handle<ObjectTemplateInfo>(
+            ObjectTemplateInfo::cast(obj->instance_template()));
+    internal_field_count =
+        Smi::cast(instance_template->internal_field_count())->value();
+  }
+
+  int instance_size = kPointerSize * internal_field_count;
+  InstanceType type = INVALID_TYPE;
+  switch (instance_type) {
+    case JavaScriptObject:
+      type = JS_OBJECT_TYPE;
+      instance_size += JSObject::kHeaderSize;
+      break;
+    case InnerGlobalObject:
+      type = JS_GLOBAL_OBJECT_TYPE;
+      instance_size += JSGlobalObject::kSize;
+      break;
+    case OuterGlobalObject:
+      type = JS_GLOBAL_PROXY_TYPE;
+      instance_size += JSGlobalProxy::kSize;
+      break;
+    default:
+      break;
+  }
+  ASSERT(type != INVALID_TYPE);
+
+  Handle<JSFunction> result =
+      Factory::NewFunction(Factory::empty_symbol(),
+                           type,
+                           instance_size,
+                           code,
+                           true);
+  // Set class name.
+  Handle<Object> class_name = Handle<Object>(obj->class_name());
+  if (class_name->IsString()) {
+    result->shared()->set_instance_class_name(*class_name);
+    result->shared()->set_name(*class_name);
+  }
+
+  Handle<Map> map = Handle<Map>(result->initial_map());
+
+  // Mark as undetectable if needed.
+  if (obj->undetectable()) {
+    map->set_is_undetectable();
+  }
+
+  // Mark as hidden for the __proto__ accessor if needed.
+  if (obj->hidden_prototype()) {
+    map->set_is_hidden_prototype();
+  }
+
+  // Mark as needs_access_check if needed.
+  if (obj->needs_access_check()) {
+    map->set_is_access_check_needed(true);
+  }
+
+  // Set interceptor information in the map.
+  if (!obj->named_property_handler()->IsUndefined()) {
+    map->set_has_named_interceptor();
+  }
+  if (!obj->indexed_property_handler()->IsUndefined()) {
+    map->set_has_indexed_interceptor();
+  }
+
+  // Set instance call-as-function information in the map.
+  if (!obj->instance_call_handler()->IsUndefined()) {
+    map->set_has_instance_call_handler();
+  }
+
+  result->shared()->set_function_data(*obj);
+  result->shared()->DontAdaptArguments();
+
+  // Recursively copy parent templates' accessors; 'obj' is rebound to each
+  // parent template as we walk up the chain.
+  Handle<DescriptorArray> array =
+      Handle<DescriptorArray>(map->instance_descriptors());
+  while (true) {
+    Handle<Object> props = Handle<Object>(obj->property_accessors());
+    if (!props->IsUndefined()) {
+      array = Factory::CopyAppendCallbackDescriptors(array, props);
+    }
+    Handle<Object> parent = Handle<Object>(obj->parent_template());
+    if (parent->IsUndefined()) break;
+    obj = Handle<FunctionTemplateInfo>::cast(parent);
+  }
+  if (!array->IsEmpty()) {
+    map->set_instance_descriptors(*array);
+  }
+
+  return result;
+}
+
+
+Handle<MapCache> Factory::NewMapCache(int at_least_space_for) {
+  CALL_HEAP_FUNCTION(MapCache::Allocate(at_least_space_for), MapCache);
+}
+
+
+static Object* UpdateMapCacheWith(Context* context,
+                                  FixedArray* keys,
+                                  Map* map) {
+  Object* result = MapCache::cast(context->map_cache())->Put(keys, map);
+  if (!result->IsFailure()) context->set_map_cache(MapCache::cast(result));
+  return result;
+}
+
+
+Handle<MapCache> Factory::AddToMapCache(Handle<Context> context,
+                                        Handle<FixedArray> keys,
+                                        Handle<Map> map) {
+  CALL_HEAP_FUNCTION(UpdateMapCacheWith(*context, *keys, *map), MapCache);
+}
+
+
+Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> context,
+                                               Handle<FixedArray> keys) {
+  if (context->map_cache()->IsUndefined()) {
+    // Allocate the new map cache for the global context.
+    Handle<MapCache> new_cache = NewMapCache(24);
+    context->set_map_cache(*new_cache);
+  }
+  // Check to see whether there is a matching element in the cache.
+  Handle<MapCache> cache =
+      Handle<MapCache>(MapCache::cast(context->map_cache()));
+  Handle<Object> result = Handle<Object>(cache->Lookup(*keys));
+  if (result->IsMap()) return Handle<Map>::cast(result);
+  // Create a new map and add it to the cache.
+  Handle<Map> map =
+      CopyMap(Handle<Map>(context->object_function()->initial_map()),
+              keys->length());
+  AddToMapCache(context, keys, map);
+  return Handle<Map>(map);
+}
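+
+// Consequence of the cache: two object literals in the same global context
+// with the same list of property names share a map, because the second
+// lookup with equal keys hits the entry added by AddToMapCache above.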
+
+
+void Factory::SetRegExpAtomData(Handle<JSRegExp> regexp,
+                                JSRegExp::Type type,
+                                Handle<String> source,
+                                JSRegExp::Flags flags,
+                                Handle<Object> data) {
+  Handle<FixedArray> store = NewFixedArray(JSRegExp::kAtomDataSize);
+
+  store->set(JSRegExp::kTagIndex, Smi::FromInt(type));
+  store->set(JSRegExp::kSourceIndex, *source);
+  store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags.value()));
+  store->set(JSRegExp::kAtomPatternIndex, *data);
+  regexp->set_data(*store);
+}
+
+void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
+                                    JSRegExp::Type type,
+                                    Handle<String> source,
+                                    JSRegExp::Flags flags,
+                                    int capture_count) {
+  Handle<FixedArray> store = NewFixedArray(JSRegExp::kIrregexpDataSize);
+
+  store->set(JSRegExp::kTagIndex, Smi::FromInt(type));
+  store->set(JSRegExp::kSourceIndex, *source);
+  store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags.value()));
+  store->set(JSRegExp::kIrregexpASCIICodeIndex, Heap::the_hole_value());
+  store->set(JSRegExp::kIrregexpUC16CodeIndex, Heap::the_hole_value());
+  store->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::FromInt(0));
+  store->set(JSRegExp::kIrregexpCaptureCountIndex,
+             Smi::FromInt(capture_count));
+  regexp->set_data(*store);
+}
+
+
+void Factory::ConfigureInstance(Handle<FunctionTemplateInfo> desc,
+                                Handle<JSObject> instance,
+                                bool* pending_exception) {
+  // Configure the instance by adding the properties specified by the
+  // instance template.
+  Handle<Object> instance_template = Handle<Object>(desc->instance_template());
+  if (!instance_template->IsUndefined()) {
+    Execution::ConfigureInstance(instance,
+                                 instance_template,
+                                 pending_exception);
+  } else {
+    *pending_exception = false;
+  }
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/factory.h b/src/factory.h
new file mode 100644
index 0000000..0596fbf
--- /dev/null
+++ b/src/factory.h
@@ -0,0 +1,386 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_FACTORY_H_
+#define V8_FACTORY_H_
+
+#include "globals.h"
+#include "heap.h"
+#include "zone-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Interface for handle based allocation.
+
+class Factory : public AllStatic {
+ public:
+  // Allocate a new fixed array with undefined entries.
+  static Handle<FixedArray> NewFixedArray(
+      int size,
+      PretenureFlag pretenure = NOT_TENURED);
+
+  // Allocate a new fixed array with non-existing entries (the hole).
+  static Handle<FixedArray> NewFixedArrayWithHoles(int size);
+
+  static Handle<NumberDictionary> NewNumberDictionary(int at_least_space_for);
+
+  static Handle<StringDictionary> NewStringDictionary(int at_least_space_for);
+
+  static Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors);
+
+  static Handle<String> LookupSymbol(Vector<const char> str);
+  static Handle<String> LookupAsciiSymbol(const char* str) {
+    return LookupSymbol(CStrVector(str));
+  }
+
+
+  // String creation functions.  Most of the string creation functions take
+  // a Heap::PretenureFlag argument to optionally request that they be
+  // allocated in the old generation.  The pretenure flag defaults to
+  // NOT_TENURED.
+  //
+  // Creates a new String object.  There are two String encodings: ASCII and
+  // two byte.  One should choose between the three string factory functions
+  // based on the encoding of the string buffer that the string is
+  // initialized from.
+  //   - ...FromAscii initializes the string from a buffer that is ASCII
+  //     encoded (it does not check that the buffer is ASCII encoded) and
+  //     the result will be ASCII encoded.
+  //   - ...FromUtf8 initializes the string from a buffer that is UTF-8
+  //     encoded.  If the characters are all single-byte characters, the
+  //     result will be ASCII encoded, otherwise it will be converted to two
+  //     byte.
+  //   - ...FromTwoByte initializes the string from a buffer that is two
+  //     byte encoded.  If the characters are all single-byte characters,
+  //     the result will be converted to ASCII, otherwise it will be left as
+  //     two byte.
+  //
+  // ASCII strings are pretenured when used as keys in the SourceCodeCache.
+  static Handle<String> NewStringFromAscii(
+      Vector<const char> str,
+      PretenureFlag pretenure = NOT_TENURED);
+
+  // UTF8 strings are pretenured when used for regexp literal patterns and
+  // flags in the parser.
+  static Handle<String> NewStringFromUtf8(
+      Vector<const char> str,
+      PretenureFlag pretenure = NOT_TENURED);
+
+  static Handle<String> NewStringFromTwoByte(Vector<const uc16> str,
+      PretenureFlag pretenure = NOT_TENURED);
+
+  // Allocates and partially initializes a TwoByte String. The characters of
+  // the string are uninitialized. Currently used in regexp code only, where
+  // they are pretenured.
+  static Handle<String> NewRawTwoByteString(
+      int length,
+      PretenureFlag pretenure = NOT_TENURED);
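+
+  // Illustrative use (CStrVector is the same helper LookupAsciiSymbol uses
+  // above):
+  //
+  //   Handle<String> str = Factory::NewStringFromAscii(CStrVector("hello"));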
+
+  // Create a new cons string object which consists of a pair of strings.
+  static Handle<String> NewConsString(Handle<String> first,
+                                      Handle<String> second);
+
+  // Create a new sliced string object which represents a substring of a
+  // backing string.
+  static Handle<String> NewStringSlice(Handle<String> str,
+                                       int begin,
+                                       int end);
+
+  // Creates a new external String object.  There are two String encodings
+  // in the system: ASCII and two byte.  Unlike other String types, it does
+  // not make sense to have a UTF-8 factory function for external strings,
+  // because we cannot change the underlying buffer.
+  static Handle<String> NewExternalStringFromAscii(
+      ExternalAsciiString::Resource* resource);
+  static Handle<String> NewExternalStringFromTwoByte(
+      ExternalTwoByteString::Resource* resource);
+
+  // Create a global (but otherwise uninitialized) context.
+  static Handle<Context> NewGlobalContext();
+
+  // Create a function context.
+  static Handle<Context> NewFunctionContext(int length,
+                                            Handle<JSFunction> closure);
+
+  // Create a 'with' context.
+  static Handle<Context> NewWithContext(Handle<Context> previous,
+                                        Handle<JSObject> extension,
+                                        bool is_catch_context);
+
+  // Return the Symbol matching the passed in string.
+  static Handle<String> SymbolFromString(Handle<String> value);
+
+  // Allocate a new struct.  The struct is pretenured (allocated directly in
+  // the old generation).
+  static Handle<Struct> NewStruct(InstanceType type);
+
+  static Handle<AccessorInfo> NewAccessorInfo();
+
+  static Handle<Script> NewScript(Handle<String> source);
+
+  // Proxies are pretenured when allocated by the bootstrapper.
+  static Handle<Proxy> NewProxy(Address addr,
+                                PretenureFlag pretenure = NOT_TENURED);
+
+  // Allocate a new proxy.  The proxy is pretenured (allocated directly in
+  // the old generation).
+  static Handle<Proxy> NewProxy(const AccessorDescriptor* proxy);
+
+  static Handle<ByteArray> NewByteArray(int length,
+                                        PretenureFlag pretenure = NOT_TENURED);
+
+  static Handle<PixelArray> NewPixelArray(int length,
+      uint8_t* external_pointer,
+      PretenureFlag pretenure = NOT_TENURED);
+
+  static Handle<Map> NewMap(InstanceType type, int instance_size);
+
+  static Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
+
+  static Handle<Map> CopyMapDropDescriptors(Handle<Map> map);
+
+  // Copy the map adding more inobject properties if possible without
+  // overflowing the instance size.
+  static Handle<Map> CopyMap(Handle<Map> map, int extra_inobject_props);
+
+  static Handle<Map> CopyMapDropTransitions(Handle<Map> map);
+
+  static Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
+
+  // Numbers (e.g. literals) are pretenured by the parser.
+  static Handle<Object> NewNumber(double value,
+                                  PretenureFlag pretenure = NOT_TENURED);
+
+  static Handle<Object> NewNumberFromInt(int value);
+  static Handle<Object> NewNumberFromUint(uint32_t value);
+
+  // These objects are used by the api to create env-independent data
+  // structures in the heap.
+  static Handle<JSObject> NewNeanderObject();
+
+  static Handle<JSObject> NewArgumentsObject(Handle<Object> callee, int length);
+
+  // JS objects are pretenured when allocated by the bootstrapper and
+  // runtime.
+  static Handle<JSObject> NewJSObject(Handle<JSFunction> constructor,
+                                      PretenureFlag pretenure = NOT_TENURED);
+
+  // Global objects are pretenured.
+  static Handle<GlobalObject> NewGlobalObject(Handle<JSFunction> constructor);
+
+  // JS objects are pretenured when allocated by the bootstrapper and
+  // runtime.
+  static Handle<JSObject> NewJSObjectFromMap(Handle<Map> map);
+
+  // JS arrays are pretenured when allocated by the parser.
+  static Handle<JSArray> NewJSArray(int init_length,
+                                    PretenureFlag pretenure = NOT_TENURED);
+
+  static Handle<JSArray> NewJSArrayWithElements(
+      Handle<FixedArray> elements,
+      PretenureFlag pretenure = NOT_TENURED);
+
+  static Handle<JSFunction> NewFunction(Handle<String> name,
+                                        Handle<Object> prototype);
+
+  static Handle<JSFunction> NewFunction(Handle<Object> super, bool is_global);
+
+  static Handle<JSFunction> NewFunctionFromBoilerplate(
+      Handle<JSFunction> boilerplate,
+      Handle<Context> context);
+
+  static Handle<Code> NewCode(const CodeDesc& desc,
+                              ZoneScopeInfo* sinfo,
+                              Code::Flags flags,
+                              Handle<Object> self_reference);
+
+  static Handle<Code> CopyCode(Handle<Code> code);
+
+  static Handle<Object> ToObject(Handle<Object> object,
+                                 Handle<Context> global_context);
+
+  // Interface for creating error objects.
+
+  static Handle<Object> NewError(const char* maker, const char* type,
+                                 Handle<JSArray> args);
+  static Handle<Object> NewError(const char* maker, const char* type,
+                                 Vector< Handle<Object> > args);
+  static Handle<Object> NewError(const char* type,
+                                 Vector< Handle<Object> > args);
+  static Handle<Object> NewError(Handle<String> message);
+  static Handle<Object> NewError(const char* constructor,
+                                 Handle<String> message);
+
+  static Handle<Object> NewTypeError(const char* type,
+                                     Vector< Handle<Object> > args);
+  static Handle<Object> NewTypeError(Handle<String> message);
+
+  static Handle<Object> NewRangeError(const char* type,
+                                      Vector< Handle<Object> > args);
+  static Handle<Object> NewRangeError(Handle<String> message);
+
+  static Handle<Object> NewSyntaxError(const char* type, Handle<JSArray> args);
+  static Handle<Object> NewSyntaxError(Handle<String> message);
+
+  static Handle<Object> NewReferenceError(const char* type,
+                                          Vector< Handle<Object> > args);
+  static Handle<Object> NewReferenceError(Handle<String> message);
+
+  static Handle<Object> NewEvalError(const char* type,
+                                     Vector< Handle<Object> > args);
+
+
+  static Handle<JSFunction> NewFunction(Handle<String> name,
+                                        InstanceType type,
+                                        int instance_size,
+                                        Handle<Code> code,
+                                        bool force_initial_map);
+
+  static Handle<JSFunction> NewFunctionBoilerplate(Handle<String> name,
+                                                   int number_of_literals,
+                                                   bool contains_array_literal,
+                                                   Handle<Code> code);
+
+  static Handle<JSFunction> NewFunctionBoilerplate(Handle<String> name);
+
+  static Handle<JSFunction> NewFunction(Handle<Map> function_map,
+      Handle<SharedFunctionInfo> shared, Handle<Object> prototype);
+
+
+  static Handle<JSFunction> NewFunctionWithPrototype(Handle<String> name,
+                                                     InstanceType type,
+                                                     int instance_size,
+                                                     Handle<JSObject> prototype,
+                                                     Handle<Code> code,
+                                                     bool force_initial_map);
+
+  static Handle<DescriptorArray> CopyAppendProxyDescriptor(
+      Handle<DescriptorArray> array,
+      Handle<String> key,
+      Handle<Object> value,
+      PropertyAttributes attributes);
+
+  static Handle<String> NumberToString(Handle<Object> number);
+
+  enum ApiInstanceType {
+    JavaScriptObject,
+    InnerGlobalObject,
+    OuterGlobalObject
+  };
+
+  static Handle<JSFunction> CreateApiFunction(
+      Handle<FunctionTemplateInfo> data,
+      ApiInstanceType type = JavaScriptObject);
+
+  static Handle<JSFunction> InstallMembers(Handle<JSFunction> function);
+
+  // Configures the instance by adding the properties specified by the
+  // instance template of 'desc'.  'desc' is a function template, and
+  // 'instance' is an object created by the function of this function
+  // template.
+  static void ConfigureInstance(Handle<FunctionTemplateInfo> desc,
+                                Handle<JSObject> instance,
+                                bool* pending_exception);
+
+#define ROOT_ACCESSOR(type, name, camel_name)                                  \
+  static inline Handle<type> name() {                                          \
+    return Handle<type>(bit_cast<type**, Object**>(                            \
+        &Heap::roots_[Heap::k##camel_name##RootIndex]));                       \
+  }
+  ROOT_LIST(ROOT_ACCESSOR)
+#undef ROOT_ACCESSOR
+
+#define SYMBOL_ACCESSOR(name, str) \
+  static inline Handle<String> name() {                                        \
+    return Handle<String>(bit_cast<String**, Object**>(                        \
+        &Heap::roots_[Heap::k##name##RootIndex]));                             \
+  }
+  SYMBOL_LIST(SYMBOL_ACCESSOR)
+#undef SYMBOL_ACCESSOR
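+
+  // These expansions provide the accessors used throughout factory.cc, e.g.
+  // Factory::undefined_value(), Factory::the_hole_value() and
+  // Factory::empty_symbol(); each returns a Handle pointing directly at the
+  // corresponding root slot in Heap::roots_.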
+
+  static Handle<String> hidden_symbol() {
+    return Handle<String>(&Heap::hidden_symbol_);
+  }
+
+  static Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name);
+
+  static Handle<NumberDictionary> DictionaryAtNumberPut(
+      Handle<NumberDictionary>,
+      uint32_t key,
+      Handle<Object> value);
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  static Handle<DebugInfo> NewDebugInfo(Handle<SharedFunctionInfo> shared);
+#endif
+
+  // Return a map using the map cache in the global context.
+  // The key is an ordered set of property names.
+  static Handle<Map> ObjectLiteralMapFromCache(Handle<Context> context,
+                                               Handle<FixedArray> keys);
+
+  // Creates a new FixedArray that holds the data associated with the
+  // atom regexp and stores it in the regexp.
+  static void SetRegExpAtomData(Handle<JSRegExp> regexp,
+                                JSRegExp::Type type,
+                                Handle<String> source,
+                                JSRegExp::Flags flags,
+                                Handle<Object> match_pattern);
+
+  // Creates a new FixedArray that holds the data associated with the
+  // irregexp regexp and stores it in the regexp.
+  static void SetRegExpIrregexpData(Handle<JSRegExp> regexp,
+                                    JSRegExp::Type type,
+                                    Handle<String> source,
+                                    JSRegExp::Flags flags,
+                                    int capture_count);
+
+ private:
+  static Handle<JSFunction> NewFunctionHelper(Handle<String> name,
+                                              Handle<Object> prototype);
+
+  static Handle<DescriptorArray> CopyAppendCallbackDescriptors(
+      Handle<DescriptorArray> array,
+      Handle<Object> descriptors);
+
+  static Handle<JSFunction> BaseNewFunctionFromBoilerplate(
+      Handle<JSFunction> boilerplate,
+      Handle<Map> function_map);
+
+  // Create a new map cache.
+  static Handle<MapCache> NewMapCache(int at_least_space_for);
+
+  // Update the map cache in the global context with (keys, map)
+  static Handle<MapCache> AddToMapCache(Handle<Context> context,
+                                        Handle<FixedArray> keys,
+                                        Handle<Map> map);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_FACTORY_H_
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
new file mode 100644
index 0000000..91c5bca
--- /dev/null
+++ b/src/flag-definitions.h
@@ -0,0 +1,393 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file defines all of the flags.  It is separated into different sections,
+// for Debug, Release, Logging and Profiling, etc.  To add a new flag, find the
+// correct section, and use one of the DEFINE_ macros, without a trailing ';'.
+//
+// This include does not have a guard, because it is a template-style include,
+// which can be included multiple times in different modes.  It expects to have
+// a mode defined before it's included.  The modes are FLAG_MODE_... below:
+
+// We want to declare the names of the variables for the header file.  Normally
+// this will just be an extern declaration, but for a readonly flag we let the
+// compiler make better optimizations by giving it the value.
+#if defined(FLAG_MODE_DECLARE)
+#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
+  extern ctype FLAG_##nam;
+#define FLAG_READONLY(ftype, ctype, nam, def, cmt) \
+  static ctype const FLAG_##nam = def;
+
+// We want to supply the actual storage and value for the flag variable in the
+// .cc file.  We only do this for writable flags.
+#elif defined(FLAG_MODE_DEFINE)
+#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
+  ctype FLAG_##nam = def;
+#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
+
+// We need to define all of our default values so that the Flag structure can
+// access them by pointer.  These are just used internally inside of one .cc,
+// for MODE_META, so there is no impact on the flags interface.
+#elif defined(FLAG_MODE_DEFINE_DEFAULTS)
+#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
+  static ctype const FLAGDEFAULT_##nam = def;
+#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
+
+
+// We want to write entries into our meta data table, for internal parsing,
+// printing, etc. in the flag parser code.  We only do this for writable flags.
+#elif defined(FLAG_MODE_META)
+#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
+  { Flag::TYPE_##ftype, #nam, &FLAG_##nam, &FLAGDEFAULT_##nam, cmt, false },
+#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
+
+#else
+#error No mode supplied when including flag-definitions.h
+#endif
+
+#ifdef FLAG_MODE_DECLARE
+// Structure used to hold a collection of arguments to the JavaScript code.
+struct JSArguments {
+public:
+  JSArguments();
+  JSArguments(int argc, const char** argv);
+  int argc() const;
+  const char** argv();
+  const char*& operator[](int idx);
+  JSArguments& operator=(JSArguments args);
+private:
+  int argc_;
+  const char** argv_;
+};
+#endif
+
+#define DEFINE_bool(nam, def, cmt) FLAG(BOOL, bool, nam, def, cmt)
+#define DEFINE_int(nam, def, cmt) FLAG(INT, int, nam, def, cmt)
+#define DEFINE_float(nam, def, cmt) FLAG(FLOAT, double, nam, def, cmt)
+#define DEFINE_string(nam, def, cmt) FLAG(STRING, const char*, nam, def, cmt)
+#define DEFINE_args(nam, def, cmt) FLAG(ARGS, JSArguments, nam, def, cmt)
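+
+// Example of how the modes combine: with FLAG defined as FLAG_FULL, the line
+//   DEFINE_bool(trace, false, "trace function calls")
+// expands to
+//   extern bool FLAG_trace;      // under FLAG_MODE_DECLARE
+//   bool FLAG_trace = false;     // under FLAG_MODE_DEFINE
+// and to a Flag table entry under FLAG_MODE_META.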
+
+//
+// Flags in all modes.
+//
+#define FLAG FLAG_FULL
+
+// assembler-ia32.cc / assembler-arm.cc
+DEFINE_bool(debug_code, false,
+            "generate extra code (comments, assertions) for debugging")
+DEFINE_bool(emit_branch_hints, false, "emit branch hints")
+DEFINE_bool(push_pop_elimination, true,
+            "eliminate redundant push/pops in assembly code")
+DEFINE_bool(print_push_pop_elimination, false,
+            "print elimination of redundant push/pops in assembly code")
+
+// bootstrapper.cc
+DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
+DEFINE_string(expose_debug_as, NULL, "expose debug in global object")
+DEFINE_string(natives_file, NULL, "alternative natives file")
+DEFINE_bool(expose_gc, false, "expose gc extension")
+DEFINE_int(stack_trace_limit, 10, "number of stack frames to capture")
+
+// builtins-ia32.cc
+DEFINE_bool(inline_new, true, "use fast inline allocation")
+
+// checks.cc
+DEFINE_bool(stack_trace_on_abort, true,
+            "print a stack trace if an assertion failure occurs")
+
+// codegen-ia32.cc / codegen-arm.cc
+DEFINE_bool(trace, false, "trace function calls")
+DEFINE_bool(defer_negation, true, "defer negation operation")
+DEFINE_bool(check_stack, true,
+            "check stack for overflow, interrupt, breakpoint")
+
+// codegen.cc
+DEFINE_bool(lazy, true, "use lazy compilation")
+DEFINE_bool(debug_info, true, "add debug information to compiled functions")
+
+// compiler.cc
+DEFINE_bool(strict, false, "strict error checking")
+DEFINE_int(min_preparse_length, 1024,
+           "Minimum length for automatic enable preparsing")
+
+// compilation-cache.cc
+DEFINE_bool(compilation_cache, true, "enable compilation cache")
+
+// debug.cc
+DEFINE_bool(remote_debugging, false, "enable remote debugging")
+DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")
+DEFINE_bool(debugger_auto_break, false,
+            "automatically set the debug break flag when debugger commands are "
+            "in the queue (experimental)")
+
+// frames.cc
+DEFINE_int(max_stack_trace_source_length, 300,
+           "maximum length of function source code printed in a stack trace.")
+
+// heap.cc
+DEFINE_int(new_space_size, 0, "size of (each semispace in) the new generation")
+DEFINE_int(old_space_size, 0, "size of the old generation")
+DEFINE_bool(gc_global, false, "always perform global GCs")
+DEFINE_int(gc_interval, -1, "garbage collect after <n> allocations")
+DEFINE_bool(trace_gc, false,
+            "print one trace line following each garbage collection")
+DEFINE_bool(trace_gc_verbose, false,
+            "print more details following each garbage collection")
+DEFINE_bool(collect_maps, true,
+            "garbage collect maps from which no objects can be reached")
+
+// v8.cc
+DEFINE_bool(use_idle_notification, true,
+            "Use idle notification to reduce memory footprint.")
+
+// ic.cc
+DEFINE_bool(use_ic, true, "use inline caching")
+
+// macro-assembler-ia32.cc
+DEFINE_bool(native_code_counters, false,
+            "generate extra code for manipulating stats counters")
+
+// mark-compact.cc
+DEFINE_bool(always_compact, false, "Perform compaction on every full GC")
+DEFINE_bool(never_compact, false,
+            "Never perform compaction on full GC - testing only")
+DEFINE_bool(cleanup_ics_at_gc, true,
+            "Flush inline caches prior to mark compact collection.")
+DEFINE_bool(cleanup_caches_in_maps_at_gc, true,
+            "Flush code caches in maps during mark compact cycle.")
+
+DEFINE_bool(canonicalize_object_literal_maps, true,
+            "Canonicalize maps for object literals.")
+
+// mksnapshot.cc
+DEFINE_bool(h, false, "print this message")
+
+// parser.cc
+DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
+
+// rewriter.cc
+DEFINE_bool(optimize_ast, true, "optimize the ast")
+
+// simulator-arm.cc
+DEFINE_bool(trace_sim, false, "trace simulator execution")
+DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
+
+// top.cc
+DEFINE_bool(trace_exception, false,
+            "print stack trace when throwing exceptions")
+DEFINE_bool(preallocate_message_memory, false,
+            "preallocate some memory to build stack traces.")
+
+// usage-analyzer.cc
+DEFINE_bool(usage_computation, true, "compute variable usage counts")
+
+// v8.cc
+DEFINE_bool(preemption, false,
+            "activate a 100ms timer that switches between V8 threads")
+
+// Regexp
+DEFINE_bool(trace_regexps, false, "trace regexp execution")
+DEFINE_bool(regexp_optimization, true, "generate optimized regexp code")
+
+// Testing flags, see test/cctest/test-{flags,api,serialization}.cc
+DEFINE_bool(testing_bool_flag, true, "testing_bool_flag")
+DEFINE_int(testing_int_flag, 13, "testing_int_flag")
+DEFINE_float(testing_float_flag, 2.5, "float-flag")
+DEFINE_string(testing_string_flag, "Hello, world!", "string-flag")
+DEFINE_int(testing_prng_seed, 42, "Seed used for threading test randomness")
+#ifdef WIN32
+DEFINE_string(testing_serialization_file, "C:\\Windows\\Temp\\serdes",
+              "file in which to serialize heap")
+#else
+DEFINE_string(testing_serialization_file, "/tmp/serdes",
+              "file in which to serialize heap")
+#endif
+
+//
+// Dev shell flags
+//
+
+DEFINE_bool(help, false, "Print usage message, including flags, on console")
+DEFINE_bool(dump_counters, false, "Dump counters on exit")
+DEFINE_bool(debugger, true, "Enable JavaScript debugger")
+DEFINE_bool(remote_debugger, false, "Connect JavaScript debugger to the "
+                                    "debugger agent in another process")
+DEFINE_bool(debugger_agent, false, "Enable debugger agent")
+DEFINE_int(debugger_port, 5858, "Port to use for remote debugging")
+DEFINE_string(map_counters, NULL, "Map counters to a file")
+DEFINE_args(js_arguments, JSArguments(),
+            "Pass all remaining arguments to the script. Alias for \"--\".")
+
+//
+// Debug only flags
+//
+#undef FLAG
+#ifdef DEBUG
+#define FLAG FLAG_FULL
+#else
+#define FLAG FLAG_READONLY
+#endif
+
+// checks.cc
+DEFINE_bool(enable_slow_asserts, false,
+            "enable asserts that are slow to execute")
+
+// codegen-ia32.cc / codegen-arm.cc
+DEFINE_bool(trace_codegen, false,
+            "print name of functions for which code is generated")
+DEFINE_bool(print_source, false, "pretty print source code")
+DEFINE_bool(print_builtin_source, false,
+            "pretty print source code for builtins")
+DEFINE_bool(print_ast, false, "print source AST")
+DEFINE_bool(print_builtin_ast, false, "print source AST for builtins")
+DEFINE_bool(trace_calls, false, "trace calls")
+DEFINE_bool(trace_builtin_calls, false, "trace builtins calls")
+DEFINE_string(stop_at, "", "function name where to insert a breakpoint")
+
+// compiler.cc
+DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins")
+DEFINE_bool(print_scopes, false, "print scopes")
+
+// contexts.cc
+DEFINE_bool(trace_contexts, false, "trace contexts operations")
+
+// heap.cc
+DEFINE_bool(gc_greedy, false, "perform GC prior to some allocations")
+DEFINE_bool(gc_verbose, false, "print stuff during garbage collection")
+DEFINE_bool(heap_stats, false, "report heap statistics before and after GC")
+DEFINE_bool(code_stats, false, "report code statistics after GC")
+DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
+DEFINE_bool(print_handles, false, "report handles after GC")
+DEFINE_bool(print_global_handles, false, "report global handles after GC")
+DEFINE_bool(print_rset, false, "print remembered sets before GC")
+
+// ic.cc
+DEFINE_bool(trace_ic, false, "trace inline cache state transitions")
+
+// objects.cc
+DEFINE_bool(trace_normalization,
+            false,
+            "prints when objects are turned into dictionaries.")
+
+// runtime.cc
+DEFINE_bool(trace_lazy, false, "trace lazy compilation")
+
+// serialize.cc
+DEFINE_bool(debug_serialization, false,
+            "write debug information into the snapshot.")
+
+// spaces.cc
+DEFINE_bool(collect_heap_spill_statistics, false,
+            "report heap spill statistics along with heap_stats "
+            "(requires heap_stats)")
+
+// Regexp
+DEFINE_bool(trace_regexp_bytecodes, false, "trace regexp bytecode execution")
+DEFINE_bool(trace_regexp_assembler,
+            false,
+            "trace regexp macro assembler calls.")
+
+//
+// Logging and profiling only flags
+//
+#undef FLAG
+#ifdef ENABLE_LOGGING_AND_PROFILING
+#define FLAG FLAG_FULL
+#else
+#define FLAG FLAG_READONLY
+#endif
+
+// log.cc
+DEFINE_bool(log, false,
+            "Minimal logging (no API, code, GC, suspect, or handles samples).")
+DEFINE_bool(log_all, false, "Log all events to the log file.")
+DEFINE_bool(log_runtime, false, "Activate runtime system %Log call.")
+DEFINE_bool(log_api, false, "Log API events to the log file.")
+DEFINE_bool(log_code, false,
+            "Log code events to the log file without profiling.")
+DEFINE_bool(log_gc, false,
+            "Log heap samples on garbage collection for the hp2ps tool.")
+DEFINE_bool(log_handles, false, "Log global handle events.")
+DEFINE_bool(log_state_changes, false, "Log state changes.")
+DEFINE_bool(log_suspect, false, "Log suspect operations.")
+DEFINE_bool(compress_log, false,
+            "Compress log to save space (makes log less human-readable).")
+DEFINE_bool(prof, false,
+            "Log statistical profiling information (implies --log-code).")
+DEFINE_bool(prof_auto, true,
+            "Used with --prof, starts profiling automatically")
+DEFINE_bool(prof_lazy, false,
+            "Used with --prof, only does sampling and logging"
+            " when profiler is active (implies --noprof_auto).")
+DEFINE_bool(log_regexp, false, "Log regular expression execution.")
+DEFINE_bool(sliding_state_window, false,
+            "Update sliding state window counters.")
+DEFINE_string(logfile, "v8.log", "Specify the name of the log file.")
+DEFINE_bool(oprofile, false, "Enable JIT agent for OProfile.")
+
+//
+// Heap protection flags
+// Using heap protection requires ENABLE_LOGGING_AND_PROFILING as well.
+//
+#ifdef ENABLE_HEAP_PROTECTION
+#undef FLAG
+#define FLAG FLAG_FULL
+
+DEFINE_bool(protect_heap, false,
+            "Protect/unprotect V8's heap when leaving/entering the VM.")
+
+#endif
+
+//
+// Disassembler only flags
+//
+#undef FLAG
+#ifdef ENABLE_DISASSEMBLER
+#define FLAG FLAG_FULL
+#else
+#define FLAG FLAG_READONLY
+#endif
+
+// code-stubs.cc
+DEFINE_bool(print_code_stubs, false, "print code stubs")
+
+// codegen-ia32.cc / codegen-arm.cc
+DEFINE_bool(print_code, false, "print generated code")
+DEFINE_bool(print_builtin_code, false, "print generated code for builtins")
+
+// Cleanup...
+#undef FLAG_FULL
+#undef FLAG_READONLY
+#undef FLAG
+
+#undef DEFINE_bool
+#undef DEFINE_int
+#undef DEFINE_float
+#undef DEFINE_string
+#undef DEFINE_args
+
+#undef FLAG_MODE_DECLARE
+#undef FLAG_MODE_DEFINE
+#undef FLAG_MODE_DEFINE_DEFAULTS
+#undef FLAG_MODE_META
diff --git a/src/flags.cc b/src/flags.cc
new file mode 100644
index 0000000..5df3afd
--- /dev/null
+++ b/src/flags.cc
@@ -0,0 +1,555 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <ctype.h>
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "platform.h"
+#include "smart-pointer.h"
+#include "string-stream.h"
+
+
+namespace v8 {
+namespace internal {
+
+// Define all of our flags.
+#define FLAG_MODE_DEFINE
+#include "flag-definitions.h"
+
+// Define all of our flags default values.
+#define FLAG_MODE_DEFINE_DEFAULTS
+#include "flag-definitions.h"
+
+namespace {
+
+// This structure represents a single entry in the flag system, with a pointer
+// to the actual flag, default value, comment, etc.  This is designed to be
+// POD-initialized so as to avoid requiring static constructors.
+struct Flag {
+  enum FlagType { TYPE_BOOL, TYPE_INT, TYPE_FLOAT, TYPE_STRING, TYPE_ARGS };
+
+  FlagType type_;           // Flag type: bool, int, float, string, or args.
+  const char* name_;        // Name of the flag, ex "my_flag".
+  void* valptr_;            // Pointer to the global flag variable.
+  const void* defptr_;      // Pointer to the default value.
+  const char* cmt_;         // A comment about the flag's purpose.
+  bool owns_ptr_;           // Does the flag own its string value?
+
+  FlagType type() const { return type_; }
+
+  const char* name() const { return name_; }
+
+  const char* comment() const { return cmt_; }
+
+  bool* bool_variable() const {
+    ASSERT(type_ == TYPE_BOOL);
+    return reinterpret_cast<bool*>(valptr_);
+  }
+
+  int* int_variable() const {
+    ASSERT(type_ == TYPE_INT);
+    return reinterpret_cast<int*>(valptr_);
+  }
+
+  double* float_variable() const {
+    ASSERT(type_ == TYPE_FLOAT);
+    return reinterpret_cast<double*>(valptr_);
+  }
+
+  const char* string_value() const {
+    ASSERT(type_ == TYPE_STRING);
+    return *reinterpret_cast<const char**>(valptr_);
+  }
+
+  void set_string_value(const char* value, bool owns_ptr) {
+    ASSERT(type_ == TYPE_STRING);
+    const char** ptr = reinterpret_cast<const char**>(valptr_);
+    if (owns_ptr_ && *ptr != NULL) DeleteArray(*ptr);
+    *ptr = value;
+    owns_ptr_ = owns_ptr;
+  }
+
+  JSArguments* args_variable() const {
+    ASSERT(type_ == TYPE_ARGS);
+    return reinterpret_cast<JSArguments*>(valptr_);
+  }
+
+  bool bool_default() const {
+    ASSERT(type_ == TYPE_BOOL);
+    return *reinterpret_cast<const bool*>(defptr_);
+  }
+
+  int int_default() const {
+    ASSERT(type_ == TYPE_INT);
+    return *reinterpret_cast<const int*>(defptr_);
+  }
+
+  double float_default() const {
+    ASSERT(type_ == TYPE_FLOAT);
+    return *reinterpret_cast<const double*>(defptr_);
+  }
+
+  const char* string_default() const {
+    ASSERT(type_ == TYPE_STRING);
+    return *reinterpret_cast<const char* const *>(defptr_);
+  }
+
+  JSArguments args_default() const {
+    ASSERT(type_ == TYPE_ARGS);
+    return *reinterpret_cast<const JSArguments*>(defptr_);
+  }
+
+  // Compare this flag's current value against the default.
+  bool IsDefault() const {
+    switch (type_) {
+      case TYPE_BOOL:
+        return *bool_variable() == bool_default();
+      case TYPE_INT:
+        return *int_variable() == int_default();
+      case TYPE_FLOAT:
+        return *float_variable() == float_default();
+      case TYPE_STRING: {
+        const char* str1 = string_value();
+        const char* str2 = string_default();
+        if (str2 == NULL) return str1 == NULL;
+        if (str1 == NULL) return str2 == NULL;
+        return strcmp(str1, str2) == 0;
+      }
+      case TYPE_ARGS:
+        return args_variable()->argc() == 0;
+    }
+    UNREACHABLE();
+    return true;
+  }
+
+  // Set a flag back to its default value.
+  void Reset() {
+    switch (type_) {
+      case TYPE_BOOL:
+        *bool_variable() = bool_default();
+        break;
+      case TYPE_INT:
+        *int_variable() = int_default();
+        break;
+      case TYPE_FLOAT:
+        *float_variable() = float_default();
+        break;
+      case TYPE_STRING:
+        set_string_value(string_default(), false);
+        break;
+      case TYPE_ARGS:
+        *args_variable() = args_default();
+        break;
+    }
+  }
+};
+
+Flag flags[] = {
+#define FLAG_MODE_META
+#include "flag-definitions.h"
+};
+
+const size_t num_flags = sizeof(flags) / sizeof(*flags);
+
+}  // namespace
+
+
+static const char* Type2String(Flag::FlagType type) {
+  switch (type) {
+    case Flag::TYPE_BOOL: return "bool";
+    case Flag::TYPE_INT: return "int";
+    case Flag::TYPE_FLOAT: return "float";
+    case Flag::TYPE_STRING: return "string";
+    case Flag::TYPE_ARGS: return "arguments";
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+static SmartPointer<const char> ToString(Flag* flag) {
+  HeapStringAllocator string_allocator;
+  StringStream buffer(&string_allocator);
+  switch (flag->type()) {
+    case Flag::TYPE_BOOL:
+      buffer.Add("%s", (*flag->bool_variable() ? "true" : "false"));
+      break;
+    case Flag::TYPE_INT:
+      buffer.Add("%d", *flag->int_variable());
+      break;
+    case Flag::TYPE_FLOAT:
+      buffer.Add("%f", FmtElm(*flag->float_variable()));
+      break;
+    case Flag::TYPE_STRING: {
+      const char* str = flag->string_value();
+      buffer.Add("%s", str ? str : "NULL");
+      break;
+    }
+    case Flag::TYPE_ARGS: {
+      JSArguments args = *flag->args_variable();
+      if (args.argc() > 0) {
+        buffer.Add("%s",  args[0]);
+        for (int i = 1; i < args.argc(); i++) {
+          buffer.Add(" %s", args[i]);
+        }
+      }
+      break;
+    }
+  }
+  return buffer.ToCString();
+}
+
+
+// static
+List<const char*>* FlagList::argv() {
+  List<const char*>* args = new List<const char*>(8);
+  Flag* args_flag = NULL;
+  for (size_t i = 0; i < num_flags; ++i) {
+    Flag* f = &flags[i];
+    if (!f->IsDefault()) {
+      if (f->type() == Flag::TYPE_ARGS) {
+        ASSERT(args_flag == NULL);
+        args_flag = f;  // Must be last in arguments.
+        continue;
+      }
+      HeapStringAllocator string_allocator;
+      StringStream buffer(&string_allocator);
+      if (f->type() != Flag::TYPE_BOOL || *(f->bool_variable())) {
+        buffer.Add("--%s", f->name());
+      } else {
+        buffer.Add("--no%s", f->name());
+      }
+      args->Add(buffer.ToCString().Detach());
+      if (f->type() != Flag::TYPE_BOOL) {
+        args->Add(ToString(f).Detach());
+      }
+    }
+  }
+  if (args_flag != NULL) {
+    HeapStringAllocator string_allocator;
+    StringStream buffer(&string_allocator);
+    buffer.Add("--%s", args_flag->name());
+    args->Add(buffer.ToCString().Detach());
+    JSArguments jsargs = *args_flag->args_variable();
+    for (int j = 0; j < jsargs.argc(); j++) {
+      args->Add(StrDup(jsargs[j]));
+    }
+  }
+  return args;
+}
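A brief usage sketch for argv() (hypothetical caller, assuming two flags have been
changed from their defaults): the list reproduces the non-default flags in
command-line form, and per the comment in flags.h the caller must dispose of the
list and of every element in it.

  FLAG_expose_gc = true;
  FLAG_stack_trace_limit = 20;
  List<const char*>* args = FlagList::argv();
  // args now holds roughly: "--expose_gc", "--stack_trace_limit", "20".
  for (int i = 0; i < args->length(); i++) DeleteArray(args->at(i));
  delete args;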
+
+
+// Helper function to parse flags: Takes an argument arg and splits it into
+// a flag name and flag value (or NULL if they are missing). is_bool is set
+// if the arg started with "-no" or "--no". The buffer may be used to NUL-
+// terminate the name; it must be large enough to hold any possible name.
+static void SplitArgument(const char* arg,
+                          char* buffer,
+                          int buffer_size,
+                          const char** name,
+                          const char** value,
+                          bool* is_bool) {
+  *name = NULL;
+  *value = NULL;
+  *is_bool = false;
+
+  if (*arg == '-') {
+    // find the beginning of the flag name
+    arg++;  // remove 1st '-'
+    if (*arg == '-') {
+      arg++;  // remove 2nd '-'
+      if (arg[0] == '\0') {
+        const char* kJSArgumentsFlagName = "js_arguments";
+        *name = kJSArgumentsFlagName;
+        return;
+      }
+    }
+    if (arg[0] == 'n' && arg[1] == 'o') {
+      arg += 2;  // remove "no"
+      *is_bool = true;
+    }
+    *name = arg;
+
+    // find the end of the flag name
+    while (*arg != '\0' && *arg != '=')
+      arg++;
+
+    // get the value if any
+    if (*arg == '=') {
+      // make a copy so we can NUL-terminate flag name
+      int n = arg - *name;
+      CHECK(n < buffer_size);  // buffer is too small
+      memcpy(buffer, *name, n);
+      buffer[n] = '\0';
+      *name = buffer;
+      // get the value
+      *value = arg + 1;
+    }
+  }
+}
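For reference, a few concrete splits this helper performs (derived directly from the
code above; the buffer is only needed when an '=' forces the name to be copied and
NUL-terminated):

  //   "--expose_gc"            -> name "expose_gc", value NULL, is_bool false
  //   "--noexpose_gc"          -> name "expose_gc", value NULL, is_bool true
  //   "--stack_trace_limit=20" -> name "stack_trace_limit" (in buffer), value "20"
  //   "--"                     -> name "js_arguments" (the rest goes to the script)
  //   "script.js"              -> name NULL (not a flag at all)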
+
+
+inline char NormalizeChar(char ch) {
+  return ch == '_' ? '-' : ch;
+}
+
+
+static bool EqualNames(const char* a, const char* b) {
+  for (int i = 0; NormalizeChar(a[i]) == NormalizeChar(b[i]); i++) {
+    if (a[i] == '\0') {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+static Flag* FindFlag(const char* name) {
+  for (size_t i = 0; i < num_flags; ++i) {
+    if (EqualNames(name, flags[i].name()))
+      return &flags[i];
+  }
+  return NULL;
+}
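Because NormalizeChar() folds '_' to '-' before characters are compared, the
command-line spelling of a flag may use either separator; a small illustrative check
against the lookup above:

  // Both spellings resolve to the same table entry.
  ASSERT(FindFlag("stack-trace-limit") == FindFlag("stack_trace_limit"));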
+
+
+// static
+int FlagList::SetFlagsFromCommandLine(int* argc,
+                                      char** argv,
+                                      bool remove_flags) {
+  // parse arguments
+  for (int i = 1; i < *argc;) {
+    int j = i;  // j > 0
+    const char* arg = argv[i++];
+
+    // split arg into flag components
+    char buffer[1*KB];
+    const char* name;
+    const char* value;
+    bool is_bool;
+    SplitArgument(arg, buffer, sizeof buffer, &name, &value, &is_bool);
+
+    if (name != NULL) {
+      // lookup the flag
+      Flag* flag = FindFlag(name);
+      if (flag == NULL) {
+        if (remove_flags) {
+          // We don't recognize this flag but since we're removing
+          // the flags we recognize we assume that the remaining flags
+          // will be processed somewhere else so this flag might make
+          // sense there.
+          continue;
+        } else {
+          fprintf(stderr, "Error: unrecognized flag %s\n"
+                  "Try --help for options\n", arg);
+          return j;
+        }
+      }
+
+      // if we still need a flag value, use the next argument if available
+      if (flag->type() != Flag::TYPE_BOOL &&
+          flag->type() != Flag::TYPE_ARGS &&
+          value == NULL) {
+        if (i < *argc) {
+          value = argv[i++];
+        } else {
+          fprintf(stderr, "Error: missing value for flag %s of type %s\n"
+                  "Try --help for options\n",
+                  arg, Type2String(flag->type()));
+          return j;
+        }
+      }
+
+      // set the flag
+      char* endp = const_cast<char*>("");  // *endp is only read
+      switch (flag->type()) {
+        case Flag::TYPE_BOOL:
+          *flag->bool_variable() = !is_bool;
+          break;
+        case Flag::TYPE_INT:
+          *flag->int_variable() = strtol(value, &endp, 10);  // NOLINT
+          break;
+        case Flag::TYPE_FLOAT:
+          *flag->float_variable() = strtod(value, &endp);
+          break;
+        case Flag::TYPE_STRING:
+          flag->set_string_value(value ? StrDup(value) : NULL, true);
+          break;
+        case Flag::TYPE_ARGS: {
+          int start_pos = (value == NULL) ? i : i - 1;
+          int js_argc = *argc - start_pos;
+          const char** js_argv = NewArray<const char*>(js_argc);
+          if (value != NULL) {
+            js_argv[0] = StrDup(value);
+          }
+          for (int k = i; k < *argc; k++) {
+            js_argv[k - start_pos] = StrDup(argv[k]);
+          }
+          *flag->args_variable() = JSArguments(js_argc, js_argv);
+          i = *argc;  // Consume all arguments
+          break;
+        }
+      }
+
+      // handle errors
+      if ((flag->type() == Flag::TYPE_BOOL && value != NULL) ||
+          (flag->type() != Flag::TYPE_BOOL && is_bool) ||
+          *endp != '\0') {
+        fprintf(stderr, "Error: illegal value for flag %s of type %s\n"
+                "Try --help for options\n",
+                arg, Type2String(flag->type()));
+        return j;
+      }
+
+      // remove the flag & value from the command
+      if (remove_flags) {
+        while (j < i) {
+          argv[j++] = NULL;
+        }
+      }
+    }
+  }
+
+  // shrink the argument list
+  if (remove_flags) {
+    int j = 1;
+    for (int i = 1; i < *argc; i++) {
+      if (argv[i] != NULL)
+        argv[j++] = argv[i];
+    }
+    *argc = j;
+  }
+
+  if (FLAG_help) {
+    PrintHelp();
+    exit(0);
+  }
+  // parsed all flags successfully
+  return 0;
+}
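A usage sketch (hypothetical embedder main(), not part of this change): the intended
call passes remove_flags so that V8 strips the flags it consumed and leaves
everything else for the caller.

  int main(int argc, char** argv) {
    // Returns 0 on success, otherwise the index of the offending argument.
    int bad_arg =
        v8::internal::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
    if (bad_arg > 0) return 1;
    // argv[1..argc-1] now holds only what V8 did not consume: scripts plus any
    // flags it did not recognize.
    return 0;
  }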
+
+
+static char* SkipWhiteSpace(char* p) {
+  while (*p != '\0' && isspace(*p) != 0) p++;
+  return p;
+}
+
+
+static char* SkipBlackSpace(char* p) {
+  while (*p != '\0' && isspace(*p) == 0) p++;
+  return p;
+}
+
+
+// static
+int FlagList::SetFlagsFromString(const char* str, int len) {
+  // make a 0-terminated copy of str
+  char* copy0 = NewArray<char>(len + 1);
+  memcpy(copy0, str, len);
+  copy0[len] = '\0';
+
+  // strip leading white space
+  char* copy = SkipWhiteSpace(copy0);
+
+  // count the number of 'arguments'
+  int argc = 1;  // be compatible with SetFlagsFromCommandLine()
+  for (char* p = copy; *p != '\0'; argc++) {
+    p = SkipBlackSpace(p);
+    p = SkipWhiteSpace(p);
+  }
+
+  // allocate argument array
+  char** argv = NewArray<char*>(argc);
+
+  // split the flags string into arguments
+  argc = 1;  // be compatible with SetFlagsFromCommandLine()
+  for (char* p = copy; *p != '\0'; argc++) {
+    argv[argc] = p;
+    p = SkipBlackSpace(p);
+    if (*p != '\0') *p++ = '\0';  // 0-terminate argument
+    p = SkipWhiteSpace(p);
+  }
+
+  // set the flags
+  int result = SetFlagsFromCommandLine(&argc, argv, false);
+
+  // cleanup
+  DeleteArray(argv);
+  DeleteArray(copy0);
+
+  return result;
+}
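The string variant whitespace-splits into a synthetic argv and reuses the parser
above, which is convenient for tests or embedders holding a flat option string; a
minimal sketch:

  static const char kFlags[] = "--expose_gc --stack_trace_limit=20 --notrace_gc";
  // The length excludes the terminating NUL, matching the (str, len) contract.
  FlagList::SetFlagsFromString(kFlags, sizeof(kFlags) - 1);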
+
+
+// static
+void FlagList::ResetAllFlags() {
+  for (size_t i = 0; i < num_flags; ++i) {
+    flags[i].Reset();
+  }
+}
+
+
+// static
+void FlagList::PrintHelp() {
+  printf("Usage:\n");
+  printf("  shell [options] -e string\n");
+  printf("    execute string in V8\n");
+  printf("  shell [options] file1 file2 ... filek\n");
+  printf("    run JavaScript scripts in file1, file2, ..., filek\n");
+  printf("  shell [options]\n");
+  printf("  shell [options] --shell [file1 file2 ... filek]\n");
+  printf("    run an interactive JavaScript shell\n");
+  printf("  d8 [options] file1 file2 ... filek\n");
+  printf("  d8 [options]\n");
+  printf("  d8 [options] --shell [file1 file2 ... filek]\n");
+  printf("    run the new debugging shell\n\n");
+  printf("Options:\n");
+  for (size_t i = 0; i < num_flags; ++i) {
+    Flag* f = &flags[i];
+    SmartPointer<const char> value = ToString(f);
+    printf("  --%s (%s)\n        type: %s  default: %s\n",
+           f->name(), f->comment(), Type2String(f->type()), *value);
+  }
+}
+
+JSArguments::JSArguments()
+    : argc_(0), argv_(NULL) {}
+JSArguments::JSArguments(int argc, const char** argv)
+    : argc_(argc), argv_(argv) {}
+int JSArguments::argc() const { return argc_; }
+const char** JSArguments::argv() { return argv_; }
+const char*& JSArguments::operator[](int idx) { return argv_[idx]; }
+JSArguments& JSArguments::operator=(JSArguments args) {
+    argc_ = args.argc_;
+    argv_ = args.argv_;
+    return *this;
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/flags.h b/src/flags.h
new file mode 100644
index 0000000..a8eca95
--- /dev/null
+++ b/src/flags.h
@@ -0,0 +1,81 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifndef V8_FLAGS_H_
+#define V8_FLAGS_H_
+
+#include "checks.h"
+
+namespace v8 {
+namespace internal {
+
+// Declare all of our flags.
+#define FLAG_MODE_DECLARE
+#include "flag-definitions.h"
+
+// The global list of all flags.
+class FlagList {
+ public:
+  // The list of all flags with a value different from the default
+  // and their values. The format of the list is like the format of the
+  // argv array passed to the main function, e.g.
+  // ("--prof", "--log-file", "v8.prof", "--nolazy").
+  //
+  // The caller is responsible for disposing the list, as well
+  // as every element of it.
+  static List<const char*>* argv();
+
+  // Set the flag values by parsing the command line. If remove_flags is
+  // set, the flags and associated values are removed from (argc,
+  // argv). Returns 0 if no error occurred. Otherwise, returns the argv
+  // index > 0 for the argument where an error occurred. In that case,
+  // (argc, argv) will remain unchanged independent of the remove_flags
+  // value, and no assumptions about flag settings should be made.
+  //
+  // The following syntax for flags is accepted (both '-' and '--' are ok):
+  //
+  //   --flag        (bool flags only)
+  //   --noflag      (bool flags only)
+  //   --flag=value  (non-bool flags only, no spaces around '=')
+  //   --flag value  (non-bool flags only)
+  //   --            (equivalent to --js_arguments, captures all remaining args)
+  static int SetFlagsFromCommandLine(int* argc, char** argv, bool remove_flags);
+
+  // Set the flag values by parsing the string str. Splits string into argc
+  // substrings argv[], each of which consisting of non-white-space chars,
+  // and then calls SetFlagsFromCommandLine() and returns its result.
+  static int SetFlagsFromString(const char* str, int len);
+
+  // Reset all flags to their default value.
+  static void ResetAllFlags();
+
+  // Print help to stdout with flags, types, and default values.
+  static void PrintHelp();
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_FLAGS_H_
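Inside the VM the generated globals are read directly; a representative sketch with a
hypothetical helper (FLAG_trace_gc comes from the FLAG_MODE_DECLARE expansion above):

  #include "flags.h"

  void MaybeTraceCollection() {
    if (v8::internal::FLAG_trace_gc) {
      // Print the one-line GC trace described in flag-definitions.h.
    }
  }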
diff --git a/src/frame-element.cc b/src/frame-element.cc
new file mode 100644
index 0000000..e6bc2ea
--- /dev/null
+++ b/src/frame-element.cc
@@ -0,0 +1,45 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "frame-element.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// FrameElement implementation.
+
+
+FrameElement::ZoneObjectList* FrameElement::ConstantList() {
+  static ZoneObjectList list(10);
+  return &list;
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/frame-element.h b/src/frame-element.h
new file mode 100644
index 0000000..ccdecf1
--- /dev/null
+++ b/src/frame-element.h
@@ -0,0 +1,235 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_FRAME_ELEMENT_H_
+#define V8_FRAME_ELEMENT_H_
+
+#include "register-allocator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// Virtual frame elements
+//
+// The internal elements of the virtual frames.  There are several kinds of
+// elements:
+//   * Invalid: elements that are uninitialized or not actually part
+//     of the virtual frame.  They should not be read.
+//   * Memory: an element that resides in the actual frame.  Its address is
+//     given by its position in the virtual frame.
+//   * Register: an element that resides in a register.
+//   * Constant: an element whose value is known at compile time.
+
+class FrameElement BASE_EMBEDDED {
+ public:
+  enum SyncFlag {
+    NOT_SYNCED,
+    SYNCED
+  };
+
+  // The default constructor creates an invalid frame element.
+  FrameElement() {
+    value_ = TypeField::encode(INVALID)
+        | CopiedField::encode(false)
+        | SyncedField::encode(false)
+        | DataField::encode(0);
+  }
+
+  // Factory function to construct an invalid frame element.
+  static FrameElement InvalidElement() {
+    FrameElement result;
+    return result;
+  }
+
+  // Factory function to construct an in-memory frame element.
+  static FrameElement MemoryElement() {
+    FrameElement result(MEMORY, no_reg, SYNCED);
+    return result;
+  }
+
+  // Factory function to construct an in-register frame element.
+  static FrameElement RegisterElement(Register reg,
+                                      SyncFlag is_synced) {
+    return FrameElement(REGISTER, reg, is_synced);
+  }
+
+  // Factory function to construct a frame element whose value is known at
+  // compile time.
+  static FrameElement ConstantElement(Handle<Object> value,
+                                      SyncFlag is_synced) {
+    FrameElement result(value, is_synced);
+    return result;
+  }
+
+  // Static indirection table for handles to constants.  If a frame
+  // element represents a constant, the data contains an index into
+  // this table of handles to the actual constants.
+  typedef ZoneList<Handle<Object> > ZoneObjectList;
+
+  static ZoneObjectList* ConstantList();
+
+  // Clear the constants indirection table.
+  static void ClearConstantList() {
+    ConstantList()->Clear();
+  }
+
+  bool is_synced() const { return SyncedField::decode(value_); }
+
+  void set_sync() {
+    ASSERT(type() != MEMORY);
+    value_ = value_ | SyncedField::encode(true);
+  }
+
+  void clear_sync() {
+    ASSERT(type() != MEMORY);
+    value_ = value_ & ~SyncedField::mask();
+  }
+
+  bool is_valid() const { return type() != INVALID; }
+  bool is_memory() const { return type() == MEMORY; }
+  bool is_register() const { return type() == REGISTER; }
+  bool is_constant() const { return type() == CONSTANT; }
+  bool is_copy() const { return type() == COPY; }
+
+  bool is_copied() const { return CopiedField::decode(value_); }
+  void set_copied() { value_ = value_ | CopiedField::encode(true); }
+  void clear_copied() { value_ = value_ & ~CopiedField::mask(); }
+
+  Register reg() const {
+    ASSERT(is_register());
+    uint32_t reg = DataField::decode(value_);
+    Register result;
+    result.code_ = reg;
+    return result;
+  }
+
+  Handle<Object> handle() const {
+    ASSERT(is_constant());
+    return ConstantList()->at(DataField::decode(value_));
+  }
+
+  int index() const {
+    ASSERT(is_copy());
+    return DataField::decode(value_);
+  }
+
+  bool Equals(FrameElement other) {
+    uint32_t masked_difference = (value_ ^ other.value_) & ~CopiedField::mask();
+    if (!masked_difference) {
+      // The elements are equal if they agree exactly except on copied field.
+      return true;
+    } else {
+      // If two constants have the same value, and agree otherwise, return true.
+       return !(masked_difference & ~DataField::mask()) &&
+              is_constant() &&
+              handle().is_identical_to(other.handle());
+    }
+  }
+
+  // Test if two FrameElements refer to the same memory or register location.
+  bool SameLocation(FrameElement* other) {
+    if (type() == other->type()) {
+      if (value_ == other->value_) return true;
+      if (is_constant() && handle().is_identical_to(other->handle())) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  // Given a pair of non-null frame element pointers, return one of them
+  // as an entry frame candidate or null if they are incompatible.
+  FrameElement* Combine(FrameElement* other) {
+    // If either is invalid, the result is.
+    if (!is_valid()) return this;
+    if (!other->is_valid()) return other;
+
+    if (!SameLocation(other)) return NULL;
+    // If either is unsynced, the result is.
+    FrameElement* result = is_synced() ? other : this;
+    return result;
+  }
+
+ private:
+  enum Type {
+    INVALID,
+    MEMORY,
+    REGISTER,
+    CONSTANT,
+    COPY
+  };
+
+  // Used to construct memory and register elements.
+  FrameElement(Type type, Register reg, SyncFlag is_synced) {
+    value_ = TypeField::encode(type)
+        | CopiedField::encode(false)
+        | SyncedField::encode(is_synced != NOT_SYNCED)
+        | DataField::encode(reg.code_ > 0 ? reg.code_ : 0);
+  }
+
+  // Used to construct constant elements.
+  FrameElement(Handle<Object> value, SyncFlag is_synced) {
+    value_ = TypeField::encode(CONSTANT)
+        | CopiedField::encode(false)
+        | SyncedField::encode(is_synced != NOT_SYNCED)
+        | DataField::encode(ConstantList()->length());
+    ConstantList()->Add(value);
+  }
+
+  Type type() const { return TypeField::decode(value_); }
+  void set_type(Type type) {
+    value_ = value_ & ~TypeField::mask();
+    value_ = value_ | TypeField::encode(type);
+  }
+
+  void set_index(int new_index) {
+    ASSERT(is_copy());
+    value_ = value_ & ~DataField::mask();
+    value_ = value_ | DataField::encode(new_index);
+  }
+
+  void set_reg(Register new_reg) {
+    ASSERT(is_register());
+    value_ = value_ & ~DataField::mask();
+    value_ = value_ | DataField::encode(new_reg.code_);
+  }
+
+  // Encode type, copied, synced and data in one 32 bit integer.
+  uint32_t value_;
+
+  class TypeField: public BitField<Type, 0, 3> {};
+  class CopiedField: public BitField<uint32_t, 3, 1> {};
+  class SyncedField: public BitField<uint32_t, 4, 1> {};
+  class DataField: public BitField<uint32_t, 5, 32 - 6> {};
+
+  friend class VirtualFrame;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_FRAME_ELEMENT_H_
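A small sketch of how the factory functions and predicates compose (illustrative
only; VirtualFrame is the real client of this class):

  FrameElement invalid = FrameElement::InvalidElement();
  FrameElement in_memory = FrameElement::MemoryElement();
  ASSERT(!invalid.is_valid());
  ASSERT(in_memory.is_memory() && in_memory.is_synced());
  // Register and constant elements keep their payload in DataField: a register
  // code, or an index into the shared ConstantList() handle table.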
diff --git a/src/frames-inl.h b/src/frames-inl.h
new file mode 100644
index 0000000..c5f2f1a
--- /dev/null
+++ b/src/frames-inl.h
@@ -0,0 +1,215 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_FRAMES_INL_H_
+#define V8_FRAMES_INL_H_
+
+#include "frames.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/frames-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/frames-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/frames-arm.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+namespace v8 {
+namespace internal {
+
+
+inline Address StackHandler::address() const {
+  return reinterpret_cast<Address>(const_cast<StackHandler*>(this));
+}
+
+
+inline StackHandler* StackHandler::next() const {
+  const int offset = StackHandlerConstants::kNextOffset;
+  return FromAddress(Memory::Address_at(address() + offset));
+}
+
+
+inline bool StackHandler::includes(Address address) const {
+  Address start = this->address();
+  Address end = start + StackHandlerConstants::kSize;
+  return start <= address && address <= end;
+}
+
+
+inline void StackHandler::Iterate(ObjectVisitor* v) const {
+  // Stack handlers do not contain any pointers that need to be
+  // traversed.
+}
+
+
+inline StackHandler* StackHandler::FromAddress(Address address) {
+  return reinterpret_cast<StackHandler*>(address);
+}
+
+
+inline StackHandler::State StackHandler::state() const {
+  const int offset = StackHandlerConstants::kStateOffset;
+  return static_cast<State>(Memory::int_at(address() + offset));
+}
+
+
+inline Address StackHandler::pc() const {
+  const int offset = StackHandlerConstants::kPCOffset;
+  return Memory::Address_at(address() + offset);
+}
+
+
+inline void StackHandler::set_pc(Address value) {
+  const int offset = StackHandlerConstants::kPCOffset;
+  Memory::Address_at(address() + offset) = value;
+}
+
+
+inline StackHandler* StackFrame::top_handler() const {
+  return iterator_->handler();
+}
+
+
+inline Object* StandardFrame::GetExpression(int index) const {
+  return Memory::Object_at(GetExpressionAddress(index));
+}
+
+
+inline void StandardFrame::SetExpression(int index, Object* value) {
+  Memory::Object_at(GetExpressionAddress(index)) = value;
+}
+
+
+inline Object* StandardFrame::context() const {
+  const int offset = StandardFrameConstants::kContextOffset;
+  return Memory::Object_at(fp() + offset);
+}
+
+
+inline Address StandardFrame::caller_fp() const {
+  return Memory::Address_at(fp() + StandardFrameConstants::kCallerFPOffset);
+}
+
+
+inline Address StandardFrame::caller_pc() const {
+  return Memory::Address_at(ComputePCAddress(fp()));
+}
+
+
+inline Address StandardFrame::ComputePCAddress(Address fp) {
+  return fp + StandardFrameConstants::kCallerPCOffset;
+}
+
+
+inline bool StandardFrame::IsArgumentsAdaptorFrame(Address fp) {
+  Object* marker =
+      Memory::Object_at(fp + StandardFrameConstants::kContextOffset);
+  return marker == Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR);
+}
+
+
+inline bool StandardFrame::IsConstructFrame(Address fp) {
+  Object* marker =
+      Memory::Object_at(fp + StandardFrameConstants::kMarkerOffset);
+  return marker == Smi::FromInt(CONSTRUCT);
+}
+
+
+inline Object* JavaScriptFrame::receiver() const {
+  const int offset = JavaScriptFrameConstants::kReceiverOffset;
+  return Memory::Object_at(caller_sp() + offset);
+}
+
+
+inline void JavaScriptFrame::set_receiver(Object* value) {
+  const int offset = JavaScriptFrameConstants::kReceiverOffset;
+  Memory::Object_at(caller_sp() + offset) = value;
+}
+
+
+inline bool JavaScriptFrame::has_adapted_arguments() const {
+  return IsArgumentsAdaptorFrame(caller_fp());
+}
+
+
+inline Object* JavaScriptFrame::function() const {
+  Object* result = function_slot_object();
+  ASSERT(result->IsJSFunction());
+  return result;
+}
+
+
+template<typename Iterator>
+inline JavaScriptFrame* JavaScriptFrameIteratorTemp<Iterator>::frame() const {
+  // TODO(1233797): The frame hierarchy needs to change. It's
+  // problematic that we can't use the safe-cast operator to cast to
+  // the JavaScript frame type, because we may encounter arguments
+  // adaptor frames.
+  StackFrame* frame = iterator_.frame();
+  ASSERT(frame->is_java_script() || frame->is_arguments_adaptor());
+  return static_cast<JavaScriptFrame*>(frame);
+}
+
+
+template<typename Iterator>
+JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
+    StackFrame::Id id) {
+  while (!done()) {
+    Advance();
+    if (frame()->id() == id) return;
+  }
+}
+
+
+template<typename Iterator>
+void JavaScriptFrameIteratorTemp<Iterator>::Advance() {
+  do {
+    iterator_.Advance();
+  } while (!iterator_.done() && !iterator_.frame()->is_java_script());
+}
+
+
+template<typename Iterator>
+void JavaScriptFrameIteratorTemp<Iterator>::AdvanceToArgumentsFrame() {
+  if (!frame()->has_adapted_arguments()) return;
+  iterator_.Advance();
+  ASSERT(iterator_.frame()->is_arguments_adaptor());
+}
+
+
+template<typename Iterator>
+void JavaScriptFrameIteratorTemp<Iterator>::Reset() {
+  iterator_.Reset();
+  if (!done()) Advance();
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_FRAMES_INL_H_
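The accessors above are what the iterators defined in frames.cc (next file) build
on; as a usage sketch, walking every frame of the current thread looks like:

  for (StackFrameIterator it; !it.done(); it.Advance()) {
    StackFrame* frame = it.frame();
    if (frame->is_java_script()) {
      // JavaScript-frame-specific handling would go here.
    }
  }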
diff --git a/src/frames.cc b/src/frames.cc
new file mode 100644
index 0000000..5cd8332
--- /dev/null
+++ b/src/frames.cc
@@ -0,0 +1,743 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "frames-inl.h"
+#include "mark-compact.h"
+#include "scopeinfo.h"
+#include "string-stream.h"
+#include "top.h"
+#include "zone-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// Iterator that supports traversing the stack handlers of a
+// particular frame. Needs to know the top of the handler chain.
+class StackHandlerIterator BASE_EMBEDDED {
+ public:
+  StackHandlerIterator(const StackFrame* frame, StackHandler* handler)
+      : limit_(frame->fp()), handler_(handler) {
+    // Make sure the handler has already been unwound to this frame.
+    ASSERT(frame->sp() <= handler->address());
+  }
+
+  StackHandler* handler() const { return handler_; }
+
+  bool done() {
+    return handler_ == NULL || handler_->address() > limit_;
+  }
+  void Advance() {
+    ASSERT(!done());
+    handler_ = handler_->next();
+  }
+
+ private:
+  const Address limit_;
+  StackHandler* handler_;
+};
+
+
+// -------------------------------------------------------------------------
+
+
+#define INITIALIZE_SINGLETON(type, field) field##_(this),
+StackFrameIterator::StackFrameIterator()
+    : STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
+      frame_(NULL), handler_(NULL), thread_(Top::GetCurrentThread()),
+      fp_(NULL), sp_(NULL), advance_(&StackFrameIterator::AdvanceWithHandler) {
+  Reset();
+}
+StackFrameIterator::StackFrameIterator(ThreadLocalTop* t)
+    : STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
+      frame_(NULL), handler_(NULL), thread_(t),
+      fp_(NULL), sp_(NULL), advance_(&StackFrameIterator::AdvanceWithHandler) {
+  Reset();
+}
+StackFrameIterator::StackFrameIterator(bool use_top, Address fp, Address sp)
+    : STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
+      frame_(NULL), handler_(NULL),
+      thread_(use_top ? Top::GetCurrentThread() : NULL),
+      fp_(use_top ? NULL : fp), sp_(sp),
+      advance_(use_top ? &StackFrameIterator::AdvanceWithHandler :
+               &StackFrameIterator::AdvanceWithoutHandler) {
+  if (use_top || fp != NULL) {
+    Reset();
+  }
+  JavaScriptFrame_.DisableHeapAccess();
+}
+
+#undef INITIALIZE_SINGLETON
+
+
+void StackFrameIterator::AdvanceWithHandler() {
+  ASSERT(!done());
+  // Compute the state of the calling frame before restoring
+  // callee-saved registers and unwinding handlers. This allows the
+  // frame code that computes the caller state to access the top
+  // handler and the value of any callee-saved register if needed.
+  StackFrame::State state;
+  StackFrame::Type type = frame_->GetCallerState(&state);
+
+  // Unwind handlers corresponding to the current frame.
+  StackHandlerIterator it(frame_, handler_);
+  while (!it.done()) it.Advance();
+  handler_ = it.handler();
+
+  // Advance to the calling frame.
+  frame_ = SingletonFor(type, &state);
+
+  // When we're done iterating over the stack frames, the handler
+  // chain must have been completely unwound.
+  ASSERT(!done() || handler_ == NULL);
+}
+
+
+void StackFrameIterator::AdvanceWithoutHandler() {
+  // A simpler version of Advance which doesn't care about handler.
+  ASSERT(!done());
+  StackFrame::State state;
+  StackFrame::Type type = frame_->GetCallerState(&state);
+  frame_ = SingletonFor(type, &state);
+}
+
+
+void StackFrameIterator::Reset() {
+  StackFrame::State state;
+  StackFrame::Type type;
+  if (thread_ != NULL) {
+    type = ExitFrame::GetStateForFramePointer(Top::c_entry_fp(thread_), &state);
+    handler_ = StackHandler::FromAddress(Top::handler(thread_));
+  } else {
+    ASSERT(fp_ != NULL);
+    state.fp = fp_;
+    state.sp = sp_;
+    state.pc_address =
+        reinterpret_cast<Address*>(StandardFrame::ComputePCAddress(fp_));
+    type = StackFrame::ComputeType(&state);
+    if (SingletonFor(type) == NULL) return;
+  }
+  frame_ = SingletonFor(type, &state);
+}
+
+
+StackFrame* StackFrameIterator::SingletonFor(StackFrame::Type type,
+                                             StackFrame::State* state) {
+  if (type == StackFrame::NONE) return NULL;
+  StackFrame* result = SingletonFor(type);
+  ASSERT(result != NULL);
+  result->state_ = *state;
+  return result;
+}
+
+
+StackFrame* StackFrameIterator::SingletonFor(StackFrame::Type type) {
+#define FRAME_TYPE_CASE(type, field) \
+  case StackFrame::type: result = &field##_; break;
+
+  StackFrame* result = NULL;
+  switch (type) {
+    case StackFrame::NONE: return NULL;
+    STACK_FRAME_TYPE_LIST(FRAME_TYPE_CASE)
+    default: break;
+  }
+  return result;
+
+#undef FRAME_TYPE_CASE
+}
+
+
+// -------------------------------------------------------------------------
+
+
+StackTraceFrameIterator::StackTraceFrameIterator() {
+  if (!done() && !frame()->function()->IsJSFunction()) Advance();
+}
+
+
+void StackTraceFrameIterator::Advance() {
+  while (true) {
+    JavaScriptFrameIterator::Advance();
+    if (done()) return;
+    if (frame()->function()->IsJSFunction()) return;
+  }
+}
+
+
+// -------------------------------------------------------------------------
+
+
+SafeStackFrameIterator::SafeStackFrameIterator(
+    Address fp, Address sp, Address low_bound, Address high_bound) :
+    low_bound_(low_bound), high_bound_(high_bound),
+    is_valid_top_(
+        IsWithinBounds(low_bound, high_bound,
+                       Top::c_entry_fp(Top::GetCurrentThread())) &&
+        Top::handler(Top::GetCurrentThread()) != NULL),
+    is_valid_fp_(IsWithinBounds(low_bound, high_bound, fp)),
+    is_working_iterator_(is_valid_top_ || is_valid_fp_),
+    iteration_done_(!is_working_iterator_),
+    iterator_(is_valid_top_, is_valid_fp_ ? fp : NULL, sp) {
+}
+
+
+void SafeStackFrameIterator::Advance() {
+  ASSERT(is_working_iterator_);
+  ASSERT(!done());
+  StackFrame* last_frame = iterator_.frame();
+  Address last_sp = last_frame->sp(), last_fp = last_frame->fp();
+  // Before advancing to the next stack frame, perform pointer validity tests
+  iteration_done_ = !IsValidFrame(last_frame) ||
+      !CanIterateHandles(last_frame, iterator_.handler()) ||
+      !IsValidCaller(last_frame);
+  if (iteration_done_) return;
+
+  iterator_.Advance();
+  if (iterator_.done()) return;
+  // Check that we have actually moved to the previous frame in the stack
+  StackFrame* prev_frame = iterator_.frame();
+  iteration_done_ = prev_frame->sp() < last_sp || prev_frame->fp() < last_fp;
+}
+
+
+bool SafeStackFrameIterator::CanIterateHandles(StackFrame* frame,
+                                               StackHandler* handler) {
+  // If the iterator also walks stack handlers, verify that a
+  // StackHandlerIterator can be instantiated (see the StackHandlerIterator
+  // constructor).
+  return !is_valid_top_ || (frame->sp() <= handler->address());
+}
+
+
+bool SafeStackFrameIterator::IsValidFrame(StackFrame* frame) const {
+  return IsValidStackAddress(frame->sp()) && IsValidStackAddress(frame->fp());
+}
+
+
+bool SafeStackFrameIterator::IsValidCaller(StackFrame* frame) {
+  StackFrame::State state;
+  if (frame->is_entry() || frame->is_entry_construct()) {
+    // See EntryFrame::GetCallerState. It computes the caller FP address
+    // and calls ExitFrame::GetStateForFramePointer on it. We need to be
+    // sure that the caller FP address is valid.
+    Address caller_fp = Memory::Address_at(
+        frame->fp() + EntryFrameConstants::kCallerFPOffset);
+    if (!IsValidStackAddress(caller_fp)) {
+      return false;
+    }
+  } else if (frame->is_arguments_adaptor()) {
+    // See ArgumentsAdaptorFrame::GetCallerStackPointer. It assumes that
+    // the number of arguments is stored on the stack as a Smi. We need to
+    // check that it really is a Smi.
+    Object* number_of_args = reinterpret_cast<ArgumentsAdaptorFrame*>(frame)->
+        GetExpression(0);
+    if (!number_of_args->IsSmi()) {
+      return false;
+    }
+  }
+  frame->ComputeCallerState(&state);
+  return IsValidStackAddress(state.sp) && IsValidStackAddress(state.fp) &&
+      iterator_.SingletonFor(frame->GetCallerState(&state)) != NULL;
+}
+
+
+void SafeStackFrameIterator::Reset() {
+  if (is_working_iterator_) {
+    iterator_.Reset();
+    iteration_done_ = false;
+  }
+}
+
+
+// -------------------------------------------------------------------------
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+SafeStackTraceFrameIterator::SafeStackTraceFrameIterator(
+    Address fp, Address sp, Address low_bound, Address high_bound) :
+    SafeJavaScriptFrameIterator(fp, sp, low_bound, high_bound) {
+  if (!done() && !frame()->is_java_script()) Advance();
+}
+
+
+void SafeStackTraceFrameIterator::Advance() {
+  while (true) {
+    SafeJavaScriptFrameIterator::Advance();
+    if (done()) return;
+    if (frame()->is_java_script()) return;
+  }
+}
+#endif
+
+
+// -------------------------------------------------------------------------
+
+
+void StackHandler::Cook(Code* code) {
+  ASSERT(MarkCompactCollector::IsCompacting());
+  ASSERT(code->contains(pc()));
+  set_pc(AddressFrom<Address>(pc() - code->instruction_start()));
+}
+
+
+void StackHandler::Uncook(Code* code) {
+  ASSERT(MarkCompactCollector::IsCompacting());
+  set_pc(code->instruction_start() + OffsetFrom(pc()));
+  ASSERT(code->contains(pc()));
+}
+
+
+// -------------------------------------------------------------------------
+
+
+bool StackFrame::HasHandler() const {
+  StackHandlerIterator it(this, top_handler());
+  return !it.done();
+}
+
+
+void StackFrame::CookFramesForThread(ThreadLocalTop* thread) {
+  // Frames are only cooked when the collector is compacting and thus moving
+  // code around.
+  ASSERT(MarkCompactCollector::IsCompacting());
+  ASSERT(!thread->stack_is_cooked());
+  for (StackFrameIterator it(thread); !it.done(); it.Advance()) {
+    it.frame()->Cook();
+  }
+  thread->set_stack_is_cooked(true);
+}
+
+
+void StackFrame::UncookFramesForThread(ThreadLocalTop* thread) {
+  // Frames are only uncooked when the collector is compacting and thus moving
+  // code around.
+  ASSERT(MarkCompactCollector::IsCompacting());
+  ASSERT(thread->stack_is_cooked());
+  for (StackFrameIterator it(thread); !it.done(); it.Advance()) {
+    it.frame()->Uncook();
+  }
+  thread->set_stack_is_cooked(false);
+}
+
+
+void StackFrame::Cook() {
+  Code* code = this->code();
+  for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
+    it.handler()->Cook(code);
+  }
+  ASSERT(code->contains(pc()));
+  set_pc(AddressFrom<Address>(pc() - code->instruction_start()));
+}
+
+
+void StackFrame::Uncook() {
+  Code* code = this->code();
+  for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
+    it.handler()->Uncook(code);
+  }
+  set_pc(code->instruction_start() + OffsetFrom(pc()));
+  ASSERT(code->contains(pc()));
+}
+
+
+StackFrame::Type StackFrame::GetCallerState(State* state) const {
+  ComputeCallerState(state);
+  return ComputeType(state);
+}
+
+
+Code* EntryFrame::code() const {
+  return Heap::js_entry_code();
+}
+
+
+void EntryFrame::ComputeCallerState(State* state) const {
+  GetCallerState(state);
+}
+
+
+StackFrame::Type EntryFrame::GetCallerState(State* state) const {
+  const int offset = EntryFrameConstants::kCallerFPOffset;
+  Address fp = Memory::Address_at(this->fp() + offset);
+  return ExitFrame::GetStateForFramePointer(fp, state);
+}
+
+
+Code* EntryConstructFrame::code() const {
+  return Heap::js_construct_entry_code();
+}
+
+
+Code* ExitFrame::code() const {
+  return Heap::c_entry_code();
+}
+
+
+void ExitFrame::ComputeCallerState(State* state) const {
+  // Set up the caller state.
+  state->sp = caller_sp();
+  state->fp = Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset);
+  state->pc_address
+      = reinterpret_cast<Address*>(fp() + ExitFrameConstants::kCallerPCOffset);
+}
+
+
+Address ExitFrame::GetCallerStackPointer() const {
+  return fp() + ExitFrameConstants::kCallerSPDisplacement;
+}
+
+
+Code* ExitDebugFrame::code() const {
+  return Heap::c_entry_debug_break_code();
+}
+
+
+Address StandardFrame::GetExpressionAddress(int n) const {
+  const int offset = StandardFrameConstants::kExpressionsOffset;
+  return fp() + offset - n * kPointerSize;
+}
+
+
+int StandardFrame::ComputeExpressionsCount() const {
+  const int offset =
+      StandardFrameConstants::kExpressionsOffset + kPointerSize;
+  Address base = fp() + offset;
+  Address limit = sp();
+  ASSERT(base >= limit);  // stack grows downwards
+  // Include register-allocated locals in number of expressions.
+  return (base - limit) / kPointerSize;
+}
+
+
+void StandardFrame::ComputeCallerState(State* state) const {
+  state->sp = caller_sp();
+  state->fp = caller_fp();
+  state->pc_address = reinterpret_cast<Address*>(ComputePCAddress(fp()));
+}
+
+
+bool StandardFrame::IsExpressionInsideHandler(int n) const {
+  Address address = GetExpressionAddress(n);
+  for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
+    if (it.handler()->includes(address)) return true;
+  }
+  return false;
+}
+
+
+Object* JavaScriptFrame::GetParameter(int index) const {
+  ASSERT(index >= 0 && index < ComputeParametersCount());
+  const int offset = JavaScriptFrameConstants::kParam0Offset;
+  return Memory::Object_at(caller_sp() + offset - (index * kPointerSize));
+}
+
+
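+// The parameter count is derived from the frame layout: it is the number of
+// pointer-sized slots between the saved registers area of this frame and the
+// receiver slot relative to the caller's stack pointer.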
+int JavaScriptFrame::ComputeParametersCount() const {
+  Address base  = caller_sp() + JavaScriptFrameConstants::kReceiverOffset;
+  Address limit = fp() + JavaScriptFrameConstants::kSavedRegistersOffset;
+  return (base - limit) / kPointerSize;
+}
+
+
+bool JavaScriptFrame::IsConstructor() const {
+  Address fp = caller_fp();
+  if (has_adapted_arguments()) {
+    // Skip the arguments adaptor frame and look at the real caller.
+    fp = Memory::Address_at(fp + StandardFrameConstants::kCallerFPOffset);
+  }
+  return IsConstructFrame(fp);
+}
+
+
+Code* JavaScriptFrame::code() const {
+  JSFunction* function = JSFunction::cast(this->function());
+  return function->shared()->code();
+}
+
+
+Code* ArgumentsAdaptorFrame::code() const {
+  return Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline);
+}
+
+
+Code* InternalFrame::code() const {
+  const int offset = InternalFrameConstants::kCodeOffset;
+  Object* code = Memory::Object_at(fp() + offset);
+  ASSERT(code != NULL);
+  return Code::cast(code);
+}
+
+
+void StackFrame::PrintIndex(StringStream* accumulator,
+                            PrintMode mode,
+                            int index) {
+  accumulator->Add((mode == OVERVIEW) ? "%5d: " : "[%d]: ", index);
+}
+
+
+void JavaScriptFrame::Print(StringStream* accumulator,
+                            PrintMode mode,
+                            int index) const {
+  HandleScope scope;
+  Object* receiver = this->receiver();
+  Object* function = this->function();
+
+  accumulator->PrintSecurityTokenIfChanged(function);
+  PrintIndex(accumulator, mode, index);
+  Code* code = NULL;
+  if (IsConstructor()) accumulator->Add("new ");
+  accumulator->PrintFunction(function, receiver, &code);
+  accumulator->Add("(this=%o", receiver);
+
+  // Get scope information for nicer output, if possible. If code is
+  // NULL, or doesn't contain scope info, info will return 0 for the
+  // number of parameters, stack slots, or context slots.
+  ScopeInfo<PreallocatedStorage> info(code);
+
+  // Print the parameters.
+  int parameters_count = ComputeParametersCount();
+  for (int i = 0; i < parameters_count; i++) {
+    accumulator->Add(",");
+    // If we have a name for the parameter we print it. Nameless
+    // parameters are either because we have more actual parameters
+    // than formal parameters or because we have no scope information.
+    if (i < info.number_of_parameters()) {
+      accumulator->PrintName(*info.parameter_name(i));
+      accumulator->Add("=");
+    }
+    accumulator->Add("%o", GetParameter(i));
+  }
+
+  accumulator->Add(")");
+  if (mode == OVERVIEW) {
+    accumulator->Add("\n");
+    return;
+  }
+  accumulator->Add(" {\n");
+
+  // Compute the number of locals and expression stack elements.
+  int stack_locals_count = info.number_of_stack_slots();
+  int heap_locals_count = info.number_of_context_slots();
+  int expressions_count = ComputeExpressionsCount();
+
+  // Print stack-allocated local variables.
+  if (stack_locals_count > 0) {
+    accumulator->Add("  // stack-allocated locals\n");
+  }
+  for (int i = 0; i < stack_locals_count; i++) {
+    accumulator->Add("  var ");
+    accumulator->PrintName(*info.stack_slot_name(i));
+    accumulator->Add(" = ");
+    if (i < expressions_count) {
+      accumulator->Add("%o", GetExpression(i));
+    } else {
+      accumulator->Add("// no expression found - inconsistent frame?");
+    }
+    accumulator->Add("\n");
+  }
+
+  // Try to get hold of the context of this frame.
+  Context* context = NULL;
+  if (this->context() != NULL && this->context()->IsContext()) {
+    context = Context::cast(this->context());
+  }
+
+  // Print heap-allocated local variables.
+  if (heap_locals_count > Context::MIN_CONTEXT_SLOTS) {
+    accumulator->Add("  // heap-allocated locals\n");
+  }
+  for (int i = Context::MIN_CONTEXT_SLOTS; i < heap_locals_count; i++) {
+    accumulator->Add("  var ");
+    accumulator->PrintName(*info.context_slot_name(i));
+    accumulator->Add(" = ");
+    if (context != NULL) {
+      if (i < context->length()) {
+        accumulator->Add("%o", context->get(i));
+      } else {
+        accumulator->Add(
+            "// warning: missing context slot - inconsistent frame?");
+      }
+    } else {
+      accumulator->Add("// warning: no context found - inconsistent frame?");
+    }
+    accumulator->Add("\n");
+  }
+
+  // Print the expression stack.
+  int expressions_start = stack_locals_count;
+  if (expressions_start < expressions_count) {
+    accumulator->Add("  // expression stack (top to bottom)\n");
+  }
+  for (int i = expressions_count - 1; i >= expressions_start; i--) {
+    if (IsExpressionInsideHandler(i)) continue;
+    accumulator->Add("  [%02d] : %o\n", i, GetExpression(i));
+  }
+
+  // Print details about the function.
+  if (FLAG_max_stack_trace_source_length != 0 && code != NULL) {
+    SharedFunctionInfo* shared = JSFunction::cast(function)->shared();
+    accumulator->Add("--------- s o u r c e   c o d e ---------\n");
+    shared->SourceCodePrint(accumulator, FLAG_max_stack_trace_source_length);
+    accumulator->Add("\n-----------------------------------------\n");
+  }
+
+  accumulator->Add("}\n\n");
+}
+
+
+void ArgumentsAdaptorFrame::Print(StringStream* accumulator,
+                                  PrintMode mode,
+                                  int index) const {
+  int actual = ComputeParametersCount();
+  int expected = -1;
+  Object* function = this->function();
+  if (function->IsJSFunction()) {
+    expected = JSFunction::cast(function)->shared()->formal_parameter_count();
+  }
+
+  PrintIndex(accumulator, mode, index);
+  accumulator->Add("arguments adaptor frame: %d->%d", actual, expected);
+  if (mode == OVERVIEW) {
+    accumulator->Add("\n");
+    return;
+  }
+  accumulator->Add(" {\n");
+
+  // Print actual arguments.
+  if (actual > 0) accumulator->Add("  // actual arguments\n");
+  for (int i = 0; i < actual; i++) {
+    accumulator->Add("  [%02d] : %o", i, GetParameter(i));
+    if (expected != -1 && i >= expected) {
+      accumulator->Add("  // not passed to callee");
+    }
+    accumulator->Add("\n");
+  }
+
+  accumulator->Add("}\n\n");
+}
+
+
+void EntryFrame::Iterate(ObjectVisitor* v) const {
+  StackHandlerIterator it(this, top_handler());
+  ASSERT(!it.done());
+  StackHandler* handler = it.handler();
+  ASSERT(handler->is_entry());
+  handler->Iterate(v);
+  // Make sure that the entry frame does not contain more than one stack
+  // handler.
+#ifdef DEBUG
+  it.Advance();
+  ASSERT(it.done());
+#endif
+}
+
+
+void StandardFrame::IterateExpressions(ObjectVisitor* v) const {
+  const int offset = StandardFrameConstants::kContextOffset;
+  Object** base = &Memory::Object_at(sp());
+  Object** limit = &Memory::Object_at(fp() + offset) + 1;
+  for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
+    StackHandler* handler = it.handler();
+    // Traverse pointers down to - but not including - the next
+    // handler in the handler chain. Update the base to skip the
+    // handler and allow the handler to traverse its own pointers.
+    const Address address = handler->address();
+    v->VisitPointers(base, reinterpret_cast<Object**>(address));
+    base = reinterpret_cast<Object**>(address + StackHandlerConstants::kSize);
+    // Traverse the pointers in the handler itself.
+    handler->Iterate(v);
+  }
+  v->VisitPointers(base, limit);
+}
+
+
+void JavaScriptFrame::Iterate(ObjectVisitor* v) const {
+  IterateExpressions(v);
+
+  // Traverse callee-saved registers, receiver, and parameters.
+  const int kBaseOffset = JavaScriptFrameConstants::kSavedRegistersOffset;
+  const int kLimitOffset = JavaScriptFrameConstants::kReceiverOffset;
+  Object** base = &Memory::Object_at(fp() + kBaseOffset);
+  Object** limit = &Memory::Object_at(caller_sp() + kLimitOffset) + 1;
+  v->VisitPointers(base, limit);
+}
+
+
+void InternalFrame::Iterate(ObjectVisitor* v) const {
+  // Internal frames only have object pointers on the expression stack
+  // as they never have any arguments.
+  IterateExpressions(v);
+}
+
+
+// -------------------------------------------------------------------------
+
+
+JavaScriptFrame* StackFrameLocator::FindJavaScriptFrame(int n) {
+  ASSERT(n >= 0);
+  for (int i = 0; i <= n; i++) {
+    while (!iterator_.frame()->is_java_script()) iterator_.Advance();
+    if (i == n) return JavaScriptFrame::cast(iterator_.frame());
+    iterator_.Advance();
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+// -------------------------------------------------------------------------
+
+
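+// Counts the registers in the list by repeatedly clearing the lowest set bit
+// (Kernighan's bit-counting trick), one iteration per register.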
+int NumRegs(RegList reglist) {
+  int n = 0;
+  while (reglist != 0) {
+    n++;
+    reglist &= reglist - 1;  // clear one bit
+  }
+  return n;
+}
+
+
+int JSCallerSavedCode(int n) {
+  static int reg_code[kNumJSCallerSaved];
+  static bool initialized = false;
+  if (!initialized) {
+    initialized = true;
+    int i = 0;
+    for (int r = 0; r < kNumRegs; r++)
+      if ((kJSCallerSaved & (1 << r)) != 0)
+        reg_code[i++] = r;
+
+    ASSERT(i == kNumJSCallerSaved);
+  }
+  ASSERT(0 <= n && n < kNumJSCallerSaved);
+  return reg_code[n];
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/frames.h b/src/frames.h
new file mode 100644
index 0000000..768196d
--- /dev/null
+++ b/src/frames.h
@@ -0,0 +1,678 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_FRAMES_H_
+#define V8_FRAMES_H_
+
+namespace v8 {
+namespace internal {
+
+typedef uint32_t RegList;
+
+// Get the number of registers in a given register list.
+int NumRegs(RegList list);
+
+// Return the code of the n-th saved register available to JavaScript.
+int JSCallerSavedCode(int n);
+
+
+// Forward declarations.
+class StackFrameIterator;
+class Top;
+class ThreadLocalTop;
+
+
+class StackHandler BASE_EMBEDDED {
+ public:
+  enum State {
+    ENTRY,
+    TRY_CATCH,
+    TRY_FINALLY
+  };
+
+  // Get the address of this stack handler.
+  inline Address address() const;
+
+  // Get the next stack handler in the chain.
+  inline StackHandler* next() const;
+
+  // Tells whether the given address is inside this handler.
+  inline bool includes(Address address) const;
+
+  // Garbage collection support.
+  inline void Iterate(ObjectVisitor* v) const;
+
+  // Conversion support.
+  static inline StackHandler* FromAddress(Address address);
+
+  // Testers
+  bool is_entry() { return state() == ENTRY; }
+  bool is_try_catch() { return state() == TRY_CATCH; }
+  bool is_try_finally() { return state() == TRY_FINALLY; }
+
+  // Garbage collection support.
+  void Cook(Code* code);
+  void Uncook(Code* code);
+
+ private:
+  // Accessors.
+  inline State state() const;
+
+  inline Address pc() const;
+  inline void set_pc(Address value);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(StackHandler);
+};
+
+
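+// X-macro listing every concrete stack frame type together with its class.
+// It is expanded below to declare the StackFrame::Type enum and, in
+// StackFrameIterator, one singleton frame object per type.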
+#define STACK_FRAME_TYPE_LIST(V)              \
+  V(ENTRY,             EntryFrame)            \
+  V(ENTRY_CONSTRUCT,   EntryConstructFrame)   \
+  V(EXIT,              ExitFrame)             \
+  V(EXIT_DEBUG,        ExitDebugFrame)        \
+  V(JAVA_SCRIPT,       JavaScriptFrame)       \
+  V(INTERNAL,          InternalFrame)         \
+  V(CONSTRUCT,         ConstructFrame)        \
+  V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame)
+
+
+// Abstract base class for all stack frames.
+class StackFrame BASE_EMBEDDED {
+ public:
+#define DECLARE_TYPE(type, ignore) type,
+  enum Type {
+    NONE = 0,
+    STACK_FRAME_TYPE_LIST(DECLARE_TYPE)
+    NUMBER_OF_TYPES
+  };
+#undef DECLARE_TYPE
+
+  // Opaque data type for identifying stack frames. Used extensively
+  // by the debugger.
+  enum Id { NO_ID = 0 };
+
+  // Type testers.
+  bool is_entry() const { return type() == ENTRY; }
+  bool is_entry_construct() const { return type() == ENTRY_CONSTRUCT; }
+  bool is_exit() const { return type() == EXIT; }
+  bool is_exit_debug() const { return type() == EXIT_DEBUG; }
+  bool is_java_script() const { return type() == JAVA_SCRIPT; }
+  bool is_arguments_adaptor() const { return type() == ARGUMENTS_ADAPTOR; }
+  bool is_internal() const { return type() == INTERNAL; }
+  bool is_construct() const { return type() == CONSTRUCT; }
+  virtual bool is_standard() const { return false; }
+
+  // Accessors.
+  Address sp() const { return state_.sp; }
+  Address fp() const { return state_.fp; }
+  Address caller_sp() const { return GetCallerStackPointer(); }
+
+  Address pc() const { return *pc_address(); }
+  void set_pc(Address pc) { *pc_address() = pc; }
+
+  Address* pc_address() const { return state_.pc_address; }
+
+  // Get the id of this stack frame.
+  Id id() const { return static_cast<Id>(OffsetFrom(caller_sp())); }
+
+  // Checks if this frame includes any stack handlers.
+  bool HasHandler() const;
+
+  // Get the type of this frame.
+  virtual Type type() const = 0;
+
+  // Get the code associated with this frame.
+  virtual Code* code() const = 0;
+
+  // Garbage collection support.
+  static void CookFramesForThread(ThreadLocalTop* thread);
+  static void UncookFramesForThread(ThreadLocalTop* thread);
+
+  virtual void Iterate(ObjectVisitor* v) const { }
+
+  // Printing support.
+  enum PrintMode { OVERVIEW, DETAILS };
+  virtual void Print(StringStream* accumulator,
+                     PrintMode mode,
+                     int index) const { }
+
+ protected:
+  struct State {
+    Address sp;
+    Address fp;
+    Address* pc_address;
+  };
+
+  explicit StackFrame(StackFrameIterator* iterator) : iterator_(iterator) { }
+  virtual ~StackFrame() { }
+
+  // Compute the stack pointer for the calling frame.
+  virtual Address GetCallerStackPointer() const = 0;
+
+  // Printing support.
+  static void PrintIndex(StringStream* accumulator,
+                         PrintMode mode,
+                         int index);
+
+  // Get the top handler from the current stack iterator.
+  inline StackHandler* top_handler() const;
+
+  // Compute the stack frame type for the given state.
+  static Type ComputeType(State* state);
+
+ private:
+  const StackFrameIterator* iterator_;
+  State state_;
+
+  // Fill in the state of the calling frame.
+  virtual void ComputeCallerState(State* state) const = 0;
+
+  // Get the type and the state of the calling frame.
+  virtual Type GetCallerState(State* state) const;
+
+  // Cooking/uncooking support.
+  void Cook();
+  void Uncook();
+
+  friend class StackFrameIterator;
+  friend class StackHandlerIterator;
+  friend class SafeStackFrameIterator;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(StackFrame);
+};
+
+
+// Entry frames are used to enter JavaScript execution from C.
+class EntryFrame: public StackFrame {
+ public:
+  virtual Type type() const { return ENTRY; }
+
+  virtual Code* code() const;
+
+  // Garbage collection support.
+  virtual void Iterate(ObjectVisitor* v) const;
+
+  static EntryFrame* cast(StackFrame* frame) {
+    ASSERT(frame->is_entry());
+    return static_cast<EntryFrame*>(frame);
+  }
+
+ protected:
+  explicit EntryFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
+
+  // The caller stack pointer for entry frames is always zero. The
+  // real information about the caller frame is available through the
+  // link to the top exit frame.
+  virtual Address GetCallerStackPointer() const { return 0; }
+
+ private:
+  virtual void ComputeCallerState(State* state) const;
+  virtual Type GetCallerState(State* state) const;
+
+  friend class StackFrameIterator;
+};
+
+
+class EntryConstructFrame: public EntryFrame {
+ public:
+  virtual Type type() const { return ENTRY_CONSTRUCT; }
+
+  virtual Code* code() const;
+
+  static EntryConstructFrame* cast(StackFrame* frame) {
+    ASSERT(frame->is_entry_construct());
+    return static_cast<EntryConstructFrame*>(frame);
+  }
+
+ protected:
+  explicit EntryConstructFrame(StackFrameIterator* iterator)
+      : EntryFrame(iterator) { }
+
+ private:
+  friend class StackFrameIterator;
+};
+
+
+// Exit frames are used to exit JavaScript execution and go to C.
+class ExitFrame: public StackFrame {
+ public:
+  virtual Type type() const { return EXIT; }
+
+  virtual Code* code() const;
+
+  // Garbage collection support.
+  virtual void Iterate(ObjectVisitor* v) const;
+
+  static ExitFrame* cast(StackFrame* frame) {
+    ASSERT(frame->is_exit());
+    return static_cast<ExitFrame*>(frame);
+  }
+
+  // Compute the state and type of an exit frame given a frame
+  // pointer. Used when constructing the first stack frame seen by an
+  // iterator and the frames following entry frames.
+  static Type GetStateForFramePointer(Address fp, State* state);
+
+ protected:
+  explicit ExitFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
+
+  virtual Address GetCallerStackPointer() const;
+
+ private:
+  virtual void ComputeCallerState(State* state) const;
+
+  friend class StackFrameIterator;
+};
+
+
+class ExitDebugFrame: public ExitFrame {
+ public:
+  virtual Type type() const { return EXIT_DEBUG; }
+
+  virtual Code* code() const;
+
+  static ExitDebugFrame* cast(StackFrame* frame) {
+    ASSERT(frame->is_exit_debug());
+    return static_cast<ExitDebugFrame*>(frame);
+  }
+
+ protected:
+  explicit ExitDebugFrame(StackFrameIterator* iterator)
+      : ExitFrame(iterator) { }
+
+ private:
+  friend class StackFrameIterator;
+};
+
+
+class StandardFrame: public StackFrame {
+ public:
+  // Testers.
+  virtual bool is_standard() const { return true; }
+
+  // Accessors.
+  inline Object* context() const;
+
+  // Access the expressions in the stack frame including locals.
+  inline Object* GetExpression(int index) const;
+  inline void SetExpression(int index, Object* value);
+  int ComputeExpressionsCount() const;
+
+  static StandardFrame* cast(StackFrame* frame) {
+    ASSERT(frame->is_standard());
+    return static_cast<StandardFrame*>(frame);
+  }
+
+ protected:
+  explicit StandardFrame(StackFrameIterator* iterator)
+      : StackFrame(iterator) { }
+
+  virtual void ComputeCallerState(State* state) const;
+
+  // Accessors.
+  inline Address caller_fp() const;
+  inline Address caller_pc() const;
+
+  // Computes the address of the PC field in the standard frame given
+  // by the provided frame pointer.
+  static inline Address ComputePCAddress(Address fp);
+
+  // Iterate over expression stack including stack handlers, locals,
+  // and parts of the fixed part including context and code fields.
+  void IterateExpressions(ObjectVisitor* v) const;
+
+  // Returns the address of the n'th expression stack element.
+  Address GetExpressionAddress(int n) const;
+
+  // Determines if the n'th expression stack element is in a stack
+  // handler or not. Requires traversing all handlers in this frame.
+  bool IsExpressionInsideHandler(int n) const;
+
+  // Determines if the standard frame for the given frame pointer is
+  // an arguments adaptor frame.
+  static inline bool IsArgumentsAdaptorFrame(Address fp);
+
+  // Determines if the standard frame for the given frame pointer is a
+  // construct frame.
+  static inline bool IsConstructFrame(Address fp);
+
+ private:
+  friend class StackFrame;
+};
+
+
+class JavaScriptFrame: public StandardFrame {
+ public:
+  virtual Type type() const { return JAVA_SCRIPT; }
+
+  // Accessors.
+  inline Object* function() const;
+  inline Object* receiver() const;
+  inline void set_receiver(Object* value);
+
+  // Access the parameters.
+  Object* GetParameter(int index) const;
+  int ComputeParametersCount() const;
+
+  // Temporary way of getting access to the number of parameters
+  // passed on the stack by the caller. Once argument adaptor frames
+  // have been introduced on ARM, this number will always match the
+  // computed parameters count.
+  int GetProvidedParametersCount() const;
+
+  // Check if this frame is a constructor frame invoked through 'new'.
+  bool IsConstructor() const;
+
+  // Check if this frame has "adapted" arguments in the sense that the
+  // actual passed arguments are available in an arguments adaptor
+  // frame below it on the stack.
+  inline bool has_adapted_arguments() const;
+
+  // Garbage collection support.
+  virtual void Iterate(ObjectVisitor* v) const;
+
+  // Printing support.
+  virtual void Print(StringStream* accumulator,
+                     PrintMode mode,
+                     int index) const;
+
+  // Determine the code for the frame.
+  virtual Code* code() const;
+
+  static JavaScriptFrame* cast(StackFrame* frame) {
+    ASSERT(frame->is_java_script());
+    return static_cast<JavaScriptFrame*>(frame);
+  }
+
+ protected:
+  explicit JavaScriptFrame(StackFrameIterator* iterator)
+      : StandardFrame(iterator), disable_heap_access_(false) { }
+
+  virtual Address GetCallerStackPointer() const;
+
+  // When this mode is enabled, it is not allowed to access heap objects.
+  // This is a special mode used when gathering stack samples in the profiler.
+  // A shortcoming is that the caller's SP value will be calculated
+  // incorrectly (see the GetCallerStackPointer implementation), but it is
+  // not used for stack sampling.
+  void DisableHeapAccess() { disable_heap_access_ = true; }
+
+ private:
+  bool disable_heap_access_;
+  inline Object* function_slot_object() const;
+
+  friend class StackFrameIterator;
+};
+
+
+// Arguments adaptor frames are automatically inserted below
+// JavaScript frames when the actual number of parameters does not
+// match the formal number of parameters.
+class ArgumentsAdaptorFrame: public JavaScriptFrame {
+ public:
+  virtual Type type() const { return ARGUMENTS_ADAPTOR; }
+
+  // Determine the code for the frame.
+  virtual Code* code() const;
+
+  static ArgumentsAdaptorFrame* cast(StackFrame* frame) {
+    ASSERT(frame->is_arguments_adaptor());
+    return static_cast<ArgumentsAdaptorFrame*>(frame);
+  }
+
+  // Printing support.
+  virtual void Print(StringStream* accumulator,
+                     PrintMode mode,
+                     int index) const;
+ protected:
+  explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator)
+      : JavaScriptFrame(iterator) { }
+
+  virtual Address GetCallerStackPointer() const;
+
+ private:
+  friend class StackFrameIterator;
+};
+
+
+class InternalFrame: public StandardFrame {
+ public:
+  virtual Type type() const { return INTERNAL; }
+
+  // Garbage collection support.
+  virtual void Iterate(ObjectVisitor* v) const;
+
+  // Determine the code for the frame.
+  virtual Code* code() const;
+
+  static InternalFrame* cast(StackFrame* frame) {
+    ASSERT(frame->is_internal());
+    return static_cast<InternalFrame*>(frame);
+  }
+
+ protected:
+  explicit InternalFrame(StackFrameIterator* iterator)
+      : StandardFrame(iterator) { }
+
+  virtual Address GetCallerStackPointer() const;
+
+ private:
+  friend class StackFrameIterator;
+};
+
+
+// Construct frames are special trampoline frames introduced to handle
+// function invocations through 'new'.
+class ConstructFrame: public InternalFrame {
+ public:
+  virtual Type type() const { return CONSTRUCT; }
+
+  static ConstructFrame* cast(StackFrame* frame) {
+    ASSERT(frame->is_construct());
+    return static_cast<ConstructFrame*>(frame);
+  }
+
+ protected:
+  explicit ConstructFrame(StackFrameIterator* iterator)
+      : InternalFrame(iterator) { }
+
+ private:
+  friend class StackFrameIterator;
+};
+
+
+class StackFrameIterator BASE_EMBEDDED {
+ public:
+  // An iterator that iterates over the current thread's stack.
+  StackFrameIterator();
+
+  // An iterator that iterates over a given thread's stack.
+  explicit StackFrameIterator(ThreadLocalTop* thread);
+
+  // An iterator that can start from a given FP address.
+  // If use_top is true, the iterator works as usual; otherwise, if fp is not
+  // NULL, iteration starts from it; otherwise the iterator does nothing.
+  StackFrameIterator(bool use_top, Address fp, Address sp);
+
+  StackFrame* frame() const {
+    ASSERT(!done());
+    return frame_;
+  }
+
+  bool done() const { return frame_ == NULL; }
+  void Advance() { (this->*advance_)(); }
+
+  // Go back to the first frame.
+  void Reset();
+
+ private:
+#define DECLARE_SINGLETON(ignore, type) type type##_;
+  STACK_FRAME_TYPE_LIST(DECLARE_SINGLETON)
+#undef DECLARE_SINGLETON
+  StackFrame* frame_;
+  StackHandler* handler_;
+  ThreadLocalTop* thread_;
+  Address fp_;
+  Address sp_;
+  void (StackFrameIterator::*advance_)();
+
+  StackHandler* handler() const {
+    ASSERT(!done());
+    return handler_;
+  }
+
+  // Get the type-specific frame singleton in a given state.
+  StackFrame* SingletonFor(StackFrame::Type type, StackFrame::State* state);
+  // A helper function, can return a NULL pointer.
+  StackFrame* SingletonFor(StackFrame::Type type);
+
+  void AdvanceWithHandler();
+  void AdvanceWithoutHandler();
+
+  friend class StackFrame;
+  friend class SafeStackFrameIterator;
+  DISALLOW_COPY_AND_ASSIGN(StackFrameIterator);
+};
+
+
+// Iterator that supports iterating through all JavaScript frames.
+template<typename Iterator>
+class JavaScriptFrameIteratorTemp BASE_EMBEDDED {
+ public:
+  JavaScriptFrameIteratorTemp() { if (!done()) Advance(); }
+
+  explicit JavaScriptFrameIteratorTemp(ThreadLocalTop* thread) :
+      iterator_(thread) {
+    if (!done()) Advance();
+  }
+
+  // Skip frames until the frame with the given id is reached.
+  explicit JavaScriptFrameIteratorTemp(StackFrame::Id id);
+
+  JavaScriptFrameIteratorTemp(Address fp, Address sp,
+                              Address low_bound, Address high_bound) :
+      iterator_(fp, sp, low_bound, high_bound) {
+    if (!done()) Advance();
+  }
+
+  inline JavaScriptFrame* frame() const;
+
+  bool done() const { return iterator_.done(); }
+  void Advance();
+
+  // Advance to the frame holding the arguments for the current
+  // frame. This only affects the current frame if it has adapted
+  // arguments.
+  void AdvanceToArgumentsFrame();
+
+  // Go back to the first frame.
+  void Reset();
+
+ private:
+  Iterator iterator_;
+};
+
+
+typedef JavaScriptFrameIteratorTemp<StackFrameIterator> JavaScriptFrameIterator;
+
+
+// NOTE: The stack trace frame iterator only traverses proper JavaScript
+// frames; that is, JavaScript frames that have proper JavaScript functions.
+// This excludes the problematic functions in runtime.js.
+class StackTraceFrameIterator: public JavaScriptFrameIterator {
+ public:
+  StackTraceFrameIterator();
+  void Advance();
+};
+
+
+class SafeStackFrameIterator BASE_EMBEDDED {
+ public:
+  SafeStackFrameIterator(Address fp, Address sp,
+                         Address low_bound, Address high_bound);
+
+  StackFrame* frame() const {
+    ASSERT(is_working_iterator_);
+    return iterator_.frame();
+  }
+
+  bool done() const { return iteration_done_ ? true : iterator_.done(); }
+
+  void Advance();
+  void Reset();
+
+ private:
+  static bool IsWithinBounds(
+      Address low_bound, Address high_bound, Address addr) {
+    return low_bound <= addr && addr <= high_bound;
+  }
+  bool IsValidStackAddress(Address addr) const {
+    return IsWithinBounds(low_bound_, high_bound_, addr);
+  }
+  bool CanIterateHandles(StackFrame* frame, StackHandler* handler);
+  bool IsValidFrame(StackFrame* frame) const;
+  bool IsValidCaller(StackFrame* frame);
+
+  Address low_bound_;
+  Address high_bound_;
+  const bool is_valid_top_;
+  const bool is_valid_fp_;
+  const bool is_working_iterator_;
+  bool iteration_done_;
+  StackFrameIterator iterator_;
+};
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+typedef JavaScriptFrameIteratorTemp<SafeStackFrameIterator>
+    SafeJavaScriptFrameIterator;
+
+
+class SafeStackTraceFrameIterator: public SafeJavaScriptFrameIterator {
+ public:
+  explicit SafeStackTraceFrameIterator(Address fp, Address sp,
+                                       Address low_bound, Address high_bound);
+  void Advance();
+};
+#endif
+
+
+class StackFrameLocator BASE_EMBEDDED {
+ public:
+  // Find the nth JavaScript frame on the stack. The caller must
+  // guarantee that such a frame exists.
+  JavaScriptFrame* FindJavaScriptFrame(int n);
+
+ private:
+  StackFrameIterator iterator_;
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_FRAMES_H_
diff --git a/src/func-name-inferrer.cc b/src/func-name-inferrer.cc
new file mode 100644
index 0000000..2d6a86a
--- /dev/null
+++ b/src/func-name-inferrer.cc
@@ -0,0 +1,76 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "ast.h"
+#include "func-name-inferrer.h"
+
+namespace v8 {
+namespace internal {
+
+
+void FuncNameInferrer::PushEnclosingName(Handle<String> name) {
+  // The enclosing name is the name of a constructor function. To check that
+  // it really is a constructor, we verify that the name is not empty and
+  // starts with a capital letter.
+  if (name->length() > 0 && Runtime::IsUpperCaseChar(name->Get(0))) {
+    names_stack_.Add(name);
+  }
+}
+
+
+Handle<String> FuncNameInferrer::MakeNameFromStack() {
+  if (names_stack_.is_empty()) {
+    return Factory::empty_string();
+  } else {
+    return MakeNameFromStackHelper(1, names_stack_.at(0));
+  }
+}
+
+
+Handle<String> FuncNameInferrer::MakeNameFromStackHelper(int pos,
+                                                         Handle<String> prev) {
+  if (pos >= names_stack_.length()) {
+    return prev;
+  } else {
+    Handle<String> curr = Factory::NewConsString(dot_, names_stack_.at(pos));
+    return MakeNameFromStackHelper(pos + 1, Factory::NewConsString(prev, curr));
+  }
+}
+
+
+void FuncNameInferrer::InferFunctionsNames() {
+  Handle<String> func_name = MakeNameFromStack();
+  for (int i = 0; i < funcs_to_infer_.length(); ++i) {
+    funcs_to_infer_[i]->set_inferred_name(func_name);
+  }
+  funcs_to_infer_.Rewind(0);
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/func-name-inferrer.h b/src/func-name-inferrer.h
new file mode 100644
index 0000000..e88586a
--- /dev/null
+++ b/src/func-name-inferrer.h
@@ -0,0 +1,135 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_FUNC_NAME_INFERRER_H_
+#define V8_FUNC_NAME_INFERRER_H_
+
+namespace v8 {
+namespace internal {
+
+// FuncNameInferrer is a stateful class that is used to perform name
+// inference for anonymous functions during static analysis of source code.
+// Inference is performed in cases where an anonymous function is assigned
+// to a variable or a property (see test-func-name-inference.cc for examples).
+//
+// The basic idea is that during AST traversal LHSs of expressions are
+// always visited before RHSs. Thus, while visiting the LHS, a name can be
+// collected, and while visiting the RHS, a function literal can be collected.
+// Inference is performed when leaving the assignment node.
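+//
+// A minimal illustration (hypothetical source, not from the original comment):
+//   foo.bar = function() { ... };   // "foo" and "bar" are collected while
+//                                   // visiting the LHS; the anonymous literal
+//                                   // later gets the inferred name "foo.bar".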
+class FuncNameInferrer BASE_EMBEDDED {
+ public:
+  FuncNameInferrer()
+      : entries_stack_(10),
+        names_stack_(5),
+        funcs_to_infer_(4),
+        dot_(Factory::NewStringFromAscii(CStrVector("."))) {
+  }
+
+  // Returns whether we have entered name collection state.
+  bool IsOpen() const { return !entries_stack_.is_empty(); }
+
+  // Pushes the name of the enclosing function onto the names stack.
+  void PushEnclosingName(Handle<String> name);
+
+  // Enters name collection state.
+  void Enter() {
+    entries_stack_.Add(names_stack_.length());
+  }
+
+  // Pushes an encountered name onto names stack when in collection state.
+  void PushName(Handle<String> name) {
+    if (IsOpen()) {
+      names_stack_.Add(name);
+    }
+  }
+
+  // Adds a function to infer name for.
+  void AddFunction(FunctionLiteral* func_to_infer) {
+    if (IsOpen()) {
+      funcs_to_infer_.Add(func_to_infer);
+    }
+  }
+
+  // Infers a function name and leaves names collection state.
+  void InferAndLeave() {
+    ASSERT(IsOpen());
+    if (!funcs_to_infer_.is_empty()) {
+      InferFunctionsNames();
+    }
+    names_stack_.Rewind(entries_stack_.RemoveLast());
+  }
+
+ private:
+  // Constructs a full name in dotted notation from gathered names.
+  Handle<String> MakeNameFromStack();
+
+  // A helper function for MakeNameFromStack.
+  Handle<String> MakeNameFromStackHelper(int pos, Handle<String> prev);
+
+  // Performs name inferring for added functions.
+  void InferFunctionsNames();
+
+  ZoneList<int> entries_stack_;
+  ZoneList<Handle<String> > names_stack_;
+  ZoneList<FunctionLiteral*> funcs_to_infer_;
+  Handle<String> dot_;
+
+  DISALLOW_COPY_AND_ASSIGN(FuncNameInferrer);
+};
+
+
+// A wrapper class that automatically calls InferAndLeave when
+// leaving scope.
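+//
+// A usage sketch (the parser member name 'fni_' is hypothetical):
+//   ScopedFuncNameInferrer scoped_fni(&fni_);
+//   scoped_fni.Enter();   // start collecting names for this assignment;
+//                         // visiting the LHS pushes names, visiting the RHS
+//                         // adds function literals to infer.
+//   // InferAndLeave() runs automatically when scoped_fni goes out of scope.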
+class ScopedFuncNameInferrer BASE_EMBEDDED {
+ public:
+  explicit ScopedFuncNameInferrer(FuncNameInferrer* inferrer)
+      : inferrer_(inferrer),
+        is_entered_(false) {}
+
+  ~ScopedFuncNameInferrer() {
+    if (is_entered_) {
+      inferrer_->InferAndLeave();
+    }
+  }
+
+  // Triggers the wrapped inferrer into name collection state.
+  void Enter() {
+    inferrer_->Enter();
+    is_entered_ = true;
+  }
+
+ private:
+  FuncNameInferrer* inferrer_;
+  bool is_entered_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedFuncNameInferrer);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_FUNC_NAME_INFERRER_H_
diff --git a/src/global-handles.cc b/src/global-handles.cc
new file mode 100644
index 0000000..e51c4aa
--- /dev/null
+++ b/src/global-handles.cc
@@ -0,0 +1,400 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "api.h"
+#include "global-handles.h"
+
+namespace v8 {
+namespace internal {
+
+class GlobalHandles::Node : public Malloced {
+ public:
+
+  void Initialize(Object* object) {
+    // Set the initial value of the handle.
+    object_ = object;
+    state_  = NORMAL;
+    parameter_or_next_free_.parameter = NULL;
+    callback_ = NULL;
+  }
+
+  explicit Node(Object* object) {
+    Initialize(object);
+    // Initialize link structure.
+    next_ = NULL;
+  }
+
+  ~Node() {
+    if (state_ != DESTROYED) Destroy();
+#ifdef DEBUG
+    // Zap the values for eager trapping.
+    object_ = NULL;
+    next_ = NULL;
+    parameter_or_next_free_.next_free = NULL;
+#endif
+  }
+
+  void Destroy() {
+    if (state_ == WEAK || IsNearDeath()) {
+      GlobalHandles::number_of_weak_handles_--;
+      if (object_->IsJSGlobalObject()) {
+        GlobalHandles::number_of_global_object_weak_handles_--;
+      }
+    }
+    state_ = DESTROYED;
+  }
+
+  // Accessors for next_.
+  Node* next() { return next_; }
+  void set_next(Node* value) { next_ = value; }
+  Node** next_addr() { return &next_; }
+
+  // Accessors for next free node in the free list.
+  Node* next_free() {
+    ASSERT(state_ == DESTROYED);
+    return parameter_or_next_free_.next_free;
+  }
+  void set_next_free(Node* value) {
+    ASSERT(state_ == DESTROYED);
+    parameter_or_next_free_.next_free = value;
+  }
+
+  // Returns a link from the handle.
+  static Node* FromLocation(Object** location) {
+    ASSERT(OFFSET_OF(Node, object_) == 0);
+    return reinterpret_cast<Node*>(location);
+  }
+
+  // Returns the handle.
+  Handle<Object> handle() { return Handle<Object>(&object_); }
+
+  // Make this handle weak.
+  void MakeWeak(void* parameter, WeakReferenceCallback callback) {
+    LOG(HandleEvent("GlobalHandle::MakeWeak", handle().location()));
+    ASSERT(state_ != DESTROYED);
+    if (state_ != WEAK && !IsNearDeath()) {
+      GlobalHandles::number_of_weak_handles_++;
+      if (object_->IsJSGlobalObject()) {
+        GlobalHandles::number_of_global_object_weak_handles_++;
+      }
+    }
+    state_ = WEAK;
+    set_parameter(parameter);
+    callback_ = callback;
+  }
+
+  void ClearWeakness() {
+    LOG(HandleEvent("GlobalHandle::ClearWeakness", handle().location()));
+    ASSERT(state_ != DESTROYED);
+    if (state_ == WEAK || IsNearDeath()) {
+      GlobalHandles::number_of_weak_handles_--;
+      if (object_->IsJSGlobalObject()) {
+        GlobalHandles::number_of_global_object_weak_handles_--;
+      }
+    }
+    state_ = NORMAL;
+    set_parameter(NULL);
+  }
+
+  bool IsNearDeath() {
+    // Check for PENDING to ensure correct answer when processing callbacks.
+    return state_ == PENDING || state_ == NEAR_DEATH;
+  }
+
+  bool IsWeak() {
+    return state_ == WEAK;
+  }
+
+  // Accessors for the callback parameter of this weak handle.
+  void set_parameter(void* parameter) {
+    ASSERT(state_ != DESTROYED);
+    parameter_or_next_free_.parameter = parameter;
+  }
+  void* parameter() {
+    ASSERT(state_ != DESTROYED);
+    return parameter_or_next_free_.parameter;
+  }
+
+  // Returns the callback for this weak handle.
+  WeakReferenceCallback callback() { return callback_; }
+
+  bool PostGarbageCollectionProcessing() {
+    if (state_ != Node::PENDING) return false;
+    LOG(HandleEvent("GlobalHandle::Processing", handle().location()));
+    void* par = parameter();
+    state_ = NEAR_DEATH;
+    set_parameter(NULL);
+    // The callback function is resolved as late as possible to preserve old
+    // behavior.
+    WeakReferenceCallback func = callback();
+    if (func == NULL) return false;
+
+    v8::Persistent<v8::Object> object = ToApi<v8::Object>(handle());
+    {
+      // Forbid reuse of destroyed nodes as they might already be deallocated.
+      // It is fine, though, to reuse nodes that were destroyed in a weak
+      // callback, as those cannot be deallocated until we return from it.
+      set_first_free(NULL);
+      // Leaving V8.
+      VMState state(EXTERNAL);
+      func(object, par);
+    }
+    return true;
+  }
+
+  // Place the handle address first to avoid offset computation.
+  Object* object_;  // Storage for object pointer.
+
+  // Transition diagram:
+  // NORMAL <-> WEAK -> PENDING -> NEAR_DEATH -> { NORMAL, WEAK, DESTROYED }
+  enum State {
+    NORMAL,      // Normal global handle.
+    WEAK,        // Flagged as weak but not yet finalized.
+    PENDING,     // Has been recognized as only reachable by weak handles.
+    NEAR_DEATH,  // Callback has informed the handle is near death.
+    DESTROYED
+  };
+  State state_;
+
+ private:
+  // Handle specific callback.
+  WeakReferenceCallback callback_;
+  // Provided data for callback.  In DESTROYED state, this is used for
+  // the free list link.
+  union {
+    void* parameter;
+    Node* next_free;
+  } parameter_or_next_free_;
+
+  // Linkage for the list.
+  Node* next_;
+
+ public:
+  TRACK_MEMORY("GlobalHandles::Node")
+};
+
+
+Handle<Object> GlobalHandles::Create(Object* value) {
+  Counters::global_handles.Increment();
+  Node* result;
+  if (first_free() == NULL) {
+    // Allocate a new node.
+    result = new Node(value);
+    result->set_next(head());
+    set_head(result);
+  } else {
+    // Take the first node in the free list.
+    result = first_free();
+    set_first_free(result->next_free());
+    result->Initialize(value);
+  }
+  return result->handle();
+}
+
+
+void GlobalHandles::Destroy(Object** location) {
+  Counters::global_handles.Decrement();
+  if (location == NULL) return;
+  Node* node = Node::FromLocation(location);
+  node->Destroy();
+  // Link the destroyed.
+  node->set_next_free(first_free());
+  set_first_free(node);
+}
+
+
+void GlobalHandles::MakeWeak(Object** location, void* parameter,
+                             WeakReferenceCallback callback) {
+  ASSERT(callback != NULL);
+  Node::FromLocation(location)->MakeWeak(parameter, callback);
+}
+
+
+void GlobalHandles::ClearWeakness(Object** location) {
+  Node::FromLocation(location)->ClearWeakness();
+}
+
+
+bool GlobalHandles::IsNearDeath(Object** location) {
+  return Node::FromLocation(location)->IsNearDeath();
+}
+
+
+bool GlobalHandles::IsWeak(Object** location) {
+  return Node::FromLocation(location)->IsWeak();
+}
+
+
+void GlobalHandles::IterateWeakRoots(ObjectVisitor* v) {
+  // Traversal of GC roots in the global handle list that are marked as
+  // WEAK or PENDING.
+  for (Node* current = head_; current != NULL; current = current->next()) {
+    if (current->state_ == Node::WEAK
+      || current->state_ == Node::PENDING
+      || current->state_ == Node::NEAR_DEATH) {
+      v->VisitPointer(&current->object_);
+    }
+  }
+}
+
+
+void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback f) {
+  for (Node* current = head_; current != NULL; current = current->next()) {
+    if (current->state_ == Node::WEAK) {
+      if (f(&current->object_)) {
+        current->state_ = Node::PENDING;
+        LOG(HandleEvent("GlobalHandle::Pending", current->handle().location()));
+      }
+    }
+  }
+}
+
+
+int post_gc_processing_count = 0;
+
+void GlobalHandles::PostGarbageCollectionProcessing() {
+  // Process weak global handle callbacks. This must be done after the
+  // GC is completely done, because the callbacks may invoke arbitrary
+  // API functions.
+  // At the same time, deallocate all DESTROYED nodes.
+  ASSERT(Heap::gc_state() == Heap::NOT_IN_GC);
+  const int initial_post_gc_processing_count = ++post_gc_processing_count;
+  Node** p = &head_;
+  while (*p != NULL) {
+    if ((*p)->PostGarbageCollectionProcessing()) {
+      if (initial_post_gc_processing_count != post_gc_processing_count) {
+        // Weak callback triggered another GC and another round of
+        // PostGarbageCollection processing.  The current node might
+        // have been deleted in that round, so we need to bail out (or
+        // restart the processing).
+        break;
+      }
+    }
+    if ((*p)->state_ == Node::DESTROYED) {
+      // Delete the link.
+      Node* node = *p;
+      *p = node->next();  // Update the link.
+      delete node;
+    } else {
+      p = (*p)->next_addr();
+    }
+  }
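+  // Destroyed nodes reached above have just been deleted, so the free list
+  // (which may still point at them) is cleared rather than left dangling.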
+  set_first_free(NULL);
+}
+
+
+void GlobalHandles::IterateRoots(ObjectVisitor* v) {
+  // Traversal of global handles marked as NORMAL or NEAR_DEATH.
+  for (Node* current = head_; current != NULL; current = current->next()) {
+    if (current->state_ == Node::NORMAL) {
+      v->VisitPointer(&current->object_);
+    }
+  }
+}
+
+void GlobalHandles::TearDown() {
+  // Delete all the nodes in the linked list.
+  Node* current = head_;
+  while (current != NULL) {
+    Node* n = current;
+    current = current->next();
+    delete n;
+  }
+  // Reset the head and free_list.
+  set_head(NULL);
+  set_first_free(NULL);
+}
+
+
+int GlobalHandles::number_of_weak_handles_ = 0;
+int GlobalHandles::number_of_global_object_weak_handles_ = 0;
+
+GlobalHandles::Node* GlobalHandles::head_ = NULL;
+GlobalHandles::Node* GlobalHandles::first_free_ = NULL;
+
+#ifdef DEBUG
+
+void GlobalHandles::PrintStats() {
+  int total = 0;
+  int weak = 0;
+  int pending = 0;
+  int near_death = 0;
+  int destroyed = 0;
+
+  for (Node* current = head_; current != NULL; current = current->next()) {
+    total++;
+    if (current->state_ == Node::WEAK) weak++;
+    if (current->state_ == Node::PENDING) pending++;
+    if (current->state_ == Node::NEAR_DEATH) near_death++;
+    if (current->state_ == Node::DESTROYED) destroyed++;
+  }
+
+  PrintF("Global Handle Statistics:\n");
+  PrintF("  allocated memory = %dB\n", sizeof(Node) * total);
+  PrintF("  # weak       = %d\n", weak);
+  PrintF("  # pending    = %d\n", pending);
+  PrintF("  # near_death = %d\n", near_death);
+  PrintF("  # destroyed  = %d\n", destroyed);
+  PrintF("  # total      = %d\n", total);
+}
+
+void GlobalHandles::Print() {
+  PrintF("Global handles:\n");
+  for (Node* current = head_; current != NULL; current = current->next()) {
+    PrintF("  handle %p to %p (weak=%d)\n", current->handle().location(),
+           *current->handle(), current->state_ == Node::WEAK);
+  }
+}
+
+#endif
+
+List<ObjectGroup*>* GlobalHandles::ObjectGroups() {
+  // Lazily initialize the list to avoid startup time static constructors.
+  static List<ObjectGroup*> groups(4);
+  return &groups;
+}
+
+void GlobalHandles::AddGroup(Object*** handles, size_t length) {
+  ObjectGroup* new_entry = new ObjectGroup(length);
+  for (size_t i = 0; i < length; ++i)
+    new_entry->objects_.Add(handles[i]);
+  ObjectGroups()->Add(new_entry);
+}
+
+
+void GlobalHandles::RemoveObjectGroups() {
+  List<ObjectGroup*>* object_groups = ObjectGroups();
+  for (int i = 0; i < object_groups->length(); i++) {
+    delete object_groups->at(i);
+  }
+  object_groups->Clear();
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/global-handles.h b/src/global-handles.h
new file mode 100644
index 0000000..9e63ba7
--- /dev/null
+++ b/src/global-handles.h
@@ -0,0 +1,150 @@
+// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_GLOBAL_HANDLES_H_
+#define V8_GLOBAL_HANDLES_H_
+
+#include "list-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// Structure for tracking global handles.
+// A single list keeps all the allocated global handles.
+// Destroyed handles stay in the list but are added to the free list.
+// At GC the destroyed global handles are removed from the free list
+// and deallocated.
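+//
+// A typical handle lifecycle (sketch; 'OnNearDeath' and 'user_data' are
+// hypothetical):
+//   Handle<Object> h = GlobalHandles::Create(obj);   // obj is an Object*
+//   GlobalHandles::MakeWeak(h.location(), user_data, &OnNearDeath);
+//   ...                     // once only weak handles reach the object, the
+//                           // callback runs during post-GC processing
+//   GlobalHandles::Destroy(h.location());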
+
+// Callback function on handling weak global handles.
+// typedef bool (*WeakSlotCallback)(Object** pointer);
+
+// An object group is treated like a single JS object: if one of the objects
+// in the group is alive, all objects in the same group are considered alive.
+// An object group is used to simulate object relationships in a DOM tree.
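+//
+// For example (a DOM-style sketch): the wrappers for a <div> element and its
+// child nodes can be registered as one group, so keeping any one wrapper
+// alive keeps the whole group of wrappers alive across a GC.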
+class ObjectGroup : public Malloced {
+ public:
+  ObjectGroup() : objects_(4) {}
+  explicit ObjectGroup(size_t capacity) : objects_(capacity) {}
+
+  List<Object**> objects_;
+};
+
+
+class GlobalHandles : public AllStatic {
+ public:
+  // Creates a new global handle that is alive until Destroy is called.
+  static Handle<Object> Create(Object* value);
+
+  // Destroy a global handle.
+  static void Destroy(Object** location);
+
+  // Make the global handle weak and set the callback parameter for the
+  // handle.  When the garbage collector recognizes that only weak global
+  // handles point to an object the handles are cleared and the callback
+  // function is invoked (for each handle) with the handle and corresponding
+  // parameter as arguments.  Note: cleared means set to Smi::FromInt(0). The
+  // reason is that Smi::FromInt(0) does not change during garbage collection.
+  static void MakeWeak(Object** location,
+                       void* parameter,
+                       WeakReferenceCallback callback);
+
+  // Returns the current number of weak handles.
+  static int NumberOfWeakHandles() { return number_of_weak_handles_; }
+
+  // Returns the current number of weak handles to global objects.
+  // These handles are also included in NumberOfWeakHandles().
+  static int NumberOfGlobalObjectWeakHandles() {
+    return number_of_global_object_weak_handles_;
+  }
+
+  // Clear the weakness of a global handle.
+  static void ClearWeakness(Object** location);
+
+  // Tells whether global handle is near death.
+  static bool IsNearDeath(Object** location);
+
+  // Tells whether global handle is weak.
+  static bool IsWeak(Object** location);
+
+  // Process pending weak handles.
+  static void PostGarbageCollectionProcessing();
+
+  // Iterates over all handles.
+  static void IterateRoots(ObjectVisitor* v);
+
+  // Iterates over all weak roots in heap.
+  static void IterateWeakRoots(ObjectVisitor* v);
+
+  // Find all weak handles satisfying the callback predicate, mark
+  // them as pending.
+  static void IdentifyWeakHandles(WeakSlotCallback f);
+
+  // Add an object group.
+  // Should only be used in a GC callback function before a collection.
+  // All groups are destroyed after a mark-compact collection.
+  static void AddGroup(Object*** handles, size_t length);
+
+  // Returns the object groups.
+  static List<ObjectGroup*>* ObjectGroups();
+
+  // Remove all object groups; this should only happen after GC.
+  static void RemoveObjectGroups();
+
+  // Tear down the global handle structure.
+  static void TearDown();
+
+#ifdef DEBUG
+  static void PrintStats();
+  static void Print();
+#endif
+ private:
+  // Internal node structure, one for each global handle.
+  class Node;
+
+  // Field always containing the number of weak and near-death handles.
+  static int number_of_weak_handles_;
+
+  // Field always containing the number of weak and near-death handles
+  // to global objects.  These objects are also included in
+  // number_of_weak_handles_.
+  static int number_of_global_object_weak_handles_;
+
+  // Global handles are kept in a singly linked list pointed to by head_.
+  static Node* head_;
+  static Node* head() { return head_; }
+  static void set_head(Node* value) { head_ = value; }
+
+  // Free list for DESTROYED global handles not yet deallocated.
+  static Node* first_free_;
+  static Node* first_free() { return first_free_; }
+  static void set_first_free(Node* value) { first_free_ = value; }
+};
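+
+// Editor's note: an illustrative sketch (not part of the original source) of
+// how the API above is typically used; `ExampleWeakCallback`, `heap_object`
+// and `other` are made-up placeholders.
+//
+//   static void ExampleWeakCallback(v8::Persistent<v8::Value> object,
+//                                   void* parameter) {
+//     // Drop embedder state associated with the dying object here.
+//   }
+//
+//   // Create a strong global handle, then downgrade it to a weak one.
+//   Handle<Object> handle = GlobalHandles::Create(heap_object);
+//   GlobalHandles::MakeWeak(handle.location(), NULL, &ExampleWeakCallback);
+//
+//   // Treat two handles as one object for liveness purposes (DOM-style).
+//   Object** group[] = { handle.location(), other.location() };
+//   GlobalHandles::AddGroup(group, 2);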
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_GLOBAL_HANDLES_H_
diff --git a/src/globals.h b/src/globals.h
new file mode 100644
index 0000000..efe0127
--- /dev/null
+++ b/src/globals.h
@@ -0,0 +1,563 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_GLOBALS_H_
+#define V8_GLOBALS_H_
+
+namespace v8 {
+namespace internal {
+
+// Processor architecture detection.  For more info on what's defined, see:
+//   http://msdn.microsoft.com/en-us/library/b0084kay.aspx
+//   http://www.agner.org/optimize/calling_conventions.pdf
+//   or with gcc, run: "echo | gcc -E -dM -"
+#if defined(_M_X64) || defined(__x86_64__)
+#define V8_HOST_ARCH_X64 1
+#define V8_HOST_ARCH_64_BIT 1
+#define V8_HOST_CAN_READ_UNALIGNED 1
+#elif defined(_M_IX86) || defined(__i386__)
+#define V8_HOST_ARCH_IA32 1
+#define V8_HOST_ARCH_32_BIT 1
+#define V8_HOST_CAN_READ_UNALIGNED 1
+#elif defined(__ARMEL__)
+#define V8_HOST_ARCH_ARM 1
+#define V8_HOST_ARCH_32_BIT 1
+#else
+#error Your host architecture was not detected as supported by v8
+#endif
+
+#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
+#define V8_TARGET_CAN_READ_UNALIGNED 1
+#elif V8_TARGET_ARCH_ARM
+#else
+#error Your target architecture is not supported by v8
+#endif
+
+// Support for alternative bool type. This is only enabled if the code is
+// compiled with USE_MYBOOL defined. This catches some nasty type bugs.
+// For instance, 'bool b = "false";' results in b == true! This is a hidden
+// source of bugs.
+// However, redefining the bool type does have some negative impact on some
+// platforms. It gives rise to compiler warnings (e.g. with
+// MSVC) in the API header files when mixing code that uses the standard
+// bool with code that uses the redefined version.
+// This does not actually belong in the platform code, but needs to be
+// defined here because the platform code uses bool, and platform.h is
+// included very early in the main include file.
+
+#ifdef USE_MYBOOL
+typedef unsigned int __my_bool__;
+#define bool __my_bool__  // use 'indirection' to avoid name clashes
+#endif
+
+typedef uint8_t byte;
+typedef byte* Address;
+
+// Define our own macros for writing 64-bit constants.  This is less fragile
+// than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it
+// works on compilers that don't have it (like MSVC).
+#if V8_HOST_ARCH_64_BIT
+#ifdef _MSC_VER
+#define V8_UINT64_C(x)  (x ## UI64)
+#define V8_INT64_C(x)   (x ## I64)
+#define V8_PTR_PREFIX "ll"
+#else  // _MSC_VER
+#define V8_UINT64_C(x)  (x ## UL)
+#define V8_INT64_C(x)   (x ## L)
+#define V8_PTR_PREFIX "l"
+#endif  // _MSC_VER
+#else  // V8_HOST_ARCH_64_BIT
+#define V8_PTR_PREFIX ""
+#endif  // V8_HOST_ARCH_64_BIT
+
+#define V8PRIxPTR V8_PTR_PREFIX "x"
+#define V8PRIdPTR V8_PTR_PREFIX "d"
+
+// Fix for Mac OS X defining uintptr_t as "unsigned long":
+#if defined(__APPLE__) && defined(__MACH__)
+#undef V8PRIxPTR
+#define V8PRIxPTR "lx"
+#endif
+
+// Code-point values in Unicode 4.0 are 21 bits wide.
+typedef uint16_t uc16;
+typedef int32_t uc32;
+
+// -----------------------------------------------------------------------------
+// Constants
+
+const int KB = 1024;
+const int MB = KB * KB;
+const int GB = KB * KB * KB;
+const int kMaxInt = 0x7FFFFFFF;
+const int kMinInt = -kMaxInt - 1;
+
+const uint32_t kMaxUInt32 = 0xFFFFFFFFu;
+
+const int kCharSize     = sizeof(char);      // NOLINT
+const int kShortSize    = sizeof(short);     // NOLINT
+const int kIntSize      = sizeof(int);       // NOLINT
+const int kDoubleSize   = sizeof(double);    // NOLINT
+const int kPointerSize  = sizeof(void*);     // NOLINT
+const int kIntptrSize   = sizeof(intptr_t);  // NOLINT
+
+#if V8_HOST_ARCH_64_BIT
+const int kPointerSizeLog2 = 3;
+const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
+#else
+const int kPointerSizeLog2 = 2;
+const intptr_t kIntptrSignBit = 0x80000000;
+#endif
+
+const int kObjectAlignmentBits = kPointerSizeLog2;
+const intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
+const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
+
+// Desired alignment for pointers.
+const intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
+const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
+
+
+// Tag information for Failure.
+const int kFailureTag = 3;
+const int kFailureTagSize = 2;
+const intptr_t kFailureTagMask = (1 << kFailureTagSize) - 1;
+
+
+const int kBitsPerByte = 8;
+const int kBitsPerByteLog2 = 3;
+const int kBitsPerPointer = kPointerSize * kBitsPerByte;
+const int kBitsPerInt = kIntSize * kBitsPerByte;
+
+
+// Zap-value: The value used for zapping dead objects.
+// Should be a recognizable hex value tagged as a heap object pointer.
+#ifdef V8_HOST_ARCH_64_BIT
+const Address kZapValue =
+    reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeed));
+const Address kHandleZapValue =
+    reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddead));
+const Address kFromSpaceZapValue =
+    reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdad));
+#else
+const Address kZapValue = reinterpret_cast<Address>(0xdeadbeed);
+const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddead);
+const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdad);
+#endif
+
+
+// -----------------------------------------------------------------------------
+// Forward declarations for frequently used classes
+// (sorted alphabetically)
+
+class AccessorInfo;
+class Allocation;
+class Arguments;
+class Assembler;
+class BreakableStatement;
+class Code;
+class CodeGenerator;
+class CodeStub;
+class Context;
+class Debug;
+class Debugger;
+class DebugInfo;
+class Descriptor;
+class DescriptorArray;
+class Expression;
+class ExternalReference;
+class FixedArray;
+class FunctionEntry;
+class FunctionLiteral;
+class FunctionTemplateInfo;
+class NumberDictionary;
+class StringDictionary;
+class FreeStoreAllocationPolicy;
+template <typename T> class Handle;
+class Heap;
+class HeapObject;
+class IC;
+class InterceptorInfo;
+class IterationStatement;
+class Array;
+class JSArray;
+class JSFunction;
+class JSObject;
+class LargeObjectSpace;
+template <typename T, class P = FreeStoreAllocationPolicy> class List;
+class LookupResult;
+class MacroAssembler;
+class Map;
+class MapSpace;
+class MarkCompactCollector;
+class NewSpace;
+class NodeVisitor;
+class Object;
+class OldSpace;
+class Property;
+class Proxy;
+class RegExpNode;
+struct RegExpCompileData;
+class RegExpTree;
+class RegExpCompiler;
+class RegExpVisitor;
+class Scope;
+template<class Allocator = FreeStoreAllocationPolicy> class ScopeInfo;
+class Script;
+class Slot;
+class Smi;
+class Statement;
+class String;
+class Struct;
+class SwitchStatement;
+class AstVisitor;
+class Variable;
+class VariableProxy;
+class RelocInfo;
+class Deserializer;
+class MessageLocation;
+class ObjectGroup;
+class TickSample;
+class VirtualMemory;
+class Mutex;
+class ZoneScopeInfo;
+
+typedef bool (*WeakSlotCallback)(Object** pointer);
+
+// -----------------------------------------------------------------------------
+// Miscellaneous
+
+// NOTE: SpaceIterator depends on AllocationSpace enumeration values being
+// consecutive.
+enum AllocationSpace {
+  NEW_SPACE,            // Semispaces collected with copying collector.
+  OLD_POINTER_SPACE,    // May contain pointers to new space.
+  OLD_DATA_SPACE,       // Must not have pointers to new space.
+  CODE_SPACE,           // No pointers to new space, marked executable.
+  MAP_SPACE,            // Only and all map objects.
+  CELL_SPACE,           // Only and all cell objects.
+  LO_SPACE,             // Promoted large objects.
+
+  FIRST_SPACE = NEW_SPACE,
+  LAST_SPACE = LO_SPACE
+};
+const int kSpaceTagSize = 3;
+const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
+
+
+// A flag that indicates whether objects should be pretenured when
+// allocated (allocated directly into the old generation) or not
+// (allocated in the young generation if the object size and type
+// allow).
+enum PretenureFlag { NOT_TENURED, TENURED };
+
+enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };
+
+enum Executability { NOT_EXECUTABLE, EXECUTABLE };
+
+
+// A CodeDesc describes a buffer holding instructions and relocation
+// information. The instructions start at the beginning of the buffer
+// and grow forward, the relocation information starts at the end of
+// the buffer and grows backward.
+//
+//  |<--------------- buffer_size ---------------->|
+//  |<-- instr_size -->|        |<-- reloc_size -->|
+//  +==================+========+==================+
+//  |   instructions   |  free  |    reloc info    |
+//  +==================+========+==================+
+//  ^
+//  |
+//  buffer
+
+struct CodeDesc {
+  byte* buffer;
+  int buffer_size;
+  int instr_size;
+  int reloc_size;
+  Assembler* origin;
+};
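+
+// Editor's note: an illustrative example (not part of the original source) of
+// reading the layout described in the diagram above; `desc` is a placeholder.
+//
+//   int free_space = desc.buffer_size - desc.instr_size - desc.reloc_size;
+//   byte* reloc_start = desc.buffer + desc.buffer_size - desc.reloc_size;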
+
+
+// Callback function on object slots, used for iterating heap object slots in
+// HeapObjects, global pointers to heap objects, etc. The callback is
+// allowed to change the value of the slot.
+typedef void (*ObjectSlotCallback)(HeapObject** pointer);
+
+
+// Callback function used for iterating objects in heap spaces,
+// for example, scanning heap objects.
+typedef int (*HeapObjectCallback)(HeapObject* obj);
+
+
+// Callback function used for checking constraints when copying/relocating
+// objects. Returns true if an object can be copied/relocated from its
+// old_addr to a new_addr.
+typedef bool (*ConstraintCallback)(Address new_addr, Address old_addr);
+
+
+// Callback function on inline caches, used for iterating over inline caches
+// in compiled code.
+typedef void (*InlineCacheCallback)(Code* code, Address ic);
+
+
+// State for inline cache call sites. Aliased as IC::State.
+enum InlineCacheState {
+  // Has never been executed.
+  UNINITIALIZED,
+  // Has been executed but the monomorphic state has been delayed.
+  PREMONOMORPHIC,
+  // Has been executed and only one receiver type has been seen.
+  MONOMORPHIC,
+  // Like MONOMORPHIC but check failed due to prototype.
+  MONOMORPHIC_PROTOTYPE_FAILURE,
+  // Multiple receiver types have been seen.
+  MEGAMORPHIC,
+  // Special states for debug break or step in prepare stubs.
+  DEBUG_BREAK,
+  DEBUG_PREPARE_STEP_IN
+};
+
+
+enum InLoopFlag {
+  NOT_IN_LOOP,
+  IN_LOOP
+};
+
+
+// Type of properties.
+// Order of properties is significant.
+// Must fit in the BitField PropertyDetails::TypeField.
+// A copy of this is in mirror-delay.js.
+enum PropertyType {
+  NORMAL              = 0,  // only in slow mode
+  FIELD               = 1,  // only in fast mode
+  CONSTANT_FUNCTION   = 2,  // only in fast mode
+  CALLBACKS           = 3,
+  INTERCEPTOR         = 4,  // only in lookup results, not in descriptors.
+  MAP_TRANSITION      = 5,  // only in fast mode
+  CONSTANT_TRANSITION = 6,  // only in fast mode
+  NULL_DESCRIPTOR     = 7,  // only in fast mode
+  // All properties before MAP_TRANSITION are real.
+  FIRST_PHANTOM_PROPERTY_TYPE = MAP_TRANSITION
+};
+
+
+// Whether to remove map transitions and constant transitions from a
+// DescriptorArray.
+enum TransitionFlag {
+  REMOVE_TRANSITIONS,
+  KEEP_TRANSITIONS
+};
+
+
+// Union used for fast testing of specific double values.
+union DoubleRepresentation {
+  double  value;
+  int64_t bits;
+  DoubleRepresentation(double x) { value = x; }
+};
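+
+// Editor's note: an illustrative sketch (not part of the original source)
+// showing the "fast testing" the union enables, e.g. distinguishing -0.0
+// from +0.0 by comparing raw bits instead of floating-point values.
+//
+//   static inline bool IsMinusZero(double value) {
+//     static const DoubleRepresentation minus_zero(-0.0);
+//     return DoubleRepresentation(value).bits == minus_zero.bits;
+//   }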
+
+
+// AccessorCallback
+struct AccessorDescriptor {
+  Object* (*getter)(Object* object, void* data);
+  Object* (*setter)(JSObject* object, Object* value, void* data);
+  void* data;
+};
+
+
+// Logging and profiling.
+// A StateTag represents a possible state of the VM.  When compiled with
+// ENABLE_LOGGING_AND_PROFILING, the logger maintains a stack of these.
+// Creating a VMState object enters a state by pushing on the stack, and
+// destroying a VMState object leaves a state by popping the current state
+// from the stack.
+
+#define STATE_TAG_LIST(V) \
+  V(JS)                   \
+  V(GC)                   \
+  V(COMPILER)             \
+  V(OTHER)                \
+  V(EXTERNAL)
+
+enum StateTag {
+#define DEF_STATE_TAG(name) name,
+  STATE_TAG_LIST(DEF_STATE_TAG)
+#undef DEF_STATE_TAG
+  // Pseudo-types.
+  state_tag_count
+};
+
+
+// -----------------------------------------------------------------------------
+// Macros
+
+// Testers for tagged values.
+
+#define HAS_SMI_TAG(value) \
+  ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag)
+
+#define HAS_FAILURE_TAG(value) \
+  ((reinterpret_cast<intptr_t>(value) & kFailureTagMask) == kFailureTag)
+
+// OBJECT_SIZE_ALIGN returns the value aligned as a HeapObject size.
+#define OBJECT_SIZE_ALIGN(value)                                \
+  (((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)
+
+// POINTER_SIZE_ALIGN returns the value aligned as a pointer.
+#define POINTER_SIZE_ALIGN(value)                               \
+  (((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)
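+
+// Editor's note: an illustrative example (not part of the original source) of
+// the alignment arithmetic above. On a 32-bit host kObjectAlignment is 4, so:
+//
+//   OBJECT_SIZE_ALIGN(13)  // == (13 + 3) & ~3 == 16
+//   OBJECT_SIZE_ALIGN(16)  // == (16 + 3) & ~3 == 16 (already aligned)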
+
+// The expression OFFSET_OF(type, field) computes the byte-offset
+// of the specified field relative to the containing type. This
+// corresponds to 'offsetof' (in stddef.h), except that it doesn't
+// use 0 or NULL, which causes a problem with the compiler warnings
+// we have enabled (which is also why 'offsetof' doesn't seem to work).
+// Here we simply use the non-zero value 4, which seems to work.
+#define OFFSET_OF(type, field)                                          \
+  (reinterpret_cast<intptr_t>(&(reinterpret_cast<type*>(4)->field)) - 4)
+
+
+// The expression ARRAY_SIZE(a) is a compile-time constant of type
+// size_t which represents the number of elements of the given
+// array. You should only use ARRAY_SIZE on statically allocated
+// arrays.
+#define ARRAY_SIZE(a)                                   \
+  ((sizeof(a) / sizeof(*(a))) /                         \
+  static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
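+
+// Editor's note: an illustrative example (not part of the original source).
+//
+//   static int primes[] = { 2, 3, 5, 7 };
+//   int count = ARRAY_SIZE(primes);  // 4
+//
+// The divisor becomes zero (a compile-time error) whenever sizeof(a) is not
+// a multiple of sizeof(*(a)), which catches many accidental uses on pointers
+// rather than arrays.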
+
+
+// The USE(x) template is used to silence C++ compiler warnings
+// issued for (yet) unused variables (typically parameters).
+template <typename T>
+static inline void USE(T) { }
+
+
+// FUNCTION_ADDR(f) gets the address of a C function f.
+#define FUNCTION_ADDR(f)                                        \
+  (reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(f)))
+
+
+// FUNCTION_CAST<F>(addr) casts an address into a function
+// of type F. Used to invoke generated code from within C.
+template <typename F>
+F FUNCTION_CAST(Address addr) {
+  return reinterpret_cast<F>(reinterpret_cast<intptr_t>(addr));
+}
+
+
+// A macro to disallow the evil copy constructor and operator= functions
+// This should be used in the private: declarations for a class
+#define DISALLOW_COPY_AND_ASSIGN(TypeName)      \
+  TypeName(const TypeName&);                    \
+  void operator=(const TypeName&)
+
+
+// A macro to disallow all the implicit constructors, namely the
+// default constructor, copy constructor and operator= functions.
+//
+// This should be used in the private: declarations for a class
+// that wants to prevent anyone from instantiating it. This is
+// especially useful for classes containing only static methods.
+#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
+  TypeName();                                    \
+  DISALLOW_COPY_AND_ASSIGN(TypeName)
+
+
+// Support for tracking C++ memory allocation.  Insert TRACK_MEMORY("Fisk")
+// inside a C++ class and new and delete will be overloaded so logging is
+// performed.
+// This file (globals.h) is included before log.h, so we use direct calls to
+// the Logger rather than the LOG macro.
+#ifdef DEBUG
+#define TRACK_MEMORY(name) \
+  void* operator new(size_t size) { \
+    void* result = ::operator new(size); \
+    Logger::NewEvent(name, result, size); \
+    return result; \
+  } \
+  void operator delete(void* object) { \
+    Logger::DeleteEvent(name, object); \
+    ::operator delete(object); \
+  }
+#else
+#define TRACK_MEMORY(name)
+#endif
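+
+// Editor's note: an illustrative sketch (not part of the original source) of
+// the usage described above; `Fisk` is the example name from the comment.
+//
+//   class Fisk {
+//    public:
+//     TRACK_MEMORY("Fisk")
+//     // ... members ...
+//   };
+//
+// In DEBUG builds, `new Fisk()` and `delete fisk` now log a NewEvent /
+// DeleteEvent pair through the Logger; in release builds the macro is empty
+// and the class is unchanged.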
+
+// Define used to help GCC make better inlining decisions. Don't bother for
+// debug builds. On GCC 3.4.5, using __attribute__((always_inline)) causes
+// compilation errors in debug builds.
+#if defined(__GNUC__) && !defined(DEBUG)
+#if (__GNUC__ >= 4)
+#define INLINE(header) inline header  __attribute__((always_inline))
+#else
+#define INLINE(header) inline __attribute__((always_inline)) header
+#endif
+#else
+#define INLINE(header) inline header
+#endif
+
+// The type-based aliasing rule allows the compiler to assume that pointers of
+// different types (for some definition of different) never alias each other.
+// Thus the following code does not work:
+//
+// float f = foo();
+// int fbits = *(int*)(&f);
+//
+// The compiler 'knows' that the int pointer can't refer to f since the types
+// don't match, so the compiler may cache f in a register, leaving random data
+// in fbits.  Using C++ style casts makes no difference, however a pointer to
+// char data is assumed to alias any other pointer.  This is the 'memcpy
+// exception'.
+//
+// bit_cast uses the memcpy exception to move the bits from a variable of one
+// type to a variable of another type.  Of course the end result is likely to
+// be implementation dependent.  Most compilers (gcc-4.2 and MSVC 2005)
+// will completely optimize bit_cast away.
+//
+// There is an additional use for bit_cast.
+// Recent gccs will warn when they see casts that may result in breakage due to
+// the type-based aliasing rule.  If you have checked that there is no breakage
+// you can use bit_cast to cast one pointer type to another.  This confuses gcc
+// enough that it can no longer see that you have cast one pointer type to
+// another thus avoiding the warning.
+template <class Dest, class Source>
+inline Dest bit_cast(const Source& source) {
+  // Compile time assertion: sizeof(Dest) == sizeof(Source)
+  // A compile error here means your Dest and Source have different sizes.
+  typedef char VerifySizesAreEqual[sizeof(Dest) == sizeof(Source) ? 1 : -1];
+
+  Dest dest;
+  memcpy(&dest, &source, sizeof(dest));
+  return dest;
+}
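+
+// Editor's note: an illustrative example (not part of the original source),
+// rewriting the problematic cast from the comment above in terms of bit_cast;
+// `foo()` is the placeholder from that comment.
+//
+//   float f = foo();
+//   int fbits = bit_cast<int>(f);            // safe: goes through memcpy
+//   int64_t dbits = bit_cast<int64_t>(1.5);  // inspect raw double bits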
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_GLOBALS_H_
diff --git a/src/handles-inl.h b/src/handles-inl.h
new file mode 100644
index 0000000..8478bb5
--- /dev/null
+++ b/src/handles-inl.h
@@ -0,0 +1,76 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef V8_HANDLES_INL_H_
+#define V8_HANDLES_INL_H_
+
+#include "apiutils.h"
+#include "handles.h"
+#include "api.h"
+
+namespace v8 {
+namespace internal {
+
+template<class T>
+Handle<T>::Handle(T* obj) {
+  ASSERT(!obj->IsFailure());
+  location_ = HandleScope::CreateHandle(obj);
+}
+
+
+template <class T>
+inline T* Handle<T>::operator*() const {
+  ASSERT(location_ != NULL);
+  ASSERT(reinterpret_cast<Address>(*location_) != kHandleZapValue);
+  return *location_;
+}
+
+
+#ifdef DEBUG
+inline NoHandleAllocation::NoHandleAllocation() {
+  v8::ImplementationUtilities::HandleScopeData* current =
+      v8::ImplementationUtilities::CurrentHandleScope();
+  extensions_ = current->extensions;
+  // Shrink the current handle scope to make it impossible to do
+  // handle allocations without an explicit handle scope.
+  current->limit = current->next;
+  current->extensions = -1;
+}
+
+
+inline NoHandleAllocation::~NoHandleAllocation() {
+  // Restore state in current handle scope to re-enable handle
+  // allocations.
+  v8::ImplementationUtilities::CurrentHandleScope()->extensions = extensions_;
+}
+#endif
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_HANDLES_INL_H_
diff --git a/src/handles.cc b/src/handles.cc
new file mode 100644
index 0000000..b43ec53
--- /dev/null
+++ b/src/handles.cc
@@ -0,0 +1,766 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "accessors.h"
+#include "api.h"
+#include "arguments.h"
+#include "bootstrapper.h"
+#include "compiler.h"
+#include "debug.h"
+#include "execution.h"
+#include "global-handles.h"
+#include "natives.h"
+#include "runtime.h"
+
+namespace v8 {
+namespace internal {
+
+
+v8::ImplementationUtilities::HandleScopeData HandleScope::current_ =
+    { -1, NULL, NULL };
+
+
+int HandleScope::NumberOfHandles() {
+  int n = HandleScopeImplementer::instance()->blocks()->length();
+  if (n == 0) return 0;
+  return ((n - 1) * kHandleBlockSize) +
+      (current_.next - HandleScopeImplementer::instance()->blocks()->last());
+}
+
+
+Object** HandleScope::Extend() {
+  Object** result = current_.next;
+
+  ASSERT(result == current_.limit);
+  // Make sure there's at least one scope on the stack and that the
+  // top of the scope stack isn't a barrier.
+  if (current_.extensions < 0) {
+    Utils::ReportApiFailure("v8::HandleScope::CreateHandle()",
+                            "Cannot create a handle without a HandleScope");
+    return NULL;
+  }
+  HandleScopeImplementer* impl = HandleScopeImplementer::instance();
+  // If there's more room in the last block, we use that. This is used
+  // for fast creation of scopes after scope barriers.
+  if (!impl->blocks()->is_empty()) {
+    Object** limit = &impl->blocks()->last()[kHandleBlockSize];
+    if (current_.limit != limit) {
+      current_.limit = limit;
+    }
+  }
+
+  // If we still haven't found a slot for the handle, we extend the
+  // current handle scope by allocating a new handle block.
+  if (result == current_.limit) {
+    // If there's a spare block, use it for growing the current scope.
+    result = impl->GetSpareOrNewBlock();
+    // Add the extension to the global list of blocks, but count the
+    // extension as part of the current scope.
+    impl->blocks()->Add(result);
+    current_.extensions++;
+    current_.limit = &result[kHandleBlockSize];
+  }
+
+  return result;
+}
+
+
+void HandleScope::DeleteExtensions() {
+  ASSERT(current_.extensions != 0);
+  HandleScopeImplementer::instance()->DeleteExtensions(current_.extensions);
+}
+
+
+void HandleScope::ZapRange(Object** start, Object** end) {
+  if (start == NULL) return;
+  for (Object** p = start; p < end; p++) {
+    *reinterpret_cast<Address*>(p) = v8::internal::kHandleZapValue;
+  }
+}
+
+
+Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray> content,
+                                      Handle<JSArray> array) {
+  CALL_HEAP_FUNCTION(content->AddKeysFromJSArray(*array), FixedArray);
+}
+
+
+Handle<FixedArray> UnionOfKeys(Handle<FixedArray> first,
+                               Handle<FixedArray> second) {
+  CALL_HEAP_FUNCTION(first->UnionOfKeys(*second), FixedArray);
+}
+
+
+Handle<JSGlobalProxy> ReinitializeJSGlobalProxy(
+    Handle<JSFunction> constructor,
+    Handle<JSGlobalProxy> global) {
+  CALL_HEAP_FUNCTION(Heap::ReinitializeJSGlobalProxy(*constructor, *global),
+                     JSGlobalProxy);
+}
+
+
+void SetExpectedNofProperties(Handle<JSFunction> func, int nof) {
+  func->shared()->set_expected_nof_properties(nof);
+  if (func->has_initial_map()) {
+    Handle<Map> new_initial_map =
+        Factory::CopyMapDropTransitions(Handle<Map>(func->initial_map()));
+    new_initial_map->set_unused_property_fields(nof);
+    func->set_initial_map(*new_initial_map);
+  }
+}
+
+
+void SetPrototypeProperty(Handle<JSFunction> func, Handle<JSObject> value) {
+  CALL_HEAP_FUNCTION_VOID(func->SetPrototype(*value));
+}
+
+
+static int ExpectedNofPropertiesFromEstimate(int estimate) {
+  // TODO(1231235): We need dynamic feedback to estimate the number
+  // of expected properties in an object. The static hack below
+  // is barely a solution.
+  if (estimate == 0) return 4;
+  return estimate + 2;
+}
+
+
+void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
+                                          int estimate) {
+  shared->set_expected_nof_properties(
+      ExpectedNofPropertiesFromEstimate(estimate));
+}
+
+
+void SetExpectedNofPropertiesFromEstimate(Handle<JSFunction> func,
+                                          int estimate) {
+  SetExpectedNofProperties(
+      func, ExpectedNofPropertiesFromEstimate(estimate));
+}
+
+
+void NormalizeProperties(Handle<JSObject> object,
+                         PropertyNormalizationMode mode,
+                         int expected_additional_properties) {
+  CALL_HEAP_FUNCTION_VOID(object->NormalizeProperties(
+      mode,
+      expected_additional_properties));
+}
+
+
+void NormalizeElements(Handle<JSObject> object) {
+  CALL_HEAP_FUNCTION_VOID(object->NormalizeElements());
+}
+
+
+void TransformToFastProperties(Handle<JSObject> object,
+                               int unused_property_fields) {
+  CALL_HEAP_FUNCTION_VOID(
+      object->TransformToFastProperties(unused_property_fields));
+}
+
+
+void FlattenString(Handle<String> string) {
+  CALL_HEAP_FUNCTION_VOID(string->TryFlattenIfNotFlat());
+  ASSERT(string->IsFlat());
+}
+
+
+Handle<Object> SetPrototype(Handle<JSFunction> function,
+                            Handle<Object> prototype) {
+  CALL_HEAP_FUNCTION(Accessors::FunctionSetPrototype(*function,
+                                                     *prototype,
+                                                     NULL),
+                     Object);
+}
+
+
+Handle<Object> SetProperty(Handle<JSObject> object,
+                           Handle<String> key,
+                           Handle<Object> value,
+                           PropertyAttributes attributes) {
+  CALL_HEAP_FUNCTION(object->SetProperty(*key, *value, attributes), Object);
+}
+
+
+Handle<Object> SetProperty(Handle<Object> object,
+                           Handle<Object> key,
+                           Handle<Object> value,
+                           PropertyAttributes attributes) {
+  CALL_HEAP_FUNCTION(
+      Runtime::SetObjectProperty(object, key, value, attributes), Object);
+}
+
+
+Handle<Object> ForceSetProperty(Handle<JSObject> object,
+                                Handle<Object> key,
+                                Handle<Object> value,
+                                PropertyAttributes attributes) {
+  CALL_HEAP_FUNCTION(
+      Runtime::ForceSetObjectProperty(object, key, value, attributes), Object);
+}
+
+
+Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
+                                   Handle<Object> key) {
+  CALL_HEAP_FUNCTION(Runtime::ForceDeleteObjectProperty(object, key), Object);
+}
+
+
+Handle<Object> IgnoreAttributesAndSetLocalProperty(
+    Handle<JSObject> object,
+    Handle<String> key,
+    Handle<Object> value,
+    PropertyAttributes attributes) {
+  CALL_HEAP_FUNCTION(object->
+      IgnoreAttributesAndSetLocalProperty(*key, *value, attributes), Object);
+}
+
+
+Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
+                                          Handle<String> key,
+                                          Handle<Object> value,
+                                          PropertyAttributes attributes) {
+  CALL_HEAP_FUNCTION(object->SetPropertyWithInterceptor(*key,
+                                                        *value,
+                                                        attributes),
+                     Object);
+}
+
+
+Handle<Object> GetProperty(Handle<JSObject> obj,
+                           const char* name) {
+  Handle<String> str = Factory::LookupAsciiSymbol(name);
+  CALL_HEAP_FUNCTION(obj->GetProperty(*str), Object);
+}
+
+
+Handle<Object> GetProperty(Handle<Object> obj,
+                           Handle<Object> key) {
+  CALL_HEAP_FUNCTION(Runtime::GetObjectProperty(obj, key), Object);
+}
+
+
+Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
+                                          Handle<JSObject> holder,
+                                          Handle<String> name,
+                                          PropertyAttributes* attributes) {
+  CALL_HEAP_FUNCTION(holder->GetPropertyWithInterceptor(*receiver,
+                                                        *name,
+                                                        attributes),
+                     Object);
+}
+
+
+Handle<Object> GetPrototype(Handle<Object> obj) {
+  Handle<Object> result(obj->GetPrototype());
+  return result;
+}
+
+
+Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
+                                   bool create_if_needed) {
+  Handle<String> key = Factory::hidden_symbol();
+
+  if (obj->HasFastProperties()) {
+    // If the object has fast properties, check whether the first slot
+    // in the descriptor array matches the hidden symbol. Since the
+    // hidden symbols hash code is zero (and no other string has hash
+    // code zero) it will always occupy the first entry if present.
+    DescriptorArray* descriptors = obj->map()->instance_descriptors();
+    if ((descriptors->number_of_descriptors() > 0) &&
+        (descriptors->GetKey(0) == *key) &&
+        descriptors->IsProperty(0)) {
+      ASSERT(descriptors->GetType(0) == FIELD);
+      return Handle<Object>(obj->FastPropertyAt(descriptors->GetFieldIndex(0)));
+    }
+  }
+
+  // Only attempt to find the hidden properties in the local object and not
+  // in the prototype chain.  Note that HasLocalProperty() can cause a GC in
+  // the general case in the presence of interceptors.
+  if (!obj->HasLocalProperty(*key)) {
+    // Hidden properties object not found. Allocate a new hidden properties
+    // object if requested. Otherwise return the undefined value.
+    if (create_if_needed) {
+      Handle<Object> hidden_obj = Factory::NewJSObject(Top::object_function());
+      return SetProperty(obj, key, hidden_obj, DONT_ENUM);
+    } else {
+      return Factory::undefined_value();
+    }
+  }
+  return GetProperty(obj, key);
+}
+
+
+Handle<Object> DeleteElement(Handle<JSObject> obj,
+                             uint32_t index) {
+  CALL_HEAP_FUNCTION(obj->DeleteElement(index, JSObject::NORMAL_DELETION),
+                     Object);
+}
+
+
+Handle<Object> DeleteProperty(Handle<JSObject> obj,
+                              Handle<String> prop) {
+  CALL_HEAP_FUNCTION(obj->DeleteProperty(*prop, JSObject::NORMAL_DELETION),
+                     Object);
+}
+
+
+Handle<Object> LookupSingleCharacterStringFromCode(uint32_t index) {
+  CALL_HEAP_FUNCTION(Heap::LookupSingleCharacterStringFromCode(index), Object);
+}
+
+
+Handle<String> SubString(Handle<String> str, int start, int end) {
+  CALL_HEAP_FUNCTION(str->Slice(start, end), String);
+}
+
+
+Handle<Object> SetElement(Handle<JSObject> object,
+                          uint32_t index,
+                          Handle<Object> value) {
+  if (object->HasPixelElements()) {
+    if (!value->IsSmi() && !value->IsHeapNumber() && !value->IsUndefined()) {
+      bool has_exception;
+      Handle<Object> number = Execution::ToNumber(value, &has_exception);
+      if (has_exception) return Handle<Object>();
+      value = number;
+    }
+  }
+  CALL_HEAP_FUNCTION(object->SetElement(index, *value), Object);
+}
+
+
+Handle<JSObject> Copy(Handle<JSObject> obj) {
+  CALL_HEAP_FUNCTION(Heap::CopyJSObject(*obj), JSObject);
+}
+
+
+// Wrappers for scripts are kept alive and cached in weak global
+// handles referred to from proxy objects held by the scripts as long as
+// they are used. When they are not used anymore, the garbage
+// collector will call the weak callback on the global handle
+// associated with the wrapper and get rid of both the wrapper and the
+// handle.
+static void ClearWrapperCache(Persistent<v8::Value> handle, void*) {
+#ifdef ENABLE_HEAP_PROTECTION
+  // Weak reference callbacks are called as if from outside V8.  We
+  // need to re-enter to unprotect the heap.
+  VMState state(OTHER);
+#endif
+  Handle<Object> cache = Utils::OpenHandle(*handle);
+  JSValue* wrapper = JSValue::cast(*cache);
+  Proxy* proxy = Script::cast(wrapper->value())->wrapper();
+  ASSERT(proxy->proxy() == reinterpret_cast<Address>(cache.location()));
+  proxy->set_proxy(0);
+  GlobalHandles::Destroy(cache.location());
+  Counters::script_wrappers.Decrement();
+}
+
+
+Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
+  if (script->wrapper()->proxy() != NULL) {
+    // Return the script wrapper directly from the cache.
+    return Handle<JSValue>(
+        reinterpret_cast<JSValue**>(script->wrapper()->proxy()));
+  }
+
+  // Construct a new script wrapper.
+  Counters::script_wrappers.Increment();
+  Handle<JSFunction> constructor = Top::script_function();
+  Handle<JSValue> result =
+      Handle<JSValue>::cast(Factory::NewJSObject(constructor));
+  result->set_value(*script);
+
+  // Create a new weak global handle and use it to cache the wrapper
+  // for future use. The cache will automatically be cleared by the
+  // garbage collector when it is not used anymore.
+  Handle<Object> handle = GlobalHandles::Create(*result);
+  GlobalHandles::MakeWeak(handle.location(), NULL, &ClearWrapperCache);
+  script->wrapper()->set_proxy(reinterpret_cast<Address>(handle.location()));
+  return result;
+}
+
+
+// Initialize the line_ends array with the code positions of line ends in the
+// script source.
+void InitScriptLineEnds(Handle<Script> script) {
+  if (!script->line_ends()->IsUndefined()) return;
+
+  if (!script->source()->IsString()) {
+    ASSERT(script->source()->IsUndefined());
+    script->set_line_ends(*(Factory::NewJSArray(0)));
+    ASSERT(script->line_ends()->IsJSArray());
+    return;
+  }
+
+  Handle<String> src(String::cast(script->source()));
+  const int src_len = src->length();
+  Handle<String> new_line = Factory::NewStringFromAscii(CStrVector("\n"));
+
+  // Pass 1: Identify line count.
+  int line_count = 0;
+  int position = 0;
+  while (position != -1 && position < src_len) {
+    position = Runtime::StringMatch(src, new_line, position);
+    if (position != -1) {
+      position++;
+    }
+    // Even if the last line misses a line end, it is counted.
+    line_count++;
+  }
+
+  // Pass 2: Fill in line ends positions
+  Handle<FixedArray> array = Factory::NewFixedArray(line_count);
+  int array_index = 0;
+  position = 0;
+  while (position != -1 && position < src_len) {
+    position = Runtime::StringMatch(src, new_line, position);
+    // If the script does not end with a line ending, add the final end
+    // position as just past the last line ending.
+    array->set(array_index++,
+               Smi::FromInt(position != -1 ? position++ : src_len));
+  }
+  ASSERT(array_index == line_count);
+
+  Handle<JSArray> object = Factory::NewJSArrayWithElements(array);
+  script->set_line_ends(*object);
+  ASSERT(script->line_ends()->IsJSArray());
+}
+
+
+// Convert code position into line number.
+int GetScriptLineNumber(Handle<Script> script, int code_pos) {
+  InitScriptLineEnds(script);
+  AssertNoAllocation no_allocation;
+  JSArray* line_ends_array = JSArray::cast(script->line_ends());
+  const int line_ends_len = (Smi::cast(line_ends_array->length()))->value();
+
+  int line = -1;
+  if (line_ends_len > 0 &&
+      code_pos <= (Smi::cast(line_ends_array->GetElement(0)))->value()) {
+    line = 0;
+  } else {
+    for (int i = 1; i < line_ends_len; ++i) {
+      if ((Smi::cast(line_ends_array->GetElement(i - 1)))->value() < code_pos &&
+          code_pos <= (Smi::cast(line_ends_array->GetElement(i)))->value()) {
+        line = i;
+        break;
+      }
+    }
+  }
+
+  return line != -1 ? line + script->line_offset()->value() : line;
+}
+
+
+void CustomArguments::IterateInstance(ObjectVisitor* v) {
+  v->VisitPointers(values_, values_ + 4);
+}
+
+
+// Compute the property keys from the interceptor.
+v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
+                                                 Handle<JSObject> object) {
+  Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
+  CustomArguments args(interceptor->data(), *receiver, *object);
+  v8::AccessorInfo info(args.end());
+  v8::Handle<v8::Array> result;
+  if (!interceptor->enumerator()->IsUndefined()) {
+    v8::NamedPropertyEnumerator enum_fun =
+        v8::ToCData<v8::NamedPropertyEnumerator>(interceptor->enumerator());
+    LOG(ApiObjectAccess("interceptor-named-enum", *object));
+    {
+      // Leaving JavaScript.
+      VMState state(EXTERNAL);
+      result = enum_fun(info);
+    }
+  }
+  return result;
+}
+
+
+// Compute the element keys from the interceptor.
+v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSObject> receiver,
+                                                   Handle<JSObject> object) {
+  Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
+  CustomArguments args(interceptor->data(), *receiver, *object);
+  v8::AccessorInfo info(args.end());
+  v8::Handle<v8::Array> result;
+  if (!interceptor->enumerator()->IsUndefined()) {
+    v8::IndexedPropertyEnumerator enum_fun =
+        v8::ToCData<v8::IndexedPropertyEnumerator>(interceptor->enumerator());
+    LOG(ApiObjectAccess("interceptor-indexed-enum", *object));
+    {
+      // Leaving JavaScript.
+      VMState state(EXTERNAL);
+      result = enum_fun(info);
+    }
+  }
+  return result;
+}
+
+
+Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
+                                          KeyCollectionType type) {
+  Handle<FixedArray> content = Factory::empty_fixed_array();
+
+  // Only collect keys if access is permitted.
+  for (Handle<Object> p = object;
+       *p != Heap::null_value();
+       p = Handle<Object>(p->GetPrototype())) {
+    Handle<JSObject> current(JSObject::cast(*p));
+
+    // Check access rights if required.
+    if (current->IsAccessCheckNeeded() &&
+        !Top::MayNamedAccess(*current, Heap::undefined_value(),
+                             v8::ACCESS_KEYS)) {
+      Top::ReportFailedAccessCheck(*current, v8::ACCESS_KEYS);
+      break;
+    }
+
+    // Compute the element keys.
+    Handle<FixedArray> element_keys =
+        Factory::NewFixedArray(current->NumberOfEnumElements());
+    current->GetEnumElementKeys(*element_keys);
+    content = UnionOfKeys(content, element_keys);
+
+    // Add the element keys from the interceptor.
+    if (current->HasIndexedInterceptor()) {
+      v8::Handle<v8::Array> result =
+          GetKeysForIndexedInterceptor(object, current);
+      if (!result.IsEmpty())
+        content = AddKeysFromJSArray(content, v8::Utils::OpenHandle(*result));
+    }
+
+    // Compute the property keys.
+    content = UnionOfKeys(content, GetEnumPropertyKeys(current));
+
+    // Add the property keys from the interceptor.
+    if (current->HasNamedInterceptor()) {
+      v8::Handle<v8::Array> result =
+          GetKeysForNamedInterceptor(object, current);
+      if (!result.IsEmpty())
+        content = AddKeysFromJSArray(content, v8::Utils::OpenHandle(*result));
+    }
+
+    // If we only want local properties we bail out after the first
+    // iteration.
+    if (type == LOCAL_ONLY)
+      break;
+  }
+  return content;
+}
+
+
+Handle<JSArray> GetKeysFor(Handle<JSObject> object) {
+  Counters::for_in.Increment();
+  Handle<FixedArray> elements = GetKeysInFixedArrayFor(object,
+                                                       INCLUDE_PROTOS);
+  return Factory::NewJSArrayWithElements(elements);
+}
+
+
+Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object) {
+  int index = 0;
+  if (object->HasFastProperties()) {
+    if (object->map()->instance_descriptors()->HasEnumCache()) {
+      Counters::enum_cache_hits.Increment();
+      DescriptorArray* desc = object->map()->instance_descriptors();
+      return Handle<FixedArray>(FixedArray::cast(desc->GetEnumCache()));
+    }
+    Counters::enum_cache_misses.Increment();
+    int num_enum = object->NumberOfEnumProperties();
+    Handle<FixedArray> storage = Factory::NewFixedArray(num_enum);
+    Handle<FixedArray> sort_array = Factory::NewFixedArray(num_enum);
+    Handle<DescriptorArray> descs =
+        Handle<DescriptorArray>(object->map()->instance_descriptors());
+    for (int i = 0; i < descs->number_of_descriptors(); i++) {
+      if (descs->IsProperty(i) && !descs->IsDontEnum(i)) {
+        (*storage)->set(index, descs->GetKey(i));
+        PropertyDetails details(descs->GetDetails(i));
+        (*sort_array)->set(index, Smi::FromInt(details.index()));
+        index++;
+      }
+    }
+    (*storage)->SortPairs(*sort_array, sort_array->length());
+    Handle<FixedArray> bridge_storage =
+        Factory::NewFixedArray(DescriptorArray::kEnumCacheBridgeLength);
+    DescriptorArray* desc = object->map()->instance_descriptors();
+    desc->SetEnumCache(*bridge_storage, *storage);
+    ASSERT(storage->length() == index);
+    return storage;
+  } else {
+    int num_enum = object->NumberOfEnumProperties();
+    Handle<FixedArray> storage = Factory::NewFixedArray(num_enum);
+    Handle<FixedArray> sort_array = Factory::NewFixedArray(num_enum);
+    object->property_dictionary()->CopyEnumKeysTo(*storage, *sort_array);
+    return storage;
+  }
+}
+
+
+bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
+                       ClearExceptionFlag flag,
+                       int loop_nesting) {
+  // Compile the source information to a code object.
+  ASSERT(!shared->is_compiled());
+  bool result = Compiler::CompileLazy(shared, loop_nesting);
+  ASSERT(result != Top::has_pending_exception());
+  if (!result && flag == CLEAR_EXCEPTION) Top::clear_pending_exception();
+  return result;
+}
+
+
+bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag) {
+  // Compile the source information to a code object.
+  Handle<SharedFunctionInfo> shared(function->shared());
+  return CompileLazyShared(shared, flag, 0);
+}
+
+
+bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag) {
+  // Compile the source information to a code object.
+  Handle<SharedFunctionInfo> shared(function->shared());
+  return CompileLazyShared(shared, flag, 1);
+}
+
+OptimizedObjectForAddingMultipleProperties::
+OptimizedObjectForAddingMultipleProperties(Handle<JSObject> object,
+                                           int expected_additional_properties,
+                                           bool condition) {
+  object_ = object;
+  if (condition && object_->HasFastProperties()) {
+    // Normalize the properties of the object to avoid n^2 behavior when
+    // extending the object with multiple properties. Indicate the number of
+    // properties to be added.
+    unused_property_fields_ = object->map()->unused_property_fields();
+    NormalizeProperties(object_,
+                        KEEP_INOBJECT_PROPERTIES,
+                        expected_additional_properties);
+    has_been_transformed_ = true;
+
+  } else {
+    has_been_transformed_ = false;
+  }
+}
+
+
+OptimizedObjectForAddingMultipleProperties::
+~OptimizedObjectForAddingMultipleProperties() {
+  // Reoptimize the object to allow fast property access.
+  if (has_been_transformed_) {
+    TransformToFastProperties(object_, unused_property_fields_);
+  }
+}
+
+
+void LoadLazy(Handle<JSObject> obj, bool* pending_exception) {
+  HandleScope scope;
+  Handle<FixedArray> info(FixedArray::cast(obj->map()->constructor()));
+  int index = Smi::cast(info->get(0))->value();
+  ASSERT(index >= 0);
+  Handle<Context> compile_context(Context::cast(info->get(1)));
+  Handle<Context> function_context(Context::cast(info->get(2)));
+  Handle<Object> receiver(compile_context->global()->builtins());
+
+  Vector<const char> name = Natives::GetScriptName(index);
+
+  Handle<JSFunction> boilerplate;
+
+  if (!Bootstrapper::NativesCacheLookup(name, &boilerplate)) {
+    Handle<String> source_code = Bootstrapper::NativesSourceLookup(index);
+    Handle<String> script_name = Factory::NewStringFromAscii(name);
+    bool allow_natives_syntax = FLAG_allow_natives_syntax;
+    FLAG_allow_natives_syntax = true;
+    boilerplate = Compiler::Compile(source_code, script_name, 0, 0, NULL, NULL);
+    FLAG_allow_natives_syntax = allow_natives_syntax;
+    // If the compilation failed (possibly due to stack overflows), we
+    // should never enter the result in the natives cache. Instead we
+    // return from the function without marking the function as having
+    // been lazily loaded.
+    if (boilerplate.is_null()) {
+      *pending_exception = true;
+      return;
+    }
+    Bootstrapper::NativesCacheAdd(name, boilerplate);
+  }
+
+  // We shouldn't get here if compiling the script failed.
+  ASSERT(!boilerplate.is_null());
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // When the debugger, running in its own context, touches lazy-loaded
+  // functions, loading can be triggered. In that case ensure that the
+  // execution of the boilerplate is in the correct context.
+  SaveContext save;
+  if (!Debug::debug_context().is_null() &&
+      Top::context() == *Debug::debug_context()) {
+    Top::set_context(*compile_context);
+  }
+#endif
+
+  // Reset the lazy load data before running the script to make sure
+  // not to get recursive lazy loading.
+  obj->map()->set_needs_loading(false);
+  obj->map()->set_constructor(info->get(3));
+
+  // Run the script.
+  Handle<JSFunction> script_fun(
+      Factory::NewFunctionFromBoilerplate(boilerplate, function_context));
+  Execution::Call(script_fun, receiver, 0, NULL, pending_exception);
+
+  // If lazy loading failed, restore the unloaded state of obj.
+  if (*pending_exception) {
+    obj->map()->set_needs_loading(true);
+    obj->map()->set_constructor(*info);
+  }
+}
+
+
+void SetupLazy(Handle<JSObject> obj,
+               int index,
+               Handle<Context> compile_context,
+               Handle<Context> function_context) {
+  Handle<FixedArray> arr = Factory::NewFixedArray(4);
+  arr->set(0, Smi::FromInt(index));
+  arr->set(1, *compile_context);  // Compile in this context
+  arr->set(2, *function_context);  // Set function context to this
+  arr->set(3, obj->map()->constructor());  // Remember the constructor
+  Handle<Map> old_map(obj->map());
+  Handle<Map> new_map = Factory::CopyMapDropTransitions(old_map);
+  obj->set_map(*new_map);
+  new_map->set_needs_loading(true);
+  // Store the lazy loading info in the constructor field.  We'll
+  // reestablish the constructor from the fixed array after loading.
+  new_map->set_constructor(*arr);
+  ASSERT(!obj->IsLoaded());
+}
+
+} }  // namespace v8::internal
diff --git a/src/handles.h b/src/handles.h
new file mode 100644
index 0000000..5d57465
--- /dev/null
+++ b/src/handles.h
@@ -0,0 +1,359 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HANDLES_H_
+#define V8_HANDLES_H_
+
+#include "apiutils.h"
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// A Handle provides a reference to an object that survives relocation by
+// the garbage collector.
+// Handles are only valid within a HandleScope.
+// When a handle is created for an object a cell is allocated in the heap.
+
+template<class T>
+class Handle {
+ public:
+  INLINE(Handle(T** location)) { location_ = location; }
+  INLINE(explicit Handle(T* obj));
+
+  INLINE(Handle()) : location_(NULL) {}
+
+  // Constructor for handling automatic up casting.
+  // Ex. Handle<JSFunction> can be passed when Handle<Object> is expected.
+  template <class S> Handle(Handle<S> handle) {
+#ifdef DEBUG
+    T* a = NULL;
+    S* b = NULL;
+    a = b;  // Fake assignment to enforce type checks.
+    USE(a);
+#endif
+    location_ = reinterpret_cast<T**>(handle.location());
+  }
+
+  INLINE(T* operator ->() const) { return operator*(); }
+
+  // Check if this handle refers to the exact same object as the other handle.
+  bool is_identical_to(const Handle<T> other) const {
+    return operator*() == *other;
+  }
+
+  // Provides the C++ dereference operator.
+  INLINE(T* operator*() const);
+
+  // Returns the address to where the raw pointer is stored.
+  T** location() const {
+    ASSERT(location_ == NULL ||
+           reinterpret_cast<Address>(*location_) != kZapValue);
+    return location_;
+  }
+
+  template <class S> static Handle<T> cast(Handle<S> that) {
+    T::cast(*that);
+    return Handle<T>(reinterpret_cast<T**>(that.location()));
+  }
+
+  static Handle<T> null() { return Handle<T>(); }
+  bool is_null() { return location_ == NULL; }
+
+  // Closes the given scope, but lets this handle escape. See
+  // implementation in api.h.
+  inline Handle<T> EscapeFrom(v8::HandleScope* scope);
+
+ private:
+  T** location_;
+};
+
+
+// A stack-allocated class that governs a number of local handles.
+// After a handle scope has been created, all local handles will be
+// allocated within that handle scope until either the handle scope is
+// deleted or another handle scope is created.  If there is already a
+// handle scope and a new one is created, all allocations will take
+// place in the new handle scope until it is deleted.  After that,
+// new handles will again be allocated in the original handle scope.
+//
+// After the handle scope of a local handle has been deleted the
+// garbage collector will no longer track the object stored in the
+// handle and may deallocate it.  The behavior of accessing a handle
+// for which the handle scope has been deleted is undefined.
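+//
+// For example (an illustrative sketch; 'Compute' is a hypothetical helper
+// that allocates handles):
+//
+//   {
+//     HandleScope inner_scope;
+//     Handle<Object> temp = Compute();  // Allocated in inner_scope.
+//   }
+//   // inner_scope is now closed; using 'temp' here would be undefined.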
+class HandleScope {
+ public:
+  HandleScope() : previous_(current_) {
+    current_.extensions = 0;
+  }
+
+  ~HandleScope() {
+    Leave(&previous_);
+  }
+
+  // Counts the number of allocated handles.
+  static int NumberOfHandles();
+
+  // Creates a new handle with the given value.
+  template <typename T>
+  static inline T** CreateHandle(T* value) {
+    internal::Object** cur = current_.next;
+    if (cur == current_.limit) cur = Extend();
+    // Update the current next field, set the value in the created
+    // handle, and return the result.
+    ASSERT(cur < current_.limit);
+    current_.next = cur + 1;
+
+    T** result = reinterpret_cast<T**>(cur);
+    *result = value;
+    return result;
+  }
+
+ private:
+  // Prevent heap allocation or illegal handle scopes.
+  HandleScope(const HandleScope&);
+  void operator=(const HandleScope&);
+  void* operator new(size_t size);
+  void operator delete(void*, size_t);
+
+  static v8::ImplementationUtilities::HandleScopeData current_;
+  const v8::ImplementationUtilities::HandleScopeData previous_;
+
+  // Pushes a fresh handle scope to be used when allocating new handles.
+  static void Enter(
+      v8::ImplementationUtilities::HandleScopeData* previous) {
+    *previous = current_;
+    current_.extensions = 0;
+  }
+
+  // Re-establishes the previous scope state. Should be called only
+  // once, and only for the current scope.
+  static void Leave(
+      const v8::ImplementationUtilities::HandleScopeData* previous) {
+    if (current_.extensions > 0) {
+      DeleteExtensions();
+    }
+    current_ = *previous;
+#ifdef DEBUG
+    ZapRange(current_.next, current_.limit);
+#endif
+  }
+
+  // Extend the handle scope making room for more handles.
+  static internal::Object** Extend();
+
+  // Deallocates any extensions used by the current scope.
+  static void DeleteExtensions();
+
+  // Zaps the handles in the half-open interval [start, end).
+  static void ZapRange(internal::Object** start, internal::Object** end);
+
+  friend class v8::HandleScope;
+  friend class v8::ImplementationUtilities;
+};
+
+
+// ----------------------------------------------------------------------------
+// Handle operations.
+// They might invoke garbage collection. The result is a handle to an
+// object of the expected type, or an empty handle if we run out of space
+// or encounter an internal error.
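+//
+// For example (illustrative; 'object', 'key' and 'value' are hypothetical
+// handles):
+//
+//   Handle<Object> result = SetProperty(object, key, value, NONE);
+//   if (result.is_null()) return;  // Failure; an exception may be pending.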
+
+void NormalizeProperties(Handle<JSObject> object,
+                         PropertyNormalizationMode mode,
+                         int expected_additional_properties);
+void NormalizeElements(Handle<JSObject> object);
+void TransformToFastProperties(Handle<JSObject> object,
+                               int unused_property_fields);
+void FlattenString(Handle<String> str);
+
+Handle<Object> SetProperty(Handle<JSObject> object,
+                           Handle<String> key,
+                           Handle<Object> value,
+                           PropertyAttributes attributes);
+
+Handle<Object> SetProperty(Handle<Object> object,
+                           Handle<Object> key,
+                           Handle<Object> value,
+                           PropertyAttributes attributes);
+
+Handle<Object> ForceSetProperty(Handle<JSObject> object,
+                                Handle<Object> key,
+                                Handle<Object> value,
+                                PropertyAttributes attributes);
+
+Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
+                                   Handle<Object> key);
+
+Handle<Object> IgnoreAttributesAndSetLocalProperty(Handle<JSObject> object,
+                                                   Handle<String> key,
+                                                   Handle<Object> value,
+    PropertyAttributes attributes);
+
+Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
+                                          Handle<String> key,
+                                          Handle<Object> value,
+                                          PropertyAttributes attributes);
+
+Handle<Object> SetElement(Handle<JSObject> object,
+                          uint32_t index,
+                          Handle<Object> value);
+
+Handle<Object> GetProperty(Handle<JSObject> obj,
+                           const char* name);
+
+Handle<Object> GetProperty(Handle<Object> obj,
+                           Handle<Object> key);
+
+Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
+                                          Handle<JSObject> holder,
+                                          Handle<String> name,
+                                          PropertyAttributes* attributes);
+
+Handle<Object> GetPrototype(Handle<Object> obj);
+
+// Return the object's hidden properties object. If the object has no hidden
+// properties and create_if_needed is true, then a new hidden property object
+// will be allocated. Otherwise Heap::undefined_value() is returned.
+Handle<Object> GetHiddenProperties(Handle<JSObject> obj, bool create_if_needed);
+
+Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index);
+Handle<Object> DeleteProperty(Handle<JSObject> obj, Handle<String> prop);
+
+Handle<Object> LookupSingleCharacterStringFromCode(uint32_t index);
+
+Handle<JSObject> Copy(Handle<JSObject> obj);
+
+Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray>,
+                                      Handle<JSArray> array);
+
+// Get the JS object corresponding to the given script; create it
+// if none exists.
+Handle<JSValue> GetScriptWrapper(Handle<Script> script);
+
+// Script line number computations.
+void InitScriptLineEnds(Handle<Script> script);
+int GetScriptLineNumber(Handle<Script> script, int code_position);
+
+// Computes the enumerable keys from interceptors. Used for debug mirrors and
+// by GetKeysInFixedArrayFor below.
+v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
+                                                 Handle<JSObject> object);
+v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSObject> receiver,
+                                                   Handle<JSObject> object);
+
+enum KeyCollectionType { LOCAL_ONLY, INCLUDE_PROTOS };
+
+// Computes the enumerable keys for a JSObject. Used for implementing
+// "for (n in object) { }".
+Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
+                                          KeyCollectionType type);
+Handle<JSArray> GetKeysFor(Handle<JSObject> object);
+Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object);
+
+// Computes the union of keys and return the result.
+// Used for implementing "for (n in object) { }"
+Handle<FixedArray> UnionOfKeys(Handle<FixedArray> first,
+                               Handle<FixedArray> second);
+
+Handle<String> SubString(Handle<String> str, int start, int end);
+
+
+// Sets the expected number of properties for the function's instances.
+void SetExpectedNofProperties(Handle<JSFunction> func, int nof);
+
+// Sets the prototype property for a function instance.
+void SetPrototypeProperty(Handle<JSFunction> func, Handle<JSObject> value);
+
+// Sets the expected number of properties based on estimate from compiler.
+void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
+                                          int estimate);
+void SetExpectedNofPropertiesFromEstimate(Handle<JSFunction> func,
+                                          int estimate);
+
+
+Handle<JSGlobalProxy> ReinitializeJSGlobalProxy(
+    Handle<JSFunction> constructor,
+    Handle<JSGlobalProxy> global);
+
+Handle<Object> SetPrototype(Handle<JSFunction> function,
+                            Handle<Object> prototype);
+
+
+// Do lazy compilation of the given function. Returns true on success
+// and false if the compilation resulted in a stack overflow.
+enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };
+
+bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
+                       ClearExceptionFlag flag,
+                       int loop_nesting);
+
+bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag);
+bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag);
+
+// These deal with lazily loaded properties.
+void SetupLazy(Handle<JSObject> obj,
+               int index,
+               Handle<Context> compile_context,
+               Handle<Context> function_context);
+void LoadLazy(Handle<JSObject> obj, bool* pending_exception);
+
+class NoHandleAllocation BASE_EMBEDDED {
+ public:
+#ifndef DEBUG
+  NoHandleAllocation() {}
+  ~NoHandleAllocation() {}
+#else
+  inline NoHandleAllocation();
+  inline ~NoHandleAllocation();
+ private:
+  int extensions_;
+#endif
+};
+
+
+// ----------------------------------------------------------------------------
+
+
+// Stack-allocated wrapper class for optimizing the addition of multiple
+// properties to an object.
+class OptimizedObjectForAddingMultipleProperties BASE_EMBEDDED {
+ public:
+  OptimizedObjectForAddingMultipleProperties(Handle<JSObject> object,
+                                             int expected_property_count,
+                                             bool condition = true);
+  ~OptimizedObjectForAddingMultipleProperties();
+ private:
+  bool has_been_transformed_;  // Tells whether the object has been transformed.
+  int unused_property_fields_;  // The number of unused property fields.
+  Handle<JSObject> object_;    // The object being optimized.
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_HANDLES_H_
diff --git a/src/hashmap.cc b/src/hashmap.cc
new file mode 100644
index 0000000..3c4e5cd
--- /dev/null
+++ b/src/hashmap.cc
@@ -0,0 +1,226 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "hashmap.h"
+
+namespace v8 {
+namespace internal {
+
+Allocator HashMap::DefaultAllocator;
+
+
+HashMap::HashMap() {
+  allocator_ = NULL;
+  match_ = NULL;
+}
+
+
+HashMap::HashMap(MatchFun match,
+                 Allocator* allocator,
+                 uint32_t initial_capacity) {
+  allocator_ = allocator;
+  match_ = match;
+  Initialize(initial_capacity);
+}
+
+
+HashMap::~HashMap() {
+  if (allocator_) {
+    allocator_->Delete(map_);
+  }
+}
+
+
+HashMap::Entry* HashMap::Lookup(void* key, uint32_t hash, bool insert) {
+  // Find a matching entry.
+  Entry* p = Probe(key, hash);
+  if (p->key != NULL) {
+    return p;
+  }
+
+  // No entry found; insert one if necessary.
+  if (insert) {
+    p->key = key;
+    p->value = NULL;
+    p->hash = hash;
+    occupancy_++;
+
+    // Grow the map if we reached >= 80% occupancy.
+    if (occupancy_ + occupancy_/4 >= capacity_) {
+      Resize();
+      p = Probe(key, hash);
+    }
+
+    return p;
+  }
+
+  // No entry found and none inserted.
+  return NULL;
+}
+
+
+void HashMap::Remove(void* key, uint32_t hash) {
+  // Lookup the entry for the key to remove.
+  Entry* p = Probe(key, hash);
+  if (p->key == NULL) {
+    // Key not found; nothing to remove.
+    return;
+  }
+
+  // To remove an entry we need to ensure that it does not create an empty
+  // entry that will cause the search for another entry to stop too soon. If all
+  // the entries between the entry to remove and the next empty slot have their
+  // initial position inside this interval, clearing the entry to remove will
+  // not break the search. If, while searching for the next empty entry, an
+  // entry is encountered which does not have its initial position between the
+  // entry to remove and the position looked at, then this entry can be moved to
+  // the place of the entry to remove without breaking the search for it. The
+  // entry made vacant by this move is now the entry to remove and the process
+  // starts over.
+  // Algorithm from http://en.wikipedia.org/wiki/Open_addressing.
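+  //
+  // For illustration (hypothetical layout): with capacity 8, if slots 3, 4
+  // and 5 are occupied, slot 3 is being removed, and the entry in slot 5 has
+  // initial position 3, then the entry from slot 5 is moved into slot 3 and
+  // slot 5 becomes the new candidate to clear.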
+
+  // This guarantees loop termination as there is at least one empty entry so
+  // eventually the removed entry will have an empty entry after it.
+  ASSERT(occupancy_ < capacity_);
+
+  // p is the candidate entry to clear. q is used to scan forwards.
+  Entry* q = p;  // Start at the entry to remove.
+  while (true) {
+    // Move q to the next entry.
+    q = q + 1;
+    if (q == map_end()) {
+      q = map_;
+    }
+
+    // All entries between p and q have their initial position between p and q
+    // and the entry p can be cleared without breaking the search for these
+    // entries.
+    if (q->key == NULL) {
+      break;
+    }
+
+    // Find the initial position for the entry at position q.
+    Entry* r = map_ + (q->hash & (capacity_ - 1));
+
+    // If the entry at position q has its initial position outside the range
+    // between p and q it can be moved forward to position p and will still be
+    // found. There is now a new candidate entry for clearing.
+    if ((q > p && (r <= p || r > q)) ||
+        (q < p && (r <= p && r > q))) {
+      *p = *q;
+      p = q;
+    }
+  }
+
+  // Clear the entry which is allowed to be emptied.
+  p->key = NULL;
+  occupancy_--;
+}
+
+
+void HashMap::Clear() {
+  // Mark all entries as empty.
+  const Entry* end = map_end();
+  for (Entry* p = map_; p < end; p++) {
+    p->key = NULL;
+  }
+  occupancy_ = 0;
+}
+
+
+HashMap::Entry* HashMap::Start() const {
+  return Next(map_ - 1);
+}
+
+
+HashMap::Entry* HashMap::Next(Entry* p) const {
+  const Entry* end = map_end();
+  ASSERT(map_ - 1 <= p && p < end);
+  for (p++; p < end; p++) {
+    if (p->key != NULL) {
+      return p;
+    }
+  }
+  return NULL;
+}
+
+
+HashMap::Entry* HashMap::Probe(void* key, uint32_t hash) {
+  ASSERT(key != NULL);
+
+  ASSERT(IsPowerOf2(capacity_));
+  Entry* p = map_ + (hash & (capacity_ - 1));
+  const Entry* end = map_end();
+  ASSERT(map_ <= p && p < end);
+
+  ASSERT(occupancy_ < capacity_);  // Guarantees loop termination.
+  while (p->key != NULL && (hash != p->hash || !match_(key, p->key))) {
+    p++;
+    if (p >= end) {
+      p = map_;
+    }
+  }
+
+  return p;
+}
+
+
+void HashMap::Initialize(uint32_t capacity) {
+  ASSERT(IsPowerOf2(capacity));
+  map_ = reinterpret_cast<Entry*>(allocator_->New(capacity * sizeof(Entry)));
+  if (map_ == NULL) {
+    V8::FatalProcessOutOfMemory("HashMap::Initialize");
+    return;
+  }
+  capacity_ = capacity;
+  Clear();
+}
+
+
+void HashMap::Resize() {
+  Entry* map = map_;
+  uint32_t n = occupancy_;
+
+  // Allocate larger map.
+  Initialize(capacity_ * 2);
+
+  // Rehash all current entries.
+  for (Entry* p = map; n > 0; p++) {
+    if (p->key != NULL) {
+      Lookup(p->key, p->hash, true)->value = p->value;
+      n--;
+    }
+  }
+
+  // Delete old map.
+  allocator_->Delete(map);
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/hashmap.h b/src/hashmap.h
new file mode 100644
index 0000000..b92c715
--- /dev/null
+++ b/src/hashmap.h
@@ -0,0 +1,120 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HASHMAP_H_
+#define V8_HASHMAP_H_
+
+namespace v8 {
+namespace internal {
+
+
+// Allocator defines the memory allocator interface
+// used by HashMap and implements a default allocator.
+class Allocator BASE_EMBEDDED {
+ public:
+  virtual ~Allocator()  {}
+  virtual void* New(size_t size)  { return Malloced::New(size); }
+  virtual void Delete(void* p)  { Malloced::Delete(p); }
+};
+
+
+class HashMap {
+ public:
+  static Allocator DefaultAllocator;
+
+  typedef bool (*MatchFun) (void* key1, void* key2);
+
+  // Dummy constructor.  This constructor doesn't set up the hash
+  // map properly so don't use it unless you have good reason.
+  HashMap();
+
+  // initial_capacity is the size of the initial hash map;
+  // it must be a power of 2 (and thus must not be 0).
+  HashMap(MatchFun match,
+          Allocator* allocator = &DefaultAllocator,
+          uint32_t initial_capacity = 8);
+
+  ~HashMap();
+
+  // HashMap entries are (key, value, hash) triplets.
+  // Some clients may not need to use the value slot
+  // (e.g. implementers of sets, where the key is the value).
+  struct Entry {
+    void* key;
+    void* value;
+    uint32_t hash;  // the full hash value for key
+  };
+
+  // If an entry with matching key is found, Lookup()
+  // returns that entry. If no matching entry is found,
+  // but insert is set, a new entry is inserted with
+  // corresponding key, key hash, and NULL value.
+  // Otherwise, NULL is returned.
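+  //
+  // Example (illustrative; 'ComputeHash' and 'NewValueFor' are hypothetical
+  // caller-supplied helpers):
+  //
+  //   HashMap::Entry* entry = map.Lookup(key, ComputeHash(key), true);
+  //   if (entry->value == NULL) entry->value = NewValueFor(key);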
+  Entry* Lookup(void* key, uint32_t hash, bool insert);
+
+  // Removes the entry with matching key.
+  void Remove(void* key, uint32_t hash);
+
+  // Empties the hash map (occupancy() == 0).
+  void Clear();
+
+  // The number of (non-empty) entries in the table.
+  uint32_t occupancy() const  { return occupancy_; }
+
+  // The capacity of the table. The implementation
+  // makes sure that occupancy is at most 80% of
+  // the table capacity.
+  uint32_t capacity() const  { return capacity_; }
+
+  // Iteration
+  //
+  // for (Entry* p = map.Start(); p != NULL; p = map.Next(p)) {
+  //   ...
+  // }
+  //
+  // If entries are inserted during iteration, the effect of
+  // calling Next() is undefined.
+  Entry* Start() const;
+  Entry* Next(Entry* p) const;
+
+ private:
+  Allocator* allocator_;
+  MatchFun match_;
+  Entry* map_;
+  uint32_t capacity_;
+  uint32_t occupancy_;
+
+  Entry* map_end() const  { return map_ + capacity_; }
+  Entry* Probe(void* key, uint32_t hash);
+  void Initialize(uint32_t capacity);
+  void Resize();
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_HASHMAP_H_
diff --git a/src/heap-inl.h b/src/heap-inl.h
new file mode 100644
index 0000000..0646878
--- /dev/null
+++ b/src/heap-inl.h
@@ -0,0 +1,326 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HEAP_INL_H_
+#define V8_HEAP_INL_H_
+
+#include "log.h"
+#include "v8-counters.h"
+
+namespace v8 {
+namespace internal {
+
+int Heap::MaxObjectSizeInPagedSpace() {
+  return Page::kMaxHeapObjectSize;
+}
+
+
+Object* Heap::AllocateSymbol(Vector<const char> str,
+                             int chars,
+                             uint32_t length_field) {
+  unibrow::Utf8InputBuffer<> buffer(str.start(),
+                                    static_cast<unsigned>(str.length()));
+  return AllocateInternalSymbol(&buffer, chars, length_field);
+}
+
+
+Object* Heap::AllocateRaw(int size_in_bytes,
+                          AllocationSpace space,
+                          AllocationSpace retry_space) {
+  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
+  ASSERT(space != NEW_SPACE ||
+         retry_space == OLD_POINTER_SPACE ||
+         retry_space == OLD_DATA_SPACE);
+#ifdef DEBUG
+  if (FLAG_gc_interval >= 0 &&
+      !disallow_allocation_failure_ &&
+      Heap::allocation_timeout_-- <= 0) {
+    return Failure::RetryAfterGC(size_in_bytes, space);
+  }
+  Counters::objs_since_last_full.Increment();
+  Counters::objs_since_last_young.Increment();
+#endif
+  Object* result;
+  if (NEW_SPACE == space) {
+    result = new_space_.AllocateRaw(size_in_bytes);
+    if (always_allocate() && result->IsFailure()) {
+      space = retry_space;
+    } else {
+      return result;
+    }
+  }
+
+  if (OLD_POINTER_SPACE == space) {
+    result = old_pointer_space_->AllocateRaw(size_in_bytes);
+  } else if (OLD_DATA_SPACE == space) {
+    result = old_data_space_->AllocateRaw(size_in_bytes);
+  } else if (CODE_SPACE == space) {
+    result = code_space_->AllocateRaw(size_in_bytes);
+  } else if (LO_SPACE == space) {
+    result = lo_space_->AllocateRaw(size_in_bytes);
+  } else if (CELL_SPACE == space) {
+    result = cell_space_->AllocateRaw(size_in_bytes);
+  } else {
+    ASSERT(MAP_SPACE == space);
+    result = map_space_->AllocateRaw(size_in_bytes);
+  }
+  if (result->IsFailure()) old_gen_exhausted_ = true;
+  return result;
+}
+
+
+Object* Heap::NumberFromInt32(int32_t value) {
+  if (Smi::IsValid(value)) return Smi::FromInt(value);
+  // Bypass NumberFromDouble to avoid various redundant checks.
+  return AllocateHeapNumber(FastI2D(value));
+}
+
+
+Object* Heap::NumberFromUint32(uint32_t value) {
+  if ((int32_t)value >= 0 && Smi::IsValid((int32_t)value)) {
+    return Smi::FromInt((int32_t)value);
+  }
+  // Bypass NumberFromDouble to avoid various redundant checks.
+  return AllocateHeapNumber(FastUI2D(value));
+}
+
+
+Object* Heap::AllocateRawMap() {
+#ifdef DEBUG
+  Counters::objs_since_last_full.Increment();
+  Counters::objs_since_last_young.Increment();
+#endif
+  Object* result = map_space_->AllocateRaw(Map::kSize);
+  if (result->IsFailure()) old_gen_exhausted_ = true;
+  return result;
+}
+
+
+Object* Heap::AllocateRawCell() {
+#ifdef DEBUG
+  Counters::objs_since_last_full.Increment();
+  Counters::objs_since_last_young.Increment();
+#endif
+  Object* result = cell_space_->AllocateRaw(JSGlobalPropertyCell::kSize);
+  if (result->IsFailure()) old_gen_exhausted_ = true;
+  return result;
+}
+
+
+bool Heap::InNewSpace(Object* object) {
+  return new_space_.Contains(object);
+}
+
+
+bool Heap::InFromSpace(Object* object) {
+  return new_space_.FromSpaceContains(object);
+}
+
+
+bool Heap::InToSpace(Object* object) {
+  return new_space_.ToSpaceContains(object);
+}
+
+
+bool Heap::ShouldBePromoted(Address old_address, int object_size) {
+  // An object should be promoted if:
+  // - the object has survived a scavenge operation or
+  // - to space is already 25% full.
+  return old_address < new_space_.age_mark()
+      || (new_space_.Size() + object_size) >= (new_space_.Capacity() >> 2);
+}
+
+
+void Heap::RecordWrite(Address address, int offset) {
+  if (new_space_.Contains(address)) return;
+  ASSERT(!new_space_.FromSpaceContains(address));
+  SLOW_ASSERT(Contains(address + offset));
+  Page::SetRSet(address, offset);
+}
+
+
+OldSpace* Heap::TargetSpace(HeapObject* object) {
+  InstanceType type = object->map()->instance_type();
+  AllocationSpace space = TargetSpaceId(type);
+  return (space == OLD_POINTER_SPACE)
+      ? old_pointer_space_
+      : old_data_space_;
+}
+
+
+AllocationSpace Heap::TargetSpaceId(InstanceType type) {
+  // Heap numbers and sequential strings are promoted to old data space; all
+  // other object types are promoted to old pointer space.  We do not use
+  // object->IsHeapNumber() and object->IsSeqString() because we already
+  // know that object has the heap object tag.
+  ASSERT((type != CODE_TYPE) && (type != MAP_TYPE));
+  bool has_pointers =
+      type != HEAP_NUMBER_TYPE &&
+      (type >= FIRST_NONSTRING_TYPE ||
+       (type & kStringRepresentationMask) != kSeqStringTag);
+  return has_pointers ? OLD_POINTER_SPACE : OLD_DATA_SPACE;
+}
+
+
+void Heap::CopyBlock(Object** dst, Object** src, int byte_size) {
+  ASSERT(IsAligned(byte_size, kPointerSize));
+
+  // Use block copying memcpy if the segment we're copying is big
+  // enough to justify the extra call/setup overhead.
+  static const int kBlockCopyLimit = 16 * kPointerSize;
+
+  if (byte_size >= kBlockCopyLimit) {
+    memcpy(dst, src, byte_size);
+  } else {
+    int remaining = byte_size / kPointerSize;
+    do {
+      remaining--;
+      *dst++ = *src++;
+    } while (remaining > 0);
+  }
+}
+
+
+void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
+  ASSERT(InFromSpace(object));
+
+  // We use the first word (where the map pointer usually is) of a heap
+  // object to record the forwarding pointer.  A forwarding pointer can
+  // point to an old space, the code space, or the to space of the new
+  // generation.
+  MapWord first_word = object->map_word();
+
+  // If the first word is a forwarding address, the object has already been
+  // copied.
+  if (first_word.IsForwardingAddress()) {
+    *p = first_word.ToForwardingAddress();
+    return;
+  }
+
+  // Call the slow part of scavenge object.
+  return ScavengeObjectSlow(p, object);
+}
+
+
+int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
+  ASSERT(HasBeenSetup());
+  int amount = amount_of_external_allocated_memory_ + change_in_bytes;
+  if (change_in_bytes >= 0) {
+    // Avoid overflow.
+    if (amount > amount_of_external_allocated_memory_) {
+      amount_of_external_allocated_memory_ = amount;
+    }
+    int amount_since_last_global_gc =
+        amount_of_external_allocated_memory_ -
+        amount_of_external_allocated_memory_at_last_global_gc_;
+    if (amount_since_last_global_gc > external_allocation_limit_) {
+      CollectAllGarbage(false);
+    }
+  } else {
+    // Avoid underflow.
+    if (amount >= 0) {
+      amount_of_external_allocated_memory_ = amount;
+    }
+  }
+  ASSERT(amount_of_external_allocated_memory_ >= 0);
+  return amount_of_external_allocated_memory_;
+}
+
+
+void Heap::SetLastScriptId(Object* last_script_id) {
+  roots_[kLastScriptIdRootIndex] = last_script_id;
+}
+
+
+#define GC_GREEDY_CHECK() \
+  ASSERT(!FLAG_gc_greedy || v8::internal::Heap::GarbageCollectionGreedyCheck())
+
+
+// Calls the FUNCTION_CALL function and retries it up to three times
+// to guarantee that any allocations performed during the call will
+// succeed if there's enough memory.
+
+// Warning: Do not use the identifiers __object__ or __scope__ in a
+// call to this macro.
+
+#define CALL_AND_RETRY(FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)         \
+  do {                                                                    \
+    GC_GREEDY_CHECK();                                                    \
+    Object* __object__ = FUNCTION_CALL;                                   \
+    if (!__object__->IsFailure()) RETURN_VALUE;                           \
+    if (__object__->IsOutOfMemoryFailure()) {                             \
+      v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_0");      \
+    }                                                                     \
+    if (!__object__->IsRetryAfterGC()) RETURN_EMPTY;                      \
+    Heap::CollectGarbage(Failure::cast(__object__)->requested(),          \
+                         Failure::cast(__object__)->allocation_space());  \
+    __object__ = FUNCTION_CALL;                                           \
+    if (!__object__->IsFailure()) RETURN_VALUE;                           \
+    if (__object__->IsOutOfMemoryFailure()) {                             \
+      v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_1");      \
+    }                                                                     \
+    if (!__object__->IsRetryAfterGC()) RETURN_EMPTY;                      \
+    Counters::gc_last_resort_from_handles.Increment();                    \
+    Heap::CollectAllGarbage(false);                                       \
+    {                                                                     \
+      AlwaysAllocateScope __scope__;                                      \
+      __object__ = FUNCTION_CALL;                                         \
+    }                                                                     \
+    if (!__object__->IsFailure()) RETURN_VALUE;                           \
+    if (__object__->IsOutOfMemoryFailure() ||                             \
+        __object__->IsRetryAfterGC()) {                                   \
+      /* TODO(1181417): Fix this. */                                      \
+      v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_2");      \
+    }                                                                     \
+    RETURN_EMPTY;                                                         \
+  } while (false)
+
+
+#define CALL_HEAP_FUNCTION(FUNCTION_CALL, TYPE)                \
+  CALL_AND_RETRY(FUNCTION_CALL,                                \
+                 return Handle<TYPE>(TYPE::cast(__object__)),  \
+                 return Handle<TYPE>())
+
+
+#define CALL_HEAP_FUNCTION_VOID(FUNCTION_CALL) \
+  CALL_AND_RETRY(FUNCTION_CALL, return, return)
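+
+
+// A typical handle-returning wrapper built on top of CALL_HEAP_FUNCTION
+// looks roughly like this (illustrative sketch only):
+//
+//   Handle<FixedArray> NewFixedArray(int size) {
+//     CALL_HEAP_FUNCTION(Heap::AllocateFixedArray(size), FixedArray);
+//   }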
+
+
+#ifdef DEBUG
+
+inline bool Heap::allow_allocation(bool new_state) {
+  bool old = allocation_allowed_;
+  allocation_allowed_ = new_state;
+  return old;
+}
+
+#endif
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_HEAP_INL_H_
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
new file mode 100644
index 0000000..ecb6919
--- /dev/null
+++ b/src/heap-profiler.cc
@@ -0,0 +1,626 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "heap-profiler.h"
+#include "string-stream.h"
+
+namespace v8 {
+namespace internal {
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+namespace {
+
+// Clusterizer is a set of helper functions for converting
+// object references into clusters.
+class Clusterizer : public AllStatic {
+ public:
+  static JSObjectsCluster Clusterize(HeapObject* obj) {
+    return Clusterize(obj, true);
+  }
+  static void InsertIntoTree(JSObjectsClusterTree* tree,
+                             HeapObject* obj, bool fine_grain);
+  static void InsertReferenceIntoTree(JSObjectsClusterTree* tree,
+                                      const JSObjectsCluster& cluster) {
+    InsertIntoTree(tree, cluster, 0);
+  }
+
+ private:
+  static JSObjectsCluster Clusterize(HeapObject* obj, bool fine_grain);
+  static int CalculateNetworkSize(JSObject* obj);
+  static int GetObjectSize(HeapObject* obj) {
+    return obj->IsJSObject() ?
+        CalculateNetworkSize(JSObject::cast(obj)) : obj->Size();
+  }
+  static void InsertIntoTree(JSObjectsClusterTree* tree,
+                             const JSObjectsCluster& cluster, int size);
+};
+
+
+JSObjectsCluster Clusterizer::Clusterize(HeapObject* obj, bool fine_grain) {
+  if (obj->IsJSObject()) {
+    JSObject* js_obj = JSObject::cast(obj);
+    String* constructor = js_obj->constructor_name();
+    // Differentiate Object and Array instances.
+    if (fine_grain && (constructor == Heap::Object_symbol() ||
+                       constructor == Heap::Array_symbol())) {
+      return JSObjectsCluster(constructor, obj);
+    } else {
+      return JSObjectsCluster(constructor);
+    }
+  } else if (obj->IsString()) {
+    return JSObjectsCluster(Heap::String_symbol());
+  }
+  return JSObjectsCluster();
+}
+
+
+void Clusterizer::InsertIntoTree(JSObjectsClusterTree* tree,
+                                 HeapObject* obj, bool fine_grain) {
+  JSObjectsCluster cluster = Clusterize(obj, fine_grain);
+  if (cluster.is_null()) return;
+  InsertIntoTree(tree, cluster, GetObjectSize(obj));
+}
+
+
+void Clusterizer::InsertIntoTree(JSObjectsClusterTree* tree,
+                                 const JSObjectsCluster& cluster, int size) {
+  JSObjectsClusterTree::Locator loc;
+  tree->Insert(cluster, &loc);
+  NumberAndSizeInfo number_and_size = loc.value();
+  number_and_size.increment_number(1);
+  number_and_size.increment_bytes(size);
+  loc.set_value(number_and_size);
+}
+
+
+int Clusterizer::CalculateNetworkSize(JSObject* obj) {
+  int size = obj->Size();
+  // If 'properties' and 'elements' are non-empty (thus, non-shared),
+  // take their size into account.
+  if (FixedArray::cast(obj->properties())->length() != 0) {
+    size += obj->properties()->Size();
+  }
+  if (FixedArray::cast(obj->elements())->length() != 0) {
+    size += obj->elements()->Size();
+  }
+  return size;
+}
+
+
+// A helper class for recording back references.
+class ReferencesExtractor : public ObjectVisitor {
+ public:
+  ReferencesExtractor(const JSObjectsCluster& cluster,
+                      RetainerHeapProfile* profile)
+      : cluster_(cluster),
+        profile_(profile),
+        inside_array_(false) {
+  }
+
+  void VisitPointer(Object** o) {
+    if ((*o)->IsJSObject() || (*o)->IsString()) {
+      profile_->StoreReference(cluster_, HeapObject::cast(*o));
+    } else if ((*o)->IsFixedArray() && !inside_array_) {
+      // Traverse one level deep for data members that are fixed arrays.
+      // This covers the case of 'elements' and 'properties' of JSObject,
+      // and function contexts.
+      inside_array_ = true;
+      FixedArray::cast(*o)->Iterate(this);
+      inside_array_ = false;
+    }
+  }
+
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++) VisitPointer(p);
+  }
+
+ private:
+  const JSObjectsCluster& cluster_;
+  RetainerHeapProfile* profile_;
+  bool inside_array_;
+};
+
+
+// A printer interface implementation for the Retainers profile.
+class RetainersPrinter : public RetainerHeapProfile::Printer {
+ public:
+  void PrintRetainers(const JSObjectsCluster& cluster,
+                      const StringStream& retainers) {
+    HeapStringAllocator allocator;
+    StringStream stream(&allocator);
+    cluster.Print(&stream);
+    LOG(HeapSampleJSRetainersEvent(
+        *(stream.ToCString()), *(retainers.ToCString())));
+  }
+};
+
+
+// Visitor for printing a cluster tree.
+class ClusterTreePrinter BASE_EMBEDDED {
+ public:
+  explicit ClusterTreePrinter(StringStream* stream) : stream_(stream) {}
+  void Call(const JSObjectsCluster& cluster,
+            const NumberAndSizeInfo& number_and_size) {
+    Print(stream_, cluster, number_and_size);
+  }
+  static void Print(StringStream* stream,
+                    const JSObjectsCluster& cluster,
+                    const NumberAndSizeInfo& number_and_size);
+
+ private:
+  StringStream* stream_;
+};
+
+
+void ClusterTreePrinter::Print(StringStream* stream,
+                               const JSObjectsCluster& cluster,
+                               const NumberAndSizeInfo& number_and_size) {
+  stream->Put(',');
+  cluster.Print(stream);
+  stream->Add(";%d", number_and_size.number());
+}
+
+
+// Visitor for printing a retainer tree.
+class SimpleRetainerTreePrinter BASE_EMBEDDED {
+ public:
+  explicit SimpleRetainerTreePrinter(RetainerHeapProfile::Printer* printer)
+      : printer_(printer) {}
+  void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
+
+ private:
+  RetainerHeapProfile::Printer* printer_;
+};
+
+
+void SimpleRetainerTreePrinter::Call(const JSObjectsCluster& cluster,
+                                     JSObjectsClusterTree* tree) {
+  HeapStringAllocator allocator;
+  StringStream stream(&allocator);
+  ClusterTreePrinter retainers_printer(&stream);
+  tree->ForEach(&retainers_printer);
+  printer_->PrintRetainers(cluster, stream);
+}
+
+
+// Visitor for aggregating reference counts of equivalent clusters.
+class RetainersAggregator BASE_EMBEDDED {
+ public:
+  RetainersAggregator(ClustersCoarser* coarser, JSObjectsClusterTree* dest_tree)
+      : coarser_(coarser), dest_tree_(dest_tree) {}
+  void Call(const JSObjectsCluster& cluster,
+            const NumberAndSizeInfo& number_and_size);
+
+ private:
+  ClustersCoarser* coarser_;
+  JSObjectsClusterTree* dest_tree_;
+};
+
+
+void RetainersAggregator::Call(const JSObjectsCluster& cluster,
+                               const NumberAndSizeInfo& number_and_size) {
+  JSObjectsCluster eq = coarser_->GetCoarseEquivalent(cluster);
+  if (eq.is_null()) eq = cluster;
+  JSObjectsClusterTree::Locator loc;
+  dest_tree_->Insert(eq, &loc);
+  NumberAndSizeInfo aggregated_number = loc.value();
+  aggregated_number.increment_number(number_and_size.number());
+  loc.set_value(aggregated_number);
+}
+
+
+// Visitor for printing a retainers tree; aggregates equivalent clusters.
+class AggregatingRetainerTreePrinter BASE_EMBEDDED {
+ public:
+  AggregatingRetainerTreePrinter(ClustersCoarser* coarser,
+                                 RetainerHeapProfile::Printer* printer)
+      : coarser_(coarser), printer_(printer) {}
+  void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
+
+ private:
+  ClustersCoarser* coarser_;
+  RetainerHeapProfile::Printer* printer_;
+};
+
+
+void AggregatingRetainerTreePrinter::Call(const JSObjectsCluster& cluster,
+                                          JSObjectsClusterTree* tree) {
+  if (!coarser_->GetCoarseEquivalent(cluster).is_null()) return;
+  JSObjectsClusterTree dest_tree_;
+  RetainersAggregator retainers_aggregator(coarser_, &dest_tree_);
+  tree->ForEach(&retainers_aggregator);
+  HeapStringAllocator allocator;
+  StringStream stream(&allocator);
+  ClusterTreePrinter retainers_printer(&stream);
+  dest_tree_.ForEach(&retainers_printer);
+  printer_->PrintRetainers(cluster, stream);
+}
+
+
+// A helper class for building a retainers tree that aggregates
+// all equivalent clusters.
+class RetainerTreeAggregator BASE_EMBEDDED {
+ public:
+  explicit RetainerTreeAggregator(ClustersCoarser* coarser)
+      : coarser_(coarser) {}
+  void Process(JSObjectsRetainerTree* input_tree) {
+    input_tree->ForEach(this);
+  }
+  void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
+  JSObjectsRetainerTree& output_tree() { return output_tree_; }
+
+ private:
+  ClustersCoarser* coarser_;
+  JSObjectsRetainerTree output_tree_;
+};
+
+
+void RetainerTreeAggregator::Call(const JSObjectsCluster& cluster,
+                                  JSObjectsClusterTree* tree) {
+  JSObjectsCluster eq = coarser_->GetCoarseEquivalent(cluster);
+  if (eq.is_null()) return;
+  JSObjectsRetainerTree::Locator loc;
+  if (output_tree_.Insert(eq, &loc)) {
+    loc.set_value(new JSObjectsClusterTree());
+  }
+  RetainersAggregator retainers_aggregator(coarser_, loc.value());
+  tree->ForEach(&retainers_aggregator);
+}
+
+}  // namespace
+
+
+const JSObjectsClusterTreeConfig::Key JSObjectsClusterTreeConfig::kNoKey;
+const JSObjectsClusterTreeConfig::Value JSObjectsClusterTreeConfig::kNoValue;
+
+
+ConstructorHeapProfile::ConstructorHeapProfile()
+    : zscope_(DELETE_ON_EXIT) {
+}
+
+
+void ConstructorHeapProfile::Call(const JSObjectsCluster& cluster,
+                                  const NumberAndSizeInfo& number_and_size) {
+  HeapStringAllocator allocator;
+  StringStream stream(&allocator);
+  cluster.Print(&stream);
+  LOG(HeapSampleJSConstructorEvent(*(stream.ToCString()),
+                                   number_and_size.number(),
+                                   number_and_size.bytes()));
+}
+
+
+void ConstructorHeapProfile::CollectStats(HeapObject* obj) {
+  Clusterizer::InsertIntoTree(&js_objects_info_tree_, obj, false);
+}
+
+
+void ConstructorHeapProfile::PrintStats() {
+  js_objects_info_tree_.ForEach(this);
+}
+
+
+void JSObjectsCluster::Print(StringStream* accumulator) const {
+  ASSERT(!is_null());
+  if (constructor_ == FromSpecialCase(ROOTS)) {
+    accumulator->Add("(roots)");
+  } else if (constructor_ == FromSpecialCase(GLOBAL_PROPERTY)) {
+    accumulator->Add("(global property)");
+  } else if (constructor_ == FromSpecialCase(SELF)) {
+    accumulator->Add("(self)");
+  } else {
+    SmartPointer<char> s_name(
+        constructor_->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL));
+    accumulator->Add("%s", (*s_name)[0] != '\0' ? *s_name : "(anonymous)");
+    if (instance_ != NULL) {
+      accumulator->Add(":%p", static_cast<void*>(instance_));
+    }
+  }
+}
+
+
+void JSObjectsCluster::DebugPrint(StringStream* accumulator) const {
+  if (!is_null()) {
+    Print(accumulator);
+  } else {
+    accumulator->Add("(null cluster)");
+  }
+}
+
+
+inline ClustersCoarser::ClusterBackRefs::ClusterBackRefs(
+    const JSObjectsCluster& cluster_)
+    : cluster(cluster_), refs(kInitialBackrefsListCapacity) {
+}
+
+
+inline ClustersCoarser::ClusterBackRefs::ClusterBackRefs(
+    const ClustersCoarser::ClusterBackRefs& src)
+    : cluster(src.cluster), refs(src.refs.capacity()) {
+  refs.AddAll(src.refs);
+}
+
+
+inline ClustersCoarser::ClusterBackRefs&
+    ClustersCoarser::ClusterBackRefs::operator=(
+    const ClustersCoarser::ClusterBackRefs& src) {
+  if (this == &src) return *this;
+  cluster = src.cluster;
+  refs.Clear();
+  refs.AddAll(src.refs);
+  return *this;
+}
+
+
+inline int ClustersCoarser::ClusterBackRefs::Compare(
+    const ClustersCoarser::ClusterBackRefs& a,
+    const ClustersCoarser::ClusterBackRefs& b) {
+  int cmp = JSObjectsCluster::CompareConstructors(a.cluster, b.cluster);
+  if (cmp != 0) return cmp;
+  if (a.refs.length() < b.refs.length()) return -1;
+  if (a.refs.length() > b.refs.length()) return 1;
+  for (int i = 0; i < a.refs.length(); ++i) {
+    int cmp = JSObjectsCluster::Compare(a.refs[i], b.refs[i]);
+    if (cmp != 0) return cmp;
+  }
+  return 0;
+}
+
+
+ClustersCoarser::ClustersCoarser()
+    : zscope_(DELETE_ON_EXIT),
+      sim_list_(ClustersCoarser::kInitialSimilarityListCapacity),
+      current_pair_(NULL),
+      current_set_(NULL),
+      self_(NULL) {
+}
+
+
+void ClustersCoarser::Call(const JSObjectsCluster& cluster,
+                           JSObjectsClusterTree* tree) {
+  if (!cluster.can_be_coarsed()) return;
+  ClusterBackRefs pair(cluster);
+  ASSERT(current_pair_ == NULL);
+  current_pair_ = &pair;
+  current_set_ = new JSObjectsRetainerTree();
+  self_ = &cluster;
+  tree->ForEach(this);
+  sim_list_.Add(pair);
+  current_pair_ = NULL;
+  current_set_ = NULL;
+  self_ = NULL;
+}
+
+
+void ClustersCoarser::Call(const JSObjectsCluster& cluster,
+                           const NumberAndSizeInfo& number_and_size) {
+  ASSERT(current_pair_ != NULL);
+  ASSERT(current_set_ != NULL);
+  ASSERT(self_ != NULL);
+  JSObjectsRetainerTree::Locator loc;
+  if (JSObjectsCluster::Compare(*self_, cluster) == 0) {
+    current_pair_->refs.Add(JSObjectsCluster(JSObjectsCluster::SELF));
+    return;
+  }
+  JSObjectsCluster eq = GetCoarseEquivalent(cluster);
+  if (!eq.is_null()) {
+    if (current_set_->Find(eq, &loc)) return;
+    current_pair_->refs.Add(eq);
+    current_set_->Insert(eq, &loc);
+  } else {
+    current_pair_->refs.Add(cluster);
+  }
+}
+
+
+void ClustersCoarser::Process(JSObjectsRetainerTree* tree) {
+  int last_eq_clusters = -1;
+  for (int i = 0; i < kMaxPassesCount; ++i) {
+    sim_list_.Clear();
+    const int curr_eq_clusters = DoProcess(tree);
+    // If no new cluster equivalents discovered, abort processing.
+    if (last_eq_clusters == curr_eq_clusters) break;
+    last_eq_clusters = curr_eq_clusters;
+  }
+}
+
+
+int ClustersCoarser::DoProcess(JSObjectsRetainerTree* tree) {
+  tree->ForEach(this);
+  sim_list_.Iterate(ClusterBackRefs::SortRefsIterator);
+  sim_list_.Sort(ClusterBackRefsCmp);
+  return FillEqualityTree();
+}
+
+
+JSObjectsCluster ClustersCoarser::GetCoarseEquivalent(
+    const JSObjectsCluster& cluster) {
+  if (!cluster.can_be_coarsed()) return JSObjectsCluster();
+  EqualityTree::Locator loc;
+  return eq_tree_.Find(cluster, &loc) ? loc.value() : JSObjectsCluster();
+}
+
+
+bool ClustersCoarser::HasAnEquivalent(const JSObjectsCluster& cluster) {
+  // Return true for coarsible clusters that have a non-identical equivalent.
+  if (!cluster.can_be_coarsed()) return false;
+  JSObjectsCluster eq = GetCoarseEquivalent(cluster);
+  return !eq.is_null() && JSObjectsCluster::Compare(cluster, eq) != 0;
+}
+
+
+int ClustersCoarser::FillEqualityTree() {
+  int eq_clusters_count = 0;
+  int eq_to = 0;
+  bool first_added = false;
+  for (int i = 1; i < sim_list_.length(); ++i) {
+    if (ClusterBackRefs::Compare(sim_list_[i], sim_list_[eq_to]) == 0) {
+      EqualityTree::Locator loc;
+      if (!first_added) {
+        // Add self-equivalence, if we have more than one item in this
+        // equivalence class.
+        eq_tree_.Insert(sim_list_[eq_to].cluster, &loc);
+        loc.set_value(sim_list_[eq_to].cluster);
+        first_added = true;
+      }
+      eq_tree_.Insert(sim_list_[i].cluster, &loc);
+      loc.set_value(sim_list_[eq_to].cluster);
+      ++eq_clusters_count;
+    } else {
+      eq_to = i;
+      first_added = false;
+    }
+  }
+  return eq_clusters_count;
+}
+
+
+const JSObjectsCluster ClustersCoarser::ClusterEqualityConfig::kNoKey;
+const JSObjectsCluster ClustersCoarser::ClusterEqualityConfig::kNoValue;
+const JSObjectsRetainerTreeConfig::Key JSObjectsRetainerTreeConfig::kNoKey;
+const JSObjectsRetainerTreeConfig::Value JSObjectsRetainerTreeConfig::kNoValue =
+    NULL;
+
+
+RetainerHeapProfile::RetainerHeapProfile()
+    : zscope_(DELETE_ON_EXIT) {
+  JSObjectsCluster roots(JSObjectsCluster::ROOTS);
+  ReferencesExtractor extractor(roots, this);
+  Heap::IterateRoots(&extractor);
+}
+
+
+void RetainerHeapProfile::StoreReference(const JSObjectsCluster& cluster,
+                                         HeapObject* ref) {
+  JSObjectsCluster ref_cluster = Clusterizer::Clusterize(ref);
+  JSObjectsRetainerTree::Locator ref_loc;
+  if (retainers_tree_.Insert(ref_cluster, &ref_loc)) {
+    ref_loc.set_value(new JSObjectsClusterTree());
+  }
+  JSObjectsClusterTree* referenced_by = ref_loc.value();
+  Clusterizer::InsertReferenceIntoTree(referenced_by, cluster);
+}
+
+
+void RetainerHeapProfile::CollectStats(HeapObject* obj) {
+  if (obj->IsJSObject()) {
+    const JSObjectsCluster cluster = Clusterizer::Clusterize(obj);
+    ReferencesExtractor extractor(cluster, this);
+    obj->Iterate(&extractor);
+  } else if (obj->IsJSGlobalPropertyCell()) {
+    JSObjectsCluster global_prop(JSObjectsCluster::GLOBAL_PROPERTY);
+    ReferencesExtractor extractor(global_prop, this);
+    obj->Iterate(&extractor);
+  }
+}
+
+
+void RetainerHeapProfile::DebugPrintStats(
+    RetainerHeapProfile::Printer* printer) {
+  coarser_.Process(&retainers_tree_);
+  // Print clusters that have no equivalents, aggregating their retainers.
+  AggregatingRetainerTreePrinter agg_printer(&coarser_, printer);
+  retainers_tree_.ForEach(&agg_printer);
+  // Now aggregate clusters that have equivalents...
+  RetainerTreeAggregator aggregator(&coarser_);
+  aggregator.Process(&retainers_tree_);
+  // ...and print them.
+  SimpleRetainerTreePrinter s_printer(printer);
+  aggregator.output_tree().ForEach(&s_printer);
+}
+
+
+void RetainerHeapProfile::PrintStats() {
+  RetainersPrinter printer;
+  DebugPrintStats(&printer);
+}
+
+
+//
+// HeapProfiler class implementation.
+//
+void HeapProfiler::CollectStats(HeapObject* obj, HistogramInfo* info) {
+  InstanceType type = obj->map()->instance_type();
+  ASSERT(0 <= type && type <= LAST_TYPE);
+  info[type].increment_number(1);
+  info[type].increment_bytes(obj->Size());
+}
+
+
+void HeapProfiler::WriteSample() {
+  LOG(HeapSampleBeginEvent("Heap", "allocated"));
+  LOG(HeapSampleStats(
+      "Heap", "allocated", Heap::Capacity(), Heap::SizeOfObjects()));
+
+  HistogramInfo info[LAST_TYPE+1];
+#define DEF_TYPE_NAME(name) info[name].set_name(#name);
+  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
+#undef DEF_TYPE_NAME
+
+  ConstructorHeapProfile js_cons_profile;
+  RetainerHeapProfile js_retainer_profile;
+  HeapIterator iterator;
+  while (iterator.has_next()) {
+    HeapObject* obj = iterator.next();
+    CollectStats(obj, info);
+    js_cons_profile.CollectStats(obj);
+    js_retainer_profile.CollectStats(obj);
+  }
+
+  // Lump all the string types together.
+  int string_number = 0;
+  int string_bytes = 0;
+#define INCREMENT_SIZE(type, size, name, camel_name)   \
+    string_number += info[type].number();              \
+    string_bytes += info[type].bytes();
+  STRING_TYPE_LIST(INCREMENT_SIZE)
+#undef INCREMENT_SIZE
+  if (string_bytes > 0) {
+    LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
+  }
+
+  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
+    if (info[i].bytes() > 0) {
+      LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
+                              info[i].bytes()));
+    }
+  }
+
+  js_cons_profile.PrintStats();
+  js_retainer_profile.PrintStats();
+
+  LOG(HeapSampleEndEvent("Heap", "allocated"));
+}
+
+
+#endif  // ENABLE_LOGGING_AND_PROFILING
+
+
+} }  // namespace v8::internal
diff --git a/src/heap-profiler.h b/src/heap-profiler.h
new file mode 100644
index 0000000..7fda883
--- /dev/null
+++ b/src/heap-profiler.h
@@ -0,0 +1,263 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HEAP_PROFILER_H_
+#define V8_HEAP_PROFILER_H_
+
+namespace v8 {
+namespace internal {
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+// The HeapProfiler writes data to the log files, which can be postprocessed
+// to generate .hp files for use by the GHC/Valgrind tool hp2ps.
+class HeapProfiler {
+ public:
+  // Write a single heap sample to the log file.
+  static void WriteSample();
+
+ private:
+  // Update the array info with stats from obj.
+  static void CollectStats(HeapObject* obj, HistogramInfo* info);
+};
+
+
+// JSObjectsCluster describes a group of JS objects that are
+// considered equivalent in terms of a particular profile.
+class JSObjectsCluster BASE_EMBEDDED {
+ public:
+  // These special cases are used in the retainer profile.
+  enum SpecialCase {
+    ROOTS = 1,
+    GLOBAL_PROPERTY = 2,
+    SELF = 3  // This case is used in ClustersCoarser only.
+  };
+
+  JSObjectsCluster() : constructor_(NULL), instance_(NULL) {}
+  explicit JSObjectsCluster(String* constructor)
+      : constructor_(constructor), instance_(NULL) {}
+  explicit JSObjectsCluster(SpecialCase special)
+      : constructor_(FromSpecialCase(special)), instance_(NULL) {}
+  JSObjectsCluster(String* constructor, Object* instance)
+      : constructor_(constructor), instance_(instance) {}
+
+  static int CompareConstructors(const JSObjectsCluster& a,
+                                 const JSObjectsCluster& b) {
+    // Strings are unique, so it is sufficient to compare their pointers.
+    return a.constructor_ == b.constructor_ ? 0
+        : (a.constructor_ < b.constructor_ ? -1 : 1);
+  }
+  static int Compare(const JSObjectsCluster& a, const JSObjectsCluster& b) {
+    // Strings are unique, so it is sufficient to compare their pointers.
+    const int cons_cmp = CompareConstructors(a, b);
+    return cons_cmp == 0 ?
+        (a.instance_ == b.instance_ ? 0 : (a.instance_ < b.instance_ ? -1 : 1))
+        : cons_cmp;
+  }
+  static int Compare(const JSObjectsCluster* a, const JSObjectsCluster* b) {
+    return Compare(*a, *b);
+  }
+
+  bool is_null() const { return constructor_ == NULL; }
+  bool can_be_coarsed() const { return instance_ != NULL; }
+  String* constructor() const { return constructor_; }
+
+  void Print(StringStream* accumulator) const;
+  // Allows null clusters to be printed.
+  void DebugPrint(StringStream* accumulator) const;
+
+ private:
+  static String* FromSpecialCase(SpecialCase special) {
+    // We use symbols that are illegal JS identifiers to identify special cases.
+    // Their actual value is irrelevant to us.
+    switch (special) {
+      case ROOTS: return Heap::result_symbol();
+      case GLOBAL_PROPERTY: return Heap::code_symbol();
+      case SELF: return Heap::catch_var_symbol();
+      default:
+        UNREACHABLE();
+        return NULL;
+    }
+  }
+
+  String* constructor_;
+  Object* instance_;
+};
+
+
+struct JSObjectsClusterTreeConfig {
+  typedef JSObjectsCluster Key;
+  typedef NumberAndSizeInfo Value;
+  static const Key kNoKey;
+  static const Value kNoValue;
+  static int Compare(const Key& a, const Key& b) {
+    return Key::Compare(a, b);
+  }
+};
+typedef ZoneSplayTree<JSObjectsClusterTreeConfig> JSObjectsClusterTree;
+
+
+// ConstructorHeapProfile is responsible for gathering and logging
+// "constructor profile" of JS objects allocated on heap.
+// It is run during garbage collection cycle, thus it doesn't need
+// to use handles.
+class ConstructorHeapProfile BASE_EMBEDDED {
+ public:
+  ConstructorHeapProfile();
+  virtual ~ConstructorHeapProfile() {}
+  void CollectStats(HeapObject* obj);
+  void PrintStats();
+  // Used by ZoneSplayTree::ForEach. Made virtual to allow overriding in tests.
+  virtual void Call(const JSObjectsCluster& cluster,
+                    const NumberAndSizeInfo& number_and_size);
+
+ private:
+  ZoneScope zscope_;
+  JSObjectsClusterTree js_objects_info_tree_;
+};
+
+
+// JSObjectsRetainerTree is used to represent retainer graphs using
+// adjacency list form:
+//
+//   Cluster -> (Cluster -> NumberAndSizeInfo)
+//
+// Subordinate splay trees are stored by pointer. They are zone-allocated,
+// so there is no need to manage their lifetime.
+//
+struct JSObjectsRetainerTreeConfig {
+  typedef JSObjectsCluster Key;
+  typedef JSObjectsClusterTree* Value;
+  static const Key kNoKey;
+  static const Value kNoValue;
+  static int Compare(const Key& a, const Key& b) {
+    return Key::Compare(a, b);
+  }
+};
+typedef ZoneSplayTree<JSObjectsRetainerTreeConfig> JSObjectsRetainerTree;
+
+
+class ClustersCoarser BASE_EMBEDDED {
+ public:
+  ClustersCoarser();
+
+  // Processes a given retainer graph.
+  void Process(JSObjectsRetainerTree* tree);
+
+  // Returns an equivalent cluster (can be the cluster itself).
+  // If the given cluster doesn't have an equivalent, returns a null cluster.
+  JSObjectsCluster GetCoarseEquivalent(const JSObjectsCluster& cluster);
+  // Returns whether a cluster can be substituted with an equivalent and thus
+  // skipped in some cases.
+  bool HasAnEquivalent(const JSObjectsCluster& cluster);
+
+  // Used by JSObjectsRetainerTree::ForEach.
+  void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
+  void Call(const JSObjectsCluster& cluster,
+            const NumberAndSizeInfo& number_and_size);
+
+ private:
+  // Stores a list of back references for a cluster.
+  struct ClusterBackRefs {
+    explicit ClusterBackRefs(const JSObjectsCluster& cluster_);
+    ClusterBackRefs(const ClusterBackRefs& src);
+    ClusterBackRefs& operator=(const ClusterBackRefs& src);
+
+    static int Compare(const ClusterBackRefs& a, const ClusterBackRefs& b);
+    void SortRefs() { refs.Sort(JSObjectsCluster::Compare); }
+    static void SortRefsIterator(ClusterBackRefs* ref) { ref->SortRefs(); }
+
+    JSObjectsCluster cluster;
+    ZoneList<JSObjectsCluster> refs;
+  };
+  typedef ZoneList<ClusterBackRefs> SimilarityList;
+
+  // A tree for storing a list of equivalents for a cluster.
+  struct ClusterEqualityConfig {
+    typedef JSObjectsCluster Key;
+    typedef JSObjectsCluster Value;
+    static const Key kNoKey;
+    static const Value kNoValue;
+    static int Compare(const Key& a, const Key& b) {
+      return Key::Compare(a, b);
+    }
+  };
+  typedef ZoneSplayTree<ClusterEqualityConfig> EqualityTree;
+
+  static int ClusterBackRefsCmp(const ClusterBackRefs* a,
+                                const ClusterBackRefs* b) {
+    return ClusterBackRefs::Compare(*a, *b);
+  }
+  int DoProcess(JSObjectsRetainerTree* tree);
+  int FillEqualityTree();
+
+  static const int kInitialBackrefsListCapacity = 2;
+  static const int kInitialSimilarityListCapacity = 2000;
+  // Number of passes for finding equivalents. Limits the length of paths
+  // that can be considered equivalent.
+  static const int kMaxPassesCount = 10;
+
+  ZoneScope zscope_;
+  SimilarityList sim_list_;
+  EqualityTree eq_tree_;
+  ClusterBackRefs* current_pair_;
+  JSObjectsRetainerTree* current_set_;
+  const JSObjectsCluster* self_;
+};
+
+
+// RetainerHeapProfile is responsible for gathering and logging
+// "retainer profile" of JS objects allocated on heap.
+// It is run during garbage collection cycle, thus it doesn't need
+// to use handles.
+class RetainerHeapProfile BASE_EMBEDDED {
+ public:
+  class Printer {
+   public:
+    virtual ~Printer() {}
+    virtual void PrintRetainers(const JSObjectsCluster& cluster,
+                                const StringStream& retainers) = 0;
+  };
+
+  RetainerHeapProfile();
+  void CollectStats(HeapObject* obj);
+  void PrintStats();
+  void DebugPrintStats(Printer* printer);
+  void StoreReference(const JSObjectsCluster& cluster, HeapObject* ref);
+
+ private:
+  ZoneScope zscope_;
+  JSObjectsRetainerTree retainers_tree_;
+  ClustersCoarser coarser_;
+};
+
+
+#endif  // ENABLE_LOGGING_AND_PROFILING
+
+} }  // namespace v8::internal
+
+#endif  // V8_HEAP_PROFILER_H_
diff --git a/src/heap.cc b/src/heap.cc
new file mode 100644
index 0000000..dcc25a3
--- /dev/null
+++ b/src/heap.cc
@@ -0,0 +1,3896 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "accessors.h"
+#include "api.h"
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "compilation-cache.h"
+#include "debug.h"
+#include "heap-profiler.h"
+#include "global-handles.h"
+#include "mark-compact.h"
+#include "natives.h"
+#include "scanner.h"
+#include "scopeinfo.h"
+#include "v8threads.h"
+#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
+#include "regexp-macro-assembler.h"
+#endif
+
+namespace v8 {
+namespace internal {
+
+
+String* Heap::hidden_symbol_;
+Object* Heap::roots_[Heap::kRootListLength];
+
+
+NewSpace Heap::new_space_;
+OldSpace* Heap::old_pointer_space_ = NULL;
+OldSpace* Heap::old_data_space_ = NULL;
+OldSpace* Heap::code_space_ = NULL;
+MapSpace* Heap::map_space_ = NULL;
+CellSpace* Heap::cell_space_ = NULL;
+LargeObjectSpace* Heap::lo_space_ = NULL;
+
+static const int kMinimumPromotionLimit = 2*MB;
+static const int kMinimumAllocationLimit = 8*MB;
+
+int Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
+int Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;
+
+int Heap::old_gen_exhausted_ = false;
+
+int Heap::amount_of_external_allocated_memory_ = 0;
+int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;
+
+// semispace_size_ should be a power of 2 and old_generation_size_ should be
+// a multiple of Page::kPageSize.
+#if defined(ANDROID)
+int Heap::semispace_size_  = 512*KB;
+int Heap::old_generation_size_ = 128*MB;
+int Heap::initial_semispace_size_ = 128*KB;
+size_t Heap::code_range_size_ = 0;
+#elif defined(V8_TARGET_ARCH_X64)
+int Heap::semispace_size_  = 16*MB;
+int Heap::old_generation_size_ = 1*GB;
+int Heap::initial_semispace_size_ = 1*MB;
+size_t Heap::code_range_size_ = 256*MB;
+#else
+int Heap::semispace_size_  = 8*MB;
+int Heap::old_generation_size_ = 512*MB;
+int Heap::initial_semispace_size_ = 512*KB;
+size_t Heap::code_range_size_ = 0;
+#endif
+
+GCCallback Heap::global_gc_prologue_callback_ = NULL;
+GCCallback Heap::global_gc_epilogue_callback_ = NULL;
+
+// Variables set based on semispace_size_ and old_generation_size_ in
+// ConfigureHeap.
+int Heap::young_generation_size_ = 0;  // Will be 2 * semispace_size_.
+int Heap::survived_since_last_expansion_ = 0;
+int Heap::external_allocation_limit_ = 0;
+
+Heap::HeapState Heap::gc_state_ = NOT_IN_GC;
+
+int Heap::mc_count_ = 0;
+int Heap::gc_count_ = 0;
+
+int Heap::always_allocate_scope_depth_ = 0;
+bool Heap::context_disposed_pending_ = false;
+
+#ifdef DEBUG
+bool Heap::allocation_allowed_ = true;
+
+int Heap::allocation_timeout_ = 0;
+bool Heap::disallow_allocation_failure_ = false;
+#endif  // DEBUG
+
+
+int Heap::Capacity() {
+  if (!HasBeenSetup()) return 0;
+
+  return new_space_.Capacity() +
+      old_pointer_space_->Capacity() +
+      old_data_space_->Capacity() +
+      code_space_->Capacity() +
+      map_space_->Capacity() +
+      cell_space_->Capacity();
+}
+
+
+int Heap::Available() {
+  if (!HasBeenSetup()) return 0;
+
+  return new_space_.Available() +
+      old_pointer_space_->Available() +
+      old_data_space_->Available() +
+      code_space_->Available() +
+      map_space_->Available() +
+      cell_space_->Available();
+}
+
+
+bool Heap::HasBeenSetup() {
+  return old_pointer_space_ != NULL &&
+         old_data_space_ != NULL &&
+         code_space_ != NULL &&
+         map_space_ != NULL &&
+         cell_space_ != NULL &&
+         lo_space_ != NULL;
+}
+
+
+GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
+  // Is global GC requested?
+  if (space != NEW_SPACE || FLAG_gc_global) {
+    Counters::gc_compactor_caused_by_request.Increment();
+    return MARK_COMPACTOR;
+  }
+
+  // Is enough data promoted to justify a global GC?
+  if (OldGenerationPromotionLimitReached()) {
+    Counters::gc_compactor_caused_by_promoted_data.Increment();
+    return MARK_COMPACTOR;
+  }
+
+  // Have allocation in OLD and LO failed?
+  if (old_gen_exhausted_) {
+    Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
+    return MARK_COMPACTOR;
+  }
+
+  // Is there enough space left in OLD to guarantee that a scavenge can
+  // succeed?
+  //
+  // Note that MemoryAllocator::MaxAvailable() undercounts the memory available
+  // for object promotion. It counts only the bytes that the memory
+  // allocator has not yet allocated from the OS and assigned to any space,
+  // and does not count available bytes already in the old space or code
+  // space.  Undercounting is safe---we may get an unrequested full GC when
+  // a scavenge would have succeeded.
+  if (MemoryAllocator::MaxAvailable() <= new_space_.Size()) {
+    Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
+    return MARK_COMPACTOR;
+  }
+
+  // Default
+  return SCAVENGER;
+}
+
+
+// TODO(1238405): Combine the infrastructure for --heap-stats and
+// --log-gc to avoid the complicated preprocessor and flag testing.
+#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+void Heap::ReportStatisticsBeforeGC() {
+  // Heap::ReportHeapStatistics will also log NewSpace statistics when
+  // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set.  The
+  // following logic is used to avoid double logging.
+#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
+  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
+  if (FLAG_heap_stats) {
+    ReportHeapStatistics("Before GC");
+  } else if (FLAG_log_gc) {
+    new_space_.ReportStatistics();
+  }
+  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
+#elif defined(DEBUG)
+  if (FLAG_heap_stats) {
+    new_space_.CollectStatistics();
+    ReportHeapStatistics("Before GC");
+    new_space_.ClearHistograms();
+  }
+#elif defined(ENABLE_LOGGING_AND_PROFILING)
+  if (FLAG_log_gc) {
+    new_space_.CollectStatistics();
+    new_space_.ReportStatistics();
+    new_space_.ClearHistograms();
+  }
+#endif
+}
+
+
+#if defined(ENABLE_LOGGING_AND_PROFILING)
+void Heap::PrintShortHeapStatistics() {
+  if (!FLAG_trace_gc_verbose) return;
+  PrintF("Memory allocator,   used: %8d, available: %8d\n",
+         MemoryAllocator::Size(), MemoryAllocator::Available());
+  PrintF("New space,          used: %8d, available: %8d\n",
+         Heap::new_space_.Size(), new_space_.Available());
+  PrintF("Old pointers,       used: %8d, available: %8d\n",
+         old_pointer_space_->Size(), old_pointer_space_->Available());
+  PrintF("Old data space,     used: %8d, available: %8d\n",
+         old_data_space_->Size(), old_data_space_->Available());
+  PrintF("Code space,         used: %8d, available: %8d\n",
+         code_space_->Size(), code_space_->Available());
+  PrintF("Map space,          used: %8d, available: %8d\n",
+         map_space_->Size(), map_space_->Available());
+  PrintF("Large object space, used: %8d, avaialble: %8d\n",
+         lo_space_->Size(), lo_space_->Available());
+}
+#endif
+
+
+// TODO(1238405): Combine the infrastructure for --heap-stats and
+// --log-gc to avoid the complicated preprocessor and flag testing.
+void Heap::ReportStatisticsAfterGC() {
+  // As in the reporting before GC, we use some complicated logic to
+  // ensure that NewSpace statistics are logged exactly once when
+  // --log-gc is turned on.
+#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
+  if (FLAG_heap_stats) {
+    new_space_.CollectStatistics();
+    ReportHeapStatistics("After GC");
+  } else if (FLAG_log_gc) {
+    new_space_.ReportStatistics();
+  }
+#elif defined(DEBUG)
+  if (FLAG_heap_stats) ReportHeapStatistics("After GC");
+#elif defined(ENABLE_LOGGING_AND_PROFILING)
+  if (FLAG_log_gc) new_space_.ReportStatistics();
+#endif
+}
+#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+
+
+void Heap::GarbageCollectionPrologue() {
+  TranscendentalCache::Clear();
+  gc_count_++;
+#ifdef DEBUG
+  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
+  allow_allocation(false);
+
+  if (FLAG_verify_heap) {
+    Verify();
+  }
+
+  if (FLAG_gc_verbose) Print();
+
+  if (FLAG_print_rset) {
+    // Not all spaces have remembered set bits that we care about.
+    old_pointer_space_->PrintRSet();
+    map_space_->PrintRSet();
+    lo_space_->PrintRSet();
+  }
+#endif
+
+#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+  ReportStatisticsBeforeGC();
+#endif
+}
+
+int Heap::SizeOfObjects() {
+  int total = 0;
+  AllSpaces spaces;
+  while (Space* space = spaces.next()) {
+    total += space->Size();
+  }
+  return total;
+}
+
+void Heap::GarbageCollectionEpilogue() {
+#ifdef DEBUG
+  allow_allocation(true);
+  ZapFromSpace();
+
+  if (FLAG_verify_heap) {
+    Verify();
+  }
+
+  if (FLAG_print_global_handles) GlobalHandles::Print();
+  if (FLAG_print_handles) PrintHandles();
+  if (FLAG_gc_verbose) Print();
+  if (FLAG_code_stats) ReportCodeStatistics("After GC");
+#endif
+
+  Counters::alive_after_last_gc.Set(SizeOfObjects());
+
+  Counters::symbol_table_capacity.Set(symbol_table()->Capacity());
+  Counters::number_of_symbols.Set(symbol_table()->NumberOfElements());
+#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+  ReportStatisticsAfterGC();
+#endif
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  Debug::AfterGarbageCollection();
+#endif
+}
+
+
+void Heap::CollectAllGarbage(bool force_compaction) {
+  // Since we are ignoring the return value, the exact choice of space does
+  // not matter, so long as we do not specify NEW_SPACE, which would not
+  // cause a full GC.
+  MarkCompactCollector::SetForceCompaction(force_compaction);
+  CollectGarbage(0, OLD_POINTER_SPACE);
+  MarkCompactCollector::SetForceCompaction(false);
+}
+
+
+void Heap::CollectAllGarbageIfContextDisposed() {
+  // If the garbage collector interface is exposed through the global
+  // gc() function, we avoid being clever about forcing GCs when
+  // contexts are disposed and leave it to the embedder to make
+  // informed decisions about when to force a collection.
+  if (!FLAG_expose_gc && context_disposed_pending_) {
+    HistogramTimerScope scope(&Counters::gc_context);
+    CollectAllGarbage(false);
+  }
+  context_disposed_pending_ = false;
+}
+
+
+void Heap::NotifyContextDisposed() {
+  context_disposed_pending_ = true;
+}
+
+
+bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
+  // The VM is in the GC state until exiting this function.
+  VMState state(GC);
+
+#ifdef DEBUG
+  // Reset the allocation timeout to the GC interval, but make sure to
+  // allow at least a few allocations after a collection. The reason
+  // for this is that we have a lot of allocation sequences and we
+  // assume that a garbage collection will allow the subsequent
+  // allocation attempts to go through.
+  allocation_timeout_ = Max(6, FLAG_gc_interval);
+#endif
+
+  { GCTracer tracer;
+    GarbageCollectionPrologue();
+    // The GC count was incremented in the prologue.  Tell the tracer about
+    // it.
+    tracer.set_gc_count(gc_count_);
+
+    GarbageCollector collector = SelectGarbageCollector(space);
+    // Tell the tracer which collector we've selected.
+    tracer.set_collector(collector);
+
+    HistogramTimer* rate = (collector == SCAVENGER)
+        ? &Counters::gc_scavenger
+        : &Counters::gc_compactor;
+    rate->Start();
+    PerformGarbageCollection(space, collector, &tracer);
+    rate->Stop();
+
+    GarbageCollectionEpilogue();
+  }
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (FLAG_log_gc) HeapProfiler::WriteSample();
+#endif
+
+  switch (space) {
+    case NEW_SPACE:
+      return new_space_.Available() >= requested_size;
+    case OLD_POINTER_SPACE:
+      return old_pointer_space_->Available() >= requested_size;
+    case OLD_DATA_SPACE:
+      return old_data_space_->Available() >= requested_size;
+    case CODE_SPACE:
+      return code_space_->Available() >= requested_size;
+    case MAP_SPACE:
+      return map_space_->Available() >= requested_size;
+    case CELL_SPACE:
+      return cell_space_->Available() >= requested_size;
+    case LO_SPACE:
+      return lo_space_->Available() >= requested_size;
+  }
+  return false;
+}
+
+
+void Heap::PerformScavenge() {
+  GCTracer tracer;
+  PerformGarbageCollection(NEW_SPACE, SCAVENGER, &tracer);
+}
+
+
+#ifdef DEBUG
+// Helper class for verifying the symbol table.
+class SymbolTableVerifier : public ObjectVisitor {
+ public:
+  SymbolTableVerifier() { }
+  void VisitPointers(Object** start, Object** end) {
+    // Visit all HeapObject pointers in [start, end).
+    for (Object** p = start; p < end; p++) {
+      if ((*p)->IsHeapObject()) {
+        // Check that the symbol is actually a symbol.
+        ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
+      }
+    }
+  }
+};
+#endif  // DEBUG
+
+
+static void VerifySymbolTable() {
+#ifdef DEBUG
+  SymbolTableVerifier verifier;
+  Heap::symbol_table()->IterateElements(&verifier);
+#endif  // DEBUG
+}
+
+
+void Heap::EnsureFromSpaceIsCommitted() {
+  if (new_space_.CommitFromSpaceIfNeeded()) return;
+
+  // Committing memory to from space failed.
+  // Try shrinking and try again.
+  Shrink();
+  if (new_space_.CommitFromSpaceIfNeeded()) return;
+
+  // Committing memory to from space failed again.
+  // Memory is exhausted and we will die.
+  V8::FatalProcessOutOfMemory("Committing semi space failed.");
+}
+
+
+void Heap::PerformGarbageCollection(AllocationSpace space,
+                                    GarbageCollector collector,
+                                    GCTracer* tracer) {
+  VerifySymbolTable();
+  if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
+    ASSERT(!allocation_allowed_);
+    global_gc_prologue_callback_();
+  }
+  EnsureFromSpaceIsCommitted();
+  if (collector == MARK_COMPACTOR) {
+    MarkCompact(tracer);
+
+    int old_gen_size = PromotedSpaceSize();
+    old_gen_promotion_limit_ =
+        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
+    old_gen_allocation_limit_ =
+        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
+    old_gen_exhausted_ = false;
+  }
+  Scavenge();
+
+  Counters::objs_since_last_young.Set(0);
+
+  PostGarbageCollectionProcessing();
+
+  if (collector == MARK_COMPACTOR) {
+    // Register the amount of external allocated memory.
+    amount_of_external_allocated_memory_at_last_global_gc_ =
+        amount_of_external_allocated_memory_;
+  }
+
+  if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
+    ASSERT(!allocation_allowed_);
+    global_gc_epilogue_callback_();
+  }
+  VerifySymbolTable();
+}
+
+
+void Heap::PostGarbageCollectionProcessing() {
+  // Process weak handles post gc.
+  {
+    DisableAssertNoAllocation allow_allocation;
+    GlobalHandles::PostGarbageCollectionProcessing();
+  }
+  // Update relocatables.
+  Relocatable::PostGarbageCollectionProcessing();
+}
+
+
+void Heap::MarkCompact(GCTracer* tracer) {
+  gc_state_ = MARK_COMPACT;
+  mc_count_++;
+  tracer->set_full_gc_count(mc_count_);
+  LOG(ResourceEvent("markcompact", "begin"));
+
+  MarkCompactCollector::Prepare(tracer);
+
+  bool is_compacting = MarkCompactCollector::IsCompacting();
+
+  MarkCompactPrologue(is_compacting);
+
+  MarkCompactCollector::CollectGarbage();
+
+  MarkCompactEpilogue(is_compacting);
+
+  LOG(ResourceEvent("markcompact", "end"));
+
+  gc_state_ = NOT_IN_GC;
+
+  Shrink();
+
+  Counters::objs_since_last_full.Set(0);
+  context_disposed_pending_ = false;
+}
+
+
+void Heap::MarkCompactPrologue(bool is_compacting) {
+  // At any old GC clear the keyed lookup cache to enable collection of unused
+  // maps.
+  KeyedLookupCache::Clear();
+  ContextSlotCache::Clear();
+  DescriptorLookupCache::Clear();
+
+  CompilationCache::MarkCompactPrologue();
+
+  Top::MarkCompactPrologue(is_compacting);
+  ThreadManager::MarkCompactPrologue(is_compacting);
+}
+
+
+void Heap::MarkCompactEpilogue(bool is_compacting) {
+  Top::MarkCompactEpilogue(is_compacting);
+  ThreadManager::MarkCompactEpilogue(is_compacting);
+}
+
+
+Object* Heap::FindCodeObject(Address a) {
+  Object* obj = code_space_->FindObject(a);
+  if (obj->IsFailure()) {
+    obj = lo_space_->FindObject(a);
+  }
+  ASSERT(!obj->IsFailure());
+  return obj;
+}
+
+
+// Helper class for copying HeapObjects
+class ScavengeVisitor: public ObjectVisitor {
+ public:
+
+  void VisitPointer(Object** p) { ScavengePointer(p); }
+
+  void VisitPointers(Object** start, Object** end) {
+    // Copy all HeapObject pointers in [start, end)
+    for (Object** p = start; p < end; p++) ScavengePointer(p);
+  }
+
+ private:
+  void ScavengePointer(Object** p) {
+    Object* object = *p;
+    if (!Heap::InNewSpace(object)) return;
+    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
+                         reinterpret_cast<HeapObject*>(object));
+  }
+};
+
+
+// A queue of pointers and maps of to-be-promoted objects during a
+// scavenge collection.
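+//
+// The queue occupies the top of to space and grows downwards towards the
+// allocation pointer: entries are written at *(--rear_) and read back at
+// *(--front_), so the queue is empty when front_ and rear_ meet. Each entry
+// is an (object, map) pair stored as two consecutive words.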
+class PromotionQueue {
+ public:
+  void Initialize(Address start_address) {
+    front_ = rear_ = reinterpret_cast<HeapObject**>(start_address);
+  }
+
+  bool is_empty() { return front_ <= rear_; }
+
+  void insert(HeapObject* object, Map* map) {
+    *(--rear_) = object;
+    *(--rear_) = map;
+    // Assert no overflow into live objects.
+    ASSERT(reinterpret_cast<Address>(rear_) >= Heap::new_space()->top());
+  }
+
+  void remove(HeapObject** object, Map** map) {
+    *object = *(--front_);
+    *map = Map::cast(*(--front_));
+    // Assert no underflow.
+    ASSERT(front_ >= rear_);
+  }
+
+ private:
+  // The front of the queue is higher in memory than the rear.
+  HeapObject** front_;
+  HeapObject** rear_;
+};
+
+
+// Shared state read by the scavenge collector and set by ScavengeObject.
+static PromotionQueue promotion_queue;
+
+
+#ifdef DEBUG
+// Visitor class to verify pointers in code or data space do not point into
+// new space.
+class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
+ public:
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** current = start; current < end; current++) {
+      if ((*current)->IsHeapObject()) {
+        ASSERT(!Heap::InNewSpace(HeapObject::cast(*current)));
+      }
+    }
+  }
+};
+
+
+static void VerifyNonPointerSpacePointers() {
+  // Verify that there are no pointers to new space in spaces where we
+  // do not expect them.
+  VerifyNonPointerSpacePointersVisitor v;
+  HeapObjectIterator code_it(Heap::code_space());
+  while (code_it.has_next()) {
+    HeapObject* object = code_it.next();
+    object->Iterate(&v);
+  }
+
+  HeapObjectIterator data_it(Heap::old_data_space());
+  while (data_it.has_next()) data_it.next()->Iterate(&v);
+}
+#endif
+
+
+void Heap::Scavenge() {
+#ifdef DEBUG
+  if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
+#endif
+
+  gc_state_ = SCAVENGE;
+
+  // Implements Cheney's copying algorithm
+  LOG(ResourceEvent("scavenge", "begin"));
+
+  // Clear descriptor cache.
+  DescriptorLookupCache::Clear();
+
+  // Used for updating survived_since_last_expansion_ at function end.
+  int survived_watermark = PromotedSpaceSize();
+
+  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
+      survived_since_last_expansion_ > new_space_.Capacity()) {
+    // Grow the size of new space if there is room to grow and enough
+    // data has survived scavenge since the last expansion.
+    new_space_.Grow();
+    survived_since_last_expansion_ = 0;
+  }
+
+  // Flip the semispaces.  After flipping, to space is empty, from space has
+  // live objects.
+  new_space_.Flip();
+  new_space_.ResetAllocationInfo();
+
+  // We need to sweep newly copied objects which can be either in the
+  // to space or promoted to the old generation.  For to-space
+  // objects, we treat the bottom of the to space as a queue.  Newly
+  // copied and unswept objects lie between a 'front' mark and the
+  // allocation pointer.
+  //
+  // Promoted objects can go into various old-generation spaces, and
+  // can be allocated internally in the spaces (from the free list).
+  // We treat the top of the to space as a queue of addresses of
+  // promoted objects.  The addresses of newly promoted and unswept
+  // objects lie between a 'front' mark and a 'rear' mark that is
+  // updated as a side effect of promoting an object.
+  //
+  // There is guaranteed to be enough room at the top of the to space
+  // for the addresses and maps of promoted objects: every object
+  // promoted frees up its size in bytes from the top of the new space,
+  // each queue entry is two words, and objects are at least two
+  // pointers in size.
+  Address new_space_front = new_space_.ToSpaceLow();
+  promotion_queue.Initialize(new_space_.ToSpaceHigh());
+
+  ScavengeVisitor scavenge_visitor;
+  // Copy roots.
+  IterateRoots(&scavenge_visitor);
+
+  // Copy objects reachable from weak pointers.
+  GlobalHandles::IterateWeakRoots(&scavenge_visitor);
+
+  // Copy objects reachable from the old generation.  By definition,
+  // there are no intergenerational pointers in code or data spaces.
+  IterateRSet(old_pointer_space_, &ScavengePointer);
+  IterateRSet(map_space_, &ScavengePointer);
+  lo_space_->IterateRSet(&ScavengePointer);
+
+  // Copy objects reachable from cells by scavenging cell values directly.
+  HeapObjectIterator cell_iterator(cell_space_);
+  while (cell_iterator.has_next()) {
+    HeapObject* cell = cell_iterator.next();
+    if (cell->IsJSGlobalPropertyCell()) {
+      Address value_address =
+          reinterpret_cast<Address>(cell) +
+          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
+      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
+    }
+  }
+
+  do {
+    ASSERT(new_space_front <= new_space_.top());
+
+    // The addresses new_space_front and new_space_.top() define a
+    // queue of unprocessed copied objects.  Process them until the
+    // queue is empty.
+    while (new_space_front < new_space_.top()) {
+      HeapObject* object = HeapObject::FromAddress(new_space_front);
+      object->Iterate(&scavenge_visitor);
+      new_space_front += object->Size();
+    }
+
+    // Promote and process all the to-be-promoted objects.
+    while (!promotion_queue.is_empty()) {
+      HeapObject* source;
+      Map* map;
+      promotion_queue.remove(&source, &map);
+      // Copy the from-space object to its new location (given by the
+      // forwarding address) and fix its map.
+      HeapObject* target = source->map_word().ToForwardingAddress();
+      CopyBlock(reinterpret_cast<Object**>(target->address()),
+                reinterpret_cast<Object**>(source->address()),
+                source->SizeFromMap(map));
+      target->set_map(map);
+
+#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+      // Update NewSpace stats if necessary.
+      RecordCopiedObject(target);
+#endif
+      // Visit the newly copied object for pointers to new space.
+      target->Iterate(&scavenge_visitor);
+      UpdateRSet(target);
+    }
+
+    // Take another spin if there are now unswept objects in new space
+    // (there are currently no more unswept promoted objects).
+  } while (new_space_front < new_space_.top());
+
+  // Set age mark.
+  new_space_.set_age_mark(new_space_.top());
+
+  // Update how much has survived scavenge.
+  survived_since_last_expansion_ +=
+      (PromotedSpaceSize() - survived_watermark) + new_space_.Size();
+
+  LOG(ResourceEvent("scavenge", "end"));
+
+  gc_state_ = NOT_IN_GC;
+}
+
+
+void Heap::ClearRSetRange(Address start, int size_in_bytes) {
+  uint32_t start_bit;
+  Address start_word_address =
+      Page::ComputeRSetBitPosition(start, 0, &start_bit);
+  uint32_t end_bit;
+  Address end_word_address =
+      Page::ComputeRSetBitPosition(start + size_in_bytes - kIntSize,
+                                   0,
+                                   &end_bit);
+
+  // We want to clear the bits in the starting word starting with the
+  // first bit, and in the ending word up to and including the last
+  // bit.  Build a pair of bitmasks to do that.
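+  // For example, if start_bit is the single-bit mask (1 << 3) and end_bit
+  // is (1 << 5), then start_bitmask is 0x00000007 (keeping bits 0..2) and
+  // end_bitmask is 0xffffffc0 (keeping bits 6..31), so bits 3..5 get cleared.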
+  uint32_t start_bitmask = start_bit - 1;
+  uint32_t end_bitmask = ~((end_bit << 1) - 1);
+
+  // If the start address and end address are the same, we mask that
+  // word once, otherwise mask the starting and ending word
+  // separately and all the ones in between.
+  if (start_word_address == end_word_address) {
+    Memory::uint32_at(start_word_address) &= (start_bitmask | end_bitmask);
+  } else {
+    Memory::uint32_at(start_word_address) &= start_bitmask;
+    Memory::uint32_at(end_word_address) &= end_bitmask;
+    start_word_address += kIntSize;
+    memset(start_word_address, 0, end_word_address - start_word_address);
+  }
+}
+
+
+class UpdateRSetVisitor: public ObjectVisitor {
+ public:
+
+  void VisitPointer(Object** p) {
+    UpdateRSet(p);
+  }
+
+  void VisitPointers(Object** start, Object** end) {
+    // Update a store into slots [start, end), used (a) to update remembered
+    // set when promoting a young object to old space or (b) to rebuild
+    // remembered sets after a mark-compact collection.
+    for (Object** p = start; p < end; p++) UpdateRSet(p);
+  }
+ private:
+
+  void UpdateRSet(Object** p) {
+    // The remembered set should not be set.  It should be clear for objects
+    // newly copied to old space, and it is cleared before rebuilding in the
+    // mark-compact collector.
+    ASSERT(!Page::IsRSetSet(reinterpret_cast<Address>(p), 0));
+    if (Heap::InNewSpace(*p)) {
+      Page::SetRSet(reinterpret_cast<Address>(p), 0);
+    }
+  }
+};
+
+
+int Heap::UpdateRSet(HeapObject* obj) {
+  ASSERT(!InNewSpace(obj));
+  // Special handling of fixed arrays to iterate the body based on the start
+  // address and offset.  Just iterating the pointers as in UpdateRSetVisitor
+  // will not work because Page::SetRSet needs to have the start of the
+  // object for large object pages.
+  if (obj->IsFixedArray()) {
+    FixedArray* array = FixedArray::cast(obj);
+    int length = array->length();
+    for (int i = 0; i < length; i++) {
+      int offset = FixedArray::kHeaderSize + i * kPointerSize;
+      ASSERT(!Page::IsRSetSet(obj->address(), offset));
+      if (Heap::InNewSpace(array->get(i))) {
+        Page::SetRSet(obj->address(), offset);
+      }
+    }
+  } else if (!obj->IsCode()) {
+    // Skip code objects; we know they do not contain inter-generational
+    // pointers.
+    UpdateRSetVisitor v;
+    obj->Iterate(&v);
+  }
+  return obj->Size();
+}
+
+
+void Heap::RebuildRSets() {
+  // By definition, we do not care about remembered set bits in code,
+  // data, or cell spaces.
+  map_space_->ClearRSet();
+  RebuildRSets(map_space_);
+
+  old_pointer_space_->ClearRSet();
+  RebuildRSets(old_pointer_space_);
+
+  Heap::lo_space_->ClearRSet();
+  RebuildRSets(lo_space_);
+}
+
+
+void Heap::RebuildRSets(PagedSpace* space) {
+  HeapObjectIterator it(space);
+  while (it.has_next()) Heap::UpdateRSet(it.next());
+}
+
+
+void Heap::RebuildRSets(LargeObjectSpace* space) {
+  LargeObjectIterator it(space);
+  while (it.has_next()) Heap::UpdateRSet(it.next());
+}
+
+
+#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+void Heap::RecordCopiedObject(HeapObject* obj) {
+  bool should_record = false;
+#ifdef DEBUG
+  should_record = FLAG_heap_stats;
+#endif
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  should_record = should_record || FLAG_log_gc;
+#endif
+  if (should_record) {
+    if (new_space_.Contains(obj)) {
+      new_space_.RecordAllocation(obj);
+    } else {
+      new_space_.RecordPromotion(obj);
+    }
+  }
+}
+#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+
+
+
+HeapObject* Heap::MigrateObject(HeapObject* source,
+                                HeapObject* target,
+                                int size) {
+  // Copy the content of source to target.
+  CopyBlock(reinterpret_cast<Object**>(target->address()),
+            reinterpret_cast<Object**>(source->address()),
+            size);
+
+  // Set the forwarding address.
+  source->set_map_word(MapWord::FromForwardingAddress(target));
+
+#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+  // Update NewSpace stats if necessary.
+  RecordCopiedObject(target);
+#endif
+
+  return target;
+}
+
+
+static inline bool IsShortcutCandidate(HeapObject* object, Map* map) {
+  STATIC_ASSERT(kNotStringTag != 0 && kSymbolTag != 0);
+  ASSERT(object->map() == map);
+  InstanceType type = map->instance_type();
+  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return false;
+  ASSERT(object->IsString() && !object->IsSymbol());
+  return ConsString::cast(object)->unchecked_second() == Heap::empty_string();
+}
+
+
+void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
+  ASSERT(InFromSpace(object));
+  MapWord first_word = object->map_word();
+  ASSERT(!first_word.IsForwardingAddress());
+
+  // Optimization: Bypass flattened ConsString objects.
+  if (IsShortcutCandidate(object, first_word.ToMap())) {
+    object = HeapObject::cast(ConsString::cast(object)->unchecked_first());
+    *p = object;
+    // After patching *p we have to repeat the checks that object is in the
+    // active semispace of the young generation and not already copied.
+    if (!InNewSpace(object)) return;
+    first_word = object->map_word();
+    if (first_word.IsForwardingAddress()) {
+      *p = first_word.ToForwardingAddress();
+      return;
+    }
+  }
+
+  int object_size = object->SizeFromMap(first_word.ToMap());
+  // We rely on live objects in new space to be at least two pointers,
+  // so we can store the from-space address and map pointer of promoted
+  // objects in the to space.
+  ASSERT(object_size >= 2 * kPointerSize);
+
+  // If the object should be promoted, we try to copy it to old space.
+  if (ShouldBePromoted(object->address(), object_size)) {
+    Object* result;
+    if (object_size > MaxObjectSizeInPagedSpace()) {
+      result = lo_space_->AllocateRawFixedArray(object_size);
+      if (!result->IsFailure()) {
+        // Save the from-space object pointer and its map pointer at the
+        // top of the to space to be swept and copied later.  Write the
+        // forwarding address over the map word of the from-space
+        // object.
+        HeapObject* target = HeapObject::cast(result);
+        promotion_queue.insert(object, first_word.ToMap());
+        object->set_map_word(MapWord::FromForwardingAddress(target));
+
+        // Give the space allocated for the result a proper map by
+        // treating it as a free list node (not linked into the free
+        // list).
+        FreeListNode* node = FreeListNode::FromAddress(target->address());
+        node->set_size(object_size);
+
+        *p = target;
+        return;
+      }
+    } else {
+      OldSpace* target_space = Heap::TargetSpace(object);
+      ASSERT(target_space == Heap::old_pointer_space_ ||
+             target_space == Heap::old_data_space_);
+      result = target_space->AllocateRaw(object_size);
+      if (!result->IsFailure()) {
+        HeapObject* target = HeapObject::cast(result);
+        if (target_space == Heap::old_pointer_space_) {
+          // Save the from-space object pointer and its map pointer at the
+          // top of the to space to be swept and copied later.  Write the
+          // forwarding address over the map word of the from-space
+          // object.
+          promotion_queue.insert(object, first_word.ToMap());
+          object->set_map_word(MapWord::FromForwardingAddress(target));
+
+          // Give the space allocated for the result a proper map by
+          // treating it as a free list node (not linked into the free
+          // list).
+          FreeListNode* node = FreeListNode::FromAddress(target->address());
+          node->set_size(object_size);
+
+          *p = target;
+        } else {
+          // Objects promoted to the data space can be copied immediately
+          // and not revisited---we will never sweep that space for
+          // pointers and the copied objects do not contain pointers to
+          // new space objects.
+          *p = MigrateObject(object, target, object_size);
+#ifdef DEBUG
+          VerifyNonPointerSpacePointersVisitor v;
+          (*p)->Iterate(&v);
+#endif
+        }
+        return;
+      }
+    }
+  }
+  // The object should remain in new space or the old space allocation failed.
+  Object* result = new_space_.AllocateRaw(object_size);
+  // Failed allocation at this point is utterly unexpected.
+  ASSERT(!result->IsFailure());
+  *p = MigrateObject(object, HeapObject::cast(result), object_size);
+}
+
+
+void Heap::ScavengePointer(HeapObject** p) {
+  ScavengeObject(p, *p);
+}
+
+
+Object* Heap::AllocatePartialMap(InstanceType instance_type,
+                                 int instance_size) {
+  Object* result = AllocateRawMap();
+  if (result->IsFailure()) return result;
+
+  // Map::cast cannot be used due to uninitialized map field.
+  reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
+  reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
+  reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
+  reinterpret_cast<Map*>(result)->set_inobject_properties(0);
+  reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
+  return result;
+}
+
+
+Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
+  Object* result = AllocateRawMap();
+  if (result->IsFailure()) return result;
+
+  Map* map = reinterpret_cast<Map*>(result);
+  map->set_map(meta_map());
+  map->set_instance_type(instance_type);
+  map->set_prototype(null_value());
+  map->set_constructor(null_value());
+  map->set_instance_size(instance_size);
+  map->set_inobject_properties(0);
+  map->set_pre_allocated_property_fields(0);
+  map->set_instance_descriptors(empty_descriptor_array());
+  map->set_code_cache(empty_fixed_array());
+  map->set_unused_property_fields(0);
+  map->set_bit_field(0);
+  map->set_bit_field2(0);
+  return map;
+}
+
+
+const Heap::StringTypeTable Heap::string_type_table[] = {
+#define STRING_TYPE_ELEMENT(type, size, name, camel_name)                      \
+  {type, size, k##camel_name##MapRootIndex},
+  STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
+#undef STRING_TYPE_ELEMENT
+};
+
+
+const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
+#define CONSTANT_SYMBOL_ELEMENT(name, contents)                                \
+  {contents, k##name##RootIndex},
+  SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
+#undef CONSTANT_SYMBOL_ELEMENT
+};
+
+
+const Heap::StructTable Heap::struct_table[] = {
+#define STRUCT_TABLE_ELEMENT(NAME, Name, name)                                 \
+  { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
+  STRUCT_LIST(STRUCT_TABLE_ELEMENT)
+#undef STRUCT_TABLE_ELEMENT
+};
+
+
+bool Heap::CreateInitialMaps() {
+  Object* obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
+  if (obj->IsFailure()) return false;
+  // Map::cast cannot be used due to uninitialized map field.
+  Map* new_meta_map = reinterpret_cast<Map*>(obj);
+  set_meta_map(new_meta_map);
+  new_meta_map->set_map(new_meta_map);
+
+  obj = AllocatePartialMap(FIXED_ARRAY_TYPE, FixedArray::kHeaderSize);
+  if (obj->IsFailure()) return false;
+  set_fixed_array_map(Map::cast(obj));
+
+  obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
+  if (obj->IsFailure()) return false;
+  set_oddball_map(Map::cast(obj));
+
+  // Allocate the empty array
+  obj = AllocateEmptyFixedArray();
+  if (obj->IsFailure()) return false;
+  set_empty_fixed_array(FixedArray::cast(obj));
+
+  obj = Allocate(oddball_map(), OLD_DATA_SPACE);
+  if (obj->IsFailure()) return false;
+  set_null_value(obj);
+
+  // Allocate the empty descriptor array.
+  obj = AllocateEmptyFixedArray();
+  if (obj->IsFailure()) return false;
+  set_empty_descriptor_array(DescriptorArray::cast(obj));
+
+  // Fix the instance_descriptors for the existing maps.
+  meta_map()->set_instance_descriptors(empty_descriptor_array());
+  meta_map()->set_code_cache(empty_fixed_array());
+
+  fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
+  fixed_array_map()->set_code_cache(empty_fixed_array());
+
+  oddball_map()->set_instance_descriptors(empty_descriptor_array());
+  oddball_map()->set_code_cache(empty_fixed_array());
+
+  // Fix prototype object for existing maps.
+  meta_map()->set_prototype(null_value());
+  meta_map()->set_constructor(null_value());
+
+  fixed_array_map()->set_prototype(null_value());
+  fixed_array_map()->set_constructor(null_value());
+
+  oddball_map()->set_prototype(null_value());
+  oddball_map()->set_constructor(null_value());
+
+  obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
+  if (obj->IsFailure()) return false;
+  set_heap_number_map(Map::cast(obj));
+
+  obj = AllocateMap(PROXY_TYPE, Proxy::kSize);
+  if (obj->IsFailure()) return false;
+  set_proxy_map(Map::cast(obj));
+
+  for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
+    const StringTypeTable& entry = string_type_table[i];
+    obj = AllocateMap(entry.type, entry.size);
+    if (obj->IsFailure()) return false;
+    roots_[entry.index] = Map::cast(obj);
+  }
+
+  obj = AllocateMap(SHORT_STRING_TYPE, SeqTwoByteString::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_undetectable_short_string_map(Map::cast(obj));
+  Map::cast(obj)->set_is_undetectable();
+
+  obj = AllocateMap(MEDIUM_STRING_TYPE, SeqTwoByteString::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_undetectable_medium_string_map(Map::cast(obj));
+  Map::cast(obj)->set_is_undetectable();
+
+  obj = AllocateMap(LONG_STRING_TYPE, SeqTwoByteString::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_undetectable_long_string_map(Map::cast(obj));
+  Map::cast(obj)->set_is_undetectable();
+
+  obj = AllocateMap(SHORT_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_undetectable_short_ascii_string_map(Map::cast(obj));
+  Map::cast(obj)->set_is_undetectable();
+
+  obj = AllocateMap(MEDIUM_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_undetectable_medium_ascii_string_map(Map::cast(obj));
+  Map::cast(obj)->set_is_undetectable();
+
+  obj = AllocateMap(LONG_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_undetectable_long_ascii_string_map(Map::cast(obj));
+  Map::cast(obj)->set_is_undetectable();
+
+  obj = AllocateMap(BYTE_ARRAY_TYPE, ByteArray::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_byte_array_map(Map::cast(obj));
+
+  obj = AllocateMap(PIXEL_ARRAY_TYPE, PixelArray::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_pixel_array_map(Map::cast(obj));
+
+  obj = AllocateMap(CODE_TYPE, Code::kHeaderSize);
+  if (obj->IsFailure()) return false;
+  set_code_map(Map::cast(obj));
+
+  obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
+                    JSGlobalPropertyCell::kSize);
+  if (obj->IsFailure()) return false;
+  set_global_property_cell_map(Map::cast(obj));
+
+  obj = AllocateMap(FILLER_TYPE, kPointerSize);
+  if (obj->IsFailure()) return false;
+  set_one_pointer_filler_map(Map::cast(obj));
+
+  obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
+  if (obj->IsFailure()) return false;
+  set_two_pointer_filler_map(Map::cast(obj));
+
+  for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
+    const StructTable& entry = struct_table[i];
+    obj = AllocateMap(entry.type, entry.size);
+    if (obj->IsFailure()) return false;
+    roots_[entry.index] = Map::cast(obj);
+  }
+
+  obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
+  if (obj->IsFailure()) return false;
+  set_hash_table_map(Map::cast(obj));
+
+  obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
+  if (obj->IsFailure()) return false;
+  set_context_map(Map::cast(obj));
+
+  obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
+  if (obj->IsFailure()) return false;
+  set_catch_context_map(Map::cast(obj));
+
+  obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
+  if (obj->IsFailure()) return false;
+  set_global_context_map(Map::cast(obj));
+
+  obj = AllocateMap(JS_FUNCTION_TYPE, JSFunction::kSize);
+  if (obj->IsFailure()) return false;
+  set_boilerplate_function_map(Map::cast(obj));
+
+  obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kSize);
+  if (obj->IsFailure()) return false;
+  set_shared_function_info_map(Map::cast(obj));
+
+  ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
+  return true;
+}
+
+
+Object* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
+  // Statically ensure that it is safe to allocate heap numbers in paged
+  // spaces.
+  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
+  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+
+  // New space can't cope with forced allocation.
+  if (always_allocate()) space = OLD_DATA_SPACE;
+
+  Object* result = AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
+  if (result->IsFailure()) return result;
+
+  HeapObject::cast(result)->set_map(heap_number_map());
+  HeapNumber::cast(result)->set_value(value);
+  return result;
+}
+
+
+Object* Heap::AllocateHeapNumber(double value) {
+  // Use the general version if we're forced to always allocate.
+  if (always_allocate()) return AllocateHeapNumber(value, TENURED);
+
+  // This version of AllocateHeapNumber is optimized for
+  // allocation in new space.
+  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
+  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
+  Object* result = new_space_.AllocateRaw(HeapNumber::kSize);
+  if (result->IsFailure()) return result;
+  HeapObject::cast(result)->set_map(heap_number_map());
+  HeapNumber::cast(result)->set_value(value);
+  return result;
+}
+
+
+Object* Heap::AllocateJSGlobalPropertyCell(Object* value) {
+  Object* result = AllocateRawCell();
+  if (result->IsFailure()) return result;
+  HeapObject::cast(result)->set_map(global_property_cell_map());
+  JSGlobalPropertyCell::cast(result)->set_value(value);
+  return result;
+}
+
+
+Object* Heap::CreateOddball(Map* map,
+                            const char* to_string,
+                            Object* to_number) {
+  Object* result = Allocate(map, OLD_DATA_SPACE);
+  if (result->IsFailure()) return result;
+  return Oddball::cast(result)->Initialize(to_string, to_number);
+}
+
+
+bool Heap::CreateApiObjects() {
+  Object* obj;
+
+  obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+  if (obj->IsFailure()) return false;
+  set_neander_map(Map::cast(obj));
+
+  obj = Heap::AllocateJSObjectFromMap(neander_map());
+  if (obj->IsFailure()) return false;
+  Object* elements = AllocateFixedArray(2);
+  if (elements->IsFailure()) return false;
+  FixedArray::cast(elements)->set(0, Smi::FromInt(0));
+  JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
+  set_message_listeners(JSObject::cast(obj));
+
+  return true;
+}
+
+
+void Heap::CreateCEntryStub() {
+  CEntryStub stub(1);
+  set_c_entry_code(*stub.GetCode());
+}
+
+
+#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
+void Heap::CreateRegExpCEntryStub() {
+  RegExpCEntryStub stub;
+  set_re_c_entry_code(*stub.GetCode());
+}
+#endif
+
+
+void Heap::CreateCEntryDebugBreakStub() {
+  CEntryDebugBreakStub stub;
+  set_c_entry_debug_break_code(*stub.GetCode());
+}
+
+
+void Heap::CreateJSEntryStub() {
+  JSEntryStub stub;
+  set_js_entry_code(*stub.GetCode());
+}
+
+
+void Heap::CreateJSConstructEntryStub() {
+  JSConstructEntryStub stub;
+  set_js_construct_entry_code(*stub.GetCode());
+}
+
+
+void Heap::CreateFixedStubs() {
+  // Here we create roots for fixed stubs. They are needed at GC
+  // for cooking and uncooking (check out frames.cc).
+  // This eliminates the need for doing a dictionary lookup in the
+  // stub cache for these stubs.
+  HandleScope scope;
+  // gcc-4.4 has a problem generating correct code for the following snippet:
+  // {  CEntryStub stub;
+  //    c_entry_code_ = *stub.GetCode();
+  // }
+  // {  CEntryDebugBreakStub stub;
+  //    c_entry_debug_break_code_ = *stub.GetCode();
+  // }
+  // To work around the problem, make separate functions without inlining.
+  Heap::CreateCEntryStub();
+  Heap::CreateCEntryDebugBreakStub();
+  Heap::CreateJSEntryStub();
+  Heap::CreateJSConstructEntryStub();
+#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
+  Heap::CreateRegExpCEntryStub();
+#endif
+}
+
+
+bool Heap::CreateInitialObjects() {
+  Object* obj;
+
+  // The -0 value must be set before NumberFromDouble works.
+  obj = AllocateHeapNumber(-0.0, TENURED);
+  if (obj->IsFailure()) return false;
+  set_minus_zero_value(obj);
+  ASSERT(signbit(minus_zero_value()->Number()) != 0);
+
+  obj = AllocateHeapNumber(OS::nan_value(), TENURED);
+  if (obj->IsFailure()) return false;
+  set_nan_value(obj);
+
+  obj = Allocate(oddball_map(), OLD_DATA_SPACE);
+  if (obj->IsFailure()) return false;
+  set_undefined_value(obj);
+  ASSERT(!InNewSpace(undefined_value()));
+
+  // Allocate initial symbol table.
+  obj = SymbolTable::Allocate(kInitialSymbolTableSize);
+  if (obj->IsFailure()) return false;
+  // Don't use set_symbol_table() due to asserts.
+  roots_[kSymbolTableRootIndex] = obj;
+
+  // Assign the print strings for oddballs after creating the symbol table.
+  Object* symbol = LookupAsciiSymbol("undefined");
+  if (symbol->IsFailure()) return false;
+  Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
+  Oddball::cast(undefined_value())->set_to_number(nan_value());
+
+  // Assign the print strings for oddballs after creating the symbol table.
+  symbol = LookupAsciiSymbol("null");
+  if (symbol->IsFailure()) return false;
+  Oddball::cast(null_value())->set_to_string(String::cast(symbol));
+  Oddball::cast(null_value())->set_to_number(Smi::FromInt(0));
+
+  // Initialize the null_value, which was allocated in CreateInitialMaps.
+  obj = Oddball::cast(null_value())->Initialize("null", Smi::FromInt(0));
+  if (obj->IsFailure()) return false;
+
+  obj = CreateOddball(oddball_map(), "true", Smi::FromInt(1));
+  if (obj->IsFailure()) return false;
+  set_true_value(obj);
+
+  obj = CreateOddball(oddball_map(), "false", Smi::FromInt(0));
+  if (obj->IsFailure()) return false;
+  set_false_value(obj);
+
+  obj = CreateOddball(oddball_map(), "hole", Smi::FromInt(-1));
+  if (obj->IsFailure()) return false;
+  set_the_hole_value(obj);
+
+  obj = CreateOddball(
+      oddball_map(), "no_interceptor_result_sentinel", Smi::FromInt(-2));
+  if (obj->IsFailure()) return false;
+  set_no_interceptor_result_sentinel(obj);
+
+  obj = CreateOddball(oddball_map(), "termination_exception", Smi::FromInt(-3));
+  if (obj->IsFailure()) return false;
+  set_termination_exception(obj);
+
+  // Allocate the empty string.
+  obj = AllocateRawAsciiString(0, TENURED);
+  if (obj->IsFailure()) return false;
+  set_empty_string(String::cast(obj));
+
+  for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
+    obj = LookupAsciiSymbol(constant_symbol_table[i].contents);
+    if (obj->IsFailure()) return false;
+    roots_[constant_symbol_table[i].index] = String::cast(obj);
+  }
+
+  // Allocate the hidden symbol which is used to identify the hidden properties
+  // in JSObjects. The hash code has a special value so that it will not match
+  // the empty string when searching for the property. It cannot be part of the
+  // loop above because it needs to be allocated manually with the special
+  // hash code in place. The hash code for the hidden_symbol is zero to ensure
+  // that it will always be at the first entry in property descriptors.
+  obj = AllocateSymbol(CStrVector(""), 0, String::kHashComputedMask);
+  if (obj->IsFailure()) return false;
+  hidden_symbol_ = String::cast(obj);
+
+  // Allocate the proxy for __proto__.
+  obj = AllocateProxy((Address) &Accessors::ObjectPrototype);
+  if (obj->IsFailure()) return false;
+  set_prototype_accessors(Proxy::cast(obj));
+
+  // Allocate the code_stubs dictionary. The initial size is set to avoid
+  // expanding the dictionary during bootstrapping.
+  obj = NumberDictionary::Allocate(128);
+  if (obj->IsFailure()) return false;
+  set_code_stubs(NumberDictionary::cast(obj));
+
+  // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
+  // is set to avoid expanding the dictionary during bootstrapping.
+  obj = NumberDictionary::Allocate(64);
+  if (obj->IsFailure()) return false;
+  set_non_monomorphic_cache(NumberDictionary::cast(obj));
+
+  CreateFixedStubs();
+
+  // Allocate the number->string conversion cache
+  obj = AllocateFixedArray(kNumberStringCacheSize * 2);
+  if (obj->IsFailure()) return false;
+  set_number_string_cache(FixedArray::cast(obj));
+
+  // Allocate cache for single character strings.
+  obj = AllocateFixedArray(String::kMaxAsciiCharCode+1);
+  if (obj->IsFailure()) return false;
+  set_single_character_string_cache(FixedArray::cast(obj));
+
+  // Allocate cache for external strings pointing to native source code.
+  obj = AllocateFixedArray(Natives::GetBuiltinsCount());
+  if (obj->IsFailure()) return false;
+  set_natives_source_cache(FixedArray::cast(obj));
+
+  // Handling of script id generation is in Factory::NewScript.
+  set_last_script_id(undefined_value());
+
+  // Initialize keyed lookup cache.
+  KeyedLookupCache::Clear();
+
+  // Initialize context slot cache.
+  ContextSlotCache::Clear();
+
+  // Initialize descriptor cache.
+  DescriptorLookupCache::Clear();
+
+  // Initialize compilation cache.
+  CompilationCache::Clear();
+
+  return true;
+}
+
+
+static inline int double_get_hash(double d) {
+  DoubleRepresentation rep(d);
+  return ((static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32)) &
+          (Heap::kNumberStringCacheSize - 1));
+}
+
+
+static inline int smi_get_hash(Smi* smi) {
+  return (smi->value() & (Heap::kNumberStringCacheSize - 1));
+}
+
+
+Object* Heap::GetNumberStringCache(Object* number) {
+  int hash;
+  if (number->IsSmi()) {
+    hash = smi_get_hash(Smi::cast(number));
+  } else {
+    hash = double_get_hash(number->Number());
+  }
+  Object* key = number_string_cache()->get(hash * 2);
+  if (key == number) {
+    return String::cast(number_string_cache()->get(hash * 2 + 1));
+  } else if (key->IsHeapNumber() &&
+             number->IsHeapNumber() &&
+             key->Number() == number->Number()) {
+    return String::cast(number_string_cache()->get(hash * 2 + 1));
+  }
+  return undefined_value();
+}
+
+
+void Heap::SetNumberStringCache(Object* number, String* string) {
+  int hash;
+  if (number->IsSmi()) {
+    hash = smi_get_hash(Smi::cast(number));
+    number_string_cache()->set(hash * 2, number, SKIP_WRITE_BARRIER);
+  } else {
+    hash = double_get_hash(number->Number());
+    number_string_cache()->set(hash * 2, number);
+  }
+  number_string_cache()->set(hash * 2 + 1, string);
+}
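+
+
+// Illustrative note (not part of the original change): the number-to-string
+// cache above is a direct-mapped table stored in a single FixedArray of
+// key/value pairs:
+//
+//   index 2*hash     : the number used as key (a Smi or a HeapNumber)
+//   index 2*hash + 1 : the cached String
+//
+// A colliding number simply overwrites its slot, which is why
+// GetNumberStringCache() re-checks the key before trusting the cached value.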
+
+
+Object* Heap::SmiOrNumberFromDouble(double value,
+                                    bool new_object,
+                                    PretenureFlag pretenure) {
+  // We need to distinguish the minus zero value and this cannot be
+  // done after conversion to int. Doing this by comparing bit
+  // patterns is faster than using fpclassify() et al.
+  static const DoubleRepresentation plus_zero(0.0);
+  static const DoubleRepresentation minus_zero(-0.0);
+  static const DoubleRepresentation nan(OS::nan_value());
+  ASSERT(minus_zero_value() != NULL);
+  ASSERT(sizeof(plus_zero.value) == sizeof(plus_zero.bits));
+
+  DoubleRepresentation rep(value);
+  if (rep.bits == plus_zero.bits) return Smi::FromInt(0);  // not uncommon
+  if (rep.bits == minus_zero.bits) {
+    return new_object ? AllocateHeapNumber(-0.0, pretenure)
+                      : minus_zero_value();
+  }
+  if (rep.bits == nan.bits) {
+    return new_object
+        ? AllocateHeapNumber(OS::nan_value(), pretenure)
+        : nan_value();
+  }
+
+  // Try to represent the value as a tagged small integer.
+  int int_value = FastD2I(value);
+  if (value == FastI2D(int_value) && Smi::IsValid(int_value)) {
+    return Smi::FromInt(int_value);
+  }
+
+  // Materialize the value in the heap.
+  return AllocateHeapNumber(value, pretenure);
+}
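+
+
+// Illustrative sketch (not part of the original change): why the code above
+// compares bit patterns instead of using operator==. For IEEE doubles,
+// (-0.0 == 0.0) is true and NaN compares unequal even to itself, so only the
+// raw 64-bit representation distinguishes these special values reliably.
+static inline bool SameDoubleBits(double a, double b) {
+  // DoubleRepresentation (used above) exposes the raw bit pattern of a double.
+  DoubleRepresentation rep_a(a);
+  DoubleRepresentation rep_b(b);
+  // SameDoubleBits(0.0, -0.0) is false, and SameDoubleBits(nan, nan) is true
+  // for the same NaN payload, unlike the corresponding == comparisons.
+  return rep_a.bits == rep_b.bits;
+}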
+
+
+Object* Heap::NumberToString(Object* number) {
+  Object* cached = GetNumberStringCache(number);
+  if (cached != undefined_value()) {
+    return cached;
+  }
+
+  char arr[100];
+  Vector<char> buffer(arr, ARRAY_SIZE(arr));
+  const char* str;
+  if (number->IsSmi()) {
+    int num = Smi::cast(number)->value();
+    str = IntToCString(num, buffer);
+  } else {
+    double num = HeapNumber::cast(number)->value();
+    str = DoubleToCString(num, buffer);
+  }
+  Object* result = AllocateStringFromAscii(CStrVector(str));
+
+  if (!result->IsFailure()) {
+    SetNumberStringCache(number, String::cast(result));
+  }
+  return result;
+}
+
+
+Object* Heap::NewNumberFromDouble(double value, PretenureFlag pretenure) {
+  return SmiOrNumberFromDouble(value,
+                               true /* number object must be new */,
+                               pretenure);
+}
+
+
+Object* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
+  return SmiOrNumberFromDouble(value,
+                               false /* use preallocated NaN, -0.0 */,
+                               pretenure);
+}
+
+
+Object* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
+  // Statically ensure that it is safe to allocate proxies in paged spaces.
+  STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
+  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+  Object* result = Allocate(proxy_map(), space);
+  if (result->IsFailure()) return result;
+
+  Proxy::cast(result)->set_proxy(proxy);
+  return result;
+}
+
+
+Object* Heap::AllocateSharedFunctionInfo(Object* name) {
+  Object* result = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
+  if (result->IsFailure()) return result;
+
+  SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
+  share->set_name(name);
+  Code* illegal = Builtins::builtin(Builtins::Illegal);
+  share->set_code(illegal);
+  Code* construct_stub = Builtins::builtin(Builtins::JSConstructStubGeneric);
+  share->set_construct_stub(construct_stub);
+  share->set_expected_nof_properties(0);
+  share->set_length(0);
+  share->set_formal_parameter_count(0);
+  share->set_instance_class_name(Object_symbol());
+  share->set_function_data(undefined_value());
+  share->set_script(undefined_value());
+  share->set_start_position_and_type(0);
+  share->set_debug_info(undefined_value());
+  share->set_inferred_name(empty_string());
+  share->set_compiler_hints(0);
+  share->set_this_property_assignments_count(0);
+  share->set_this_property_assignments(undefined_value());
+  return result;
+}
+
+
+Object* Heap::AllocateConsString(String* first, String* second) {
+  int first_length = first->length();
+  if (first_length == 0) return second;
+
+  int second_length = second->length();
+  if (second_length == 0) return first;
+
+  int length = first_length + second_length;
+  bool is_ascii = first->IsAsciiRepresentation()
+      && second->IsAsciiRepresentation();
+
+  // Make sure that an out of memory exception is thrown if the length
+  // of the new cons string is too large to fit in a Smi.
+  if (length > Smi::kMaxValue || length < 0) {
+    Top::context()->mark_out_of_memory();
+    return Failure::OutOfMemoryException();
+  }
+
+  // If the resulting string is small make a flat string.
+  if (length < String::kMinNonFlatLength) {
+    ASSERT(first->IsFlat());
+    ASSERT(second->IsFlat());
+    if (is_ascii) {
+      Object* result = AllocateRawAsciiString(length);
+      if (result->IsFailure()) return result;
+      // Copy the characters into the new object.
+      char* dest = SeqAsciiString::cast(result)->GetChars();
+      // Copy first part.
+      char* src = SeqAsciiString::cast(first)->GetChars();
+      for (int i = 0; i < first_length; i++) *dest++ = src[i];
+      // Copy second part.
+      src = SeqAsciiString::cast(second)->GetChars();
+      for (int i = 0; i < second_length; i++) *dest++ = src[i];
+      return result;
+    } else {
+      Object* result = AllocateRawTwoByteString(length);
+      if (result->IsFailure()) return result;
+      // Copy the characters into the new object.
+      uc16* dest = SeqTwoByteString::cast(result)->GetChars();
+      String::WriteToFlat(first, dest, 0, first_length);
+      String::WriteToFlat(second, dest + first_length, 0, second_length);
+      return result;
+    }
+  }
+
+  Map* map;
+  if (length <= String::kMaxShortStringSize) {
+    map = is_ascii ? short_cons_ascii_string_map()
+      : short_cons_string_map();
+  } else if (length <= String::kMaxMediumStringSize) {
+    map = is_ascii ? medium_cons_ascii_string_map()
+      : medium_cons_string_map();
+  } else {
+    map = is_ascii ? long_cons_ascii_string_map()
+      : long_cons_string_map();
+  }
+
+  Object* result = Allocate(map, NEW_SPACE);
+  if (result->IsFailure()) return result;
+  ASSERT(InNewSpace(result));
+  ConsString* cons_string = ConsString::cast(result);
+  cons_string->set_first(first, SKIP_WRITE_BARRIER);
+  cons_string->set_second(second, SKIP_WRITE_BARRIER);
+  cons_string->set_length(length);
+  return result;
+}
+
+
+Object* Heap::AllocateSlicedString(String* buffer,
+                                   int start,
+                                   int end) {
+  int length = end - start;
+
+  // If the resulting string is small make a sub string.
+  if (length <= String::kMinNonFlatLength) {
+    return Heap::AllocateSubString(buffer, start, end);
+  }
+
+  Map* map;
+  if (length <= String::kMaxShortStringSize) {
+    map = buffer->IsAsciiRepresentation() ?
+      short_sliced_ascii_string_map() :
+      short_sliced_string_map();
+  } else if (length <= String::kMaxMediumStringSize) {
+    map = buffer->IsAsciiRepresentation() ?
+      medium_sliced_ascii_string_map() :
+      medium_sliced_string_map();
+  } else {
+    map = buffer->IsAsciiRepresentation() ?
+      long_sliced_ascii_string_map() :
+      long_sliced_string_map();
+  }
+
+  Object* result = Allocate(map, NEW_SPACE);
+  if (result->IsFailure()) return result;
+
+  SlicedString* sliced_string = SlicedString::cast(result);
+  sliced_string->set_buffer(buffer);
+  sliced_string->set_start(start);
+  sliced_string->set_length(length);
+
+  return result;
+}
+
+
+Object* Heap::AllocateSubString(String* buffer,
+                                int start,
+                                int end) {
+  int length = end - start;
+
+  if (length == 1) {
+    return Heap::LookupSingleCharacterStringFromCode(
+        buffer->Get(start));
+  }
+
+  // Make an attempt to flatten the buffer to reduce access time.
+  if (!buffer->IsFlat()) {
+    buffer->TryFlatten();
+  }
+
+  Object* result = buffer->IsAsciiRepresentation()
+      ? AllocateRawAsciiString(length)
+      : AllocateRawTwoByteString(length);
+  if (result->IsFailure()) return result;
+
+  // Copy the characters into the new object.
+  String* string_result = String::cast(result);
+  StringHasher hasher(length);
+  int i = 0;
+  for (; i < length && hasher.is_array_index(); i++) {
+    uc32 c = buffer->Get(start + i);
+    hasher.AddCharacter(c);
+    string_result->Set(i, c);
+  }
+  for (; i < length; i++) {
+    uc32 c = buffer->Get(start + i);
+    hasher.AddCharacterNoIndex(c);
+    string_result->Set(i, c);
+  }
+  string_result->set_length_field(hasher.GetHashField());
+  return result;
+}
+
+
+Object* Heap::AllocateExternalStringFromAscii(
+    ExternalAsciiString::Resource* resource) {
+  Map* map;
+  int length = resource->length();
+  if (length <= String::kMaxShortStringSize) {
+    map = short_external_ascii_string_map();
+  } else if (length <= String::kMaxMediumStringSize) {
+    map = medium_external_ascii_string_map();
+  } else {
+    map = long_external_ascii_string_map();
+  }
+
+  Object* result = Allocate(map, NEW_SPACE);
+  if (result->IsFailure()) return result;
+
+  ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
+  external_string->set_length(length);
+  external_string->set_resource(resource);
+
+  return result;
+}
+
+
+Object* Heap::AllocateExternalStringFromTwoByte(
+    ExternalTwoByteString::Resource* resource) {
+  int length = resource->length();
+
+  Map* map = ExternalTwoByteString::StringMap(length);
+  Object* result = Allocate(map, NEW_SPACE);
+  if (result->IsFailure()) return result;
+
+  ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
+  external_string->set_length(length);
+  external_string->set_resource(resource);
+
+  return result;
+}
+
+
+Object* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
+  if (code <= String::kMaxAsciiCharCode) {
+    Object* value = Heap::single_character_string_cache()->get(code);
+    if (value != Heap::undefined_value()) return value;
+
+    char buffer[1];
+    buffer[0] = static_cast<char>(code);
+    Object* result = LookupSymbol(Vector<const char>(buffer, 1));
+
+    if (result->IsFailure()) return result;
+    Heap::single_character_string_cache()->set(code, result);
+    return result;
+  }
+
+  Object* result = Heap::AllocateRawTwoByteString(1);
+  if (result->IsFailure()) return result;
+  String* answer = String::cast(result);
+  answer->Set(0, code);
+  return answer;
+}
+
+
+Object* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
+  if (pretenure == NOT_TENURED) {
+    return AllocateByteArray(length);
+  }
+  int size = ByteArray::SizeFor(length);
+  AllocationSpace space =
+      size > MaxObjectSizeInPagedSpace() ? LO_SPACE : OLD_DATA_SPACE;
+
+  Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
+
+  if (result->IsFailure()) return result;
+
+  reinterpret_cast<Array*>(result)->set_map(byte_array_map());
+  reinterpret_cast<Array*>(result)->set_length(length);
+  return result;
+}
+
+
+Object* Heap::AllocateByteArray(int length) {
+  int size = ByteArray::SizeFor(length);
+  AllocationSpace space =
+      size > MaxObjectSizeInPagedSpace() ? LO_SPACE : NEW_SPACE;
+
+  // New space can't cope with forced allocation.
+  if (always_allocate()) space = LO_SPACE;
+
+  Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
+
+  if (result->IsFailure()) return result;
+
+  reinterpret_cast<Array*>(result)->set_map(byte_array_map());
+  reinterpret_cast<Array*>(result)->set_length(length);
+  return result;
+}
+
+
+void Heap::CreateFillerObjectAt(Address addr, int size) {
+  if (size == 0) return;
+  HeapObject* filler = HeapObject::FromAddress(addr);
+  if (size == kPointerSize) {
+    filler->set_map(Heap::one_pointer_filler_map());
+  } else {
+    filler->set_map(Heap::byte_array_map());
+    ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
+  }
+}
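+
+
+// Illustrative note (not part of the original change): fillers keep the heap
+// iterable across unused gaps. A gap of exactly one word is tagged with the
+// one-pointer filler map, presumably because a ByteArray header (map plus
+// length) would not fit in a single word; larger gaps become byte arrays
+// whose length is chosen so that the array's total size equals the gap.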
+
+
+Object* Heap::AllocatePixelArray(int length,
+                                 uint8_t* external_pointer,
+                                 PretenureFlag pretenure) {
+  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+
+  // New space can't cope with forced allocation.
+  if (always_allocate()) space = OLD_DATA_SPACE;
+
+  Object* result = AllocateRaw(PixelArray::kAlignedSize, space, OLD_DATA_SPACE);
+
+  if (result->IsFailure()) return result;
+
+  reinterpret_cast<PixelArray*>(result)->set_map(pixel_array_map());
+  reinterpret_cast<PixelArray*>(result)->set_length(length);
+  reinterpret_cast<PixelArray*>(result)->set_external_pointer(external_pointer);
+
+  return result;
+}
+
+
+Object* Heap::CreateCode(const CodeDesc& desc,
+                         ZoneScopeInfo* sinfo,
+                         Code::Flags flags,
+                         Handle<Object> self_reference) {
+  // Compute size
+  int body_size = RoundUp(desc.instr_size + desc.reloc_size, kObjectAlignment);
+  int sinfo_size = 0;
+  if (sinfo != NULL) sinfo_size = sinfo->Serialize(NULL);
+  int obj_size = Code::SizeFor(body_size, sinfo_size);
+  ASSERT(IsAligned(obj_size, Code::kCodeAlignment));
+  Object* result;
+  if (obj_size > MaxObjectSizeInPagedSpace()) {
+    result = lo_space_->AllocateRawCode(obj_size);
+  } else {
+    result = code_space_->AllocateRaw(obj_size);
+  }
+
+  if (result->IsFailure()) return result;
+
+  // Initialize the object
+  HeapObject::cast(result)->set_map(code_map());
+  Code* code = Code::cast(result);
+  ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
+  code->set_instruction_size(desc.instr_size);
+  code->set_relocation_size(desc.reloc_size);
+  code->set_sinfo_size(sinfo_size);
+  code->set_flags(flags);
+  // Allow self-references to the created code object by patching the handle
+  // to point to the newly allocated Code object.
+  if (!self_reference.is_null()) {
+    *(self_reference.location()) = code;
+  }
+  // Migrate generated code.
+  // The generated code can contain Object** values (typically from handles)
+  // that are dereferenced during the copy to point directly to the actual heap
+  // objects. These pointers can include references to the code object itself,
+  // through the self_reference parameter.
+  code->CopyFrom(desc);
+  if (sinfo != NULL) sinfo->Serialize(code);  // write scope info
+
+#ifdef DEBUG
+  code->Verify();
+#endif
+  return code;
+}
+
+
+Object* Heap::CopyCode(Code* code) {
+  // Allocate an object the same size as the code object.
+  int obj_size = code->Size();
+  Object* result;
+  if (obj_size > MaxObjectSizeInPagedSpace()) {
+    result = lo_space_->AllocateRawCode(obj_size);
+  } else {
+    result = code_space_->AllocateRaw(obj_size);
+  }
+
+  if (result->IsFailure()) return result;
+
+  // Copy code object.
+  Address old_addr = code->address();
+  Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
+  CopyBlock(reinterpret_cast<Object**>(new_addr),
+            reinterpret_cast<Object**>(old_addr),
+            obj_size);
+  // Relocate the copy.
+  Code* new_code = Code::cast(result);
+  ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
+  new_code->Relocate(new_addr - old_addr);
+  return new_code;
+}
+
+
+Object* Heap::Allocate(Map* map, AllocationSpace space) {
+  ASSERT(gc_state_ == NOT_IN_GC);
+  ASSERT(map->instance_type() != MAP_TYPE);
+  Object* result = AllocateRaw(map->instance_size(),
+                               space,
+                               TargetSpaceId(map->instance_type()));
+  if (result->IsFailure()) return result;
+  HeapObject::cast(result)->set_map(map);
+  return result;
+}
+
+
+Object* Heap::InitializeFunction(JSFunction* function,
+                                 SharedFunctionInfo* shared,
+                                 Object* prototype) {
+  ASSERT(!prototype->IsMap());
+  function->initialize_properties();
+  function->initialize_elements();
+  function->set_shared(shared);
+  function->set_prototype_or_initial_map(prototype);
+  function->set_context(undefined_value());
+  function->set_literals(empty_fixed_array(), SKIP_WRITE_BARRIER);
+  return function;
+}
+
+
+Object* Heap::AllocateFunctionPrototype(JSFunction* function) {
+  // Allocate the prototype.  Make sure to use the object function
+  // from the function's context, since the function can be from a
+  // different context.
+  JSFunction* object_function =
+      function->context()->global_context()->object_function();
+  Object* prototype = AllocateJSObject(object_function);
+  if (prototype->IsFailure()) return prototype;
+  // When creating the prototype for the function we must set its
+  // constructor to the function.
+  Object* result =
+      JSObject::cast(prototype)->SetProperty(constructor_symbol(),
+                                             function,
+                                             DONT_ENUM);
+  if (result->IsFailure()) return result;
+  return prototype;
+}
+
+
+Object* Heap::AllocateFunction(Map* function_map,
+                               SharedFunctionInfo* shared,
+                               Object* prototype) {
+  Object* result = Allocate(function_map, OLD_POINTER_SPACE);
+  if (result->IsFailure()) return result;
+  return InitializeFunction(JSFunction::cast(result), shared, prototype);
+}
+
+
+Object* Heap::AllocateArgumentsObject(Object* callee, int length) {
+  // To get fast allocation and map sharing for arguments objects we
+  // allocate them based on an arguments boilerplate.
+
+  // This calls Copy directly rather than using Heap::AllocateRaw so we
+  // duplicate the check here.
+  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
+
+  JSObject* boilerplate =
+      Top::context()->global_context()->arguments_boilerplate();
+
+  // Make the clone.
+  Map* map = boilerplate->map();
+  int object_size = map->instance_size();
+  Object* result = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
+  if (result->IsFailure()) return result;
+
+  // Copy the content. The arguments boilerplate doesn't have any
+  // fields that point to new space so it's safe to skip the write
+  // barrier here.
+  CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(result)->address()),
+            reinterpret_cast<Object**>(boilerplate->address()),
+            object_size);
+
+  // Set the two properties.
+  JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index,
+                                                callee);
+  JSObject::cast(result)->InObjectPropertyAtPut(arguments_length_index,
+                                                Smi::FromInt(length),
+                                                SKIP_WRITE_BARRIER);
+
+  // Check the state of the object
+  ASSERT(JSObject::cast(result)->HasFastProperties());
+  ASSERT(JSObject::cast(result)->HasFastElements());
+
+  return result;
+}
+
+
+Object* Heap::AllocateInitialMap(JSFunction* fun) {
+  ASSERT(!fun->has_initial_map());
+
+  // First create a new map with the size and number of in-object properties
+  // suggested by the function.
+  int instance_size = fun->shared()->CalculateInstanceSize();
+  int in_object_properties = fun->shared()->CalculateInObjectProperties();
+  Object* map_obj = Heap::AllocateMap(JS_OBJECT_TYPE, instance_size);
+  if (map_obj->IsFailure()) return map_obj;
+
+  // Fetch or allocate prototype.
+  Object* prototype;
+  if (fun->has_instance_prototype()) {
+    prototype = fun->instance_prototype();
+  } else {
+    prototype = AllocateFunctionPrototype(fun);
+    if (prototype->IsFailure()) return prototype;
+  }
+  Map* map = Map::cast(map_obj);
+  map->set_inobject_properties(in_object_properties);
+  map->set_unused_property_fields(in_object_properties);
+  map->set_prototype(prototype);
+
+  // If the function has only simple this-property assignments, add field
+  // descriptors for these to the initial map, as the object cannot be
+  // constructed without having these properties.
+  ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
+  if (fun->shared()->has_only_this_property_assignments() &&
+      fun->shared()->this_property_assignments_count() > 0) {
+    int count = fun->shared()->this_property_assignments_count();
+    if (count > in_object_properties) {
+      count = in_object_properties;
+    }
+    Object* descriptors_obj = DescriptorArray::Allocate(count);
+    if (descriptors_obj->IsFailure()) return descriptors_obj;
+    DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
+    for (int i = 0; i < count; i++) {
+      String* name = fun->shared()->GetThisPropertyAssignmentName(i);
+      ASSERT(name->IsSymbol());
+      FieldDescriptor field(name, i, NONE);
+      descriptors->Set(i, &field);
+    }
+    descriptors->Sort();
+    map->set_instance_descriptors(descriptors);
+    map->set_pre_allocated_property_fields(count);
+    map->set_unused_property_fields(in_object_properties - count);
+  }
+  return map;
+}
+
+
+void Heap::InitializeJSObjectFromMap(JSObject* obj,
+                                     FixedArray* properties,
+                                     Map* map) {
+  obj->set_properties(properties);
+  obj->initialize_elements();
+  // TODO(1240798): Initialize the object's body using valid initial values
+  // according to the object's initial map.  For example, if the map's
+  // instance type is JS_ARRAY_TYPE, the length field should be initialized
+  // to a number (e.g., Smi::FromInt(0)) and the elements initialized to a
+  // fixed array (e.g., Heap::empty_fixed_array()).  Currently, the object
+  // verification code has to cope with (temporarily) invalid objects.  See,
+  // for example, JSArray::JSArrayVerify.
+  obj->InitializeBody(map->instance_size());
+}
+
+
+Object* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
+  // JSFunctions should be allocated using AllocateFunction to be
+  // properly initialized.
+  ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
+
+  // Both types of global objects should be allocated using
+  // AllocateGlobalObject to be properly initialized.
+  ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
+  ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
+
+  // Allocate the backing storage for the properties.
+  int prop_size =
+      map->pre_allocated_property_fields() +
+      map->unused_property_fields() -
+      map->inobject_properties();
+  ASSERT(prop_size >= 0);
+  Object* properties = AllocateFixedArray(prop_size, pretenure);
+  if (properties->IsFailure()) return properties;
+
+  // Allocate the JSObject.
+  AllocationSpace space =
+      (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
+  if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
+  Object* obj = Allocate(map, space);
+  if (obj->IsFailure()) return obj;
+
+  // Initialize the JSObject.
+  InitializeJSObjectFromMap(JSObject::cast(obj),
+                            FixedArray::cast(properties),
+                            map);
+  return obj;
+}
+
+
+Object* Heap::AllocateJSObject(JSFunction* constructor,
+                               PretenureFlag pretenure) {
+  // Allocate the initial map if absent.
+  if (!constructor->has_initial_map()) {
+    Object* initial_map = AllocateInitialMap(constructor);
+    if (initial_map->IsFailure()) return initial_map;
+    constructor->set_initial_map(Map::cast(initial_map));
+    Map::cast(initial_map)->set_constructor(constructor);
+  }
+  // Allocate the object based on the constructor's initial map.
+  Object* result =
+      AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
+  // Make sure result is NOT a global object if valid.
+  ASSERT(result->IsFailure() || !result->IsGlobalObject());
+  return result;
+}
+
+
+Object* Heap::AllocateGlobalObject(JSFunction* constructor) {
+  ASSERT(constructor->has_initial_map());
+  Map* map = constructor->initial_map();
+
+  // Make sure no field properties are described in the initial map.
+  // This guarantees us that normalizing the properties does not
+  // require us to change property values to JSGlobalPropertyCells.
+  ASSERT(map->NextFreePropertyIndex() == 0);
+
+  // Make sure we don't have a ton of pre-allocated slots in the
+  // global objects. They will be unused once we normalize the object.
+  ASSERT(map->unused_property_fields() == 0);
+  ASSERT(map->inobject_properties() == 0);
+
+  // Initial size of the backing store to avoid resizing the storage during
+  // bootstrapping. The size differs between the JS global object and the
+  // builtins object.
+  int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
+
+  // Allocate a dictionary object for backing storage.
+  Object* obj =
+      StringDictionary::Allocate(
+          map->NumberOfDescribedProperties() * 2 + initial_size);
+  if (obj->IsFailure()) return obj;
+  StringDictionary* dictionary = StringDictionary::cast(obj);
+
+  // The global object might be created from an object template with accessors.
+  // Fill these accessors into the dictionary.
+  DescriptorArray* descs = map->instance_descriptors();
+  for (int i = 0; i < descs->number_of_descriptors(); i++) {
+    PropertyDetails details = descs->GetDetails(i);
+    ASSERT(details.type() == CALLBACKS);  // Only accessors are expected.
+    PropertyDetails d =
+        PropertyDetails(details.attributes(), CALLBACKS, details.index());
+    Object* value = descs->GetCallbacksObject(i);
+    value = Heap::AllocateJSGlobalPropertyCell(value);
+    if (value->IsFailure()) return value;
+
+    Object* result = dictionary->Add(descs->GetKey(i), value, d);
+    if (result->IsFailure()) return result;
+    dictionary = StringDictionary::cast(result);
+  }
+
+  // Allocate the global object and initialize it with the backing store.
+  obj = Allocate(map, OLD_POINTER_SPACE);
+  if (obj->IsFailure()) return obj;
+  JSObject* global = JSObject::cast(obj);
+  InitializeJSObjectFromMap(global, dictionary, map);
+
+  // Create a new map for the global object.
+  obj = map->CopyDropDescriptors();
+  if (obj->IsFailure()) return obj;
+  Map* new_map = Map::cast(obj);
+
+  // Set up the global object as a normalized object.
+  global->set_map(new_map);
+  global->map()->set_instance_descriptors(Heap::empty_descriptor_array());
+  global->set_properties(dictionary);
+
+  // Make sure result is a global object with properties in dictionary.
+  ASSERT(global->IsGlobalObject());
+  ASSERT(!global->HasFastProperties());
+  return global;
+}
+
+
+Object* Heap::CopyJSObject(JSObject* source) {
+  // Never used to copy functions.  If functions need to be copied we
+  // have to be careful to clear the literals array.
+  ASSERT(!source->IsJSFunction());
+
+  // Make the clone.
+  Map* map = source->map();
+  int object_size = map->instance_size();
+  Object* clone;
+
+  // If we're forced to always allocate, we use the general allocation
+  // functions which may leave us with an object in old space.
+  if (always_allocate()) {
+    clone = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
+    if (clone->IsFailure()) return clone;
+    Address clone_address = HeapObject::cast(clone)->address();
+    CopyBlock(reinterpret_cast<Object**>(clone_address),
+              reinterpret_cast<Object**>(source->address()),
+              object_size);
+    // Update write barrier for all fields that lie beyond the header.
+    for (int offset = JSObject::kHeaderSize;
+         offset < object_size;
+         offset += kPointerSize) {
+      RecordWrite(clone_address, offset);
+    }
+  } else {
+    clone = new_space_.AllocateRaw(object_size);
+    if (clone->IsFailure()) return clone;
+    ASSERT(Heap::InNewSpace(clone));
+    // Since we know the clone is allocated in new space, we can copy
+    // the contents without worrying about updating the write barrier.
+    CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(clone)->address()),
+              reinterpret_cast<Object**>(source->address()),
+              object_size);
+  }
+
+  FixedArray* elements = FixedArray::cast(source->elements());
+  FixedArray* properties = FixedArray::cast(source->properties());
+  // Update elements if necessary.
+  if (elements->length() > 0) {
+    Object* elem = CopyFixedArray(elements);
+    if (elem->IsFailure()) return elem;
+    JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
+  }
+  // Update properties if necessary.
+  if (properties->length() > 0) {
+    Object* prop = CopyFixedArray(properties);
+    if (prop->IsFailure()) return prop;
+    JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
+  }
+  // Return the new clone.
+  return clone;
+}
+
+
+Object* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
+                                        JSGlobalProxy* object) {
+  // Allocate initial map if absent.
+  if (!constructor->has_initial_map()) {
+    Object* initial_map = AllocateInitialMap(constructor);
+    if (initial_map->IsFailure()) return initial_map;
+    constructor->set_initial_map(Map::cast(initial_map));
+    Map::cast(initial_map)->set_constructor(constructor);
+  }
+
+  Map* map = constructor->initial_map();
+
+  // Check that the already allocated object has the same size as
+  // objects allocated using the constructor.
+  ASSERT(map->instance_size() == object->map()->instance_size());
+
+  // Allocate the backing storage for the properties.
+  int prop_size = map->unused_property_fields() - map->inobject_properties();
+  Object* properties = AllocateFixedArray(prop_size, TENURED);
+  if (properties->IsFailure()) return properties;
+
+  // Reset the map for the object.
+  object->set_map(constructor->initial_map());
+
+  // Reinitialize the object from the constructor map.
+  InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
+  return object;
+}
+
+
+Object* Heap::AllocateStringFromAscii(Vector<const char> string,
+                                      PretenureFlag pretenure) {
+  Object* result = AllocateRawAsciiString(string.length(), pretenure);
+  if (result->IsFailure()) return result;
+
+  // Copy the characters into the new object.
+  SeqAsciiString* string_result = SeqAsciiString::cast(result);
+  for (int i = 0; i < string.length(); i++) {
+    string_result->SeqAsciiStringSet(i, string[i]);
+  }
+  return result;
+}
+
+
+Object* Heap::AllocateStringFromUtf8(Vector<const char> string,
+                                     PretenureFlag pretenure) {
+  // Count the number of characters in the UTF-8 string and check if
+  // it is an ASCII string.
+  Access<Scanner::Utf8Decoder> decoder(Scanner::utf8_decoder());
+  decoder->Reset(string.start(), string.length());
+  int chars = 0;
+  bool is_ascii = true;
+  while (decoder->has_more()) {
+    uc32 r = decoder->GetNext();
+    if (r > String::kMaxAsciiCharCode) is_ascii = false;
+    chars++;
+  }
+
+  // If the string is ASCII, we do not need to convert the characters
+  // since UTF-8 is backwards compatible with ASCII.
+  if (is_ascii) return AllocateStringFromAscii(string, pretenure);
+
+  Object* result = AllocateRawTwoByteString(chars, pretenure);
+  if (result->IsFailure()) return result;
+
+  // Convert and copy the characters into the new object.
+  String* string_result = String::cast(result);
+  decoder->Reset(string.start(), string.length());
+  for (int i = 0; i < chars; i++) {
+    uc32 r = decoder->GetNext();
+    string_result->Set(i, r);
+  }
+  return result;
+}
+
+
+Object* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
+                                        PretenureFlag pretenure) {
+  // Check if the string is an ASCII string.
+  int i = 0;
+  while (i < string.length() && string[i] <= String::kMaxAsciiCharCode) i++;
+
+  Object* result;
+  if (i == string.length()) {  // It's an ASCII string.
+    result = AllocateRawAsciiString(string.length(), pretenure);
+  } else {  // It's not an ASCII string.
+    result = AllocateRawTwoByteString(string.length(), pretenure);
+  }
+  if (result->IsFailure()) return result;
+
+  // Copy the characters into the new object, which may be either ASCII or
+  // UTF-16.
+  String* string_result = String::cast(result);
+  for (int i = 0; i < string.length(); i++) {
+    string_result->Set(i, string[i]);
+  }
+  return result;
+}
+
+
+Map* Heap::SymbolMapForString(String* string) {
+  // If the string is in new space it cannot be used as a symbol.
+  if (InNewSpace(string)) return NULL;
+
+  // Find the corresponding symbol map for strings.
+  Map* map = string->map();
+
+  if (map == short_ascii_string_map()) return short_ascii_symbol_map();
+  if (map == medium_ascii_string_map()) return medium_ascii_symbol_map();
+  if (map == long_ascii_string_map()) return long_ascii_symbol_map();
+
+  if (map == short_string_map()) return short_symbol_map();
+  if (map == medium_string_map()) return medium_symbol_map();
+  if (map == long_string_map()) return long_symbol_map();
+
+  if (map == short_cons_string_map()) return short_cons_symbol_map();
+  if (map == medium_cons_string_map()) return medium_cons_symbol_map();
+  if (map == long_cons_string_map()) return long_cons_symbol_map();
+
+  if (map == short_cons_ascii_string_map()) {
+    return short_cons_ascii_symbol_map();
+  }
+  if (map == medium_cons_ascii_string_map()) {
+    return medium_cons_ascii_symbol_map();
+  }
+  if (map == long_cons_ascii_string_map()) {
+    return long_cons_ascii_symbol_map();
+  }
+
+  if (map == short_sliced_string_map()) return short_sliced_symbol_map();
+  if (map == medium_sliced_string_map()) return medium_sliced_symbol_map();
+  if (map == long_sliced_string_map()) return long_sliced_symbol_map();
+
+  if (map == short_sliced_ascii_string_map()) {
+    return short_sliced_ascii_symbol_map();
+  }
+  if (map == medium_sliced_ascii_string_map()) {
+    return medium_sliced_ascii_symbol_map();
+  }
+  if (map == long_sliced_ascii_string_map()) {
+    return long_sliced_ascii_symbol_map();
+  }
+
+  if (map == short_external_string_map()) {
+    return short_external_symbol_map();
+  }
+  if (map == medium_external_string_map()) {
+    return medium_external_symbol_map();
+  }
+  if (map == long_external_string_map()) {
+    return long_external_symbol_map();
+  }
+
+  if (map == short_external_ascii_string_map()) {
+    return short_external_ascii_symbol_map();
+  }
+  if (map == medium_external_ascii_string_map()) {
+    return medium_external_ascii_symbol_map();
+  }
+  if (map == long_external_ascii_string_map()) {
+    return long_external_ascii_symbol_map();
+  }
+
+  // No match found.
+  return NULL;
+}
+
+
+Object* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
+                                     int chars,
+                                     uint32_t length_field) {
+  // Ensure that chars matches the number of characters in the buffer.
+  ASSERT(static_cast<unsigned>(chars) == buffer->Length());
+  // Determine whether the string is ascii.
+  bool is_ascii = true;
+  while (buffer->has_more() && is_ascii) {
+    if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) is_ascii = false;
+  }
+  buffer->Rewind();
+
+  // Compute map and object size.
+  int size;
+  Map* map;
+
+  if (is_ascii) {
+    if (chars <= String::kMaxShortStringSize) {
+      map = short_ascii_symbol_map();
+    } else if (chars <= String::kMaxMediumStringSize) {
+      map = medium_ascii_symbol_map();
+    } else {
+      map = long_ascii_symbol_map();
+    }
+    size = SeqAsciiString::SizeFor(chars);
+  } else {
+    if (chars <= String::kMaxShortStringSize) {
+      map = short_symbol_map();
+    } else if (chars <= String::kMaxMediumStringSize) {
+      map = medium_symbol_map();
+    } else {
+      map = long_symbol_map();
+    }
+    size = SeqTwoByteString::SizeFor(chars);
+  }
+
+  // Allocate string.
+  AllocationSpace space =
+      (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_DATA_SPACE;
+  Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
+  if (result->IsFailure()) return result;
+
+  reinterpret_cast<HeapObject*>(result)->set_map(map);
+  // The hash value contains the length of the string.
+  String* answer = String::cast(result);
+  answer->set_length_field(length_field);
+
+  ASSERT_EQ(size, answer->Size());
+
+  // Fill in the characters.
+  for (int i = 0; i < chars; i++) {
+    answer->Set(i, buffer->GetNext());
+  }
+  return answer;
+}
+
+
+Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
+  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+
+  // New space can't cope with forced allocation.
+  if (always_allocate()) space = OLD_DATA_SPACE;
+
+  int size = SeqAsciiString::SizeFor(length);
+
+  Object* result = Failure::OutOfMemoryException();
+  if (space == NEW_SPACE) {
+    result = size <= kMaxObjectSizeInNewSpace
+        ? new_space_.AllocateRaw(size)
+        : lo_space_->AllocateRaw(size);
+  } else {
+    if (size > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
+    result = AllocateRaw(size, space, OLD_DATA_SPACE);
+  }
+  if (result->IsFailure()) return result;
+
+  // Determine the map based on the string's length.
+  Map* map;
+  if (length <= String::kMaxShortStringSize) {
+    map = short_ascii_string_map();
+  } else if (length <= String::kMaxMediumStringSize) {
+    map = medium_ascii_string_map();
+  } else {
+    map = long_ascii_string_map();
+  }
+
+  // Partially initialize the object.
+  HeapObject::cast(result)->set_map(map);
+  String::cast(result)->set_length(length);
+  ASSERT_EQ(size, HeapObject::cast(result)->Size());
+  return result;
+}
+
+
+Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) {
+  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+
+  // New space can't cope with forced allocation.
+  if (always_allocate()) space = OLD_DATA_SPACE;
+
+  int size = SeqTwoByteString::SizeFor(length);
+
+  Object* result = Failure::OutOfMemoryException();
+  if (space == NEW_SPACE) {
+    result = size <= kMaxObjectSizeInNewSpace
+        ? new_space_.AllocateRaw(size)
+        : lo_space_->AllocateRaw(size);
+  } else {
+    if (size > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
+    result = AllocateRaw(size, space, OLD_DATA_SPACE);
+  }
+  if (result->IsFailure()) return result;
+
+  // Determine the map based on the string's length.
+  Map* map;
+  if (length <= String::kMaxShortStringSize) {
+    map = short_string_map();
+  } else if (length <= String::kMaxMediumStringSize) {
+    map = medium_string_map();
+  } else {
+    map = long_string_map();
+  }
+
+  // Partially initialize the object.
+  HeapObject::cast(result)->set_map(map);
+  String::cast(result)->set_length(length);
+  ASSERT_EQ(size, HeapObject::cast(result)->Size());
+  return result;
+}
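+
+
+// Illustrative sketch (not part of the original change): the space-selection
+// policy shared by the two raw string allocators above, written as a
+// hypothetical helper with the size thresholds passed in explicitly. Forced
+// allocation and TENURED both avoid new space; objects too large for a
+// regular page fall back to the large object space.
+static inline AllocationSpace PickRawStringSpace(int size,
+                                                 PretenureFlag pretenure,
+                                                 bool forced_allocation,
+                                                 int max_new_space_size,
+                                                 int max_paged_size) {
+  if (pretenure == NOT_TENURED && !forced_allocation) {
+    // Small objects go to new space; oversized ones straight to LO_SPACE.
+    return size <= max_new_space_size ? NEW_SPACE : LO_SPACE;
+  }
+  // Tenured or forced allocations go to old data space unless too large.
+  return size > max_paged_size ? LO_SPACE : OLD_DATA_SPACE;
+}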
+
+
+Object* Heap::AllocateEmptyFixedArray() {
+  int size = FixedArray::SizeFor(0);
+  Object* result = AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
+  if (result->IsFailure()) return result;
+  // Initialize the object.
+  reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
+  reinterpret_cast<Array*>(result)->set_length(0);
+  return result;
+}
+
+
+Object* Heap::AllocateRawFixedArray(int length) {
+  // Use the general function if we're forced to always allocate.
+  if (always_allocate()) return AllocateFixedArray(length, TENURED);
+  // Allocate the raw data for a fixed array.
+  int size = FixedArray::SizeFor(length);
+  return size <= kMaxObjectSizeInNewSpace
+      ? new_space_.AllocateRaw(size)
+      : lo_space_->AllocateRawFixedArray(size);
+}
+
+
+Object* Heap::CopyFixedArray(FixedArray* src) {
+  int len = src->length();
+  Object* obj = AllocateRawFixedArray(len);
+  if (obj->IsFailure()) return obj;
+  if (Heap::InNewSpace(obj)) {
+    HeapObject* dst = HeapObject::cast(obj);
+    CopyBlock(reinterpret_cast<Object**>(dst->address()),
+              reinterpret_cast<Object**>(src->address()),
+              FixedArray::SizeFor(len));
+    return obj;
+  }
+  HeapObject::cast(obj)->set_map(src->map());
+  FixedArray* result = FixedArray::cast(obj);
+  result->set_length(len);
+  // Copy the content
+  WriteBarrierMode mode = result->GetWriteBarrierMode();
+  for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
+  return result;
+}
+
+
+Object* Heap::AllocateFixedArray(int length) {
+  ASSERT(length >= 0);
+  if (length == 0) return empty_fixed_array();
+  Object* result = AllocateRawFixedArray(length);
+  if (!result->IsFailure()) {
+    // Initialize header.
+    reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
+    FixedArray* array = FixedArray::cast(result);
+    array->set_length(length);
+    Object* value = undefined_value();
+    // Initialize body.
+    for (int index = 0; index < length; index++) {
+      array->set(index, value, SKIP_WRITE_BARRIER);
+    }
+  }
+  return result;
+}
+
+
+Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
+  ASSERT(empty_fixed_array()->IsFixedArray());
+  if (length == 0) return empty_fixed_array();
+
+  // New space can't cope with forced allocation.
+  if (always_allocate()) pretenure = TENURED;
+
+  int size = FixedArray::SizeFor(length);
+  Object* result = Failure::OutOfMemoryException();
+  if (pretenure != TENURED) {
+    result = size <= kMaxObjectSizeInNewSpace
+        ? new_space_.AllocateRaw(size)
+        : lo_space_->AllocateRawFixedArray(size);
+  }
+  if (result->IsFailure()) {
+    if (size > MaxObjectSizeInPagedSpace()) {
+      result = lo_space_->AllocateRawFixedArray(size);
+    } else {
+      AllocationSpace space =
+          (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
+      result = AllocateRaw(size, space, OLD_POINTER_SPACE);
+    }
+    if (result->IsFailure()) return result;
+  }
+  // Initialize the object.
+  reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
+  FixedArray* array = FixedArray::cast(result);
+  array->set_length(length);
+  Object* value = undefined_value();
+  for (int index = 0; index < length; index++) {
+    array->set(index, value, SKIP_WRITE_BARRIER);
+  }
+  return array;
+}
+
+
+Object* Heap::AllocateFixedArrayWithHoles(int length) {
+  if (length == 0) return empty_fixed_array();
+  Object* result = AllocateRawFixedArray(length);
+  if (!result->IsFailure()) {
+    // Initialize header.
+    reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
+    FixedArray* array = FixedArray::cast(result);
+    array->set_length(length);
+    // Initialize body.
+    Object* value = the_hole_value();
+    for (int index = 0; index < length; index++)  {
+      array->set(index, value, SKIP_WRITE_BARRIER);
+    }
+  }
+  return result;
+}
+
+
+Object* Heap::AllocateHashTable(int length) {
+  Object* result = Heap::AllocateFixedArray(length);
+  if (result->IsFailure()) return result;
+  reinterpret_cast<Array*>(result)->set_map(hash_table_map());
+  ASSERT(result->IsHashTable());
+  return result;
+}
+
+
+Object* Heap::AllocateGlobalContext() {
+  Object* result = Heap::AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
+  if (result->IsFailure()) return result;
+  Context* context = reinterpret_cast<Context*>(result);
+  context->set_map(global_context_map());
+  ASSERT(context->IsGlobalContext());
+  ASSERT(result->IsContext());
+  return result;
+}
+
+
+Object* Heap::AllocateFunctionContext(int length, JSFunction* function) {
+  ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
+  Object* result = Heap::AllocateFixedArray(length);
+  if (result->IsFailure()) return result;
+  Context* context = reinterpret_cast<Context*>(result);
+  context->set_map(context_map());
+  context->set_closure(function);
+  context->set_fcontext(context);
+  context->set_previous(NULL);
+  context->set_extension(NULL);
+  context->set_global(function->context()->global());
+  ASSERT(!context->IsGlobalContext());
+  ASSERT(context->is_function_context());
+  ASSERT(result->IsContext());
+  return result;
+}
+
+
+Object* Heap::AllocateWithContext(Context* previous,
+                                  JSObject* extension,
+                                  bool is_catch_context) {
+  Object* result = Heap::AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
+  if (result->IsFailure()) return result;
+  Context* context = reinterpret_cast<Context*>(result);
+  context->set_map(is_catch_context ? catch_context_map() : context_map());
+  context->set_closure(previous->closure());
+  context->set_fcontext(previous->fcontext());
+  context->set_previous(previous);
+  context->set_extension(extension);
+  context->set_global(previous->global());
+  ASSERT(!context->IsGlobalContext());
+  ASSERT(!context->is_function_context());
+  ASSERT(result->IsContext());
+  return result;
+}
+
+
+Object* Heap::AllocateStruct(InstanceType type) {
+  Map* map;
+  switch (type) {
+#define MAKE_CASE(NAME, Name, name) case NAME##_TYPE: map = name##_map(); break;
+STRUCT_LIST(MAKE_CASE)
+#undef MAKE_CASE
+    default:
+      UNREACHABLE();
+      return Failure::InternalError();
+  }
+  int size = map->instance_size();
+  AllocationSpace space =
+      (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
+  Object* result = Heap::Allocate(map, space);
+  if (result->IsFailure()) return result;
+  Struct::cast(result)->InitializeBody(size);
+  return result;
+}
+
+
+bool Heap::IdleNotification() {
+  static const int kIdlesBeforeScavenge = 4;
+  static const int kIdlesBeforeMarkSweep = 7;
+  static const int kIdlesBeforeMarkCompact = 8;
+  static int number_idle_notifications = 0;
+  static int last_gc_count = gc_count_;
+
+  bool finished = false;
+
+  if (last_gc_count == gc_count_) {
+    number_idle_notifications++;
+  } else {
+    number_idle_notifications = 0;
+    last_gc_count = gc_count_;
+  }
+
+  if (number_idle_notifications == kIdlesBeforeScavenge) {
+    CollectGarbage(0, NEW_SPACE);
+    new_space_.Shrink();
+    last_gc_count = gc_count_;
+
+  } else if (number_idle_notifications == kIdlesBeforeMarkSweep) {
+    CollectAllGarbage(false);
+    new_space_.Shrink();
+    last_gc_count = gc_count_;
+
+  } else if (number_idle_notifications == kIdlesBeforeMarkCompact) {
+    CollectAllGarbage(true);
+    new_space_.Shrink();
+    last_gc_count = gc_count_;
+    number_idle_notifications = 0;
+    finished = true;
+  }
+
+  // Uncommit unused memory in new space.
+  Heap::UncommitFromSpace();
+  return finished;
+}
+
+
+#ifdef DEBUG
+
+void Heap::Print() {
+  if (!HasBeenSetup()) return;
+  Top::PrintStack();
+  AllSpaces spaces;
+  while (Space* space = spaces.next()) space->Print();
+}
+
+
+void Heap::ReportCodeStatistics(const char* title) {
+  PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
+  PagedSpace::ResetCodeStatistics();
+  // We do not look for code in new space, map space, or old space.  If code
+  // somehow ends up in those spaces, we would miss it here.
+  code_space_->CollectCodeStatistics();
+  lo_space_->CollectCodeStatistics();
+  PagedSpace::ReportCodeStatistics();
+}
+
+
+// This function expects that NewSpace's allocated objects histogram is
+// populated (via a call to CollectStatistics or else as a side effect of a
+// just-completed scavenge collection).
+void Heap::ReportHeapStatistics(const char* title) {
+  USE(title);
+  PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
+         title, gc_count_);
+  PrintF("mark-compact GC : %d\n", mc_count_);
+  PrintF("old_gen_promotion_limit_ %d\n", old_gen_promotion_limit_);
+  PrintF("old_gen_allocation_limit_ %d\n", old_gen_allocation_limit_);
+
+  PrintF("\n");
+  PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
+  GlobalHandles::PrintStats();
+  PrintF("\n");
+
+  PrintF("Heap statistics : ");
+  MemoryAllocator::ReportStatistics();
+  PrintF("To space : ");
+  new_space_.ReportStatistics();
+  PrintF("Old pointer space : ");
+  old_pointer_space_->ReportStatistics();
+  PrintF("Old data space : ");
+  old_data_space_->ReportStatistics();
+  PrintF("Code space : ");
+  code_space_->ReportStatistics();
+  PrintF("Map space : ");
+  map_space_->ReportStatistics();
+  PrintF("Cell space : ");
+  cell_space_->ReportStatistics();
+  PrintF("Large object space : ");
+  lo_space_->ReportStatistics();
+  PrintF(">>>>>> ========================================= >>>>>>\n");
+}
+
+#endif  // DEBUG
+
+bool Heap::Contains(HeapObject* value) {
+  return Contains(value->address());
+}
+
+
+bool Heap::Contains(Address addr) {
+  if (OS::IsOutsideAllocatedSpace(addr)) return false;
+  return HasBeenSetup() &&
+    (new_space_.ToSpaceContains(addr) ||
+     old_pointer_space_->Contains(addr) ||
+     old_data_space_->Contains(addr) ||
+     code_space_->Contains(addr) ||
+     map_space_->Contains(addr) ||
+     cell_space_->Contains(addr) ||
+     lo_space_->SlowContains(addr));
+}
+
+
+bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
+  return InSpace(value->address(), space);
+}
+
+
+bool Heap::InSpace(Address addr, AllocationSpace space) {
+  if (OS::IsOutsideAllocatedSpace(addr)) return false;
+  if (!HasBeenSetup()) return false;
+
+  switch (space) {
+    case NEW_SPACE:
+      return new_space_.ToSpaceContains(addr);
+    case OLD_POINTER_SPACE:
+      return old_pointer_space_->Contains(addr);
+    case OLD_DATA_SPACE:
+      return old_data_space_->Contains(addr);
+    case CODE_SPACE:
+      return code_space_->Contains(addr);
+    case MAP_SPACE:
+      return map_space_->Contains(addr);
+    case CELL_SPACE:
+      return cell_space_->Contains(addr);
+    case LO_SPACE:
+      return lo_space_->SlowContains(addr);
+  }
+
+  return false;
+}
+
+
+#ifdef DEBUG
+void Heap::Verify() {
+  ASSERT(HasBeenSetup());
+
+  VerifyPointersVisitor visitor;
+  IterateRoots(&visitor);
+
+  new_space_.Verify();
+
+  VerifyPointersAndRSetVisitor rset_visitor;
+  old_pointer_space_->Verify(&rset_visitor);
+  map_space_->Verify(&rset_visitor);
+
+  VerifyPointersVisitor no_rset_visitor;
+  old_data_space_->Verify(&no_rset_visitor);
+  code_space_->Verify(&no_rset_visitor);
+  cell_space_->Verify(&no_rset_visitor);
+
+  lo_space_->Verify();
+}
+#endif  // DEBUG
+
+
+Object* Heap::LookupSymbol(Vector<const char> string) {
+  Object* symbol = NULL;
+  Object* new_table = symbol_table()->LookupSymbol(string, &symbol);
+  if (new_table->IsFailure()) return new_table;
+  // Can't use set_symbol_table because SymbolTable::cast knows that
+  // SymbolTable is a singleton and checks for identity.
+  roots_[kSymbolTableRootIndex] = new_table;
+  ASSERT(symbol != NULL);
+  return symbol;
+}
+
+
+Object* Heap::LookupSymbol(String* string) {
+  if (string->IsSymbol()) return string;
+  Object* symbol = NULL;
+  Object* new_table = symbol_table()->LookupString(string, &symbol);
+  if (new_table->IsFailure()) return new_table;
+  // Can't use set_symbol_table because SymbolTable::cast knows that
+  // SymbolTable is a singleton and checks for identity.
+  roots_[kSymbolTableRootIndex] = new_table;
+  ASSERT(symbol != NULL);
+  return symbol;
+}
+
+
+bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
+  if (string->IsSymbol()) {
+    *symbol = string;
+    return true;
+  }
+  return symbol_table()->LookupSymbolIfExists(string, symbol);
+}
+
+
+#ifdef DEBUG
+void Heap::ZapFromSpace() {
+  ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsHeapObject());
+  for (Address a = new_space_.FromSpaceLow();
+       a < new_space_.FromSpaceHigh();
+       a += kPointerSize) {
+    Memory::Address_at(a) = kFromSpaceZapValue;
+  }
+}
+#endif  // DEBUG
+
+
+int Heap::IterateRSetRange(Address object_start,
+                           Address object_end,
+                           Address rset_start,
+                           ObjectSlotCallback copy_object_func) {
+  Address object_address = object_start;
+  Address rset_address = rset_start;
+  int set_bits_count = 0;
+
+  // Loop over all the pointers in [object_start, object_end).
+  while (object_address < object_end) {
+    uint32_t rset_word = Memory::uint32_at(rset_address);
+    if (rset_word != 0) {
+      uint32_t result_rset = rset_word;
+      for (uint32_t bitmask = 1; bitmask != 0; bitmask = bitmask << 1) {
+        // Do not dereference pointers at or past object_end.
+        if ((rset_word & bitmask) != 0 && object_address < object_end) {
+          Object** object_p = reinterpret_cast<Object**>(object_address);
+          if (Heap::InNewSpace(*object_p)) {
+            copy_object_func(reinterpret_cast<HeapObject**>(object_p));
+          }
+          // If this pointer does not need to be remembered anymore, clear
+          // the remembered set bit.
+          if (!Heap::InNewSpace(*object_p)) result_rset &= ~bitmask;
+          set_bits_count++;
+        }
+        object_address += kPointerSize;
+      }
+      // Update the remembered set if it has changed.
+      if (result_rset != rset_word) {
+        Memory::uint32_at(rset_address) = result_rset;
+      }
+    } else {
+      // No bits in the word were set.  This is the common case.
+      object_address += kPointerSize * kBitsPerInt;
+    }
+    rset_address += kIntSize;
+  }
+  return set_bits_count;
+}
+
+
+void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) {
+  ASSERT(Page::is_rset_in_use());
+  ASSERT(space == old_pointer_space_ || space == map_space_);
+
+  static void* paged_rset_histogram = StatsTable::CreateHistogram(
+      "V8.RSetPaged",
+      0,
+      Page::kObjectAreaSize / kPointerSize,
+      30);
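+  // The function-level static above means the histogram is created once, on
+  // the first remembered-set scan, and then shared by all later scans.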
+
+  PageIterator it(space, PageIterator::PAGES_IN_USE);
+  while (it.has_next()) {
+    Page* page = it.next();
+    int count = IterateRSetRange(page->ObjectAreaStart(), page->AllocationTop(),
+                                 page->RSetStart(), copy_object_func);
+    if (paged_rset_histogram != NULL) {
+      StatsTable::AddHistogramSample(paged_rset_histogram, count);
+    }
+  }
+}
+
+
+#ifdef DEBUG
+#define SYNCHRONIZE_TAG(tag) v->Synchronize(tag)
+#else
+#define SYNCHRONIZE_TAG(tag)
+#endif
+
+void Heap::IterateRoots(ObjectVisitor* v) {
+  IterateStrongRoots(v);
+  v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
+  SYNCHRONIZE_TAG("symbol_table");
+}
+
+
+void Heap::IterateStrongRoots(ObjectVisitor* v) {
+  v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
+  SYNCHRONIZE_TAG("strong_root_list");
+
+  v->VisitPointer(bit_cast<Object**, String**>(&hidden_symbol_));
+  SYNCHRONIZE_TAG("symbol");
+
+  Bootstrapper::Iterate(v);
+  SYNCHRONIZE_TAG("bootstrapper");
+  Top::Iterate(v);
+  SYNCHRONIZE_TAG("top");
+  Relocatable::Iterate(v);
+  SYNCHRONIZE_TAG("relocatable");
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  Debug::Iterate(v);
+#endif
+  SYNCHRONIZE_TAG("debug");
+  CompilationCache::Iterate(v);
+  SYNCHRONIZE_TAG("compilationcache");
+
+  // Iterate over local handles in handle scopes.
+  HandleScopeImplementer::Iterate(v);
+  SYNCHRONIZE_TAG("handlescope");
+
+  // Iterate over the builtin code objects and code stubs in the heap. Note
+  // that it is not strictly necessary to iterate over code objects on
+  // scavenge collections.  We still do it here because this same function
+  // is used by the mark-sweep collector and the deserializer.
+  Builtins::IterateBuiltins(v);
+  SYNCHRONIZE_TAG("builtins");
+
+  // Iterate over global handles.
+  GlobalHandles::IterateRoots(v);
+  SYNCHRONIZE_TAG("globalhandles");
+
+  // Iterate over pointers being held by inactive threads.
+  ThreadManager::Iterate(v);
+  SYNCHRONIZE_TAG("threadmanager");
+}
+#undef SYNCHRONIZE_TAG
+
+
+// Flag is set when the heap has been configured.  The heap can be repeatedly
+// configured through the API until it is set up.
+static bool heap_configured = false;
+
+// TODO(1236194): Since the heap size is configurable on the command line
+// and through the API, we should gracefully handle the case that the heap
+// size is not big enough to fit all the initial objects.
+bool Heap::ConfigureHeap(int semispace_size, int old_gen_size) {
+  if (HasBeenSetup()) return false;
+
+  if (semispace_size > 0) semispace_size_ = semispace_size;
+  if (old_gen_size > 0) old_generation_size_ = old_gen_size;
+
+  // The new space size must be a power of two to support single-bit testing
+  // for containment.
+  semispace_size_ = RoundUpToPowerOf2(semispace_size_);
+  initial_semispace_size_ = Min(initial_semispace_size_, semispace_size_);
+  young_generation_size_ = 2 * semispace_size_;
+  external_allocation_limit_ = 10 * semispace_size_;
+
+  // The old generation is paged.
+  old_generation_size_ = RoundUp(old_generation_size_, Page::kPageSize);
+
+  heap_configured = true;
+  return true;
+}
+
+
+bool Heap::ConfigureHeapDefault() {
+  return ConfigureHeap(FLAG_new_space_size, FLAG_old_space_size);
+}
+
+
+int Heap::PromotedSpaceSize() {
+  return old_pointer_space_->Size()
+      + old_data_space_->Size()
+      + code_space_->Size()
+      + map_space_->Size()
+      + cell_space_->Size()
+      + lo_space_->Size();
+}
+
+
+int Heap::PromotedExternalMemorySize() {
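+  // External memory allocated since the last global GC, clamped at zero.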
+  if (amount_of_external_allocated_memory_
+      <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
+  return amount_of_external_allocated_memory_
+      - amount_of_external_allocated_memory_at_last_global_gc_;
+}
+
+
+bool Heap::Setup(bool create_heap_objects) {
+  // Initialize heap spaces and initial maps and objects. Whenever something
+  // goes wrong, just return false. The caller should check the results and
+  // call Heap::TearDown() to release allocated memory.
+  //
+  // If the heap is not yet configured (e.g., through the API), configure it.
+  // Configuration is based on the flags new-space-size (really the semispace
+  // size) and old-space-size if set or the initial values of semispace_size_
+  // and old_generation_size_ otherwise.
+  if (!heap_configured) {
+    if (!ConfigureHeapDefault()) return false;
+  }
+
+  // Set up the memory allocator and reserve a chunk of memory for new
+  // space.  The chunk is double the size of the new space to ensure
+  // that we can find a pair of semispaces that are contiguous and
+  // aligned to their size.
+  if (!MemoryAllocator::Setup(MaxCapacity())) return false;
+  void* chunk =
+      MemoryAllocator::ReserveInitialChunk(2 * young_generation_size_);
+  if (chunk == NULL) return false;
+
+  // Align the pair of semispaces to their size, which must be a power
+  // of 2.
+  ASSERT(IsPowerOf2(young_generation_size_));
+  Address new_space_start =
+      RoundUp(reinterpret_cast<byte*>(chunk), young_generation_size_);
+  if (!new_space_.Setup(new_space_start, young_generation_size_)) return false;
+
+  // Initialize old pointer space.
+  old_pointer_space_ =
+      new OldSpace(old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE);
+  if (old_pointer_space_ == NULL) return false;
+  if (!old_pointer_space_->Setup(NULL, 0)) return false;
+
+  // Initialize old data space.
+  old_data_space_ =
+      new OldSpace(old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE);
+  if (old_data_space_ == NULL) return false;
+  if (!old_data_space_->Setup(NULL, 0)) return false;
+
+  // Initialize the code space, set its maximum capacity to the old
+  // generation size. It needs executable memory.
+  // On 64-bit platform(s), we put all code objects in a 2 GB range of
+  // virtual address space, so that they can call each other with near calls.
+  if (code_range_size_ > 0) {
+    if (!CodeRange::Setup(code_range_size_)) {
+      return false;
+    }
+  }
+
+  code_space_ =
+      new OldSpace(old_generation_size_, CODE_SPACE, EXECUTABLE);
+  if (code_space_ == NULL) return false;
+  if (!code_space_->Setup(NULL, 0)) return false;
+
+  // Initialize map space.
+  map_space_ = new MapSpace(kMaxMapSpaceSize, MAP_SPACE);
+  if (map_space_ == NULL) return false;
+  if (!map_space_->Setup(NULL, 0)) return false;
+
+  // Initialize global property cell space.
+  cell_space_ = new CellSpace(old_generation_size_, CELL_SPACE);
+  if (cell_space_ == NULL) return false;
+  if (!cell_space_->Setup(NULL, 0)) return false;
+
+  // The large object space may contain code or data.  We set the memory
+  // to be non-executable here for safety, but this means we need to enable it
+  // explicitly when allocating large code objects.
+  lo_space_ = new LargeObjectSpace(LO_SPACE);
+  if (lo_space_ == NULL) return false;
+  if (!lo_space_->Setup()) return false;
+
+  if (create_heap_objects) {
+    // Create initial maps.
+    if (!CreateInitialMaps()) return false;
+    if (!CreateApiObjects()) return false;
+
+    // Create initial objects.
+    if (!CreateInitialObjects()) return false;
+  }
+
+  LOG(IntEvent("heap-capacity", Capacity()));
+  LOG(IntEvent("heap-available", Available()));
+
+  return true;
+}
+
+
+void Heap::SetStackLimit(intptr_t limit) {
+  // On 64 bit machines, pointers are generally out of range of Smis.  We write
+  // something that looks like an out of range Smi to the GC.
+
+  // Set up the special root array entry containing the stack guard.
+  // This is actually an address, but the tag makes the GC ignore it.
+  roots_[kStackLimitRootIndex] =
+    reinterpret_cast<Object*>((limit & ~kSmiTagMask) | kSmiTag);
+}
+
+
+void Heap::TearDown() {
+  GlobalHandles::TearDown();
+
+  new_space_.TearDown();
+
+  if (old_pointer_space_ != NULL) {
+    old_pointer_space_->TearDown();
+    delete old_pointer_space_;
+    old_pointer_space_ = NULL;
+  }
+
+  if (old_data_space_ != NULL) {
+    old_data_space_->TearDown();
+    delete old_data_space_;
+    old_data_space_ = NULL;
+  }
+
+  if (code_space_ != NULL) {
+    code_space_->TearDown();
+    delete code_space_;
+    code_space_ = NULL;
+  }
+
+  if (map_space_ != NULL) {
+    map_space_->TearDown();
+    delete map_space_;
+    map_space_ = NULL;
+  }
+
+  if (cell_space_ != NULL) {
+    cell_space_->TearDown();
+    delete cell_space_;
+    cell_space_ = NULL;
+  }
+
+  if (lo_space_ != NULL) {
+    lo_space_->TearDown();
+    delete lo_space_;
+    lo_space_ = NULL;
+  }
+
+  MemoryAllocator::TearDown();
+}
+
+
+void Heap::Shrink() {
+  // Try to shrink all paged spaces.
+  PagedSpaces spaces;
+  while (PagedSpace* space = spaces.next()) space->Shrink();
+}
+
+
+#ifdef ENABLE_HEAP_PROTECTION
+
+void Heap::Protect() {
+  if (HasBeenSetup()) {
+    AllSpaces spaces;
+    while (Space* space = spaces.next()) space->Protect();
+  }
+}
+
+
+void Heap::Unprotect() {
+  if (HasBeenSetup()) {
+    AllSpaces spaces;
+    while (Space* space = spaces.next()) space->Unprotect();
+  }
+}
+
+#endif
+
+
+#ifdef DEBUG
+
+class PrintHandleVisitor: public ObjectVisitor {
+ public:
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++)
+      PrintF("  handle %p to %p\n", p, *p);
+  }
+};
+
+void Heap::PrintHandles() {
+  PrintF("Handles:\n");
+  PrintHandleVisitor v;
+  HandleScopeImplementer::Iterate(&v);
+}
+
+#endif
+
+
+Space* AllSpaces::next() {
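+  // Spaces are returned in AllocationSpace enumeration order; NULL signals
+  // that all spaces have been visited.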
+  switch (counter_++) {
+    case NEW_SPACE:
+      return Heap::new_space();
+    case OLD_POINTER_SPACE:
+      return Heap::old_pointer_space();
+    case OLD_DATA_SPACE:
+      return Heap::old_data_space();
+    case CODE_SPACE:
+      return Heap::code_space();
+    case MAP_SPACE:
+      return Heap::map_space();
+    case CELL_SPACE:
+      return Heap::cell_space();
+    case LO_SPACE:
+      return Heap::lo_space();
+    default:
+      return NULL;
+  }
+}
+
+
+PagedSpace* PagedSpaces::next() {
+  switch (counter_++) {
+    case OLD_POINTER_SPACE:
+      return Heap::old_pointer_space();
+    case OLD_DATA_SPACE:
+      return Heap::old_data_space();
+    case CODE_SPACE:
+      return Heap::code_space();
+    case MAP_SPACE:
+      return Heap::map_space();
+    case CELL_SPACE:
+      return Heap::cell_space();
+    default:
+      return NULL;
+  }
+}
+
+
+OldSpace* OldSpaces::next() {
+  switch (counter_++) {
+    case OLD_POINTER_SPACE:
+      return Heap::old_pointer_space();
+    case OLD_DATA_SPACE:
+      return Heap::old_data_space();
+    case CODE_SPACE:
+      return Heap::code_space();
+    default:
+      return NULL;
+  }
+}
+
+
+SpaceIterator::SpaceIterator() : current_space_(FIRST_SPACE), iterator_(NULL) {
+}
+
+
+SpaceIterator::~SpaceIterator() {
+  // Delete active iterator if any.
+  delete iterator_;
+}
+
+
+bool SpaceIterator::has_next() {
+  // Iterate until no more spaces.
+  return current_space_ != LAST_SPACE;
+}
+
+
+ObjectIterator* SpaceIterator::next() {
+  if (iterator_ != NULL) {
+    delete iterator_;
+    iterator_ = NULL;
+    // Move to the next space
+    current_space_++;
+    if (current_space_ > LAST_SPACE) {
+      return NULL;
+    }
+  }
+
+  // Return iterator for the new current space.
+  return CreateIterator();
+}
+
+
+// Create an iterator for the space to iterate.
+ObjectIterator* SpaceIterator::CreateIterator() {
+  ASSERT(iterator_ == NULL);
+
+  switch (current_space_) {
+    case NEW_SPACE:
+      iterator_ = new SemiSpaceIterator(Heap::new_space());
+      break;
+    case OLD_POINTER_SPACE:
+      iterator_ = new HeapObjectIterator(Heap::old_pointer_space());
+      break;
+    case OLD_DATA_SPACE:
+      iterator_ = new HeapObjectIterator(Heap::old_data_space());
+      break;
+    case CODE_SPACE:
+      iterator_ = new HeapObjectIterator(Heap::code_space());
+      break;
+    case MAP_SPACE:
+      iterator_ = new HeapObjectIterator(Heap::map_space());
+      break;
+    case CELL_SPACE:
+      iterator_ = new HeapObjectIterator(Heap::cell_space());
+      break;
+    case LO_SPACE:
+      iterator_ = new LargeObjectIterator(Heap::lo_space());
+      break;
+  }
+
+  // Return the newly allocated iterator.
+  ASSERT(iterator_ != NULL);
+  return iterator_;
+}
+
+
+HeapIterator::HeapIterator() {
+  Init();
+}
+
+
+HeapIterator::~HeapIterator() {
+  Shutdown();
+}
+
+
+void HeapIterator::Init() {
+  // Start the iteration.
+  space_iterator_ = new SpaceIterator();
+  object_iterator_ = space_iterator_->next();
+}
+
+
+void HeapIterator::Shutdown() {
+  // Make sure the last iterator is deallocated.
+  delete space_iterator_;
+  space_iterator_ = NULL;
+  object_iterator_ = NULL;
+}
+
+
+bool HeapIterator::has_next() {
+  // No iterator means we are done.
+  if (object_iterator_ == NULL) return false;
+
+  if (object_iterator_->has_next_object()) {
+    // If the current iterator has more objects we are fine.
+    return true;
+  } else {
+    // Go through the spaces looking for one that has objects.
+    while (space_iterator_->has_next()) {
+      object_iterator_ = space_iterator_->next();
+      if (object_iterator_->has_next_object()) {
+        return true;
+      }
+    }
+  }
+  // Done with the last space.
+  object_iterator_ = NULL;
+  return false;
+}
+
+
+HeapObject* HeapIterator::next() {
+  if (has_next()) {
+    return object_iterator_->next_object();
+  } else {
+    return NULL;
+  }
+}
+
+
+void HeapIterator::reset() {
+  // Restart the iterator.
+  Shutdown();
+  Init();
+}
+
+
+#ifdef DEBUG
+
+static bool search_for_any_global;
+static Object* search_target;
+static bool found_target;
+static List<Object*> object_stack(20);
+
+
+// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
+static const int kMarkTag = 2;
+
+static void MarkObjectRecursively(Object** p);
+class MarkObjectVisitor : public ObjectVisitor {
+ public:
+  void VisitPointers(Object** start, Object** end) {
+    // Mark all HeapObject pointers in [start, end)
+    for (Object** p = start; p < end; p++) {
+      if ((*p)->IsHeapObject())
+        MarkObjectRecursively(p);
+    }
+  }
+};
+
+static MarkObjectVisitor mark_visitor;
+
+static void MarkObjectRecursively(Object** p) {
+  if (!(*p)->IsHeapObject()) return;
+
+  HeapObject* obj = HeapObject::cast(*p);
+
+  Object* map = obj->map();
+
+  if (!map->IsHeapObject()) return;  // visited before
+
+  if (found_target) return;  // stop if target found
+  object_stack.Add(obj);
+  if ((search_for_any_global && obj->IsJSGlobalObject()) ||
+      (!search_for_any_global && (obj == search_target))) {
+    found_target = true;
+    return;
+  }
+
+  // not visited yet
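+  // Mark the object by storing a tagged map address in its map word.  The
+  // result is no longer a valid HeapObject pointer, which is what the
+  // "visited before" check above relies on.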
+  Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
+
+  Address map_addr = map_p->address();
+
+  obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
+
+  MarkObjectRecursively(&map);
+
+  obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
+                   &mark_visitor);
+
+  if (!found_target)  // don't pop if found the target
+    object_stack.RemoveLast();
+}
+
+
+static void UnmarkObjectRecursively(Object** p);
+class UnmarkObjectVisitor : public ObjectVisitor {
+ public:
+  void VisitPointers(Object** start, Object** end) {
+    // Unmark all HeapObject pointers in [start, end)
+    for (Object** p = start; p < end; p++) {
+      if ((*p)->IsHeapObject())
+        UnmarkObjectRecursively(p);
+    }
+  }
+};
+
+static UnmarkObjectVisitor unmark_visitor;
+
+static void UnmarkObjectRecursively(Object** p) {
+  if (!(*p)->IsHeapObject()) return;
+
+  HeapObject* obj = HeapObject::cast(*p);
+
+  Object* map = obj->map();
+
+  if (map->IsHeapObject()) return;  // unmarked already
+
+  Address map_addr = reinterpret_cast<Address>(map);
+
+  map_addr -= kMarkTag;
+
+  ASSERT_TAG_ALIGNED(map_addr);
+
+  HeapObject* map_p = HeapObject::FromAddress(map_addr);
+
+  obj->set_map(reinterpret_cast<Map*>(map_p));
+
+  UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
+
+  obj->IterateBody(Map::cast(map_p)->instance_type(),
+                   obj->SizeFromMap(Map::cast(map_p)),
+                   &unmark_visitor);
+}
+
+
+static void MarkRootObjectRecursively(Object** root) {
+  if (search_for_any_global) {
+    ASSERT(search_target == NULL);
+  } else {
+    ASSERT(search_target->IsHeapObject());
+  }
+  found_target = false;
+  object_stack.Clear();
+
+  MarkObjectRecursively(root);
+  UnmarkObjectRecursively(root);
+
+  if (found_target) {
+    PrintF("=====================================\n");
+    PrintF("====        Path to object       ====\n");
+    PrintF("=====================================\n\n");
+
+    ASSERT(!object_stack.is_empty());
+    for (int i = 0; i < object_stack.length(); i++) {
+      if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
+      Object* obj = object_stack[i];
+      obj->Print();
+    }
+    PrintF("=====================================\n");
+  }
+}
+
+
+// Helper class for visiting HeapObjects recursively.
+class MarkRootVisitor: public ObjectVisitor {
+ public:
+  void VisitPointers(Object** start, Object** end) {
+    // Visit all HeapObject pointers in [start, end)
+    for (Object** p = start; p < end; p++) {
+      if ((*p)->IsHeapObject())
+        MarkRootObjectRecursively(p);
+    }
+  }
+};
+
+
+// Triggers a depth-first traversal of reachable objects from roots
+// and finds a path to a specific heap object and prints it.
+void Heap::TracePathToObject() {
+  search_target = NULL;
+  search_for_any_global = false;
+
+  MarkRootVisitor root_visitor;
+  IterateRoots(&root_visitor);
+}
+
+
+// Triggers a depth-first traversal of reachable objects from roots
+// and finds a path to any global object and prints it. Useful for
+// determining the source for leaks of global objects.
+void Heap::TracePathToGlobal() {
+  search_target = NULL;
+  search_for_any_global = true;
+
+  MarkRootVisitor root_visitor;
+  IterateRoots(&root_visitor);
+}
+#endif
+
+
+GCTracer::GCTracer()
+    : start_time_(0.0),
+      start_size_(0.0),
+      gc_count_(0),
+      full_gc_count_(0),
+      is_compacting_(false),
+      marked_count_(0) {
+  // These two fields reflect the state of the previous full collection.
+  // Set them before they are changed by the collector.
+  previous_has_compacted_ = MarkCompactCollector::HasCompacted();
+  previous_marked_count_ = MarkCompactCollector::previous_marked_count();
+  if (!FLAG_trace_gc) return;
+  start_time_ = OS::TimeCurrentMillis();
+  start_size_ = SizeOfHeapObjects();
+}
+
+
+GCTracer::~GCTracer() {
+  if (!FLAG_trace_gc) return;
+  // Printf ONE line iff flag is set.
+  PrintF("%s %.1f -> %.1f MB, %d ms.\n",
+         CollectorString(),
+         start_size_, SizeOfHeapObjects(),
+         static_cast<int>(OS::TimeCurrentMillis() - start_time_));
+
+#if defined(ENABLE_LOGGING_AND_PROFILING)
+  Heap::PrintShortHeapStatistics();
+#endif
+}
+
+
+const char* GCTracer::CollectorString() {
+  switch (collector_) {
+    case SCAVENGER:
+      return "Scavenge";
+    case MARK_COMPACTOR:
+      return MarkCompactCollector::HasCompacted() ? "Mark-compact"
+                                                  : "Mark-sweep";
+  }
+  return "Unknown GC";
+}
+
+
+int KeyedLookupCache::Hash(Map* map, String* name) {
+  // Uses only lower 32 bits if pointers are larger.
+  uintptr_t addr_hash =
+      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> 2;
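+  // The low two bits of a heap object pointer are constant tag bits, so they
+  // are shifted out before mixing with the string hash.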
+  return (addr_hash ^ name->Hash()) % kLength;
+}
+
+
+int KeyedLookupCache::Lookup(Map* map, String* name) {
+  int index = Hash(map, name);
+  Key& key = keys_[index];
+  if ((key.map == map) && key.name->Equals(name)) {
+    return field_offsets_[index];
+  }
+  return -1;
+}
+
+
+void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
+  String* symbol;
+  if (Heap::LookupSymbolIfExists(name, &symbol)) {
+    int index = Hash(map, symbol);
+    Key& key = keys_[index];
+    key.map = map;
+    key.name = symbol;
+    field_offsets_[index] = field_offset;
+  }
+}
+
+
+void KeyedLookupCache::Clear() {
+  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
+}
+
+
+KeyedLookupCache::Key KeyedLookupCache::keys_[KeyedLookupCache::kLength];
+
+
+int KeyedLookupCache::field_offsets_[KeyedLookupCache::kLength];
+
+
+void DescriptorLookupCache::Clear() {
+  for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
+}
+
+
+DescriptorLookupCache::Key
+DescriptorLookupCache::keys_[DescriptorLookupCache::kLength];
+
+int DescriptorLookupCache::results_[DescriptorLookupCache::kLength];
+
+
+#ifdef DEBUG
+bool Heap::GarbageCollectionGreedyCheck() {
+  ASSERT(FLAG_gc_greedy);
+  if (Bootstrapper::IsActive()) return true;
+  if (disallow_allocation_failure()) return true;
+  return CollectGarbage(0, NEW_SPACE);
+}
+#endif
+
+
+TranscendentalCache::TranscendentalCache(TranscendentalCache::Type t)
+  : type_(t) {
+  uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
+  uint32_t in1 = 0xffffffffu;  // generated by the FPU.
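+  // Seeding every entry with an input the FPU can never produce guarantees
+  // that a lookup with a real argument never hits an uninitialized slot.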
+  for (int i = 0; i < kCacheSize; i++) {
+    elements_[i].in[0] = in0;
+    elements_[i].in[1] = in1;
+    elements_[i].output = NULL;
+  }
+}
+
+
+TranscendentalCache* TranscendentalCache::caches_[kNumberOfCaches];
+
+
+void TranscendentalCache::Clear() {
+  for (int i = 0; i < kNumberOfCaches; i++) {
+    if (caches_[i] != NULL) {
+      delete caches_[i];
+      caches_[i] = NULL;
+    }
+  }
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/heap.h b/src/heap.h
new file mode 100644
index 0000000..e878efc
--- /dev/null
+++ b/src/heap.h
@@ -0,0 +1,1601 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HEAP_H_
+#define V8_HEAP_H_
+
+#include <math.h>
+
+#include "zone-inl.h"
+
+
+namespace v8 {
+namespace internal {
+
+// Defines all the roots in Heap.
+#define UNCONDITIONAL_STRONG_ROOT_LIST(V)                                      \
+  /* Cluster the most popular ones in a few cache lines here at the top. */    \
+  V(Smi, stack_limit, StackLimit)                                              \
+  V(Object, undefined_value, UndefinedValue)                                   \
+  V(Object, the_hole_value, TheHoleValue)                                      \
+  V(Object, null_value, NullValue)                                             \
+  V(Object, true_value, TrueValue)                                             \
+  V(Object, false_value, FalseValue)                                           \
+  V(Map, heap_number_map, HeapNumberMap)                                       \
+  V(Map, global_context_map, GlobalContextMap)                                 \
+  V(Map, fixed_array_map, FixedArrayMap)                                       \
+  V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel)       \
+  V(Map, meta_map, MetaMap)                                                    \
+  V(Object, termination_exception, TerminationException)                       \
+  V(Map, hash_table_map, HashTableMap)                                         \
+  V(FixedArray, empty_fixed_array, EmptyFixedArray)                            \
+  V(Map, short_string_map, ShortStringMap)                                     \
+  V(Map, medium_string_map, MediumStringMap)                                   \
+  V(Map, long_string_map, LongStringMap)                                       \
+  V(Map, short_ascii_string_map, ShortAsciiStringMap)                          \
+  V(Map, medium_ascii_string_map, MediumAsciiStringMap)                        \
+  V(Map, long_ascii_string_map, LongAsciiStringMap)                            \
+  V(Map, short_symbol_map, ShortSymbolMap)                                     \
+  V(Map, medium_symbol_map, MediumSymbolMap)                                   \
+  V(Map, long_symbol_map, LongSymbolMap)                                       \
+  V(Map, short_ascii_symbol_map, ShortAsciiSymbolMap)                          \
+  V(Map, medium_ascii_symbol_map, MediumAsciiSymbolMap)                        \
+  V(Map, long_ascii_symbol_map, LongAsciiSymbolMap)                            \
+  V(Map, short_cons_symbol_map, ShortConsSymbolMap)                            \
+  V(Map, medium_cons_symbol_map, MediumConsSymbolMap)                          \
+  V(Map, long_cons_symbol_map, LongConsSymbolMap)                              \
+  V(Map, short_cons_ascii_symbol_map, ShortConsAsciiSymbolMap)                 \
+  V(Map, medium_cons_ascii_symbol_map, MediumConsAsciiSymbolMap)               \
+  V(Map, long_cons_ascii_symbol_map, LongConsAsciiSymbolMap)                   \
+  V(Map, short_sliced_symbol_map, ShortSlicedSymbolMap)                        \
+  V(Map, medium_sliced_symbol_map, MediumSlicedSymbolMap)                      \
+  V(Map, long_sliced_symbol_map, LongSlicedSymbolMap)                          \
+  V(Map, short_sliced_ascii_symbol_map, ShortSlicedAsciiSymbolMap)             \
+  V(Map, medium_sliced_ascii_symbol_map, MediumSlicedAsciiSymbolMap)           \
+  V(Map, long_sliced_ascii_symbol_map, LongSlicedAsciiSymbolMap)               \
+  V(Map, short_external_symbol_map, ShortExternalSymbolMap)                    \
+  V(Map, medium_external_symbol_map, MediumExternalSymbolMap)                  \
+  V(Map, long_external_symbol_map, LongExternalSymbolMap)                      \
+  V(Map, short_external_ascii_symbol_map, ShortExternalAsciiSymbolMap)         \
+  V(Map, medium_external_ascii_symbol_map, MediumExternalAsciiSymbolMap)       \
+  V(Map, long_external_ascii_symbol_map, LongExternalAsciiSymbolMap)           \
+  V(Map, short_cons_string_map, ShortConsStringMap)                            \
+  V(Map, medium_cons_string_map, MediumConsStringMap)                          \
+  V(Map, long_cons_string_map, LongConsStringMap)                              \
+  V(Map, short_cons_ascii_string_map, ShortConsAsciiStringMap)                 \
+  V(Map, medium_cons_ascii_string_map, MediumConsAsciiStringMap)               \
+  V(Map, long_cons_ascii_string_map, LongConsAsciiStringMap)                   \
+  V(Map, short_sliced_string_map, ShortSlicedStringMap)                        \
+  V(Map, medium_sliced_string_map, MediumSlicedStringMap)                      \
+  V(Map, long_sliced_string_map, LongSlicedStringMap)                          \
+  V(Map, short_sliced_ascii_string_map, ShortSlicedAsciiStringMap)             \
+  V(Map, medium_sliced_ascii_string_map, MediumSlicedAsciiStringMap)           \
+  V(Map, long_sliced_ascii_string_map, LongSlicedAsciiStringMap)               \
+  V(Map, short_external_string_map, ShortExternalStringMap)                    \
+  V(Map, medium_external_string_map, MediumExternalStringMap)                  \
+  V(Map, long_external_string_map, LongExternalStringMap)                      \
+  V(Map, short_external_ascii_string_map, ShortExternalAsciiStringMap)         \
+  V(Map, medium_external_ascii_string_map, MediumExternalAsciiStringMap)       \
+  V(Map, long_external_ascii_string_map, LongExternalAsciiStringMap)           \
+  V(Map, undetectable_short_string_map, UndetectableShortStringMap)            \
+  V(Map, undetectable_medium_string_map, UndetectableMediumStringMap)          \
+  V(Map, undetectable_long_string_map, UndetectableLongStringMap)              \
+  V(Map, undetectable_short_ascii_string_map, UndetectableShortAsciiStringMap) \
+  V(Map,                                                                       \
+    undetectable_medium_ascii_string_map,                                      \
+    UndetectableMediumAsciiStringMap)                                          \
+  V(Map, undetectable_long_ascii_string_map, UndetectableLongAsciiStringMap)   \
+  V(Map, byte_array_map, ByteArrayMap)                                         \
+  V(Map, pixel_array_map, PixelArrayMap)                                       \
+  V(Map, context_map, ContextMap)                                              \
+  V(Map, catch_context_map, CatchContextMap)                                   \
+  V(Map, code_map, CodeMap)                                                    \
+  V(Map, oddball_map, OddballMap)                                              \
+  V(Map, global_property_cell_map, GlobalPropertyCellMap)                      \
+  V(Map, boilerplate_function_map, BoilerplateFunctionMap)                     \
+  V(Map, shared_function_info_map, SharedFunctionInfoMap)                      \
+  V(Map, proxy_map, ProxyMap)                                                  \
+  V(Map, one_pointer_filler_map, OnePointerFillerMap)                          \
+  V(Map, two_pointer_filler_map, TwoPointerFillerMap)                          \
+  V(Object, nan_value, NanValue)                                               \
+  V(Object, minus_zero_value, MinusZeroValue)                                  \
+  V(String, empty_string, EmptyString)                                         \
+  V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray)             \
+  V(Map, neander_map, NeanderMap)                                              \
+  V(JSObject, message_listeners, MessageListeners)                             \
+  V(Proxy, prototype_accessors, PrototypeAccessors)                            \
+  V(NumberDictionary, code_stubs, CodeStubs)                                   \
+  V(NumberDictionary, non_monomorphic_cache, NonMonomorphicCache)              \
+  V(Code, js_entry_code, JsEntryCode)                                          \
+  V(Code, js_construct_entry_code, JsConstructEntryCode)                       \
+  V(Code, c_entry_code, CEntryCode)                                            \
+  V(Code, c_entry_debug_break_code, CEntryDebugBreakCode)                      \
+  V(FixedArray, number_string_cache, NumberStringCache)                        \
+  V(FixedArray, single_character_string_cache, SingleCharacterStringCache)     \
+  V(FixedArray, natives_source_cache, NativesSourceCache)                      \
+  V(Object, last_script_id, LastScriptId)                                      \
+
+#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
+#define STRONG_ROOT_LIST(V)                                                    \
+  UNCONDITIONAL_STRONG_ROOT_LIST(V)                                            \
+  V(Code, re_c_entry_code, RegExpCEntryCode)
+#else
+#define STRONG_ROOT_LIST(V) UNCONDITIONAL_STRONG_ROOT_LIST(V)
+#endif
+
+#define ROOT_LIST(V)                                  \
+  STRONG_ROOT_LIST(V)                                 \
+  V(SymbolTable, symbol_table, SymbolTable)
+
+#define SYMBOL_LIST(V)                                                   \
+  V(Array_symbol, "Array")                                               \
+  V(Object_symbol, "Object")                                             \
+  V(Proto_symbol, "__proto__")                                           \
+  V(StringImpl_symbol, "StringImpl")                                     \
+  V(arguments_symbol, "arguments")                                       \
+  V(Arguments_symbol, "Arguments")                                       \
+  V(arguments_shadow_symbol, ".arguments")                               \
+  V(call_symbol, "call")                                                 \
+  V(apply_symbol, "apply")                                               \
+  V(caller_symbol, "caller")                                             \
+  V(boolean_symbol, "boolean")                                           \
+  V(Boolean_symbol, "Boolean")                                           \
+  V(callee_symbol, "callee")                                             \
+  V(constructor_symbol, "constructor")                                   \
+  V(code_symbol, ".code")                                                \
+  V(result_symbol, ".result")                                            \
+  V(catch_var_symbol, ".catch-var")                                      \
+  V(empty_symbol, "")                                                    \
+  V(eval_symbol, "eval")                                                 \
+  V(function_symbol, "function")                                         \
+  V(length_symbol, "length")                                             \
+  V(name_symbol, "name")                                                 \
+  V(number_symbol, "number")                                             \
+  V(Number_symbol, "Number")                                             \
+  V(RegExp_symbol, "RegExp")                                             \
+  V(object_symbol, "object")                                             \
+  V(prototype_symbol, "prototype")                                       \
+  V(string_symbol, "string")                                             \
+  V(String_symbol, "String")                                             \
+  V(Date_symbol, "Date")                                                 \
+  V(this_symbol, "this")                                                 \
+  V(to_string_symbol, "toString")                                        \
+  V(char_at_symbol, "CharAt")                                            \
+  V(undefined_symbol, "undefined")                                       \
+  V(value_of_symbol, "valueOf")                                          \
+  V(InitializeVarGlobal_symbol, "InitializeVarGlobal")                   \
+  V(InitializeConstGlobal_symbol, "InitializeConstGlobal")               \
+  V(stack_overflow_symbol, "kStackOverflowBoilerplate")                  \
+  V(illegal_access_symbol, "illegal access")                             \
+  V(out_of_memory_symbol, "out-of-memory")                               \
+  V(illegal_execution_state_symbol, "illegal execution state")           \
+  V(get_symbol, "get")                                                   \
+  V(set_symbol, "set")                                                   \
+  V(function_class_symbol, "Function")                                   \
+  V(illegal_argument_symbol, "illegal argument")                         \
+  V(MakeReferenceError_symbol, "MakeReferenceError")                     \
+  V(MakeSyntaxError_symbol, "MakeSyntaxError")                           \
+  V(MakeTypeError_symbol, "MakeTypeError")                               \
+  V(invalid_lhs_in_assignment_symbol, "invalid_lhs_in_assignment")       \
+  V(invalid_lhs_in_for_in_symbol, "invalid_lhs_in_for_in")               \
+  V(invalid_lhs_in_postfix_op_symbol, "invalid_lhs_in_postfix_op")       \
+  V(invalid_lhs_in_prefix_op_symbol, "invalid_lhs_in_prefix_op")         \
+  V(illegal_return_symbol, "illegal_return")                             \
+  V(illegal_break_symbol, "illegal_break")                               \
+  V(illegal_continue_symbol, "illegal_continue")                         \
+  V(unknown_label_symbol, "unknown_label")                               \
+  V(redeclaration_symbol, "redeclaration")                               \
+  V(failure_symbol, "<failure>")                                         \
+  V(space_symbol, " ")                                                   \
+  V(exec_symbol, "exec")                                                 \
+  V(zero_symbol, "0")                                                    \
+  V(global_eval_symbol, "GlobalEval")                                    \
+  V(identity_hash_symbol, "v8::IdentityHash")
+
+
+// Forward declaration of the GCTracer class.
+class GCTracer;
+
+
+// The all-static Heap class captures the interface to the global object heap.
+// All JavaScript contexts in this process share the same object heap.
+
+class Heap : public AllStatic {
+ public:
+  // Configure heap size before setup. Return false if the heap has been
+  // set up already.
+  static bool ConfigureHeap(int semispace_size, int old_gen_size);
+  static bool ConfigureHeapDefault();
+
+  // Initializes the global object heap. If create_heap_objects is true,
+  // also creates the basic non-mutable objects.
+  // Returns whether it succeeded.
+  static bool Setup(bool create_heap_objects);
+
+  // Destroys all memory allocated by the heap.
+  static void TearDown();
+
+  // Sets the stack limit in the roots_ array.  Some architectures generate code
+  // that looks here, because it is faster than loading from the static jslimit_
+  // variable.
+  static void SetStackLimit(intptr_t limit);
+
+  // Returns whether Setup has been called.
+  static bool HasBeenSetup();
+
+  // Returns the maximum heap capacity.
+  static int MaxCapacity() {
+    return young_generation_size_ + old_generation_size_;
+  }
+  static int SemiSpaceSize() { return semispace_size_; }
+  static int InitialSemiSpaceSize() { return initial_semispace_size_; }
+  static int YoungGenerationSize() { return young_generation_size_; }
+  static int OldGenerationSize() { return old_generation_size_; }
+
+  // Returns the capacity of the heap in bytes w/o growing. Heap grows when
+  // more spaces are needed until it reaches the limit.
+  static int Capacity();
+
+  // Returns the available bytes in space w/o growing.
+  // Heap doesn't guarantee that it can allocate an object that requires
+  // all available bytes. Check MaxHeapObjectSize() instead.
+  static int Available();
+
+  // Returns the maximum object size in paged space.
+  static inline int MaxObjectSizeInPagedSpace();
+
+  // Returns the size of all objects residing in the heap.
+  static int SizeOfObjects();
+
+  // Return the starting address and a mask for the new space.  And-masking an
+  // address with the mask will result in the start address of the new space
+  // for all addresses in either semispace.
+  static Address NewSpaceStart() { return new_space_.start(); }
+  static uintptr_t NewSpaceMask() { return new_space_.mask(); }
+  static Address NewSpaceTop() { return new_space_.top(); }
+
+  static NewSpace* new_space() { return &new_space_; }
+  static OldSpace* old_pointer_space() { return old_pointer_space_; }
+  static OldSpace* old_data_space() { return old_data_space_; }
+  static OldSpace* code_space() { return code_space_; }
+  static MapSpace* map_space() { return map_space_; }
+  static CellSpace* cell_space() { return cell_space_; }
+  static LargeObjectSpace* lo_space() { return lo_space_; }
+
+  static bool always_allocate() { return always_allocate_scope_depth_ != 0; }
+  static Address always_allocate_scope_depth_address() {
+    return reinterpret_cast<Address>(&always_allocate_scope_depth_);
+  }
+
+  static Address* NewSpaceAllocationTopAddress() {
+    return new_space_.allocation_top_address();
+  }
+  static Address* NewSpaceAllocationLimitAddress() {
+    return new_space_.allocation_limit_address();
+  }
+
+  // Uncommit unused semi space.
+  static bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
+
+#ifdef ENABLE_HEAP_PROTECTION
+  // Protect/unprotect the heap by marking all spaces read-only/writable.
+  static void Protect();
+  static void Unprotect();
+#endif
+
+  // Allocates and initializes a new JavaScript object based on a
+  // constructor.
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  static Object* AllocateJSObject(JSFunction* constructor,
+                                  PretenureFlag pretenure = NOT_TENURED);
+
+  // Allocates and initializes a new global object based on a constructor.
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  static Object* AllocateGlobalObject(JSFunction* constructor);
+
+  // Returns a deep copy of the JavaScript object.
+  // Properties and elements are copied too.
+  // Returns failure if allocation failed.
+  static Object* CopyJSObject(JSObject* source);
+
+  // Allocates the function prototype.
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  static Object* AllocateFunctionPrototype(JSFunction* function);
+
+  // Reinitialize a JSGlobalProxy based on a constructor.  The object
+  // must have the same size as objects allocated using the
+  // constructor.  The object is reinitialized and behaves as an
+  // object that has been freshly allocated using the constructor.
+  static Object* ReinitializeJSGlobalProxy(JSFunction* constructor,
+                                           JSGlobalProxy* global);
+
+  // Allocates and initializes a new JavaScript object based on a map.
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  static Object* AllocateJSObjectFromMap(Map* map,
+                                         PretenureFlag pretenure = NOT_TENURED);
+
+  // Allocates a heap object based on the map.
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this function does not perform a garbage collection.
+  static Object* Allocate(Map* map, AllocationSpace space);
+
+  // Allocates a JS Map in the heap.
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this function does not perform a garbage collection.
+  static Object* AllocateMap(InstanceType instance_type, int instance_size);
+
+  // Allocates a partial map for bootstrapping.
+  static Object* AllocatePartialMap(InstanceType instance_type,
+                                    int instance_size);
+
+  // Allocate a map for the specified function
+  static Object* AllocateInitialMap(JSFunction* fun);
+
+  // Allocates and fully initializes a String.  There are two String
+  // encodings: ASCII and two-byte.  One should choose between the three
+  // string allocation functions based on the encoding of the string buffer
+  // used to initialize the string.
+  //   - ...FromAscii initializes the string from a buffer that is ASCII
+  //     encoded (it does not check that the buffer is ASCII encoded) and the
+  //     result will be ASCII encoded.
+  //   - ...FromUTF8 initializes the string from a buffer that is UTF-8
+  //     encoded.  If the characters are all single-byte characters, the
+  //     result will be ASCII encoded, otherwise it will be converted to
+  //     two-byte.
+  //   - ...FromTwoByte initializes the string from a buffer that is two-byte
+  //     encoded.  If the characters are all single-byte characters, the
+  //     result will be converted to ASCII, otherwise it will be left as
+  //     two-byte.
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  static Object* AllocateStringFromAscii(
+      Vector<const char> str,
+      PretenureFlag pretenure = NOT_TENURED);
+  static Object* AllocateStringFromUtf8(
+      Vector<const char> str,
+      PretenureFlag pretenure = NOT_TENURED);
+  static Object* AllocateStringFromTwoByte(
+      Vector<const uc16> str,
+      PretenureFlag pretenure = NOT_TENURED);
+
+  // Allocates a symbol in old space based on the character stream.
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this function does not perform a garbage collection.
+  static inline Object* AllocateSymbol(Vector<const char> str,
+                                       int chars,
+                                       uint32_t length_field);
+
+  static Object* AllocateInternalSymbol(unibrow::CharacterStream* buffer,
+                                        int chars,
+                                        uint32_t length_field);
+
+  static Object* AllocateExternalSymbol(Vector<const char> str,
+                                        int chars);
+
+
+  // Allocates and partially initializes a String.  There are two String
+  // encodings: ASCII and two-byte.  These functions allocate a string of the
+  // given length and set its map and length fields.  The characters of the
+  // string are uninitialized.
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  static Object* AllocateRawAsciiString(
+      int length,
+      PretenureFlag pretenure = NOT_TENURED);
+  static Object* AllocateRawTwoByteString(
+      int length,
+      PretenureFlag pretenure = NOT_TENURED);
+
+  // Computes a single-character string where the character has the given
+  // code.  A cache is used for ASCII codes.
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed. Please note this does not perform a garbage collection.
+  static Object* LookupSingleCharacterStringFromCode(uint16_t code);
+
+  // Allocate a byte array of the specified length
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  static Object* AllocateByteArray(int length, PretenureFlag pretenure);
+
+  // Allocate a non-tenured byte array of the specified length
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  static Object* AllocateByteArray(int length);
+
+  // Allocate a pixel array of the specified length
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  static Object* AllocatePixelArray(int length,
+                                    uint8_t* external_pointer,
+                                    PretenureFlag pretenure);
+
+  // Allocate a tenured JS global property cell.
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  static Object* AllocateJSGlobalPropertyCell(Object* value);
+
+  // Allocates a fixed array initialized with undefined values
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  static Object* AllocateFixedArray(int length, PretenureFlag pretenure);
+  // Allocate uninitialized, non-tenured fixed array with length elements.
+  static Object* AllocateFixedArray(int length);
+
+  // Make a copy of src and return it. Returns
+  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+  static Object* CopyFixedArray(FixedArray* src);
+
+  // Allocates a fixed array initialized with the hole values.
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  static Object* AllocateFixedArrayWithHoles(int length);
+
+  // AllocateHashTable is identical to AllocateFixedArray except
+  // that the resulting object has hash_table_map as map.
+  static Object* AllocateHashTable(int length);
+
+  // Allocate a global (but otherwise uninitialized) context.
+  static Object* AllocateGlobalContext();
+
+  // Allocate a function context.
+  static Object* AllocateFunctionContext(int length, JSFunction* closure);
+
+  // Allocate a 'with' context.
+  static Object* AllocateWithContext(Context* previous,
+                                     JSObject* extension,
+                                     bool is_catch_context);
+
+  // Allocates a new utility object in the old generation.
+  static Object* AllocateStruct(InstanceType type);
+
+  // Allocates a function initialized with a shared part.
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  static Object* AllocateFunction(Map* function_map,
+                                  SharedFunctionInfo* shared,
+                                  Object* prototype);
+
+  // Indices for direct access into argument objects.
+  static const int arguments_callee_index = 0;
+  static const int arguments_length_index = 1;
+
+  // Allocates an arguments object - optionally with an elements array.
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  static Object* AllocateArgumentsObject(Object* callee, int length);
+
+  // Converts a double into either a Smi or a HeapNumber object.
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  static Object* NewNumberFromDouble(double value,
+                                     PretenureFlag pretenure = NOT_TENURED);
+
+  // Same as NewNumberFromDouble, but may return a preallocated/immutable
+  // number object (e.g., minus_zero_value_, nan_value_)
+  static Object* NumberFromDouble(double value,
+                                  PretenureFlag pretenure = NOT_TENURED);
+
+  // Allocates a HeapNumber from value.
+  static Object* AllocateHeapNumber(double value, PretenureFlag pretenure);
+  static Object* AllocateHeapNumber(double value);  // pretenure = NOT_TENURED
+
+  // Converts an int into either a Smi or a HeapNumber object.
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  static inline Object* NumberFromInt32(int32_t value);
+
+  // Converts an int into either a Smi or a HeapNumber object.
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  static inline Object* NumberFromUint32(uint32_t value);
+
+  // Allocates a new proxy object.
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  static Object* AllocateProxy(Address proxy,
+                               PretenureFlag pretenure = NOT_TENURED);
+
+  // Allocates a new SharedFunctionInfo object.
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  static Object* AllocateSharedFunctionInfo(Object* name);
+
+  // Allocates a new cons string object.
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  static Object* AllocateConsString(String* first, String* second);
+
+  // Allocates a new sliced string object which is a slice of an underlying
+  // string buffer stretching from the index start (inclusive) to the index
+  // end (exclusive).
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  static Object* AllocateSlicedString(String* buffer,
+                                      int start,
+                                      int end);
+
+  // Allocates a new sub string object which is a substring of an underlying
+  // string buffer stretching from the index start (inclusive) to the index
+  // end (exclusive).
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  static Object* AllocateSubString(String* buffer,
+                                   int start,
+                                   int end);
+
+  // Allocate a new external string object, which is backed by a string
+  // resource that resides outside the V8 heap.
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  static Object* AllocateExternalStringFromAscii(
+      ExternalAsciiString::Resource* resource);
+  static Object* AllocateExternalStringFromTwoByte(
+      ExternalTwoByteString::Resource* resource);
+
+  // Allocates an uninitialized object.  The memory is non-executable if the
+  // hardware and OS allow.
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this function does not perform a garbage collection.
+  static inline Object* AllocateRaw(int size_in_bytes,
+                                    AllocationSpace space,
+                                    AllocationSpace retry_space);
+
+  // Initialize a filler object to keep the ability to iterate over the heap
+  // when shortening objects.
+  static void CreateFillerObjectAt(Address addr, int size);
+
+  // Makes a new native code object
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed. On success, the pointer to the Code object is stored in the
+  // self_reference. This allows generated code to reference its own Code
+  // object by containing this pointer.
+  // Please note this function does not perform a garbage collection.
+  static Object* CreateCode(const CodeDesc& desc,
+                            ZoneScopeInfo* sinfo,
+                            Code::Flags flags,
+                            Handle<Object> self_reference);
+
+  static Object* CopyCode(Code* code);
+  // Finds the symbol for string in the symbol table.
+  // If not found, a new symbol is added to the table and returned.
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if allocation
+  // failed.
+  // Please note this function does not perform a garbage collection.
+  static Object* LookupSymbol(Vector<const char> str);
+  static Object* LookupAsciiSymbol(const char* str) {
+    return LookupSymbol(CStrVector(str));
+  }
+  static Object* LookupSymbol(String* str);
+  static bool LookupSymbolIfExists(String* str, String** symbol);
+
+  // Compute the matching symbol map for a string if possible.
+  // NULL is returned if string is in new space or not flattened.
+  static Map* SymbolMapForString(String* str);
+
+  // Converts the given boolean condition to JavaScript boolean value.
+  static Object* ToBoolean(bool condition) {
+    return condition ? true_value() : false_value();
+  }
+
+  // Code that should be run before and after each GC.  Includes some
+  // reporting/verification activities when compiled with DEBUG set.
+  static void GarbageCollectionPrologue();
+  static void GarbageCollectionEpilogue();
+
+  // Code that should be executed after the garbage collection proper.
+  static void PostGarbageCollectionProcessing();
+
+  // Performs garbage collection operation.
+  // Returns whether required_space bytes are available after the collection.
+  static bool CollectGarbage(int required_space, AllocationSpace space);
+
+  // Performs a full garbage collection. Force compaction if the
+  // parameter is true.
+  static void CollectAllGarbage(bool force_compaction);
+
+  // Performs a full garbage collection if a context has been disposed
+  // since the last time the check was performed.
+  static void CollectAllGarbageIfContextDisposed();
+
+  // Notify the heap that a context has been disposed.
+  static void NotifyContextDisposed();
+
+  // Utility to invoke the scavenger. This is needed in test code to
+  // ensure correct callback for weak global handles.
+  static void PerformScavenge();
+
+#ifdef DEBUG
+  // Utility used with flag gc-greedy.
+  static bool GarbageCollectionGreedyCheck();
+#endif
+
+  static void SetGlobalGCPrologueCallback(GCCallback callback) {
+    global_gc_prologue_callback_ = callback;
+  }
+  static void SetGlobalGCEpilogueCallback(GCCallback callback) {
+    global_gc_epilogue_callback_ = callback;
+  }
+
+  // Heap root getters.  We have versions with and without type::cast() here.
+  // You can't use type::cast during GC because the assert fails.
+#define ROOT_ACCESSOR(type, name, camel_name)                                  \
+  static inline type* name() {                                                 \
+    return type::cast(roots_[k##camel_name##RootIndex]);                       \
+  }                                                                            \
+  static inline type* raw_unchecked_##name() {                                 \
+    return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]);          \
+  }
+  ROOT_LIST(ROOT_ACCESSOR)
+#undef ROOT_ACCESSOR
+
+// Utility type maps
+#define STRUCT_MAP_ACCESSOR(NAME, Name, name)                                  \
+    static inline Map* name##_map() {                                          \
+      return Map::cast(roots_[k##Name##MapRootIndex]);                         \
+    }
+  STRUCT_LIST(STRUCT_MAP_ACCESSOR)
+#undef STRUCT_MAP_ACCESSOR
+
+#define SYMBOL_ACCESSOR(name, str) static inline String* name() {              \
+    return String::cast(roots_[k##name##RootIndex]);                           \
+  }
+  SYMBOL_LIST(SYMBOL_ACCESSOR)
+#undef SYMBOL_ACCESSOR
+
+  // The hidden_symbol is special because it is an empty string, but does
+  // not match any string when looked up in properties.
+  static String* hidden_symbol() { return hidden_symbol_; }
+
+  // Iterates over all roots in the heap.
+  static void IterateRoots(ObjectVisitor* v);
+  // Iterates over all strong roots in the heap.
+  static void IterateStrongRoots(ObjectVisitor* v);
+
+  // Iterates remembered set of an old space.
+  static void IterateRSet(PagedSpace* space, ObjectSlotCallback callback);
+
+  // Iterates a range of remembered set addresses starting with rset_start
+  // corresponding to the range of allocated pointers
+  // [object_start, object_end).
+  // Returns the number of bits that were set.
+  static int IterateRSetRange(Address object_start,
+                              Address object_end,
+                              Address rset_start,
+                              ObjectSlotCallback copy_object_func);
+
+  // Returns whether the object resides in new space.
+  static inline bool InNewSpace(Object* object);
+  static inline bool InFromSpace(Object* object);
+  static inline bool InToSpace(Object* object);
+
+  // Checks whether an address/object is in the heap (including the auxiliary
+  // area and unused area).
+  static bool Contains(Address addr);
+  static bool Contains(HeapObject* value);
+
+  // Checks whether an address/object is in a space.
+  // Currently used by tests and heap verification only.
+  static bool InSpace(Address addr, AllocationSpace space);
+  static bool InSpace(HeapObject* value, AllocationSpace space);
+
+  // Finds out which space an object should get promoted to based on its type.
+  static inline OldSpace* TargetSpace(HeapObject* object);
+  static inline AllocationSpace TargetSpaceId(InstanceType type);
+
+  // Sets the stub_cache_ (only used when expanding the dictionary).
+  static void public_set_code_stubs(NumberDictionary* value) {
+    roots_[kCodeStubsRootIndex] = value;
+  }
+
+  // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
+  static void public_set_non_monomorphic_cache(NumberDictionary* value) {
+    roots_[kNonMonomorphicCacheRootIndex] = value;
+  }
+
+  // Update the next script id.
+  static inline void SetLastScriptId(Object* last_script_id);
+
+  // Generated code can embed this address to get access to the roots.
+  static Object** roots_address() { return roots_; }
+
+#ifdef DEBUG
+  static void Print();
+  static void PrintHandles();
+
+  // Verify the heap is in its normal state before or after a GC.
+  static void Verify();
+
+  // Report heap statistics.
+  static void ReportHeapStatistics(const char* title);
+  static void ReportCodeStatistics(const char* title);
+
+  // Fill in bogus values in from space
+  static void ZapFromSpace();
+#endif
+
+#if defined(ENABLE_LOGGING_AND_PROFILING)
+  // Print short heap statistics.
+  static void PrintShortHeapStatistics();
+#endif
+
+  // Makes a new symbol object
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this function does not perform a garbage collection.
+  static Object* CreateSymbol(const char* str, int length, int hash);
+  static Object* CreateSymbol(String* str);
+
+  // Write barrier support for address[offset] = o.
+  static inline void RecordWrite(Address address, int offset);
+
+  // Given an address occupied by a live code object, return that object.
+  static Object* FindCodeObject(Address a);
+
+  // Invoke Shrink on shrinkable spaces.
+  static void Shrink();
+
+  enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
+  static inline HeapState gc_state() { return gc_state_; }
+
+#ifdef DEBUG
+  static bool IsAllocationAllowed() { return allocation_allowed_; }
+  static inline bool allow_allocation(bool enable);
+
+  static bool disallow_allocation_failure() {
+    return disallow_allocation_failure_;
+  }
+
+  static void TracePathToObject();
+  static void TracePathToGlobal();
+#endif
+
+  // Callback function passed to Heap::Iterate etc.  Copies an object if
+  // necessary; the object might be promoted to an old space.  The caller must
+  // ensure the precondition that the object is (a) a heap object and (b) in
+  // the heap's from space.
+  static void ScavengePointer(HeapObject** p);
+  static inline void ScavengeObject(HeapObject** p, HeapObject* object);
+
+  // Clear a range of remembered set addresses corresponding to the object
+  // area address 'start' with size 'size_in_bytes', e.g., when adding blocks
+  // to the free list.
+  static void ClearRSetRange(Address start, int size_in_bytes);
+
+  // Rebuild remembered set in old and map spaces.
+  static void RebuildRSets();
+
+  // Commits from space if it is uncommitted.
+  static void EnsureFromSpaceIsCommitted();
+
+  //
+  // Support for the API.
+  //
+
+  static bool CreateApiObjects();
+
+  // Attempt to find the number in a small cache.  If we find it, return
+  // the string representation of the number.  Otherwise return undefined.
+  static Object* GetNumberStringCache(Object* number);
+
+  // Update the cache with a new number-string pair.
+  static void SetNumberStringCache(Object* number, String* str);
+
+  // Entries in the cache.  Must be a power of 2.
+  static const int kNumberStringCacheSize = 64;
+
+  // Adjusts the amount of registered external memory.
+  // Returns the adjusted value.
+  static inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
+
+  // Allocate uninitialized fixed array (pretenure == NOT_TENURED).
+  static Object* AllocateRawFixedArray(int length);
+
+  // True if we have reached the allocation limit in the old generation that
+  // should force the next GC (caused normally) to be a full one.
+  static bool OldGenerationPromotionLimitReached() {
+    return (PromotedSpaceSize() + PromotedExternalMemorySize())
+           > old_gen_promotion_limit_;
+  }
+
+  // True if we have reached the allocation limit in the old generation that
+  // should artificially cause a GC right now.
+  static bool OldGenerationAllocationLimitReached() {
+    return (PromotedSpaceSize() + PromotedExternalMemorySize())
+           > old_gen_allocation_limit_;
+  }
+
+  // Can be called when the embedding application is idle.
+  static bool IdleNotification();
+
+  // Declare all the root indices.
+  enum RootListIndex {
+#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
+    STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
+#undef ROOT_INDEX_DECLARATION
+
+// Utility type maps
+#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
+  STRUCT_LIST(DECLARE_STRUCT_MAP)
+#undef DECLARE_STRUCT_MAP
+
+#define SYMBOL_INDEX_DECLARATION(name, str) k##name##RootIndex,
+    SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
+#undef SYMBOL_INDEX_DECLARATION
+
+    kSymbolTableRootIndex,
+    kStrongRootListLength = kSymbolTableRootIndex,
+    kRootListLength
+  };
+
+  static Object* NumberToString(Object* number);
+
+ private:
+  static int semispace_size_;
+  static int initial_semispace_size_;
+  static int young_generation_size_;
+  static int old_generation_size_;
+  static size_t code_range_size_;
+
+  // For keeping track of how much data has survived
+  // scavenge since last new space expansion.
+  static int survived_since_last_expansion_;
+
+  static int always_allocate_scope_depth_;
+  static bool context_disposed_pending_;
+
+  static const int kMaxMapSpaceSize = 8*MB;
+
+#if defined(V8_TARGET_ARCH_X64)
+  static const int kMaxObjectSizeInNewSpace = 512*KB;
+#else
+  static const int kMaxObjectSizeInNewSpace = 256*KB;
+#endif
+
+  static NewSpace new_space_;
+  static OldSpace* old_pointer_space_;
+  static OldSpace* old_data_space_;
+  static OldSpace* code_space_;
+  static MapSpace* map_space_;
+  static CellSpace* cell_space_;
+  static LargeObjectSpace* lo_space_;
+  static HeapState gc_state_;
+
+  // Returns the size of objects residing in non-new spaces.
+  static int PromotedSpaceSize();
+
+  // Returns the amount of external memory registered since last global gc.
+  static int PromotedExternalMemorySize();
+
+  static int mc_count_;  // how many mark-compact collections happened
+  static int gc_count_;  // how many GCs happened
+
+#define ROOT_ACCESSOR(type, name, camel_name)                                  \
+  static inline void set_##name(type* value) {                                 \
+    roots_[k##camel_name##RootIndex] = value;                                  \
+  }
+  ROOT_LIST(ROOT_ACCESSOR)
+#undef ROOT_ACCESSOR
+
+#ifdef DEBUG
+  static bool allocation_allowed_;
+
+  // If the --gc-interval flag is set to a positive value, this
+  // variable holds the number of allocations remaining until the
+  // next allocation failure and garbage collection.
+  static int allocation_timeout_;
+
+  // Do we expect to be able to handle allocation failure at this
+  // time?
+  static bool disallow_allocation_failure_;
+#endif  // DEBUG
+
+  // Limit that triggers a global GC on the next (normally caused) GC.  This
+  // is checked when we have already decided to do a GC to help determine
+  // which collector to invoke.
+  static int old_gen_promotion_limit_;
+
+  // Limit that triggers a global GC as soon as is reasonable.  This is
+  // checked before expanding a paged space in the old generation and on
+  // every allocation in large object space.
+  static int old_gen_allocation_limit_;
+
+  // Limit on the amount of externally allocated memory allowed
+  // between global GCs. If reached, a global GC is forced.
+  static int external_allocation_limit_;
+
+  // The amount of external memory registered through the API, kept alive
+  // by global handles.
+  static int amount_of_external_allocated_memory_;
+
+  // Caches the amount of external memory registered at the last global gc.
+  static int amount_of_external_allocated_memory_at_last_global_gc_;
+
+  // Indicates that an allocation has failed in the old generation since the
+  // last GC.
+  static int old_gen_exhausted_;
+
+  static Object* roots_[kRootListLength];
+
+  struct StringTypeTable {
+    InstanceType type;
+    int size;
+    RootListIndex index;
+  };
+
+  struct ConstantSymbolTable {
+    const char* contents;
+    RootListIndex index;
+  };
+
+  struct StructTable {
+    InstanceType type;
+    int size;
+    RootListIndex index;
+  };
+
+  static const StringTypeTable string_type_table[];
+  static const ConstantSymbolTable constant_symbol_table[];
+  static const StructTable struct_table[];
+
+  // The special hidden symbol which is an empty string, but does not match
+  // any string when looked up in properties.
+  static String* hidden_symbol_;
+
+  // GC callback function, called before and after mark-compact GC.
+  // Allocations in the callback function are disallowed.
+  static GCCallback global_gc_prologue_callback_;
+  static GCCallback global_gc_epilogue_callback_;
+
+  // Checks whether a global GC is necessary
+  static GarbageCollector SelectGarbageCollector(AllocationSpace space);
+
+  // Performs garbage collection
+  static void PerformGarbageCollection(AllocationSpace space,
+                                       GarbageCollector collector,
+                                       GCTracer* tracer);
+
+  // Returns either a Smi or a Number object from 'value'. If 'new_object'
+  // is false, it may return a preallocated immutable object.
+  static Object* SmiOrNumberFromDouble(double value,
+                                       bool new_object,
+                                       PretenureFlag pretenure = NOT_TENURED);
+
+  // Allocate an uninitialized object in map space.  The behavior is identical
+  // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
+  // have to test the allocation space argument and (b) can reduce code size
+  // (since both AllocateRaw and AllocateRawMap are inlined).
+  static inline Object* AllocateRawMap();
+
+  // Allocate an uninitialized object in the global property cell space.
+  static inline Object* AllocateRawCell();
+
+  // Initializes a JSObject based on its map.
+  static void InitializeJSObjectFromMap(JSObject* obj,
+                                        FixedArray* properties,
+                                        Map* map);
+
+  static bool CreateInitialMaps();
+  static bool CreateInitialObjects();
+
+  // These Create*EntryStub functions are here because of a gcc-4.4 bug
+  // that assigns wrong vtable entries.
+  static void CreateCEntryStub();
+  static void CreateCEntryDebugBreakStub();
+  static void CreateJSEntryStub();
+  static void CreateJSConstructEntryStub();
+  static void CreateRegExpCEntryStub();
+
+  static void CreateFixedStubs();
+
+  static Object* CreateOddball(Map* map,
+                               const char* to_string,
+                               Object* to_number);
+
+  // Allocate empty fixed array.
+  static Object* AllocateEmptyFixedArray();
+
+  // Performs a minor collection in new generation.
+  static void Scavenge();
+
+  // Performs a major collection in the whole heap.
+  static void MarkCompact(GCTracer* tracer);
+
+  // Code to be run before and after mark-compact.
+  static void MarkCompactPrologue(bool is_compacting);
+  static void MarkCompactEpilogue(bool is_compacting);
+
+  // Helper function used by CopyObject to copy a source object to an
+  // allocated target object and update the forwarding pointer in the source
+  // object.  Returns the target object.
+  static HeapObject* MigrateObject(HeapObject* source,
+                                   HeapObject* target,
+                                   int size);
+
+  // Helper function that governs the promotion policy from new space to
+  // old.  If the object's old address lies below the new space's age
+  // mark or if we've already filled the bottom 1/16th of the to space,
+  // we try to promote this object.
+  static inline bool ShouldBePromoted(Address old_address, int object_size);
+#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+  // Record the copy of an object in the NewSpace's statistics.
+  static void RecordCopiedObject(HeapObject* obj);
+
+  // Record statistics before and after garbage collection.
+  static void ReportStatisticsBeforeGC();
+  static void ReportStatisticsAfterGC();
+#endif
+
+  // Update an old object's remembered set
+  static int UpdateRSet(HeapObject* obj);
+
+  // Rebuild remembered set in an old space.
+  static void RebuildRSets(PagedSpace* space);
+
+  // Rebuild remembered set in the large object space.
+  static void RebuildRSets(LargeObjectSpace* space);
+
+  // Slow part of scavenge object.
+  static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
+
+  // Copy memory from src to dst.
+  static inline void CopyBlock(Object** dst, Object** src, int byte_size);
+
+  // Initializes a function with a shared part and prototype.
+  // Returns the function.
+  // Note: this code was factored out of AllocateFunction such that
+  // other parts of the VM could use it. Specifically, a function that creates
+  // instances of type JS_FUNCTION_TYPE benefits from the use of this function.
+  // Please note this does not perform a garbage collection.
+  static inline Object* InitializeFunction(JSFunction* function,
+                                           SharedFunctionInfo* shared,
+                                           Object* prototype);
+
+  static const int kInitialSymbolTableSize = 2048;
+  static const int kInitialEvalCacheSize = 64;
+
+  friend class Factory;
+  friend class DisallowAllocationFailure;
+  friend class AlwaysAllocateScope;
+};
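+
+
+// A minimal sketch (not part of the original header) of the retry pattern
+// that callers of the Heap::Allocate* functions above typically implement,
+// since those functions return Failure::RetryAfterGC instead of collecting
+// garbage themselves.  The names requested_bytes and space stand for the
+// values carried by the failure and are illustrative only:
+//
+//   Object* result = Heap::AllocateRawFixedArray(length);
+//   if (result->IsFailure()) {
+//     Heap::CollectGarbage(requested_bytes, space);   // GC in failing space
+//     result = Heap::AllocateRawFixedArray(length);   // retry once
+//   }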
+
+
+class AlwaysAllocateScope {
+ public:
+  AlwaysAllocateScope() {
+    // We shouldn't hit any nested scopes, because that requires
+    // non-handle code to call handle code. The code still works but
+    // performance will degrade, so we want to catch this situation
+    // in debug mode.
+    ASSERT(Heap::always_allocate_scope_depth_ == 0);
+    Heap::always_allocate_scope_depth_++;
+  }
+
+  ~AlwaysAllocateScope() {
+    Heap::always_allocate_scope_depth_--;
+    ASSERT(Heap::always_allocate_scope_depth_ == 0);
+  }
+};
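+
+
+// Usage sketch (illustrative only, analogous to the AssertNoAllocation
+// example further down): allocations performed inside the scope are expected
+// to succeed without triggering a retry-after-GC failure.
+//
+//   { AlwaysAllocateScope always_allocate;
+//     ...  // perform allocations that must not fail here
+//   }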
+
+
+#ifdef DEBUG
+// Visitor class to verify interior pointers that do not have remembered set
+// bits.  All heap object pointers have to point into the heap to a location
+// that has a map pointer at its first word.  Caveat: Heap::Contains is an
+// approximation because it can return true for objects in a heap space but
+// above the allocation pointer.
+class VerifyPointersVisitor: public ObjectVisitor {
+ public:
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** current = start; current < end; current++) {
+      if ((*current)->IsHeapObject()) {
+        HeapObject* object = HeapObject::cast(*current);
+        ASSERT(Heap::Contains(object));
+        ASSERT(object->map()->IsMap());
+      }
+    }
+  }
+};
+
+
+// Visitor class to verify interior pointers that have remembered set bits.
+// As VerifyPointersVisitor but also checks that remembered set bits are
+// always set for pointers into new space.
+class VerifyPointersAndRSetVisitor: public ObjectVisitor {
+ public:
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** current = start; current < end; current++) {
+      if ((*current)->IsHeapObject()) {
+        HeapObject* object = HeapObject::cast(*current);
+        ASSERT(Heap::Contains(object));
+        ASSERT(object->map()->IsMap());
+        if (Heap::InNewSpace(object)) {
+          ASSERT(Page::IsRSetSet(reinterpret_cast<Address>(current), 0));
+        }
+      }
+    }
+  }
+};
+#endif
+
+
+// Space iterator for iterating over all spaces of the heap.
+// Returns each space in turn, and null when it is done.
+class AllSpaces BASE_EMBEDDED {
+ public:
+  Space* next();
+  AllSpaces() { counter_ = FIRST_SPACE; }
+ private:
+  int counter_;
+};
+
+
+// Space iterator for iterating over all old spaces of the heap: Old pointer
+// space, old data space and code space.
+// Returns each space in turn, and null when it is done.
+class OldSpaces BASE_EMBEDDED {
+ public:
+  OldSpace* next();
+  OldSpaces() { counter_ = OLD_POINTER_SPACE; }
+ private:
+  int counter_;
+};
+
+
+// Space iterator for iterating over all the paged spaces of the heap:
+// Map space, old pointer space, old data space and code space.
+// Returns each space in turn, and null when it is done.
+class PagedSpaces BASE_EMBEDDED {
+ public:
+  PagedSpace* next();
+  PagedSpaces() { counter_ = OLD_POINTER_SPACE; }
+ private:
+  int counter_;
+};
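+
+
+// A usage sketch for the space iterators above (illustrative only); all
+// three follow the same protocol of returning successive spaces and then
+// null:
+//
+//   AllSpaces spaces;
+//   while (Space* space = spaces.next()) {
+//     ...  // examine *space
+//   }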
+
+
+// Space iterator for iterating over all spaces of the heap.
+// For each space an object iterator is provided. The deallocation of the
+// returned object iterators is handled by the space iterator.
+class SpaceIterator : public Malloced {
+ public:
+  SpaceIterator();
+  virtual ~SpaceIterator();
+
+  bool has_next();
+  ObjectIterator* next();
+
+ private:
+  ObjectIterator* CreateIterator();
+
+  int current_space_;  // from enum AllocationSpace.
+  ObjectIterator* iterator_;  // object iterator for the current space.
+};
+
+
+// A HeapIterator provides iteration over the whole heap.  It aggregates the
+// specific iterators for the different spaces, as each of these can only
+// iterate over a single space.
+
+class HeapIterator BASE_EMBEDDED {
+ public:
+  explicit HeapIterator();
+  virtual ~HeapIterator();
+
+  bool has_next();
+  HeapObject* next();
+  void reset();
+
+ private:
+  // Perform the initialization.
+  void Init();
+
+  // Perform all necessary shutdown (destruction) work.
+  void Shutdown();
+
+  // Space iterator for iterating all the spaces.
+  SpaceIterator* space_iterator_;
+  // Object iterator for the space currently being iterated.
+  ObjectIterator* object_iterator_;
+};
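+
+
+// A usage sketch for HeapIterator (illustrative only):
+//
+//   HeapIterator iterator;
+//   while (iterator.has_next()) {
+//     HeapObject* obj = iterator.next();
+//     ...  // examine obj
+//   }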
+
+
+// Cache for mapping (map, property name) into field offset.
+// Cleared at startup and prior to mark sweep collection.
+class KeyedLookupCache {
+ public:
+  // Lookup field offset for (map, name). If absent, -1 is returned.
+  static int Lookup(Map* map, String* name);
+
+  // Update an element in the cache.
+  static void Update(Map* map, String* name, int field_offset);
+
+  // Clear the cache.
+  static void Clear();
+ private:
+  static inline int Hash(Map* map, String* name);
+  static const int kLength = 64;
+  struct Key {
+    Map* map;
+    String* name;
+  };
+  static Key keys_[kLength];
+  static int field_offsets_[kLength];
+};
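+
+
+// A sketch of the intended lookup/update protocol (illustrative only):
+//
+//   int offset = KeyedLookupCache::Lookup(map, name);
+//   if (offset == -1) {
+//     offset = ...;  // perform the full property lookup
+//     KeyedLookupCache::Update(map, name, offset);
+//   }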
+
+
+
+// Cache for mapping (array, property name) into descriptor index.
+// The cache contains both positive and negative results.
+// A descriptor index equal to kNotFound means the property is absent.
+// Cleared at startup and prior to any gc.
+class DescriptorLookupCache {
+ public:
+  // Lookup descriptor index for (map, name).
+  // If absent, kAbsent is returned.
+  static int Lookup(DescriptorArray* array, String* name) {
+    if (!StringShape(name).IsSymbol()) return kAbsent;
+    int index = Hash(array, name);
+    Key& key = keys_[index];
+    if ((key.array == array) && (key.name == name)) return results_[index];
+    return kAbsent;
+  }
+
+  // Update an element in the cache.
+  static void Update(DescriptorArray* array, String* name, int result) {
+    ASSERT(result != kAbsent);
+    if (StringShape(name).IsSymbol()) {
+      int index = Hash(array, name);
+      Key& key = keys_[index];
+      key.array = array;
+      key.name = name;
+      results_[index] = result;
+    }
+  }
+
+  // Clear the cache.
+  static void Clear();
+
+  static const int kAbsent = -2;
+ private:
+  static int Hash(DescriptorArray* array, String* name) {
+    // Uses only lower 32 bits if pointers are larger.
+    uintptr_t array_hash =
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(array)) >> 2;
+    uintptr_t name_hash =
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)) >> 2;
+    return (array_hash ^ name_hash) % kLength;
+  }
+
+  static const int kLength = 64;
+  struct Key {
+    DescriptorArray* array;
+    String* name;
+  };
+
+  static Key keys_[kLength];
+  static int results_[kLength];
+};
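+
+
+// A sketch of the intended lookup/update protocol (illustrative only); note
+// that both positive and negative results are cached, so the updated value
+// may be the kNotFound descriptor index mentioned above:
+//
+//   int index = DescriptorLookupCache::Lookup(descriptors, name);
+//   if (index == DescriptorLookupCache::kAbsent) {
+//     index = ...;  // search the descriptor array directly
+//     DescriptorLookupCache::Update(descriptors, name, index);
+//   }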
+
+
+// ----------------------------------------------------------------------------
+// Marking stack for tracing live objects.
+
+class MarkingStack {
+ public:
+  void Initialize(Address low, Address high) {
+    top_ = low_ = reinterpret_cast<HeapObject**>(low);
+    high_ = reinterpret_cast<HeapObject**>(high);
+    overflowed_ = false;
+  }
+
+  bool is_full() { return top_ >= high_; }
+
+  bool is_empty() { return top_ <= low_; }
+
+  bool overflowed() { return overflowed_; }
+
+  void clear_overflowed() { overflowed_ = false; }
+
+  // Push the (marked) object on the marking stack if there is room,
+  // otherwise mark the object as overflowed and wait for a rescan of the
+  // heap.
+  void Push(HeapObject* object) {
+    CHECK(object->IsHeapObject());
+    if (is_full()) {
+      object->SetOverflow();
+      overflowed_ = true;
+    } else {
+      *(top_++) = object;
+    }
+  }
+
+  HeapObject* Pop() {
+    ASSERT(!is_empty());
+    HeapObject* object = *(--top_);
+    CHECK(object->IsHeapObject());
+    return object;
+  }
+
+ private:
+  HeapObject** low_;
+  HeapObject** top_;
+  HeapObject** high_;
+  bool overflowed_;
+};
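+
+
+// A sketch of how the marking stack is typically driven during a mark phase
+// (illustrative only, not taken from the original source):
+//
+//   marking_stack.Initialize(low, high);  // low/high bound a scratch area
+//   marking_stack.Push(root);             // after marking a root object
+//   while (!marking_stack.is_empty()) {
+//     HeapObject* object = marking_stack.Pop();
+//     ...  // mark and push object's unmarked children
+//   }
+//   if (marking_stack.overflowed()) {
+//     ...  // rescan the heap for overflowed objects and repeat
+//   }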
+
+
+// A helper class to document/test C++ scopes where we do not
+// expect a GC. Usage:
+//
+// /* Allocation not allowed: we cannot handle a GC in this scope. */
+// { AssertNoAllocation nogc;
+//   ...
+// }
+
+#ifdef DEBUG
+
+class DisallowAllocationFailure {
+ public:
+  DisallowAllocationFailure() {
+    old_state_ = Heap::disallow_allocation_failure_;
+    Heap::disallow_allocation_failure_ = true;
+  }
+  ~DisallowAllocationFailure() {
+    Heap::disallow_allocation_failure_ = old_state_;
+  }
+ private:
+  bool old_state_;
+};
+
+class AssertNoAllocation {
+ public:
+  AssertNoAllocation() {
+    old_state_ = Heap::allow_allocation(false);
+  }
+
+  ~AssertNoAllocation() {
+    Heap::allow_allocation(old_state_);
+  }
+
+ private:
+  bool old_state_;
+};
+
+class DisableAssertNoAllocation {
+ public:
+  DisableAssertNoAllocation() {
+    old_state_ = Heap::allow_allocation(true);
+  }
+
+  ~DisableAssertNoAllocation() {
+    Heap::allow_allocation(old_state_);
+  }
+
+ private:
+  bool old_state_;
+};
+
+#else  // ndef DEBUG
+
+class AssertNoAllocation {
+ public:
+  AssertNoAllocation() { }
+  ~AssertNoAllocation() { }
+};
+
+class DisableAssertNoAllocation {
+ public:
+  DisableAssertNoAllocation() { }
+  ~DisableAssertNoAllocation() { }
+};
+
+#endif
+
+// GCTracer collects and prints ONE line after each garbage collector
+// invocation IFF --trace_gc is used.
+
+class GCTracer BASE_EMBEDDED {
+ public:
+  GCTracer();
+
+  ~GCTracer();
+
+  // Sets the collector.
+  void set_collector(GarbageCollector collector) { collector_ = collector; }
+
+  // Sets the GC count.
+  void set_gc_count(int count) { gc_count_ = count; }
+
+  // Sets the full GC count.
+  void set_full_gc_count(int count) { full_gc_count_ = count; }
+
+  // Sets the flag that this is a compacting full GC.
+  void set_is_compacting() { is_compacting_ = true; }
+
+  // Increment and decrement the count of marked objects.
+  void increment_marked_count() { ++marked_count_; }
+  void decrement_marked_count() { --marked_count_; }
+
+  int marked_count() { return marked_count_; }
+
+ private:
+  // Returns a string matching the collector.
+  const char* CollectorString();
+
+  // Returns the size of objects in the heap (in MB).
+  double SizeOfHeapObjects() {
+    return (static_cast<double>(Heap::SizeOfObjects())) / MB;
+  }
+
+  double start_time_;  // Timestamp set in the constructor.
+  double start_size_;  // Size of objects in heap set in constructor.
+  GarbageCollector collector_;  // Type of collector.
+
+  // A count (including this one, e.g., the first collection is 1) of the
+  // number of garbage collections.
+  int gc_count_;
+
+  // A count (including this one) of the number of full garbage collections.
+  int full_gc_count_;
+
+  // True if the current GC is a compacting full collection, false
+  // otherwise.
+  bool is_compacting_;
+
+  // True if the *previous* full GC was a compacting collection (will be
+  // false if there has not been a previous full GC).
+  bool previous_has_compacted_;
+
+  // On a full GC, a count of the number of marked objects.  Incremented
+  // when an object is marked and decremented when an object's mark bit is
+  // cleared.  Will be zero on a scavenge collection.
+  int marked_count_;
+
+  // The count from the end of the previous full GC.  Will be zero if there
+  // was no previous full GC.
+  int previous_marked_count_;
+};
+
+
+class TranscendentalCache {
+ public:
+  enum Type {ACOS, ASIN, ATAN, COS, EXP, LOG, SIN, TAN, kNumberOfCaches};
+
+  explicit TranscendentalCache(Type t);
+
+  // Returns a heap number with f(input), where f is a math function specified
+  // by the 'type' argument.
+  static inline Object* Get(Type type, double input) {
+    TranscendentalCache* cache = caches_[type];
+    if (cache == NULL) {
+      caches_[type] = cache = new TranscendentalCache(type);
+    }
+    return cache->Get(input);
+  }
+
+  // The cache contains raw Object pointers.  This method disposes of
+  // them before a garbage collection.
+  static void Clear();
+
+ private:
+  inline Object* Get(double input) {
+    Converter c;
+    c.dbl = input;
+    int hash = Hash(c);
+    Element e = elements_[hash];
+    if (e.in[0] == c.integers[0] &&
+        e.in[1] == c.integers[1]) {
+      ASSERT(e.output != NULL);
+      return e.output;
+    }
+    double answer = Calculate(input);
+    Object* heap_number = Heap::AllocateHeapNumber(answer);
+    if (!heap_number->IsFailure()) {
+      elements_[hash].in[0] = c.integers[0];
+      elements_[hash].in[1] = c.integers[1];
+      elements_[hash].output = heap_number;
+    }
+    return heap_number;
+  }
+
+  inline double Calculate(double input) {
+    switch (type_) {
+      case ACOS:
+        return acos(input);
+      case ASIN:
+        return asin(input);
+      case ATAN:
+        return atan(input);
+      case COS:
+        return cos(input);
+      case EXP:
+        return exp(input);
+      case LOG:
+        return log(input);
+      case SIN:
+        return sin(input);
+      case TAN:
+        return tan(input);
+      default:
+        return 0.0;  // Never happens.
+    }
+  }
+  static const int kCacheSize = 512;
+  struct Element {
+    uint32_t in[2];
+    Object* output;
+  };
+  union Converter {
+    double dbl;
+    uint32_t integers[2];
+  };
+  inline static int Hash(const Converter& c) {
+    uint32_t hash = (c.integers[0] ^ c.integers[1]);
+    hash ^= hash >> 16;
+    hash ^= hash >> 8;
+    return (hash & (kCacheSize - 1));
+  }
+  static TranscendentalCache* caches_[kNumberOfCaches];
+  Element elements_[kCacheSize];
+  Type type_;
+};
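+
+
+// A usage sketch (illustrative only).  Get may hand back a Failure when the
+// backing heap number cannot be allocated (see Get above), so callers must
+// be prepared to retry after a garbage collection:
+//
+//   Object* result = TranscendentalCache::Get(TranscendentalCache::SIN, 0.5);
+//   if (result->IsFailure()) {
+//     ...  // collect garbage and retry
+//   }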
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_HEAP_H_
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
new file mode 100644
index 0000000..9a5352b
--- /dev/null
+++ b/src/ia32/assembler-ia32-inl.h
@@ -0,0 +1,319 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+
+// A light-weight IA32 Assembler.
+
+#ifndef V8_IA32_ASSEMBLER_IA32_INL_H_
+#define V8_IA32_ASSEMBLER_IA32_INL_H_
+
+#include "cpu.h"
+
+namespace v8 {
+namespace internal {
+
+Condition NegateCondition(Condition cc) {
+  return static_cast<Condition>(cc ^ 1);
+}
+
+
+// The modes possibly affected by apply must be in kApplyMask.
+void RelocInfo::apply(intptr_t delta) {
+  if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) {
+    int32_t* p = reinterpret_cast<int32_t*>(pc_);
+    *p -= delta;  // relocate entry
+  } else if (rmode_ == JS_RETURN && IsCallInstruction()) {
+    // Special handling of js_return when a break point is set (call
+    // instruction has been inserted).
+    int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
+    *p -= delta;  // relocate entry
+  } else if (IsInternalReference(rmode_)) {
+    // absolute code pointer inside code object moves with the code object.
+    int32_t* p = reinterpret_cast<int32_t*>(pc_);
+    *p += delta;  // relocate entry
+  }
+}
+
+
+Address RelocInfo::target_address() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  return Assembler::target_address_at(pc_);
+}
+
+
+Address RelocInfo::target_address_address() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  return reinterpret_cast<Address>(pc_);
+}
+
+
+void RelocInfo::set_target_address(Address target) {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  Assembler::set_target_address_at(pc_, target);
+}
+
+
+Object* RelocInfo::target_object() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return *reinterpret_cast<Object**>(pc_);
+}
+
+
+Object** RelocInfo::target_object_address() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return reinterpret_cast<Object**>(pc_);
+}
+
+
+void RelocInfo::set_target_object(Object* target) {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  *reinterpret_cast<Object**>(pc_) = target;
+}
+
+
+Address* RelocInfo::target_reference_address() {
+  ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
+  return reinterpret_cast<Address*>(pc_);
+}
+
+
+Address RelocInfo::call_address() {
+  ASSERT(IsCallInstruction());
+  return Assembler::target_address_at(pc_ + 1);
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+  ASSERT(IsCallInstruction());
+  Assembler::set_target_address_at(pc_ + 1, target);
+}
+
+
+Object* RelocInfo::call_object() {
+  ASSERT(IsCallInstruction());
+  return *call_object_address();
+}
+
+
+Object** RelocInfo::call_object_address() {
+  ASSERT(IsCallInstruction());
+  return reinterpret_cast<Object**>(pc_ + 1);
+}
+
+
+void RelocInfo::set_call_object(Object* target) {
+  ASSERT(IsCallInstruction());
+  *call_object_address() = target;
+}
+
+
+bool RelocInfo::IsCallInstruction() {
+  return *pc_ == 0xE8;
+}
+
+
+Immediate::Immediate(int x)  {
+  x_ = x;
+  rmode_ = RelocInfo::NONE;
+}
+
+
+Immediate::Immediate(const ExternalReference& ext) {
+  x_ = reinterpret_cast<int32_t>(ext.address());
+  rmode_ = RelocInfo::EXTERNAL_REFERENCE;
+}
+
+Immediate::Immediate(const char* s) {
+  x_ = reinterpret_cast<int32_t>(s);
+  rmode_ = RelocInfo::EMBEDDED_STRING;
+}
+
+
+Immediate::Immediate(Label* internal_offset) {
+  x_ = reinterpret_cast<int32_t>(internal_offset);
+  rmode_ = RelocInfo::INTERNAL_REFERENCE;
+}
+
+
+Immediate::Immediate(Handle<Object> handle) {
+  // Verify that all Objects referred to by code are NOT in new space.
+  Object* obj = *handle;
+  ASSERT(!Heap::InNewSpace(obj));
+  if (obj->IsHeapObject()) {
+    x_ = reinterpret_cast<intptr_t>(handle.location());
+    rmode_ = RelocInfo::EMBEDDED_OBJECT;
+  } else {
+    // no relocation needed
+    x_ =  reinterpret_cast<intptr_t>(obj);
+    rmode_ = RelocInfo::NONE;
+  }
+}
+
+
+Immediate::Immediate(Smi* value) {
+  x_ = reinterpret_cast<intptr_t>(value);
+  rmode_ = RelocInfo::NONE;
+}
+
+
+void Assembler::emit(uint32_t x) {
+  *reinterpret_cast<uint32_t*>(pc_) = x;
+  pc_ += sizeof(uint32_t);
+}
+
+
+void Assembler::emit(Handle<Object> handle) {
+  // Verify that all Objects referred to by code are NOT in new space.
+  Object* obj = *handle;
+  ASSERT(!Heap::InNewSpace(obj));
+  if (obj->IsHeapObject()) {
+    emit(reinterpret_cast<intptr_t>(handle.location()),
+         RelocInfo::EMBEDDED_OBJECT);
+  } else {
+    // no relocation needed
+    emit(reinterpret_cast<intptr_t>(obj));
+  }
+}
+
+
+void Assembler::emit(uint32_t x, RelocInfo::Mode rmode) {
+  if (rmode != RelocInfo::NONE) RecordRelocInfo(rmode);
+  emit(x);
+}
+
+
+void Assembler::emit(const Immediate& x) {
+  if (x.rmode_ == RelocInfo::INTERNAL_REFERENCE) {
+    Label* label = reinterpret_cast<Label*>(x.x_);
+    emit_code_relative_offset(label);
+    return;
+  }
+  if (x.rmode_ != RelocInfo::NONE) RecordRelocInfo(x.rmode_);
+  emit(x.x_);
+}
+
+
+void Assembler::emit_code_relative_offset(Label* label) {
+  if (label->is_bound()) {
+    int32_t pos;
+    pos = label->pos() + Code::kHeaderSize - kHeapObjectTag;
+    emit(pos);
+  } else {
+    emit_disp(label, Displacement::CODE_RELATIVE);
+  }
+}
+
+
+void Assembler::emit_w(const Immediate& x) {
+  ASSERT(x.rmode_ == RelocInfo::NONE);
+  uint16_t value = static_cast<uint16_t>(x.x_);
+  reinterpret_cast<uint16_t*>(pc_)[0] = value;
+  pc_ += sizeof(uint16_t);
+}
+
+
+Address Assembler::target_address_at(Address pc) {
+  return pc + sizeof(int32_t) + *reinterpret_cast<int32_t*>(pc);
+}
+
+
+void Assembler::set_target_address_at(Address pc, Address target) {
+  int32_t* p = reinterpret_cast<int32_t*>(pc);
+  *p = target - (pc + sizeof(int32_t));
+  CPU::FlushICache(p, sizeof(int32_t));
+}
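+
+
+// Note on the two functions above (not part of the original source): the
+// 32-bit value at pc is a displacement relative to the end of that 4-byte
+// field, so target = pc + 4 + disp.  For example, with illustrative numbers,
+// a displacement of 0x10 stored at address 0x1000 denotes the target
+// 0x1000 + 4 + 0x10 = 0x1014.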
+
+
+Displacement Assembler::disp_at(Label* L) {
+  return Displacement(long_at(L->pos()));
+}
+
+
+void Assembler::disp_at_put(Label* L, Displacement disp) {
+  long_at_put(L->pos(), disp.data());
+}
+
+
+void Assembler::emit_disp(Label* L, Displacement::Type type) {
+  Displacement disp(L, type);
+  L->link_to(pc_offset());
+  emit(static_cast<int>(disp.data()));
+}
+
+
+void Operand::set_modrm(int mod, Register rm) {
+  ASSERT((mod & -4) == 0);
+  buf_[0] = mod << 6 | rm.code();
+  len_ = 1;
+}
+
+
+void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
+  ASSERT(len_ == 1);
+  ASSERT((scale & -4) == 0);
+  // Use SIB with no index register only for base esp.
+  ASSERT(!index.is(esp) || base.is(esp));
+  buf_[1] = scale << 6 | index.code() << 3 | base.code();
+  len_ = 2;
+}
+
+
+void Operand::set_disp8(int8_t disp) {
+  ASSERT(len_ == 1 || len_ == 2);
+  *reinterpret_cast<int8_t*>(&buf_[len_++]) = disp;
+}
+
+
+void Operand::set_dispr(int32_t disp, RelocInfo::Mode rmode) {
+  ASSERT(len_ == 1 || len_ == 2);
+  int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
+  *p = disp;
+  len_ += sizeof(int32_t);
+  rmode_ = rmode;
+}
+
+Operand::Operand(Register reg) {
+  // reg
+  set_modrm(3, reg);
+}
+
+
+Operand::Operand(int32_t disp, RelocInfo::Mode rmode) {
+  // [disp/r]
+  set_modrm(0, ebp);
+  set_dispr(disp, rmode);
+}
+
+} }  // namespace v8::internal
+
+#endif  // V8_IA32_ASSEMBLER_IA32_INL_H_
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
new file mode 100644
index 0000000..b8dda17
--- /dev/null
+++ b/src/ia32/assembler-ia32.cc
@@ -0,0 +1,2254 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been modified
+// significantly by Google Inc.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+
+#include "v8.h"
+
+#include "disassembler.h"
+#include "macro-assembler.h"
+#include "serialize.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Implementation of CpuFeatures
+
+// Safe default is no features.
+uint64_t CpuFeatures::supported_ = 0;
+uint64_t CpuFeatures::enabled_ = 0;
+
+
+// The Probe method needs executable memory, so it uses Heap::CreateCode.
+// Allocation failure is silent and leads to a safe default.
+void CpuFeatures::Probe() {
+  ASSERT(Heap::HasBeenSetup());
+  ASSERT(supported_ == 0);
+  if (Serializer::enabled()) return;  // No features if we might serialize.
+
+  Assembler assm(NULL, 0);
+  Label cpuid, done;
+#define __ assm.
+  // Save old esp, since we are going to modify the stack.
+  __ push(ebp);
+  __ pushfd();
+  __ push(ecx);
+  __ push(ebx);
+  __ mov(ebp, Operand(esp));
+
+  // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
+  __ pushfd();
+  __ pop(eax);
+  __ mov(edx, Operand(eax));
+  __ xor_(eax, 0x200000);  // Flip bit 21.
+  __ push(eax);
+  __ popfd();
+  __ pushfd();
+  __ pop(eax);
+  __ xor_(eax, Operand(edx));  // Different if CPUID is supported.
+  __ j(not_zero, &cpuid);
+
+  // CPUID not supported. Clear the supported features in edx:eax.
+  __ xor_(eax, Operand(eax));
+  __ xor_(edx, Operand(edx));
+  __ jmp(&done);
+
+  // Invoke CPUID with 1 in eax to get feature information in
+  // ecx:edx. Temporarily enable CPUID support because we know it's
+  // safe here.
+  __ bind(&cpuid);
+  __ mov(eax, 1);
+  supported_ = (1 << CPUID);
+  { Scope fscope(CPUID);
+    __ cpuid();
+  }
+  supported_ = 0;
+
+  // Move the result from ecx:edx to edx:eax and make sure to mark the
+  // CPUID feature as supported.
+  __ mov(eax, Operand(edx));
+  __ or_(eax, 1 << CPUID);
+  __ mov(edx, Operand(ecx));
+
+  // Done.
+  __ bind(&done);
+  __ mov(esp, Operand(ebp));
+  __ pop(ebx);
+  __ pop(ecx);
+  __ popfd();
+  __ pop(ebp);
+  __ ret(0);
+#undef __
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Object* code = Heap::CreateCode(desc,
+                                  NULL,
+                                  Code::ComputeFlags(Code::STUB),
+                                  Handle<Code>::null());
+  if (!code->IsCode()) return;
+  LOG(CodeCreateEvent(Logger::BUILTIN_TAG,
+                      Code::cast(code), "CpuFeatures::Probe"));
+  typedef uint64_t (*F0)();
+  F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
+  supported_ = probe();
+}
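+
+
+// A sketch of how the probed features are consumed elsewhere in this file
+// (illustrative only): instructions that need an optional feature assert
+// that it has been enabled, e.g.
+//
+//   ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CPUID));  // see cpuid() below
+//
+// so callers are expected to check for support and enable the feature (via
+// the Scope helper used in Probe above) before emitting such instructions.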
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Displacement
+
+void Displacement::init(Label* L, Type type) {
+  ASSERT(!L->is_bound());
+  int next = 0;
+  if (L->is_linked()) {
+    next = L->pos();
+    ASSERT(next > 0);  // Displacements must be at positions > 0
+  }
+  // Ensure that we _never_ overflow the next field.
+  ASSERT(NextField::is_valid(Assembler::kMaximalBufferSize));
+  data_ = NextField::encode(next) | TypeField::encode(type);
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+
+const int RelocInfo::kApplyMask =
+  RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY |
+    1 << RelocInfo::JS_RETURN | 1 << RelocInfo::INTERNAL_REFERENCE;
+
+
+void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
+  // Patch the code at the current address with the supplied instructions.
+  for (int i = 0; i < instruction_count; i++) {
+    *(pc_ + i) = *(instructions + i);
+  }
+
+  // Indicate that code has changed.
+  CPU::FlushICache(pc_, instruction_count);
+}
+
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard int3 instructions can be added if required.
+void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
+  // Call instruction takes up 5 bytes and int3 takes up one byte.
+  static const int kCallCodeSize = 5;
+  int code_size = kCallCodeSize + guard_bytes;
+
+  // Create a code patcher.
+  CodePatcher patcher(pc_, code_size);
+
+  // Add a label for checking the size of the code used for the call.
+#ifdef DEBUG
+  Label check_codesize;
+  patcher.masm()->bind(&check_codesize);
+#endif
+
+  // Patch the code.
+  patcher.masm()->call(target, RelocInfo::NONE);
+
+  // Check that the size of the code generated is as expected.
+  ASSERT_EQ(kCallCodeSize,
+            patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
+
+  // Add the requested number of int3 instructions after the call.
+  for (int i = 0; i < guard_bytes; i++) {
+    patcher.masm()->int3();
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand
+
+Operand::Operand(Register base, int32_t disp, RelocInfo::Mode rmode) {
+  // [base + disp/r]
+  if (disp == 0 && rmode == RelocInfo::NONE && !base.is(ebp)) {
+    // [base]
+    set_modrm(0, base);
+    if (base.is(esp)) set_sib(times_1, esp, base);
+  } else if (is_int8(disp) && rmode == RelocInfo::NONE) {
+    // [base + disp8]
+    set_modrm(1, base);
+    if (base.is(esp)) set_sib(times_1, esp, base);
+    set_disp8(disp);
+  } else {
+    // [base + disp/r]
+    set_modrm(2, base);
+    if (base.is(esp)) set_sib(times_1, esp, base);
+    set_dispr(disp, rmode);
+  }
+}
+
+
+Operand::Operand(Register base,
+                 Register index,
+                 ScaleFactor scale,
+                 int32_t disp,
+                 RelocInfo::Mode rmode) {
+  ASSERT(!index.is(esp));  // illegal addressing mode
+  // [base + index*scale + disp/r]
+  if (disp == 0 && rmode == RelocInfo::NONE && !base.is(ebp)) {
+    // [base + index*scale]
+    set_modrm(0, esp);
+    set_sib(scale, index, base);
+  } else if (is_int8(disp) && rmode == RelocInfo::NONE) {
+    // [base + index*scale + disp8]
+    set_modrm(1, esp);
+    set_sib(scale, index, base);
+    set_disp8(disp);
+  } else {
+    // [base + index*scale + disp/r]
+    set_modrm(2, esp);
+    set_sib(scale, index, base);
+    set_dispr(disp, rmode);
+  }
+}
+
+
+Operand::Operand(Register index,
+                 ScaleFactor scale,
+                 int32_t disp,
+                 RelocInfo::Mode rmode) {
+  ASSERT(!index.is(esp));  // illegal addressing mode
+  // [index*scale + disp/r]
+  set_modrm(0, esp);
+  set_sib(scale, index, ebp);
+  set_dispr(disp, rmode);
+}
+
+
+bool Operand::is_reg(Register reg) const {
+  return ((buf_[0] & 0xF8) == 0xC0)  // addressing mode is register only.
+      && ((buf_[0] & 0x07) == reg.code());  // register codes match.
+}
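+
+
+// Illustrative examples (not part of the original source) of the addressing
+// modes the Operand constructors above and in the -inl.h file encode:
+//
+//   Operand(eax)                                    // register operand: eax
+//   Operand(ebp, 8, RelocInfo::NONE)                // [ebp + 8], disp8 form
+//   Operand(ebx, ecx, times_1, 0, RelocInfo::NONE)  // [ebx + ecx], SIB form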
+
+// -----------------------------------------------------------------------------
+// Implementation of Assembler
+
+// Emit a single byte. Must always be inlined.
+#define EMIT(x)                                 \
+  *pc_++ = (x)
+
+
+#ifdef GENERATED_CODE_COVERAGE
+static void InitCoverageLog();
+#endif
+
+// A spare buffer that is reused between assemblers to avoid repeatedly
+// allocating the minimal-size buffer; see the constructor and destructor
+// below.
+byte* Assembler::spare_buffer_ = NULL;
+
+Assembler::Assembler(void* buffer, int buffer_size) {
+  if (buffer == NULL) {
+    // do our own buffer management
+    if (buffer_size <= kMinimalBufferSize) {
+      buffer_size = kMinimalBufferSize;
+
+      if (spare_buffer_ != NULL) {
+        buffer = spare_buffer_;
+        spare_buffer_ = NULL;
+      }
+    }
+    if (buffer == NULL) {
+      buffer_ = NewArray<byte>(buffer_size);
+    } else {
+      buffer_ = static_cast<byte*>(buffer);
+    }
+    buffer_size_ = buffer_size;
+    own_buffer_ = true;
+  } else {
+    // use externally provided buffer instead
+    ASSERT(buffer_size > 0);
+    buffer_ = static_cast<byte*>(buffer);
+    buffer_size_ = buffer_size;
+    own_buffer_ = false;
+  }
+
+  // Clear the buffer in debug mode unless it was provided by the
+  // caller in which case we can't be sure it's okay to overwrite
+  // existing code in it; see CodePatcher::CodePatcher(...).
+#ifdef DEBUG
+  if (own_buffer_) {
+    memset(buffer_, 0xCC, buffer_size);  // int3
+  }
+#endif
+
+  // setup buffer pointers
+  ASSERT(buffer_ != NULL);
+  pc_ = buffer_;
+  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+
+  last_pc_ = NULL;
+  current_statement_position_ = RelocInfo::kNoPosition;
+  current_position_ = RelocInfo::kNoPosition;
+  written_statement_position_ = current_statement_position_;
+  written_position_ = current_position_;
+#ifdef GENERATED_CODE_COVERAGE
+  InitCoverageLog();
+#endif
+}
+
+
+Assembler::~Assembler() {
+  if (own_buffer_) {
+    if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
+      spare_buffer_ = buffer_;
+    } else {
+      DeleteArray(buffer_);
+    }
+  }
+}
+
+
+void Assembler::GetCode(CodeDesc* desc) {
+  // finalize code
+  // (at this point overflow() may be true, but the gap ensures that
+  // we are still not overlapping instructions and relocation info)
+  ASSERT(pc_ <= reloc_info_writer.pos());  // no overlap
+  // setup desc
+  desc->buffer = buffer_;
+  desc->buffer_size = buffer_size_;
+  desc->instr_size = pc_offset();
+  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+  desc->origin = this;
+
+  Counters::reloc_info_size.Increment(desc->reloc_size);
+}
+
+
+void Assembler::Align(int m) {
+  ASSERT(IsPowerOf2(m));
+  while ((pc_offset() & (m - 1)) != 0) {
+    nop();
+  }
+}
+
+
+void Assembler::cpuid() {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CPUID));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0F);
+  EMIT(0xA2);
+}
+
+
+void Assembler::pushad() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x60);
+}
+
+
+void Assembler::popad() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x61);
+}
+
+
+void Assembler::pushfd() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x9C);
+}
+
+
+void Assembler::popfd() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x9D);
+}
+
+
+void Assembler::push(const Immediate& x) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (x.is_int8()) {
+    EMIT(0x6a);
+    EMIT(x.x_);
+  } else {
+    EMIT(0x68);
+    emit(x);
+  }
+}
+
+
+void Assembler::push(Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x50 | src.code());
+}
+
+
+void Assembler::push(const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xFF);
+  emit_operand(esi, src);
+}
+
+
+void Assembler::pop(Register dst) {
+  ASSERT(reloc_info_writer.last_pc() != NULL);
+  if (FLAG_push_pop_elimination && (reloc_info_writer.last_pc() <= last_pc_)) {
+    // (last_pc_ != NULL) is rolled into the above check
+    // If a last_pc_ is set, we need to make sure that there has not been any
+    // relocation information generated between the last instruction and this
+    // pop instruction.
+    byte instr = last_pc_[0];
+    if ((instr & ~0x7) == 0x50) {
+      int push_reg_code = instr & 0x7;
+      if (push_reg_code == dst.code()) {
+        pc_ = last_pc_;
+        if (FLAG_print_push_pop_elimination) {
+          PrintF("%d push/pop (same reg) eliminated\n", pc_offset());
+        }
+      } else {
+        // Convert 'push src; pop dst' to 'mov dst, src'.
+        last_pc_[0] = 0x8b;
+        Register src = { push_reg_code };
+        EnsureSpace ensure_space(this);
+        emit_operand(dst, Operand(src));
+        if (FLAG_print_push_pop_elimination) {
+          PrintF("%d push/pop (reg->reg) eliminated\n", pc_offset());
+        }
+      }
+      last_pc_ = NULL;
+      return;
+    } else if (instr == 0xff) {  // push of an operand, convert to a move
+      byte op1 = last_pc_[1];
+      // Check if the operation is really a push
+      if ((op1 & 0x38) == (6 << 3)) {
+        op1 = (op1 & ~0x38) | static_cast<byte>(dst.code() << 3);
+        last_pc_[0] = 0x8b;
+        last_pc_[1] = op1;
+        last_pc_ = NULL;
+        if (FLAG_print_push_pop_elimination) {
+          PrintF("%d push/pop (op->reg) eliminated\n", pc_offset());
+        }
+        return;
+      }
+    } else if ((instr == 0x89) &&
+               (last_pc_[1] == 0x04) &&
+               (last_pc_[2] == 0x24)) {
+      // 0x71283c   396  890424         mov [esp],eax
+      // 0x71283f   399  58             pop eax
+      if (dst.is(eax)) {
+        // change to
+        // 0x710fac   216  83c404         add esp,0x4
+        last_pc_[0] = 0x83;
+        last_pc_[1] = 0xc4;
+        last_pc_[2] = 0x04;
+        last_pc_ = NULL;
+        if (FLAG_print_push_pop_elimination) {
+          PrintF("%d push/pop (mov-pop) eliminated\n", pc_offset());
+        }
+        return;
+      }
+    } else if (instr == 0x6a && dst.is(eax)) {  // push of immediate 8 bit
+      byte imm8 = last_pc_[1];
+      if (imm8 == 0) {
+        // 6a00         push 0x0
+        // 58           pop eax
+        last_pc_[0] = 0x31;
+        last_pc_[1] = 0xc0;
+        // change to
+        // 31c0         xor eax,eax
+        last_pc_ = NULL;
+        if (FLAG_print_push_pop_elimination) {
+          PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset());
+        }
+        return;
+      } else {
+        // 6aXX         push 0xXX
+        // 58           pop eax
+        last_pc_[0] = 0xb8;
+        EnsureSpace ensure_space(this);
+        if ((imm8 & 0x80) != 0) {
+          EMIT(0xff);
+          EMIT(0xff);
+          EMIT(0xff);
+          // change to
+          // b8XXffffff   mov eax,0xffffffXX
+        } else {
+          EMIT(0x00);
+          EMIT(0x00);
+          EMIT(0x00);
+          // change to
+          // b8XX000000   mov eax,0x000000XX
+        }
+        last_pc_ = NULL;
+        if (FLAG_print_push_pop_elimination) {
+          PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset());
+        }
+        return;
+      }
+    } else if (instr == 0x68 && dst.is(eax)) {  // push of immediate 32 bit
+      // 68XXXXXXXX   push 0xXXXXXXXX
+      // 58           pop eax
+      last_pc_[0] = 0xb8;
+      last_pc_ = NULL;
+      // change to
+      // b8XXXXXXXX   mov eax,0xXXXXXXXX
+      if (FLAG_print_push_pop_elimination) {
+        PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset());
+      }
+      return;
+    }
+
+    // Other potential patterns for peephole:
+    // 0x712716   102  890424         mov [esp], eax
+    // 0x712719   105  8b1424         mov edx, [esp]
+  }
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x58 | dst.code());
+}
+
+
+void Assembler::pop(const Operand& dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x8F);
+  emit_operand(eax, dst);
+}
+
+
+void Assembler::enter(const Immediate& size) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xC8);
+  emit_w(size);
+  EMIT(0);
+}
+
+
+void Assembler::leave() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xC9);
+}
+
+
+void Assembler::mov_b(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x8A);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::mov_b(const Operand& dst, int8_t imm8) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xC6);
+  emit_operand(eax, dst);
+  EMIT(imm8);
+}
+
+
+void Assembler::mov_b(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x88);
+  emit_operand(src, dst);
+}
+
+
+void Assembler::mov_w(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x8B);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::mov_w(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x89);
+  emit_operand(src, dst);
+}
+
+
+void Assembler::mov(Register dst, int32_t imm32) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xB8 | dst.code());
+  emit(imm32);
+}
+
+
+void Assembler::mov(Register dst, const Immediate& x) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xB8 | dst.code());
+  emit(x);
+}
+
+
+void Assembler::mov(Register dst, Handle<Object> handle) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xB8 | dst.code());
+  emit(handle);
+}
+
+
+void Assembler::mov(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x8B);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::mov(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x89);
+  EMIT(0xC0 | src.code() << 3 | dst.code());
+}
+
+
+void Assembler::mov(const Operand& dst, const Immediate& x) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xC7);
+  emit_operand(eax, dst);
+  emit(x);
+}
+
+
+void Assembler::mov(const Operand& dst, Handle<Object> handle) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xC7);
+  emit_operand(eax, dst);
+  emit(handle);
+}
+
+
+void Assembler::mov(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x89);
+  emit_operand(src, dst);
+}
+
+
+void Assembler::movsx_b(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0F);
+  EMIT(0xBE);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::movsx_w(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0F);
+  EMIT(0xBF);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::movzx_b(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0F);
+  EMIT(0xB6);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::movzx_w(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0F);
+  EMIT(0xB7);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::cmov(Condition cc, Register dst, int32_t imm32) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CMOV));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  UNIMPLEMENTED();
+  USE(cc);
+  USE(dst);
+  USE(imm32);
+}
+
+
+void Assembler::cmov(Condition cc, Register dst, Handle<Object> handle) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CMOV));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  UNIMPLEMENTED();
+  USE(cc);
+  USE(dst);
+  USE(handle);
+}
+
+
+void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CMOV));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Opcode: 0f 40 + cc /r
+  EMIT(0x0F);
+  EMIT(0x40 + cc);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::xchg(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (src.is(eax) || dst.is(eax)) {  // Single-byte encoding
+    EMIT(0x90 | (src.is(eax) ? dst.code() : src.code()));
+  } else {
+    EMIT(0x87);
+    EMIT(0xC0 | src.code() << 3 | dst.code());
+  }
+}
+
+
+void Assembler::adc(Register dst, int32_t imm32) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_arith(2, Operand(dst), Immediate(imm32));
+}
+
+
+void Assembler::adc(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x13);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::add(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x03);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::add(const Operand& dst, const Immediate& x) {
+  ASSERT(reloc_info_writer.last_pc() != NULL);
+  if (FLAG_push_pop_elimination && (reloc_info_writer.last_pc() <= last_pc_)) {
+    byte instr = last_pc_[0];
+    if ((instr & 0xf8) == 0x50) {
+      // Last instruction was a push. Check whether this is a pop without a
+      // result.
+      if ((dst.is_reg(esp)) &&
+          (x.x_ == kPointerSize) && (x.rmode_ == RelocInfo::NONE)) {
+        pc_ = last_pc_;
+        last_pc_ = NULL;
+        if (FLAG_print_push_pop_elimination) {
+          PrintF("%d push/pop(noreg) eliminated\n", pc_offset());
+        }
+        return;
+      }
+    }
+  }
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_arith(0, dst, x);
+}
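+
+// Illustration of the elimination above: a 'push eax' followed immediately by
+// add(Operand(esp), Immediate(kPointerSize)) pushes a value that is never
+// read, so pc_ is simply rewound over the push and no add is emitted at all.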
+
+
+void Assembler::and_(Register dst, int32_t imm32) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_arith(4, Operand(dst), Immediate(imm32));
+}
+
+
+void Assembler::and_(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x23);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::and_(const Operand& dst, const Immediate& x) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_arith(4, dst, x);
+}
+
+
+void Assembler::and_(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x21);
+  emit_operand(src, dst);
+}
+
+
+void Assembler::cmpb(const Operand& op, int8_t imm8) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x80);
+  emit_operand(edi, op);  // edi == 7
+  EMIT(imm8);
+}
+
+
+void Assembler::cmpw(const Operand& op, Immediate imm16) {
+  ASSERT(imm16.is_int16());
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x81);
+  emit_operand(edi, op);
+  emit_w(imm16);
+}
+
+
+void Assembler::cmp(Register reg, int32_t imm32) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_arith(7, Operand(reg), Immediate(imm32));
+}
+
+
+void Assembler::cmp(Register reg, Handle<Object> handle) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_arith(7, Operand(reg), Immediate(handle));
+}
+
+
+void Assembler::cmp(Register reg, const Operand& op) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x3B);
+  emit_operand(reg, op);
+}
+
+
+void Assembler::cmp(const Operand& op, const Immediate& imm) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_arith(7, op, imm);
+}
+
+
+void Assembler::cmp(const Operand& op, Handle<Object> handle) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_arith(7, op, Immediate(handle));
+}
+
+
+void Assembler::cmpb_al(const Operand& op) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x38);  // CMP r/m8, r8
+  emit_operand(eax, op);  // eax has same code as register al.
+}
+
+
+void Assembler::cmpw_ax(const Operand& op) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x39);  // CMP r/m16, r16
+  emit_operand(eax, op);  // eax has same code as register ax.
+}
+
+
+void Assembler::dec_b(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xFE);
+  EMIT(0xC8 | dst.code());
+}
+
+
+void Assembler::dec(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x48 | dst.code());
+}
+
+
+void Assembler::dec(const Operand& dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xFF);
+  emit_operand(ecx, dst);
+}
+
+
+void Assembler::cdq() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x99);
+}
+
+
+void Assembler::idiv(Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF7);
+  EMIT(0xF8 | src.code());
+}
+
+
+void Assembler::imul(Register reg) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF7);
+  EMIT(0xE8 | reg.code());
+}
+
+
+void Assembler::imul(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0F);
+  EMIT(0xAF);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::imul(Register dst, Register src, int32_t imm32) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (is_int8(imm32)) {
+    EMIT(0x6B);
+    EMIT(0xC0 | dst.code() << 3 | src.code());
+    EMIT(imm32);
+  } else {
+    EMIT(0x69);
+    EMIT(0xC0 | dst.code() << 3 | src.code());
+    emit(imm32);
+  }
+}
+
+
+void Assembler::inc(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x40 | dst.code());
+}
+
+
+void Assembler::inc(const Operand& dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xFF);
+  emit_operand(eax, dst);
+}
+
+
+void Assembler::lea(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x8D);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::mul(Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF7);
+  EMIT(0xE0 | src.code());
+}
+
+
+void Assembler::neg(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF7);
+  EMIT(0xD8 | dst.code());
+}
+
+
+void Assembler::not_(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF7);
+  EMIT(0xD0 | dst.code());
+}
+
+
+void Assembler::or_(Register dst, int32_t imm32) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_arith(1, Operand(dst), Immediate(imm32));
+}
+
+
+void Assembler::or_(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0B);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::or_(const Operand& dst, const Immediate& x) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_arith(1, dst, x);
+}
+
+
+void Assembler::or_(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x09);
+  emit_operand(src, dst);
+}
+
+
+void Assembler::rcl(Register dst, uint8_t imm8) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(is_uint5(imm8));  // illegal shift count
+  if (imm8 == 1) {
+    EMIT(0xD1);
+    EMIT(0xD0 | dst.code());
+  } else {
+    EMIT(0xC1);
+    EMIT(0xD0 | dst.code());
+    EMIT(imm8);
+  }
+}
+
+
+void Assembler::sar(Register dst, uint8_t imm8) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(is_uint5(imm8));  // illegal shift count
+  if (imm8 == 1) {
+    EMIT(0xD1);
+    EMIT(0xF8 | dst.code());
+  } else {
+    EMIT(0xC1);
+    EMIT(0xF8 | dst.code());
+    EMIT(imm8);
+  }
+}
+
+
+void Assembler::sar(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD3);
+  EMIT(0xF8 | dst.code());
+}
+
+
+void Assembler::sbb(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x1B);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::shld(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0F);
+  EMIT(0xA5);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::shl(Register dst, uint8_t imm8) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(is_uint5(imm8));  // illegal shift count
+  if (imm8 == 1) {
+    EMIT(0xD1);
+    EMIT(0xE0 | dst.code());
+  } else {
+    EMIT(0xC1);
+    EMIT(0xE0 | dst.code());
+    EMIT(imm8);
+  }
+}
+
+
+void Assembler::shl(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD3);
+  EMIT(0xE0 | dst.code());
+}
+
+
+void Assembler::shrd(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0F);
+  EMIT(0xAD);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::shr(Register dst, uint8_t imm8) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(is_uint5(imm8));  // illegal shift count
+  EMIT(0xC1);
+  EMIT(0xE8 | dst.code());
+  EMIT(imm8);
+}
+
+
+void Assembler::shr(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD3);
+  EMIT(0xE8 | dst.code());
+}
+
+
+void Assembler::shr_cl(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Note: 0xD1 /5 encodes a shift right by one (0xD3 /5 would shift by cl).
+  EMIT(0xD1);
+  EMIT(0xE8 | dst.code());
+}
+
+
+void Assembler::sub(const Operand& dst, const Immediate& x) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_arith(5, dst, x);
+}
+
+
+void Assembler::sub(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x2B);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::sub(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x29);
+  emit_operand(src, dst);
+}
+
+
+void Assembler::test(Register reg, const Immediate& imm) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Only use test against byte for registers that have a byte
+  // variant: eax, ebx, ecx, and edx.
+  if (imm.rmode_ == RelocInfo::NONE && is_uint8(imm.x_) && reg.code() < 4) {
+    uint8_t imm8 = imm.x_;
+    if (reg.is(eax)) {
+      EMIT(0xA8);
+      EMIT(imm8);
+    } else {
+      emit_arith_b(0xF6, 0xC0, reg, imm8);
+    }
+  } else {
+    // This is not using emit_arith because test doesn't support
+    // sign-extension of 8-bit operands.
+    if (reg.is(eax)) {
+      EMIT(0xA9);
+    } else {
+      EMIT(0xF7);
+      EMIT(0xC0 | reg.code());
+    }
+    emit(imm);
+  }
+}
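+
+// Illustration (sketch): test(ebx, Immediate(0x10)) can use the byte form
+// f6 c3 10, while test(esi, Immediate(0x10)) must use the 32-bit form
+// f7 c6 10 00 00 00 because esi has no byte register.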
+
+
+void Assembler::test(Register reg, const Operand& op) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x85);
+  emit_operand(reg, op);
+}
+
+
+void Assembler::test(const Operand& op, const Immediate& imm) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF7);
+  emit_operand(eax, op);
+  emit(imm);
+}
+
+
+void Assembler::xor_(Register dst, int32_t imm32) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_arith(6, Operand(dst), Immediate(imm32));
+}
+
+
+void Assembler::xor_(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x33);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::xor_(const Operand& src, Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x31);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::xor_(const Operand& dst, const Immediate& x) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_arith(6, dst, x);
+}
+
+
+void Assembler::bt(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0F);
+  EMIT(0xA3);
+  emit_operand(src, dst);
+}
+
+
+void Assembler::bts(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0F);
+  EMIT(0xAB);
+  emit_operand(src, dst);
+}
+
+
+void Assembler::hlt() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF4);
+}
+
+
+void Assembler::int3() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xCC);
+}
+
+
+void Assembler::nop() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x90);
+}
+
+
+void Assembler::rdtsc() {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::RDTSC));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0F);
+  EMIT(0x31);
+}
+
+
+void Assembler::ret(int imm16) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(is_uint16(imm16));
+  if (imm16 == 0) {
+    EMIT(0xC3);
+  } else {
+    EMIT(0xC2);
+    EMIT(imm16 & 0xFF);
+    EMIT((imm16 >> 8) & 0xFF);
+  }
+}
+
+
+// Labels refer to positions in the (to be) generated code.
+// There are bound, linked, and unused labels.
+//
+// Bound labels refer to known positions in the already
+// generated code. pos() is the position the label refers to.
+//
+// Linked labels refer to unknown positions in the code
+// to be generated; pos() is the position of the 32bit
+// Displacement of the last instruction using the label.
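+//
+// For example (sketch): a forward jmp to an unbound label emits 0xE9 followed
+// by a 32-bit field that, rather than a real displacement, encodes a link to
+// the previous use of the label; bind_to() later walks this chain and patches
+// each field with the actual jump distance.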
+
+
+void Assembler::print(Label* L) {
+  if (L->is_unused()) {
+    PrintF("unused label\n");
+  } else if (L->is_bound()) {
+    PrintF("bound label to %d\n", L->pos());
+  } else if (L->is_linked()) {
+    Label l = *L;
+    PrintF("unbound label");
+    while (l.is_linked()) {
+      Displacement disp = disp_at(&l);
+      PrintF("@ %d ", l.pos());
+      disp.print();
+      PrintF("\n");
+      disp.next(&l);
+    }
+  } else {
+    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
+  }
+}
+
+
+void Assembler::bind_to(Label* L, int pos) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = NULL;
+  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
+  while (L->is_linked()) {
+    Displacement disp = disp_at(L);
+    int fixup_pos = L->pos();
+    if (disp.type() == Displacement::CODE_RELATIVE) {
+      // Relative to Code* heap object pointer.
+      long_at_put(fixup_pos, pos + Code::kHeaderSize - kHeapObjectTag);
+    } else {
+      if (disp.type() == Displacement::UNCONDITIONAL_JUMP) {
+        ASSERT(byte_at(fixup_pos - 1) == 0xE9);  // jmp expected
+      }
+      // relative address, relative to point after address
+      int imm32 = pos - (fixup_pos + sizeof(int32_t));
+      long_at_put(fixup_pos, imm32);
+    }
+    disp.next(L);
+  }
+  L->bind_to(pos);
+}
+
+
+void Assembler::link_to(Label* L, Label* appendix) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = NULL;
+  if (appendix->is_linked()) {
+    if (L->is_linked()) {
+      // append appendix to L's list
+      Label p;
+      Label q = *L;
+      do {
+        p = q;
+        Displacement disp = disp_at(&q);
+        disp.next(&q);
+      } while (q.is_linked());
+      Displacement disp = disp_at(&p);
+      disp.link_to(appendix);
+      disp_at_put(&p, disp);
+      p.Unuse();  // to avoid assertion failure in ~Label
+    } else {
+      // L is empty, simply use appendix
+      *L = *appendix;
+    }
+  }
+  appendix->Unuse();  // appendix should not be used anymore
+}
+
+
+void Assembler::bind(Label* L) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = NULL;
+  ASSERT(!L->is_bound());  // label can only be bound once
+  bind_to(L, pc_offset());
+}
+
+
+void Assembler::call(Label* L) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (L->is_bound()) {
+    const int long_size = 5;
+    int offs = L->pos() - pc_offset();
+    ASSERT(offs <= 0);
+    // 1110 1000 #32-bit disp
+    EMIT(0xE8);
+    emit(offs - long_size);
+  } else {
+    // 1110 1000 #32-bit disp
+    EMIT(0xE8);
+    emit_disp(L, Displacement::OTHER);
+  }
+}
+
+
+void Assembler::call(byte* entry, RelocInfo::Mode rmode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(!RelocInfo::IsCodeTarget(rmode));
+  EMIT(0xE8);
+  emit(entry - (pc_ + sizeof(int32_t)), rmode);
+}
+
+
+void Assembler::call(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xFF);
+  emit_operand(edx, adr);
+}
+
+
+void Assembler::call(Handle<Code> code, RelocInfo::Mode rmode) {
+  WriteRecordedPositions();
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  EMIT(0xE8);
+  emit(reinterpret_cast<intptr_t>(code.location()), rmode);
+}
+
+
+void Assembler::jmp(Label* L) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (L->is_bound()) {
+    const int short_size = 2;
+    const int long_size  = 5;
+    int offs = L->pos() - pc_offset();
+    ASSERT(offs <= 0);
+    if (is_int8(offs - short_size)) {
+      // 1110 1011 #8-bit disp
+      EMIT(0xEB);
+      EMIT((offs - short_size) & 0xFF);
+    } else {
+      // 1110 1001 #32-bit disp
+      EMIT(0xE9);
+      emit(offs - long_size);
+    }
+  } else {
+    // 1110 1001 #32-bit disp
+    EMIT(0xE9);
+    emit_disp(L, Displacement::UNCONDITIONAL_JUMP);
+  }
+}
+
+
+void Assembler::jmp(byte* entry, RelocInfo::Mode rmode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(!RelocInfo::IsCodeTarget(rmode));
+  EMIT(0xE9);
+  emit(entry - (pc_ + sizeof(int32_t)), rmode);
+}
+
+
+void Assembler::jmp(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xFF);
+  emit_operand(esp, adr);
+}
+
+
+void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  EMIT(0xE9);
+  emit(reinterpret_cast<intptr_t>(code.location()), rmode);
+}
+
+
+
+void Assembler::j(Condition cc, Label* L, Hint hint) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(0 <= cc && cc < 16);
+  if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
+  if (L->is_bound()) {
+    const int short_size = 2;
+    const int long_size  = 6;
+    int offs = L->pos() - pc_offset();
+    ASSERT(offs <= 0);
+    if (is_int8(offs - short_size)) {
+      // 0111 tttn #8-bit disp
+      EMIT(0x70 | cc);
+      EMIT((offs - short_size) & 0xFF);
+    } else {
+      // 0000 1111 1000 tttn #32-bit disp
+      EMIT(0x0F);
+      EMIT(0x80 | cc);
+      emit(offs - long_size);
+    }
+  } else {
+    // 0000 1111 1000 tttn #32-bit disp
+    // Note: we could eliminate conditional jumps to this jump if the
+    //       condition is the same; however, that seems rather unlikely.
+    EMIT(0x0F);
+    EMIT(0x80 | cc);
+    emit_disp(L, Displacement::OTHER);
+  }
+}
+
+
+void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode, Hint hint) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT((0 <= cc) && (cc < 16));
+  if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
+  // 0000 1111 1000 tttn #32-bit disp
+  EMIT(0x0F);
+  EMIT(0x80 | cc);
+  emit(entry - (pc_ + sizeof(int32_t)), rmode);
+}
+
+
+void Assembler::j(Condition cc, Handle<Code> code, Hint hint) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
+  // 0000 1111 1000 tttn #32-bit disp
+  EMIT(0x0F);
+  EMIT(0x80 | cc);
+  emit(reinterpret_cast<intptr_t>(code.location()), RelocInfo::CODE_TARGET);
+}
+
+
+// FPU instructions
+
+
+void Assembler::fld(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xD9, 0xC0, i);
+}
+
+
+void Assembler::fld1() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xE8);
+}
+
+
+void Assembler::fldz() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xEE);
+}
+
+
+void Assembler::fld_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  emit_operand(eax, adr);
+}
+
+
+void Assembler::fld_d(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDD);
+  emit_operand(eax, adr);
+}
+
+
+void Assembler::fstp_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  emit_operand(ebx, adr);
+}
+
+
+void Assembler::fstp_d(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDD);
+  emit_operand(ebx, adr);
+}
+
+
+void Assembler::fild_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDB);
+  emit_operand(eax, adr);
+}
+
+
+void Assembler::fild_d(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDF);
+  emit_operand(ebp, adr);
+}
+
+
+void Assembler::fistp_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDB);
+  emit_operand(ebx, adr);
+}
+
+
+void Assembler::fisttp_s(const Operand& adr) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE3));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDB);
+  emit_operand(ecx, adr);
+}
+
+
+void Assembler::fist_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDB);
+  emit_operand(edx, adr);
+}
+
+
+void Assembler::fistp_d(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDF);
+  emit_operand(edi, adr);
+}
+
+
+void Assembler::fabs() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xE1);
+}
+
+
+void Assembler::fchs() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xE0);
+}
+
+
+void Assembler::fcos() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xFF);
+}
+
+
+void Assembler::fsin() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xFE);
+}
+
+
+void Assembler::fadd(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDC, 0xC0, i);
+}
+
+
+void Assembler::fsub(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDC, 0xE8, i);
+}
+
+
+void Assembler::fisub_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDA);
+  emit_operand(esp, adr);
+}
+
+
+void Assembler::fmul(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDC, 0xC8, i);
+}
+
+
+void Assembler::fdiv(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDC, 0xF8, i);
+}
+
+
+void Assembler::faddp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDE, 0xC0, i);
+}
+
+
+void Assembler::fsubp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDE, 0xE8, i);
+}
+
+
+void Assembler::fsubrp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDE, 0xE0, i);
+}
+
+
+void Assembler::fmulp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDE, 0xC8, i);
+}
+
+
+void Assembler::fdivp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDE, 0xF8, i);
+}
+
+
+void Assembler::fprem() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xF8);
+}
+
+
+void Assembler::fprem1() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xF5);
+}
+
+
+void Assembler::fxch(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xD9, 0xC8, i);
+}
+
+
+void Assembler::fincstp() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xF7);
+}
+
+
+void Assembler::ffree(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDD, 0xC0, i);
+}
+
+
+void Assembler::ftst() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xE4);
+}
+
+
+void Assembler::fucomp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDD, 0xE8, i);
+}
+
+
+void Assembler::fucompp() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDA);
+  EMIT(0xE9);
+}
+
+
+void Assembler::fcompp() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDE);
+  EMIT(0xD9);
+}
+
+
+void Assembler::fnstsw_ax() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDF);
+  EMIT(0xE0);
+}
+
+
+void Assembler::fwait() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x9B);
+}
+
+
+void Assembler::frndint() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xFC);
+}
+
+
+void Assembler::fnclex() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDB);
+  EMIT(0xE2);
+}
+
+
+void Assembler::sahf() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x9E);
+}
+
+
+void Assembler::setcc(Condition cc, Register reg) {
+  ASSERT(reg.is_byte_register());
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0F);
+  EMIT(0x90 | cc);
+  EMIT(0xC0 | reg.code());
+}
+
+
+void Assembler::cvttss2si(Register dst, const Operand& src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF3);
+  EMIT(0x0F);
+  EMIT(0x2C);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::cvttsd2si(Register dst, const Operand& src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);
+  EMIT(0x0F);
+  EMIT(0x2C);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);
+  EMIT(0x0F);
+  EMIT(0x2A);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::addsd(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);
+  EMIT(0x0F);
+  EMIT(0x58);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);
+  EMIT(0x0F);
+  EMIT(0x59);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::subsd(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);
+  EMIT(0x0F);
+  EMIT(0x5C);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::divsd(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);
+  EMIT(0x0F);
+  EMIT(0x5E);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::comisd(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x2F);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::movdbl(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  movsd(dst, src);
+}
+
+
+void Assembler::movdbl(const Operand& dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  movsd(dst, src);
+}
+
+
+void Assembler::movsd(const Operand& dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);  // double
+  EMIT(0x0F);
+  EMIT(0x11);  // store
+  emit_sse_operand(src, dst);
+}
+
+
+void Assembler::movsd(XMMRegister dst, const Operand& src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);  // double
+  EMIT(0x0F);
+  EMIT(0x10);  // load
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
+  Register ireg = { reg.code() };
+  emit_operand(ireg, adr);
+}
+
+
+void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
+  EMIT(0xC0 | dst.code() << 3 | src.code());
+}
+
+
+void Assembler::Print() {
+  Disassembler::Decode(stdout, buffer_, pc_);
+}
+
+
+void Assembler::RecordJSReturn() {
+  WriteRecordedPositions();
+  EnsureSpace ensure_space(this);
+  RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
+void Assembler::RecordComment(const char* msg) {
+  if (FLAG_debug_code) {
+    EnsureSpace ensure_space(this);
+    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+  }
+}
+
+
+void Assembler::RecordPosition(int pos) {
+  ASSERT(pos != RelocInfo::kNoPosition);
+  ASSERT(pos >= 0);
+  current_position_ = pos;
+}
+
+
+void Assembler::RecordStatementPosition(int pos) {
+  ASSERT(pos != RelocInfo::kNoPosition);
+  ASSERT(pos >= 0);
+  current_statement_position_ = pos;
+}
+
+
+void Assembler::WriteRecordedPositions() {
+  // Write the statement position if it is different from what was written last
+  // time.
+  if (current_statement_position_ != written_statement_position_) {
+    EnsureSpace ensure_space(this);
+    RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
+    written_statement_position_ = current_statement_position_;
+  }
+
+  // Write the position if it is different from what was written last time and
+  // also different from the written statement position.
+  if (current_position_ != written_position_ &&
+      current_position_ != written_statement_position_) {
+    EnsureSpace ensure_space(this);
+    RecordRelocInfo(RelocInfo::POSITION, current_position_);
+    written_position_ = current_position_;
+  }
+}
+
+
+void Assembler::GrowBuffer() {
+  ASSERT(overflow());  // should not call this otherwise
+  if (!own_buffer_) FATAL("external code buffer is too small");
+
+  // compute new buffer size
+  CodeDesc desc;  // the new buffer
+  if (buffer_size_ < 4*KB) {
+    desc.buffer_size = 4*KB;
+  } else {
+    desc.buffer_size = 2*buffer_size_;
+  }
+  // Some internal data structures overflow for very large buffers, so
+  // kMaximalBufferSize must be kept small enough to avoid that.
+  if ((desc.buffer_size > kMaximalBufferSize) ||
+      (desc.buffer_size > Heap::OldGenerationSize())) {
+    V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
+  }
+
+  // setup new buffer
+  desc.buffer = NewArray<byte>(desc.buffer_size);
+  desc.instr_size = pc_offset();
+  desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
+
+  // Clear the buffer in debug mode. Use 'int3' instructions so that we
+  // trap immediately if uninitialized code is ever executed.
+#ifdef DEBUG
+  memset(desc.buffer, 0xCC, desc.buffer_size);
+#endif
+
+  // copy the data
+  int pc_delta = desc.buffer - buffer_;
+  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
+  memmove(desc.buffer, buffer_, desc.instr_size);
+  memmove(rc_delta + reloc_info_writer.pos(),
+          reloc_info_writer.pos(), desc.reloc_size);
+
+  // switch buffers
+  if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
+    spare_buffer_ = buffer_;
+  } else {
+    DeleteArray(buffer_);
+  }
+  buffer_ = desc.buffer;
+  buffer_size_ = desc.buffer_size;
+  pc_ += pc_delta;
+  if (last_pc_ != NULL) {
+    last_pc_ += pc_delta;
+  }
+  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+                               reloc_info_writer.last_pc() + pc_delta);
+
+  // relocate runtime entries
+  for (RelocIterator it(desc); !it.done(); it.next()) {
+    RelocInfo::Mode rmode = it.rinfo()->rmode();
+    if (rmode == RelocInfo::RUNTIME_ENTRY) {
+      int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
+      *p -= pc_delta;  // relocate entry
+    } else if (rmode == RelocInfo::INTERNAL_REFERENCE) {
+      int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
+      if (*p != 0) {  // 0 means uninitialized.
+        *p += pc_delta;
+      }
+    }
+  }
+
+  ASSERT(!overflow());
+}
+
+
+void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
+  ASSERT(is_uint8(op1) && is_uint8(op2));  // wrong opcode
+  ASSERT(is_uint8(imm8));
+  ASSERT((op1 & 0x01) == 0);  // should be 8bit operation
+  EMIT(op1);
+  EMIT(op2 | dst.code());
+  EMIT(imm8);
+}
+
+
+void Assembler::emit_arith(int sel, Operand dst, const Immediate& x) {
+  ASSERT((0 <= sel) && (sel <= 7));
+  Register ireg = { sel };
+  if (x.is_int8()) {
+    EMIT(0x83);  // using a sign-extended 8-bit immediate.
+    emit_operand(ireg, dst);
+    EMIT(x.x_ & 0xFF);
+  } else if (dst.is_reg(eax)) {
+    EMIT((sel << 3) | 0x05);  // short form if the destination is eax.
+    emit(x);
+  } else {
+    EMIT(0x81);  // using a literal 32-bit immediate.
+    emit_operand(ireg, dst);
+    emit(x);
+  }
+}
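+
+// Illustrative encodings chosen by emit_arith (sketch, not emitted here):
+//   add(Operand(ebx), Immediate(4))     -> 83 c3 04            (imm8 form)
+//   add(Operand(eax), Immediate(1000))  -> 05 e8 03 00 00      (eax short form)
+//   add(Operand(ebx), Immediate(1000))  -> 81 c3 e8 03 00 00   (imm32 form)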
+
+
+void Assembler::emit_operand(Register reg, const Operand& adr) {
+  const unsigned length = adr.len_;
+  ASSERT(length > 0);
+
+  // Emit updated ModRM byte containing the given register.
+  pc_[0] = (adr.buf_[0] & ~0x38) | (reg.code() << 3);
+
+  // Emit the rest of the encoded operand.
+  for (unsigned i = 1; i < length; i++) pc_[i] = adr.buf_[i];
+  pc_ += length;
+
+  // Emit relocation information if necessary.
+  if (length >= sizeof(int32_t) && adr.rmode_ != RelocInfo::NONE) {
+    pc_ -= sizeof(int32_t);  // pc_ must be *at* disp32
+    RecordRelocInfo(adr.rmode_);
+    pc_ += sizeof(int32_t);
+  }
+}
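+
+// Sketch of the patching above: Operand(ebp, 8) stores the bytes {0x45, 0x08}
+// with an empty reg field, and emit_operand(ecx, ...) ORs in ecx's code, so
+// mov(ecx, Operand(ebp, 8)) assembles to 8b 4d 08.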
+
+
+void Assembler::emit_farith(int b1, int b2, int i) {
+  ASSERT(is_uint8(b1) && is_uint8(b2));  // wrong opcode
+  ASSERT(0 <= i &&  i < 8);  // illegal stack offset
+  EMIT(b1);
+  EMIT(b2 + i);
+}
+
+
+void Assembler::dd(uint32_t data, RelocInfo::Mode reloc_info) {
+  EnsureSpace ensure_space(this);
+  emit(data, reloc_info);
+}
+
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+  ASSERT(rmode != RelocInfo::NONE);
+  // Don't record external references unless the heap will be serialized.
+  if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
+      !Serializer::enabled() &&
+      !FLAG_debug_code) {
+    return;
+  }
+  RelocInfo rinfo(pc_, rmode, data);
+  reloc_info_writer.Write(&rinfo);
+}
+
+
+#ifdef GENERATED_CODE_COVERAGE
+static FILE* coverage_log = NULL;
+
+
+static void InitCoverageLog() {
+  char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
+  if (file_name != NULL) {
+    coverage_log = fopen(file_name, "a+");
+  }
+}
+
+
+void LogGeneratedCodeCoverage(const char* file_line) {
+  const char* return_address = (&file_line)[-1];
+  char* push_insn = const_cast<char*>(return_address - 12);
+  push_insn[0] = 0xeb;  // Relative branch insn.
+  push_insn[1] = 13;    // Skip over coverage insns.
+  if (coverage_log != NULL) {
+    fprintf(coverage_log, "%s\n", file_line);
+    fflush(coverage_log);
+  }
+}
+
+#endif
+
+} }  // namespace v8::internal
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
new file mode 100644
index 0000000..610017b
--- /dev/null
+++ b/src/ia32/assembler-ia32.h
@@ -0,0 +1,877 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+
+// A light-weight IA32 Assembler.
+
+#ifndef V8_IA32_ASSEMBLER_IA32_H_
+#define V8_IA32_ASSEMBLER_IA32_H_
+
+namespace v8 {
+namespace internal {
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+//
+struct Register {
+  bool is_valid() const  { return 0 <= code_ && code_ < 8; }
+  bool is(Register reg) const  { return code_ == reg.code_; }
+  // eax, ebx, ecx and edx are byte registers, the rest are not.
+  bool is_byte_register() const  { return code_ <= 3; }
+  int code() const  {
+    ASSERT(is_valid());
+    return code_;
+  }
+  int bit() const  {
+    ASSERT(is_valid());
+    return 1 << code_;
+  }
+
+  // (unfortunately this can't be private: the struct must stay an aggregate)
+  int code_;
+};
+
+const Register eax = { 0 };
+const Register ecx = { 1 };
+const Register edx = { 2 };
+const Register ebx = { 3 };
+const Register esp = { 4 };
+const Register ebp = { 5 };
+const Register esi = { 6 };
+const Register edi = { 7 };
+const Register no_reg = { -1 };
+
+
+struct XMMRegister {
+  bool is_valid() const  { return 0 <= code_ && code_ < 2; }  // currently
+  int code() const  {
+    ASSERT(is_valid());
+    return code_;
+  }
+
+  int code_;
+};
+
+const XMMRegister xmm0 = { 0 };
+const XMMRegister xmm1 = { 1 };
+const XMMRegister xmm2 = { 2 };
+const XMMRegister xmm3 = { 3 };
+const XMMRegister xmm4 = { 4 };
+const XMMRegister xmm5 = { 5 };
+const XMMRegister xmm6 = { 6 };
+const XMMRegister xmm7 = { 7 };
+
+enum Condition {
+  // any value < 0 is considered no_condition
+  no_condition  = -1,
+
+  overflow      =  0,
+  no_overflow   =  1,
+  below         =  2,
+  above_equal   =  3,
+  equal         =  4,
+  not_equal     =  5,
+  below_equal   =  6,
+  above         =  7,
+  negative      =  8,
+  positive      =  9,
+  parity_even   = 10,
+  parity_odd    = 11,
+  less          = 12,
+  greater_equal = 13,
+  less_equal    = 14,
+  greater       = 15,
+
+  // aliases
+  carry         = below,
+  not_carry     = above_equal,
+  zero          = equal,
+  not_zero      = not_equal,
+  sign          = negative,
+  not_sign      = positive
+};
+
+
+// Returns the equivalent of !cc.
+// Negation of the default no_condition (-1) results in a non-default
+// no_condition value (-2). As long as tests for no_condition check
+// for condition < 0, this will work as expected.
+inline Condition NegateCondition(Condition cc);
+
+// Corresponds to transposing the operands of a comparison.
+inline Condition ReverseCondition(Condition cc) {
+  switch (cc) {
+    case below:
+      return above;
+    case above:
+      return below;
+    case above_equal:
+      return below_equal;
+    case below_equal:
+      return above_equal;
+    case less:
+      return greater;
+    case greater:
+      return less;
+    case greater_equal:
+      return less_equal;
+    case less_equal:
+      return greater_equal;
+    default:
+      return cc;
+  }
+}
+
+enum Hint {
+  no_hint = 0,
+  not_taken = 0x2e,
+  taken = 0x3e
+};
+
+// The result of negating a hint is as if the corresponding condition
+// were negated by NegateCondition.  That is, no_hint is mapped to
+// itself and not_taken and taken are mapped to each other.
+inline Hint NegateHint(Hint hint) {
+  return (hint == no_hint)
+      ? no_hint
+      : ((hint == not_taken) ? taken : not_taken);
+}
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Immediates
+
+class Immediate BASE_EMBEDDED {
+ public:
+  inline explicit Immediate(int x);
+  inline explicit Immediate(const char* s);
+  inline explicit Immediate(const ExternalReference& ext);
+  inline explicit Immediate(Handle<Object> handle);
+  inline explicit Immediate(Smi* value);
+
+  static Immediate CodeRelativeOffset(Label* label) {
+    return Immediate(label);
+  }
+
+  bool is_zero() const { return x_ == 0 && rmode_ == RelocInfo::NONE; }
+  bool is_int8() const {
+    return -128 <= x_ && x_ < 128 && rmode_ == RelocInfo::NONE;
+  }
+  bool is_int16() const {
+    return -32768 <= x_ && x_ < 32768 && rmode_ == RelocInfo::NONE;
+  }
+
+ private:
+  inline explicit Immediate(Label* value);
+
+  int x_;
+  RelocInfo::Mode rmode_;
+
+  friend class Assembler;
+};
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands
+
+enum ScaleFactor {
+  times_1 = 0,
+  times_2 = 1,
+  times_4 = 2,
+  times_8 = 3,
+  times_pointer_size = times_4,
+  times_half_pointer_size = times_2
+};
+
+
+class Operand BASE_EMBEDDED {
+ public:
+  // reg
+  INLINE(explicit Operand(Register reg));
+
+  // [disp/r]
+  INLINE(explicit Operand(int32_t disp, RelocInfo::Mode rmode));
+  // A disp-only operand must always carry relocation information.
+
+  // [base + disp/r]
+  explicit Operand(Register base, int32_t disp,
+                   RelocInfo::Mode rmode = RelocInfo::NONE);
+
+  // [base + index*scale + disp/r]
+  explicit Operand(Register base,
+                   Register index,
+                   ScaleFactor scale,
+                   int32_t disp,
+                   RelocInfo::Mode rmode = RelocInfo::NONE);
+
+  // [index*scale + disp/r]
+  explicit Operand(Register index,
+                   ScaleFactor scale,
+                   int32_t disp,
+                   RelocInfo::Mode rmode = RelocInfo::NONE);
+
+  static Operand StaticVariable(const ExternalReference& ext) {
+    return Operand(reinterpret_cast<int32_t>(ext.address()),
+                   RelocInfo::EXTERNAL_REFERENCE);
+  }
+
+  static Operand StaticArray(Register index,
+                             ScaleFactor scale,
+                             const ExternalReference& arr) {
+    return Operand(index, scale, reinterpret_cast<int32_t>(arr.address()),
+                   RelocInfo::EXTERNAL_REFERENCE);
+  }
+
+  // Returns true if this Operand is a wrapper for the specified register.
+  bool is_reg(Register reg) const;
+
+ private:
+  byte buf_[6];
+  // The number of bytes in buf_.
+  unsigned int len_;
+  // Only valid if len_ > 4.
+  RelocInfo::Mode rmode_;
+
+  // Set the ModRM byte without an encoded 'reg' register. The
+  // register is encoded later as part of the emit_operand operation.
+  inline void set_modrm(int mod, Register rm);
+
+  inline void set_sib(ScaleFactor scale, Register index, Register base);
+  inline void set_disp8(int8_t disp);
+  inline void set_dispr(int32_t disp, RelocInfo::Mode rmode);
+
+  friend class Assembler;
+};
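+
+// Illustrative uses of the constructors above (sketch):
+//   Operand(eax)                    // plain register operand
+//   Operand(ebp, 8)                 // [ebp + 8]
+//   Operand(ebx, ecx, times_4, 0)   // [ebx + ecx*4]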
+
+
+// -----------------------------------------------------------------------------
+// A Displacement describes the 32bit immediate field of an instruction which
+// may be used together with a Label in order to refer to a yet unknown code
+// position. Displacements stored in the instruction stream are used to describe
+// the instruction and to chain a list of instructions using the same Label.
+// A Displacement contains 2 different fields:
+//
+// next field: position of next displacement in the chain (0 = end of list)
+// type field: instruction type
+//
+// A next value of null (0) indicates the end of a chain (note that there can
+// be no displacement at position zero, because there is always at least one
+// instruction byte before the displacement).
+//
+// Displacement _data field layout
+//
+// |31.....2|1......0|
+// |  next  |  type  |
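+//
+// For example (illustrative), a linked UNCONDITIONAL_JUMP displacement whose
+// next displacement in the chain sits at position 17 is stored as
+// data_ = (17 << 2) | UNCONDITIONAL_JUMP, so NextField::decode(data_) == 17
+// and TypeField::decode(data_) == UNCONDITIONAL_JUMP.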
+
+class Displacement BASE_EMBEDDED {
+ public:
+  enum Type {
+    UNCONDITIONAL_JUMP,
+    CODE_RELATIVE,
+    OTHER
+  };
+
+  int data() const { return data_; }
+  Type type() const { return TypeField::decode(data_); }
+  void next(Label* L) const {
+    int n = NextField::decode(data_);
+    n > 0 ? L->link_to(n) : L->Unuse();
+  }
+  void link_to(Label* L) { init(L, type()); }
+
+  explicit Displacement(int data) { data_ = data; }
+
+  Displacement(Label* L, Type type) { init(L, type); }
+
+  void print() {
+    PrintF("%s (%x) ", (type() == UNCONDITIONAL_JUMP ? "jmp" : "[other]"),
+                       NextField::decode(data_));
+  }
+
+ private:
+  int data_;
+
+  class TypeField: public BitField<Type, 0, 2> {};
+  class NextField: public BitField<int,  2, 32-2> {};
+
+  void init(Label* L, Type type);
+};
+
+
+
+// CpuFeatures keeps track of which features are supported by the target CPU.
+// Supported features must be enabled by a Scope before use.
+// Example:
+//   if (CpuFeatures::IsSupported(SSE2)) {
+//     CpuFeatures::Scope fscope(SSE2);
+//     // Generate SSE2 floating point code.
+//   } else {
+//     // Generate standard x87 floating point code.
+//   }
+class CpuFeatures : public AllStatic {
+ public:
+  // Feature flags bit positions. They are mostly based on the CPUID spec.
+  // (We assign CPUID itself to one of the currently reserved bits --
+  // feel free to change this if needed.)
+  enum Feature { SSE3 = 32, SSE2 = 26, CMOV = 15, RDTSC = 4, CPUID = 10 };
+  // Detect features of the target CPU. Set safe defaults if the serializer
+  // is enabled (snapshots must be portable).
+  static void Probe();
+  // Check whether a feature is supported by the target CPU.
+  static bool IsSupported(Feature f) {
+    return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
+  }
+  // Check whether a feature is currently enabled.
+  static bool IsEnabled(Feature f) {
+    return (enabled_ & (static_cast<uint64_t>(1) << f)) != 0;
+  }
+  // Enable a specified feature within a scope.
+  class Scope BASE_EMBEDDED {
+#ifdef DEBUG
+   public:
+    explicit Scope(Feature f) {
+      ASSERT(CpuFeatures::IsSupported(f));
+      old_enabled_ = CpuFeatures::enabled_;
+      CpuFeatures::enabled_ |= (static_cast<uint64_t>(1) << f);
+    }
+    ~Scope() { CpuFeatures::enabled_ = old_enabled_; }
+   private:
+    uint64_t old_enabled_;
+#else
+   public:
+    explicit Scope(Feature f) {}
+#endif
+  };
+ private:
+  static uint64_t supported_;
+  static uint64_t enabled_;
+};
+
+
+class Assembler : public Malloced {
+ private:
+  // We check before assembling an instruction that there is sufficient
+  // space to write an instruction and its relocation information.
+  // The relocation writer's position must be kGap bytes above the end of
+  // the generated instructions. This leaves enough space for the
+  // longest possible ia32 instruction, 15 bytes, and the longest possible
+  // relocation information encoding, RelocInfoWriter::kMaxLength == 16.
+  // (There is a 15 byte limit on ia32 instruction length that rules out some
+  // otherwise valid instructions.)
+  // This allows for a single, fast space check per instruction.
+  static const int kGap = 32;
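+  // (Worst case per instruction under this scheme: 15 bytes of code plus 16
+  // bytes of relocation information, i.e. 31 bytes, which still fits in the
+  // 32 byte gap.)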
+
+ public:
+  // Create an assembler. Instructions and relocation information are emitted
+  // into a buffer, with the instructions starting from the beginning and the
+  // relocation information starting from the end of the buffer. See CodeDesc
+  // for a detailed comment on the layout (globals.h).
+  //
+  // If the provided buffer is NULL, the assembler allocates and grows its own
+  // buffer, and buffer_size determines the initial buffer size. The buffer is
+  // owned by the assembler and deallocated upon destruction of the assembler.
+  //
+  // If the provided buffer is not NULL, the assembler uses the provided buffer
+  // for code generation and assumes its size to be buffer_size. If the buffer
+  // is too small, a fatal error occurs. No deallocation of the buffer is done
+  // upon destruction of the assembler.
+  Assembler(void* buffer, int buffer_size);
+  ~Assembler();
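+
+  // Typical instantiation (sketch): 'Assembler masm(NULL, 4*KB);' makes the
+  // assembler allocate and grow its own buffer, while passing an existing
+  // buffer emits into caller-owned memory and fails with a fatal error if
+  // that buffer is too small.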
+
+  // GetCode emits any pending (non-emitted) code and fills the descriptor
+  // desc. GetCode() is idempotent; it returns the same result if no other
+  // Assembler functions are invoked in between GetCode() calls.
+  void GetCode(CodeDesc* desc);
+
+  // Read/Modify the code target in the branch/call instruction at pc.
+  inline static Address target_address_at(Address pc);
+  inline static void set_target_address_at(Address pc, Address target);
+
+  // Distance between the address of the code target in the call instruction
+  // and the return address
+  static const int kCallTargetAddressOffset = kPointerSize;
+  // Distance between start of patched return sequence and the emitted address
+  // to jump to.
+  static const int kPatchReturnSequenceAddressOffset = 1;  // JMP imm32.
+
+
+  // ---------------------------------------------------------------------------
+  // Code generation
+  //
+  // - function names correspond one-to-one to ia32 instruction mnemonics
+  // - unless specified otherwise, instructions operate on 32bit operands
+  // - instructions on 8bit (byte) operands/registers have a trailing '_b'
+  // - instructions on 16bit (word) operands/registers have a trailing '_w'
+  // - naming conflicts with C++ keywords are resolved via a trailing '_'
+
+  // NOTE ON INTERFACE: Currently, the interface is not very consistent
+  // in the sense that some operations (e.g. mov()) can be called in more
+  // than one way to generate the same instruction: The Register argument
+  // can in some cases be replaced with an Operand(Register) argument.
+  // This should be cleaned up and made more orthogonal. The question
+  // is: should we always use Operands instead of Registers where an
+  // Operand is possible, or should we have a Register (overloaded) form
+  // instead? We must be careful to make sure that the selected instruction
+  // is obvious from the parameters to avoid hard-to-find code generation
+  // bugs.
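+  //
+  // For instance, mov(eax, ebx) and mov(eax, Operand(ebx)) currently assemble
+  // to two different encodings (89 d8 vs. 8b c3) of the same 32-bit move,
+  // which is exactly the kind of duplication this note refers to.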
+
+  // Insert the smallest number of nop instructions
+  // possible to align the pc offset to a multiple
+  // of m. m must be a power of 2.
+  void Align(int m);
+
+  // Stack
+  void pushad();
+  void popad();
+
+  void pushfd();
+  void popfd();
+
+  void push(const Immediate& x);
+  void push(Register src);
+  void push(const Operand& src);
+  void push(Label* label, RelocInfo::Mode relocation_mode);
+
+  void pop(Register dst);
+  void pop(const Operand& dst);
+
+  void enter(const Immediate& size);
+  void leave();
+
+  // Moves
+  void mov_b(Register dst, const Operand& src);
+  void mov_b(const Operand& dst, int8_t imm8);
+  void mov_b(const Operand& dst, Register src);
+
+  void mov_w(Register dst, const Operand& src);
+  void mov_w(const Operand& dst, Register src);
+
+  void mov(Register dst, int32_t imm32);
+  void mov(Register dst, const Immediate& x);
+  void mov(Register dst, Handle<Object> handle);
+  void mov(Register dst, const Operand& src);
+  void mov(Register dst, Register src);
+  void mov(const Operand& dst, const Immediate& x);
+  void mov(const Operand& dst, Handle<Object> handle);
+  void mov(const Operand& dst, Register src);
+
+  void movsx_b(Register dst, const Operand& src);
+
+  void movsx_w(Register dst, const Operand& src);
+
+  void movzx_b(Register dst, const Operand& src);
+
+  void movzx_w(Register dst, const Operand& src);
+
+  // Conditional moves
+  void cmov(Condition cc, Register dst, int32_t imm32);
+  void cmov(Condition cc, Register dst, Handle<Object> handle);
+  void cmov(Condition cc, Register dst, const Operand& src);
+
+  // Exchange two registers
+  void xchg(Register dst, Register src);
+
+  // Arithmetics
+  void adc(Register dst, int32_t imm32);
+  void adc(Register dst, const Operand& src);
+
+  void add(Register dst, const Operand& src);
+  void add(const Operand& dst, const Immediate& x);
+
+  void and_(Register dst, int32_t imm32);
+  void and_(Register dst, const Operand& src);
+  void and_(const Operand& src, Register dst);
+  void and_(const Operand& dst, const Immediate& x);
+
+  void cmpb(const Operand& op, int8_t imm8);
+  void cmpb_al(const Operand& op);
+  void cmpw_ax(const Operand& op);
+  void cmpw(const Operand& op, Immediate imm16);
+  void cmp(Register reg, int32_t imm32);
+  void cmp(Register reg, Handle<Object> handle);
+  void cmp(Register reg, const Operand& op);
+  void cmp(const Operand& op, const Immediate& imm);
+  void cmp(const Operand& op, Handle<Object> handle);
+
+  void dec_b(Register dst);
+
+  void dec(Register dst);
+  void dec(const Operand& dst);
+
+  void cdq();
+
+  void idiv(Register src);
+
+  // Signed multiply instructions.
+  void imul(Register src);                               // edx:eax = eax * src.
+  void imul(Register dst, const Operand& src);           // dst = dst * src.
+  void imul(Register dst, Register src, int32_t imm32);  // dst = src * imm32.
+
+  void inc(Register dst);
+  void inc(const Operand& dst);
+
+  void lea(Register dst, const Operand& src);
+
+  // Unsigned multiply instruction.
+  void mul(Register src);                                // edx:eax = eax * src.
+
+  void neg(Register dst);
+
+  void not_(Register dst);
+
+  void or_(Register dst, int32_t imm32);
+  void or_(Register dst, const Operand& src);
+  void or_(const Operand& dst, Register src);
+  void or_(const Operand& dst, const Immediate& x);
+
+  void rcl(Register dst, uint8_t imm8);
+
+  void sar(Register dst, uint8_t imm8);
+  void sar(Register dst);
+
+  void sbb(Register dst, const Operand& src);
+
+  void shld(Register dst, const Operand& src);
+
+  void shl(Register dst, uint8_t imm8);
+  void shl(Register dst);
+
+  void shrd(Register dst, const Operand& src);
+
+  void shr(Register dst, uint8_t imm8);
+  void shr(Register dst);
+  void shr_cl(Register dst);
+
+  void sub(const Operand& dst, const Immediate& x);
+  void sub(Register dst, const Operand& src);
+  void sub(const Operand& dst, Register src);
+
+  void test(Register reg, const Immediate& imm);
+  void test(Register reg, const Operand& op);
+  void test(const Operand& op, const Immediate& imm);
+
+  void xor_(Register dst, int32_t imm32);
+  void xor_(Register dst, const Operand& src);
+  void xor_(const Operand& src, Register dst);
+  void xor_(const Operand& dst, const Immediate& x);
+
+  // Bit operations.
+  void bt(const Operand& dst, Register src);
+  void bts(const Operand& dst, Register src);
+
+  // Miscellaneous
+  void hlt();
+  void int3();
+  void nop();
+  void rdtsc();
+  void ret(int imm16);
+
+  // Label operations & relative jumps (PPUM Appendix D)
+  //
+  // Takes a branch opcode (cc) and a label (L) and generates
+  // either a backward branch or a forward branch and links it
+  // to the label fixup chain. Usage:
+  //
+  // Label L;    // unbound label
+  // j(cc, &L);  // forward branch to unbound label
+  // bind(&L);   // bind label to the current pc
+  // j(cc, &L);  // backward branch to bound label
+  // bind(&L);   // illegal: a label may be bound only once
+  //
+  // Note: The same Label can be used for forward and backward branches
+  // but it may be bound only once.
+
+  void bind(Label* L);  // binds an unbound label L to the current code position
+
+  // Calls
+  void call(Label* L);
+  void call(byte* entry, RelocInfo::Mode rmode);
+  void call(const Operand& adr);
+  void call(Handle<Code> code, RelocInfo::Mode rmode);
+
+  // Jumps
+  void jmp(Label* L);  // unconditional jump to L
+  void jmp(byte* entry, RelocInfo::Mode rmode);
+  void jmp(const Operand& adr);
+  void jmp(Handle<Code> code, RelocInfo::Mode rmode);
+
+  // Conditional jumps
+  void j(Condition cc, Label* L, Hint hint = no_hint);
+  void j(Condition cc, byte* entry, RelocInfo::Mode rmode, Hint hint = no_hint);
+  void j(Condition cc, Handle<Code> code, Hint hint = no_hint);
+
+  // Floating-point operations
+  void fld(int i);
+
+  void fld1();
+  void fldz();
+
+  void fld_s(const Operand& adr);
+  void fld_d(const Operand& adr);
+
+  void fstp_s(const Operand& adr);
+  void fstp_d(const Operand& adr);
+
+  void fild_s(const Operand& adr);
+  void fild_d(const Operand& adr);
+
+  void fist_s(const Operand& adr);
+
+  void fistp_s(const Operand& adr);
+  void fistp_d(const Operand& adr);
+
+  void fisttp_s(const Operand& adr);
+
+  void fabs();
+  void fchs();
+  void fcos();
+  void fsin();
+
+  void fadd(int i);
+  void fsub(int i);
+  void fmul(int i);
+  void fdiv(int i);
+
+  void fisub_s(const Operand& adr);
+
+  void faddp(int i = 1);
+  void fsubp(int i = 1);
+  void fsubrp(int i = 1);
+  void fmulp(int i = 1);
+  void fdivp(int i = 1);
+  void fprem();
+  void fprem1();
+
+  void fxch(int i = 1);
+  void fincstp();
+  void ffree(int i = 0);
+
+  void ftst();
+  void fucomp(int i);
+  void fucompp();
+  void fcompp();
+  void fnstsw_ax();
+  void fwait();
+  void fnclex();
+
+  void frndint();
+
+  void sahf();
+  void setcc(Condition cc, Register reg);
+
+  void cpuid();
+
+  // SSE2 instructions
+  void cvttss2si(Register dst, const Operand& src);
+  void cvttsd2si(Register dst, const Operand& src);
+
+  void cvtsi2sd(XMMRegister dst, const Operand& src);
+
+  void addsd(XMMRegister dst, XMMRegister src);
+  void subsd(XMMRegister dst, XMMRegister src);
+  void mulsd(XMMRegister dst, XMMRegister src);
+  void divsd(XMMRegister dst, XMMRegister src);
+
+  void comisd(XMMRegister dst, XMMRegister src);
+
+  // Use either movsd or movlpd.
+  void movdbl(XMMRegister dst, const Operand& src);
+  void movdbl(const Operand& dst, XMMRegister src);
+
+  // Debugging
+  void Print();
+
+  // Check the code size generated from label to here.
+  int SizeOfCodeGeneratedSince(Label* l) { return pc_offset() - l->pos(); }
+
+  // Mark address of the ExitJSFrame code.
+  void RecordJSReturn();
+
+  // Record a comment relocation entry that can be used by a disassembler.
+  // Use --debug_code to enable.
+  void RecordComment(const char* msg);
+
+  void RecordPosition(int pos);
+  void RecordStatementPosition(int pos);
+  void WriteRecordedPositions();
+
+  // Writes a single word of data in the code stream.
+  // Used for inline tables, e.g., jump-tables.
+  void dd(uint32_t data, RelocInfo::Mode reloc_info);
+
+  int pc_offset() const  { return pc_ - buffer_; }
+  int current_statement_position() const { return current_statement_position_; }
+  int current_position() const  { return current_position_; }
+
+  // Check if there is less than kGap bytes available in the buffer.
+  // If this is the case, we need to grow the buffer before emitting
+  // an instruction or relocation information.
+  inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
+
+  // Get the number of bytes available in the buffer.
+  inline int available_space() const { return reloc_info_writer.pos() - pc_; }
+
+  // Avoid overflows for displacements etc.
+  static const int kMaximalBufferSize = 512*MB;
+  static const int kMinimalBufferSize = 4*KB;
+
+ protected:
+  void movsd(XMMRegister dst, const Operand& src);
+  void movsd(const Operand& dst, XMMRegister src);
+
+  void emit_sse_operand(XMMRegister reg, const Operand& adr);
+  void emit_sse_operand(XMMRegister dst, XMMRegister src);
+
+
+ private:
+  byte* addr_at(int pos)  { return buffer_ + pos; }
+  byte byte_at(int pos)  { return buffer_[pos]; }
+  uint32_t long_at(int pos)  {
+    return *reinterpret_cast<uint32_t*>(addr_at(pos));
+  }
+  void long_at_put(int pos, uint32_t x)  {
+    *reinterpret_cast<uint32_t*>(addr_at(pos)) = x;
+  }
+
+  // code emission
+  void GrowBuffer();
+  inline void emit(uint32_t x);
+  inline void emit(Handle<Object> handle);
+  inline void emit(uint32_t x, RelocInfo::Mode rmode);
+  inline void emit(const Immediate& x);
+  inline void emit_w(const Immediate& x);
+
+  // Emit the code-object-relative offset of the label's position
+  inline void emit_code_relative_offset(Label* label);
+
+  // instruction generation
+  void emit_arith_b(int op1, int op2, Register dst, int imm8);
+
+  // Emit a basic arithmetic instruction (i.e. first byte of the family is 0x81)
+  // with a given destination expression and an immediate operand.  It attempts
+  // to use the shortest encoding possible.
+  // sel specifies the /n in the modrm byte (see the Intel PRM).
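+  // For example (per the Intel encoding tables, not anything V8-specific),
+  // the 0x81 family uses /0 for add, /5 for sub and /7 for cmp, so those
+  // instructions would be emitted with sel values 0, 5 and 7 respectively.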
+  void emit_arith(int sel, Operand dst, const Immediate& x);
+
+  void emit_operand(Register reg, const Operand& adr);
+
+  void emit_farith(int b1, int b2, int i);
+
+  // labels
+  void print(Label* L);
+  void bind_to(Label* L, int pos);
+  void link_to(Label* L, Label* appendix);
+
+  // displacements
+  inline Displacement disp_at(Label* L);
+  inline void disp_at_put(Label* L, Displacement disp);
+  inline void emit_disp(Label* L, Displacement::Type type);
+
+  // record reloc info for current pc_
+  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+  friend class CodePatcher;
+  friend class EnsureSpace;
+
+  // Code buffer:
+  // The buffer into which code and relocation info are generated.
+  byte* buffer_;
+  int buffer_size_;
+  // True if the assembler owns the buffer, false if buffer is external.
+  bool own_buffer_;
+  // A previously allocated buffer of kMinimalBufferSize bytes, or NULL.
+  static byte* spare_buffer_;
+
+  // code generation
+  byte* pc_;  // the program counter; moves forward
+  RelocInfoWriter reloc_info_writer;
+
+  // push-pop elimination
+  byte* last_pc_;
+
+  // source position information
+  int current_statement_position_;
+  int current_position_;
+  int written_statement_position_;
+  int written_position_;
+};
+
+
+// Helper class that ensures that there is enough space for generating
+// instructions and relocation information.  The constructor makes
+// sure that there is enough space and (in debug mode) the destructor
+// checks that we did not generate too much.
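+//
+// A minimal usage sketch (illustrative; EMIT is assumed to be the byte
+// emission helper defined in the corresponding .cc file, not part of this
+// header):
+//
+//   void Assembler::nop() {
+//     EnsureSpace ensure_space(this);
+//     EMIT(0x90);
+//   }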
+class EnsureSpace BASE_EMBEDDED {
+ public:
+  explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
+    if (assembler_->overflow()) assembler_->GrowBuffer();
+#ifdef DEBUG
+    space_before_ = assembler_->available_space();
+#endif
+  }
+
+#ifdef DEBUG
+  ~EnsureSpace() {
+    int bytes_generated = space_before_ - assembler_->available_space();
+    ASSERT(bytes_generated < assembler_->kGap);
+  }
+#endif
+
+ private:
+  Assembler* assembler_;
+#ifdef DEBUG
+  int space_before_;
+#endif
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_IA32_ASSEMBLER_IA32_H_
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
new file mode 100644
index 0000000..ad44026
--- /dev/null
+++ b/src/ia32/builtins-ia32.cc
@@ -0,0 +1,1233 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
+  // TODO(428): Don't pass the function in a static variable.
+  ExternalReference passed = ExternalReference::builtin_passed_function();
+  __ mov(Operand::StaticVariable(passed), edi);
+
+  // The actual argument count has already been loaded into register
+  // eax, but JumpToRuntime expects eax to contain the number of
+  // arguments including the receiver.
+  __ inc(eax);
+  __ JumpToRuntime(ExternalReference(id));
+}
+
+
+void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax: number of arguments
+  //  -- edi: constructor function
+  // -----------------------------------
+
+  Label non_function_call;
+  // Check that function is not a smi.
+  __ test(edi, Immediate(kSmiTagMask));
+  __ j(zero, &non_function_call);
+  // Check that function is a JSFunction.
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+  __ j(not_equal, &non_function_call);
+
+  // Jump to the function-specific construct stub.
+  __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset));
+  __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
+  __ jmp(Operand(ebx));
+
+  // edi: called object
+  // eax: number of arguments
+  __ bind(&non_function_call);
+
+  // Set expected number of arguments to zero (not changing eax).
+  __ Set(ebx, Immediate(0));
+  __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  __ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+         RelocInfo::CODE_TARGET);
+}
+
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+  // Enter a construct frame.
+  __ EnterConstructFrame();
+
+  // Store a smi-tagged arguments count on the stack.
+  __ shl(eax, kSmiTagSize);
+  __ push(eax);
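+  // (On ia32, kSmiTag is 0 and kSmiTagSize is 1, so an argument count of n
+  // is stored as the word n << 1; e.g. 3 arguments are pushed as 6.)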
+
+  // Push the function to invoke on the stack.
+  __ push(edi);
+
+  // Try to allocate the object without transitioning into C code. If any of the
+  // preconditions is not met, the code bails out to the runtime call.
+  Label rt_call, allocated;
+  if (FLAG_inline_new) {
+    Label undo_allocation;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+    ExternalReference debug_step_in_fp =
+        ExternalReference::debug_step_in_fp_address();
+    __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
+    __ j(not_equal, &rt_call);
+#endif
+
+    // Verified that the constructor is a JSFunction.
+    // Load the initial map and verify that it is in fact a map.
+    // edi: constructor
+    __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+    // The smi tag check catches both a NULL and a Smi.
+    __ test(eax, Immediate(kSmiTagMask));
+    __ j(zero, &rt_call);
+    // edi: constructor
+    // eax: initial map (if proven valid below)
+    __ CmpObjectType(eax, MAP_TYPE, ebx);
+    __ j(not_equal, &rt_call);
+
+    // Check that the constructor is not constructing a JSFunction (see
+    // comments in Runtime_NewObject in runtime.cc); in that case the initial
+    // map's instance type would be JS_FUNCTION_TYPE.
+    // edi: constructor
+    // eax: initial map
+    __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
+    __ j(equal, &rt_call);
+
+    // Now allocate the JSObject on the heap.
+    // edi: constructor
+    // eax: initial map
+    __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
+    __ shl(edi, kPointerSizeLog2);
+    __ AllocateInNewSpace(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
+    // Allocated the JSObject, now initialize the fields.
+    // eax: initial map
+    // ebx: JSObject
+    // edi: start of next object
+    __ mov(Operand(ebx, JSObject::kMapOffset), eax);
+    __ mov(ecx, Factory::empty_fixed_array());
+    __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
+    __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
+    // Set extra fields in the newly allocated object.
+    // eax: initial map
+    // ebx: JSObject
+    // edi: start of next object
+    { Label loop, entry;
+      __ mov(edx, Factory::undefined_value());
+      __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
+      __ jmp(&entry);
+      __ bind(&loop);
+      __ mov(Operand(ecx, 0), edx);
+      __ add(Operand(ecx), Immediate(kPointerSize));
+      __ bind(&entry);
+      __ cmp(ecx, Operand(edi));
+      __ j(less, &loop);
+    }
+
+    // Add the object tag to make the JSObject real, so that we can continue and
+    // jump into the continuation code at any time from now on. Any failures
+    // need to undo the allocation, so that the heap is in a consistent state
+    // and verifiable.
+    // eax: initial map
+    // ebx: JSObject
+    // edi: start of next object
+    __ or_(Operand(ebx), Immediate(kHeapObjectTag));
+
+    // Check if a non-empty properties array is needed.
+    // Allocate and initialize a FixedArray if it is.
+    // eax: initial map
+    // ebx: JSObject
+    // edi: start of next object
+    // Calculate the total number of properties described by the map.
+    __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
+    __ movzx_b(ecx, FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
+    __ add(edx, Operand(ecx));
+    // Calculate unused properties past the end of the in-object properties.
+    __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset));
+    __ sub(edx, Operand(ecx));
+    // Done if no extra properties are to be allocated.
+    __ j(zero, &allocated);
+    __ Assert(positive, "Property allocation count failed.");
+
+    // Scale the number of elements by pointer size and add the header for
+    // FixedArrays to the start of the next object calculation from above.
+    // ebx: JSObject
+    // edi: start of next object (will be start of FixedArray)
+    // edx: number of elements in properties array
+    __ AllocateInNewSpace(FixedArray::kHeaderSize,
+                          times_pointer_size,
+                          edx,
+                          edi,
+                          ecx,
+                          no_reg,
+                          &undo_allocation,
+                          RESULT_CONTAINS_TOP);
+
+    // Initialize the FixedArray.
+    // ebx: JSObject
+    // edi: FixedArray
+    // edx: number of elements
+    // ecx: start of next object
+    __ mov(eax, Factory::fixed_array_map());
+    __ mov(Operand(edi, JSObject::kMapOffset), eax);  // setup the map
+    __ mov(Operand(edi, Array::kLengthOffset), edx);  // and length
+
+    // Initialize the fields to undefined.
+    // ebx: JSObject
+    // edi: FixedArray
+    // ecx: start of next object
+    { Label loop, entry;
+      __ mov(edx, Factory::undefined_value());
+      __ lea(eax, Operand(edi, FixedArray::kHeaderSize));
+      __ jmp(&entry);
+      __ bind(&loop);
+      __ mov(Operand(eax, 0), edx);
+      __ add(Operand(eax), Immediate(kPointerSize));
+      __ bind(&entry);
+      __ cmp(eax, Operand(ecx));
+      __ j(below, &loop);
+    }
+
+    // Store the initialized FixedArray into the properties field of
+    // the JSObject
+    // ebx: JSObject
+    // edi: FixedArray
+    __ or_(Operand(edi), Immediate(kHeapObjectTag));  // add the heap tag
+    __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi);
+
+
+    // Continue with JSObject being successfully allocated
+    // ebx: JSObject
+    __ jmp(&allocated);
+
+    // Undo the setting of the new top so that the heap is verifiable. For
+    // example, the map's unused properties potentially do not match the
+    // allocated object's unused properties.
+    // ebx: JSObject (previous new top)
+    __ bind(&undo_allocation);
+    __ UndoAllocationInNewSpace(ebx);
+  }
+
+  // Allocate the new receiver object using the runtime call.
+  __ bind(&rt_call);
+  // Must restore edi (constructor) before calling runtime.
+  __ mov(edi, Operand(esp, 0));
+  // edi: function (constructor)
+  __ push(edi);
+  __ CallRuntime(Runtime::kNewObject, 1);
+  __ mov(ebx, Operand(eax));  // store result in ebx
+
+  // New object allocated.
+  // ebx: newly allocated object
+  __ bind(&allocated);
+  // Retrieve the function from the stack.
+  __ pop(edi);
+
+  // Retrieve smi-tagged arguments count from the stack.
+  __ mov(eax, Operand(esp, 0));
+  __ shr(eax, kSmiTagSize);
+
+  // Push the allocated receiver to the stack. We need two copies
+  // because we may have to return the original one and the calling
+  // conventions dictate that the called function pops the receiver.
+  __ push(ebx);
+  __ push(ebx);
+
+  // Setup pointer to last argument.
+  __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
+
+  // Copy arguments and receiver to the expression stack.
+  Label loop, entry;
+  __ mov(ecx, Operand(eax));
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ push(Operand(ebx, ecx, times_4, 0));
+  __ bind(&entry);
+  __ dec(ecx);
+  __ j(greater_equal, &loop);
+
+  // Call the function.
+  ParameterCount actual(eax);
+  __ InvokeFunction(edi, actual, CALL_FUNCTION);
+
+  // Restore context from the frame.
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+  // If the result is an object (in the ECMA sense), we should get rid
+  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+  // on page 74.
+  Label use_receiver, exit;
+
+  // If the result is a smi, it is *not* an object in the ECMA sense.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &use_receiver, not_taken);
+
+  // If the type of the result (stored in its map) is less than
+  // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
+  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+  __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+  __ j(greater_equal, &exit, not_taken);
+
+  // Throw away the result of the constructor invocation and use the
+  // on-stack receiver as the result.
+  __ bind(&use_receiver);
+  __ mov(eax, Operand(esp, 0));
+
+  // Restore the arguments count and leave the construct frame.
+  __ bind(&exit);
+  __ mov(ebx, Operand(esp, kPointerSize));  // get arguments count
+  __ LeaveConstructFrame();
+
+  // Remove caller arguments from the stack and return.
+  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
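+  // ebx holds the smi-tagged argument count (count << 1), so scaling it by
+  // times_2 below yields count * kPointerSize bytes; the extra kPointerSize
+  // is the receiver.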
+  __ pop(ecx);
+  __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize));  // 1 ~ receiver
+  __ push(ecx);
+  __ IncrementCounter(&Counters::constructed_objects, 1);
+  __ ret(0);
+}
+
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+                                             bool is_construct) {
+  // Clear the context before we push it when entering the JS frame.
+  __ xor_(esi, Operand(esi));  // clear esi
+
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Load the previous frame pointer (ebx) to access C arguments
+  __ mov(ebx, Operand(ebp, 0));
+
+  // Get the function from the frame and setup the context.
+  __ mov(ecx, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
+  __ mov(esi, FieldOperand(ecx, JSFunction::kContextOffset));
+
+  // Push the function and the receiver onto the stack.
+  __ push(ecx);
+  __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset));
+
+  // Load the number of arguments and setup pointer to the arguments.
+  __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset));
+  __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
+
+  // Copy arguments to the stack in a loop.
+  Label loop, entry;
+  __ xor_(ecx, Operand(ecx));  // clear ecx
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ mov(edx, Operand(ebx, ecx, times_4, 0));  // push parameter from argv
+  __ push(Operand(edx, 0));  // dereference handle
+  __ inc(Operand(ecx));
+  __ bind(&entry);
+  __ cmp(ecx, Operand(eax));
+  __ j(not_equal, &loop);
+
+  // Get the function from the stack and call it.
+  __ mov(edi, Operand(esp, eax, times_4, +1 * kPointerSize));  // +1 ~ receiver
+
+  // Invoke the code.
+  if (is_construct) {
+    __ call(Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)),
+            RelocInfo::CODE_TARGET);
+  } else {
+    ParameterCount actual(eax);
+    __ InvokeFunction(edi, actual, CALL_FUNCTION);
+  }
+
+  // Exit the JS frame. Notice that this also removes the empty
+  // context and the function left on the stack by the code
+  // invocation.
+  __ LeaveInternalFrame();
+  __ ret(1 * kPointerSize);  // remove receiver
+}
+
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+
+void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+  // 1. Make sure we have at least one argument.
+  { Label done;
+    __ test(eax, Operand(eax));
+    __ j(not_zero, &done, taken);
+    __ pop(ebx);
+    __ push(Immediate(Factory::undefined_value()));
+    __ push(ebx);
+    __ inc(eax);
+    __ bind(&done);
+  }
+
+  // 2. Get the function to call from the stack.
+  { Label done, non_function, function;
+    // +1 ~ return address.
+    __ mov(edi, Operand(esp, eax, times_4, +1 * kPointerSize));
+    __ test(edi, Immediate(kSmiTagMask));
+    __ j(zero, &non_function, not_taken);
+    __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+    __ j(equal, &function, taken);
+
+    // Non-function called: Clear the function to force exception.
+    __ bind(&non_function);
+    __ xor_(edi, Operand(edi));
+    __ jmp(&done);
+
+    // Function called: Change context eagerly to get the right global object.
+    __ bind(&function);
+    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+    __ bind(&done);
+  }
+
+  // 3. Make sure first argument is an object; convert if necessary.
+  { Label call_to_object, use_global_receiver, patch_receiver, done;
+    __ mov(ebx, Operand(esp, eax, times_4, 0));
+
+    __ test(ebx, Immediate(kSmiTagMask));
+    __ j(zero, &call_to_object);
+
+    __ cmp(ebx, Factory::null_value());
+    __ j(equal, &use_global_receiver);
+    __ cmp(ebx, Factory::undefined_value());
+    __ j(equal, &use_global_receiver);
+
+    __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
+    __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+    __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+    __ j(less, &call_to_object);
+    __ cmp(ecx, LAST_JS_OBJECT_TYPE);
+    __ j(less_equal, &done);
+
+    __ bind(&call_to_object);
+    __ EnterInternalFrame();  // preserves eax, ebx, edi
+
+    // Store the arguments count on the stack (smi tagged).
+    ASSERT(kSmiTag == 0);
+    __ shl(eax, kSmiTagSize);
+    __ push(eax);
+
+    __ push(edi);  // save edi across the call
+    __ push(ebx);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ mov(ebx, eax);
+    __ pop(edi);  // restore edi after the call
+
+    // Get the arguments count and untag it.
+    __ pop(eax);
+    __ shr(eax, kSmiTagSize);
+
+    __ LeaveInternalFrame();
+    __ jmp(&patch_receiver);
+
+    // Use the global receiver object from the called function as the receiver.
+    __ bind(&use_global_receiver);
+    const int kGlobalIndex =
+        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+    __ mov(ebx, FieldOperand(esi, kGlobalIndex));
+    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+
+    __ bind(&patch_receiver);
+    __ mov(Operand(esp, eax, times_4, 0), ebx);
+
+    __ bind(&done);
+  }
+
+  // 4. Shift stuff one slot down the stack.
+  { Label loop;
+    __ lea(ecx, Operand(eax, +1));  // +1 ~ copy receiver too
+    __ bind(&loop);
+    __ mov(ebx, Operand(esp, ecx, times_4, 0));
+    __ mov(Operand(esp, ecx, times_4, kPointerSize), ebx);
+    __ dec(ecx);
+    __ j(not_zero, &loop);
+  }
+
+  // 5. Remove TOS (copy of last arguments), but keep return address.
+  __ pop(ebx);
+  __ pop(ecx);
+  __ push(ebx);
+  __ dec(eax);
+
+  // 6. Check that function really was a function and get the code to
+  //    call from the function and check that the number of expected
+  //    arguments matches what we're providing.
+  { Label invoke;
+    __ test(edi, Operand(edi));
+    __ j(not_zero, &invoke, taken);
+    __ xor_(ebx, Operand(ebx));
+    __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
+    __ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+           RelocInfo::CODE_TARGET);
+
+    __ bind(&invoke);
+    __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+    __ mov(ebx,
+           FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+    __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+    __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+    __ cmp(eax, Operand(ebx));
+    __ j(not_equal, Handle<Code>(builtin(ArgumentsAdaptorTrampoline)));
+  }
+
+  // 7. Jump (tail-call) to the code in register edx without checking arguments.
+  ParameterCount expected(0);
+  __ InvokeCode(Operand(edx), expected, expected, JUMP_FUNCTION);
+}
+
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+  __ EnterInternalFrame();
+
+  __ push(Operand(ebp, 4 * kPointerSize));  // push this
+  __ push(Operand(ebp, 2 * kPointerSize));  // push arguments
+  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+  if (FLAG_check_stack) {
+    // We need to catch preemptions right here; otherwise an unlucky preemption
+    // could show up as a failed apply.
+    ExternalReference stack_guard_limit =
+        ExternalReference::address_of_stack_guard_limit();
+    Label retry_preemption;
+    Label no_preemption;
+    __ bind(&retry_preemption);
+    __ mov(edi, Operand::StaticVariable(stack_guard_limit));
+    __ cmp(esp, Operand(edi));
+    __ j(above, &no_preemption, taken);
+
+    // Preemption!
+    // Because builtins always remove the receiver from the stack, we
+    // have to fake one to avoid underflowing the stack.
+    __ push(eax);
+    __ push(Immediate(Smi::FromInt(0)));
+
+    // Do call to runtime routine.
+    __ CallRuntime(Runtime::kStackGuard, 1);
+    __ pop(eax);
+    __ jmp(&retry_preemption);
+
+    __ bind(&no_preemption);
+
+    Label okay;
+    // Make ecx the space we have left.
+    __ mov(ecx, Operand(esp));
+    __ sub(ecx, Operand(edi));
+    // Make edx the space we need for the array when it is unrolled onto the
+    // stack.
+    __ mov(edx, Operand(eax));
+    __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
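+    // (eax holds the argument count as a smi, i.e. count << 1; shifting the
+    // copy in edx left by kPointerSizeLog2 - kSmiTagSize = 1 more bit
+    // yields count * kPointerSize bytes.)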
+    __ cmp(ecx, Operand(edx));
+    __ j(greater, &okay, taken);
+
+    // Too bad: Out of stack space.
+    __ push(Operand(ebp, 4 * kPointerSize));  // push this
+    __ push(eax);
+    __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+    __ bind(&okay);
+  }
+
+  // Push current index and limit.
+  const int kLimitOffset =
+      StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+  const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
+  __ push(eax);  // limit
+  __ push(Immediate(0));  // index
+
+  // Change context eagerly to get the right global object if
+  // necessary.
+  __ mov(edi, Operand(ebp, 4 * kPointerSize));
+  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+  // Compute the receiver.
+  Label call_to_object, use_global_receiver, push_receiver;
+  __ mov(ebx, Operand(ebp, 3 * kPointerSize));
+  __ test(ebx, Immediate(kSmiTagMask));
+  __ j(zero, &call_to_object);
+  __ cmp(ebx, Factory::null_value());
+  __ j(equal, &use_global_receiver);
+  __ cmp(ebx, Factory::undefined_value());
+  __ j(equal, &use_global_receiver);
+
+  // If the given receiver is already a JavaScript object, there is no
+  // reason to convert it.
+  __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
+  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+  __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+  __ j(less, &call_to_object);
+  __ cmp(ecx, LAST_JS_OBJECT_TYPE);
+  __ j(less_equal, &push_receiver);
+
+  // Convert the receiver to an object.
+  __ bind(&call_to_object);
+  __ push(ebx);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+  __ mov(ebx, Operand(eax));
+  __ jmp(&push_receiver);
+
+  // Use the current global receiver object as the receiver.
+  __ bind(&use_global_receiver);
+  const int kGlobalOffset =
+      Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+  __ mov(ebx, FieldOperand(esi, kGlobalOffset));
+  __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+
+  // Push the receiver.
+  __ bind(&push_receiver);
+  __ push(ebx);
+
+  // Copy all arguments from the array to the stack.
+  Label entry, loop;
+  __ mov(eax, Operand(ebp, kIndexOffset));
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ mov(ecx, Operand(ebp, 2 * kPointerSize));  // load arguments
+  __ push(ecx);
+  __ push(eax);
+
+  // Use inline caching to speed up access to arguments.
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+  __ call(ic, RelocInfo::CODE_TARGET);
+  // It is important that we do not have a test instruction after the
+  // call.  A test instruction after the call is used to indicate that
+  // we have generated an inline version of the keyed load.  In this
+  // case, we know that we are not generating a test instruction next.
+
+  // Remove IC arguments from the stack and push the nth argument.
+  __ add(Operand(esp), Immediate(2 * kPointerSize));
+  __ push(eax);
+
+  // Update the index on the stack and in register eax.
+  __ mov(eax, Operand(ebp, kIndexOffset));
+  __ add(Operand(eax), Immediate(1 << kSmiTagSize));
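+  // (Adding 1 << kSmiTagSize, i.e. 2, advances the smi index by one.)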
+  __ mov(Operand(ebp, kIndexOffset), eax);
+
+  __ bind(&entry);
+  __ cmp(eax, Operand(ebp, kLimitOffset));
+  __ j(not_equal, &loop);
+
+  // Invoke the function.
+  ParameterCount actual(eax);
+  __ shr(eax, kSmiTagSize);
+  __ mov(edi, Operand(ebp, 4 * kPointerSize));
+  __ InvokeFunction(edi, actual, CALL_FUNCTION);
+
+  __ LeaveInternalFrame();
+  __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
+}
+
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+  // Load the global context.
+  __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ mov(result, FieldOperand(result, GlobalObject::kGlobalContextOffset));
+  // Load the Array function from the global context.
+  __ mov(result,
+         Operand(result, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+// Number of empty elements to allocate for an empty array.
+static const int kPreallocatedArrayElements = 4;
+
+
+// Allocate an empty JSArray. The allocated array is put into the result
+// register. If the parameter initial_capacity is larger than zero an elements
+// backing store is allocated with this size and filled with the hole values.
+// Otherwise the elements backing store is set to the empty FixedArray.
+static void AllocateEmptyJSArray(MacroAssembler* masm,
+                                 Register array_function,
+                                 Register result,
+                                 Register scratch1,
+                                 Register scratch2,
+                                 Register scratch3,
+                                 int initial_capacity,
+                                 Label* gc_required) {
+  ASSERT(initial_capacity >= 0);
+
+  // Load the initial map from the array function.
+  __ mov(scratch1, FieldOperand(array_function,
+                                JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Allocate the JSArray object together with space for a fixed array with the
+  // requested elements.
+  int size = JSArray::kSize;
+  if (initial_capacity > 0) {
+    size += FixedArray::SizeFor(initial_capacity);
+  }
+  __ AllocateInNewSpace(size,
+                        result,
+                        scratch2,
+                        scratch3,
+                        gc_required,
+                        TAG_OBJECT);
+
+  // Allocated the JSArray. Now initialize the fields except for the elements
+  // array.
+  // result: JSObject
+  // scratch1: initial map
+  // scratch2: start of next object
+  __ mov(FieldOperand(result, JSObject::kMapOffset), scratch1);
+  __ mov(FieldOperand(result, JSArray::kPropertiesOffset),
+         Factory::empty_fixed_array());
+  // Field JSArray::kElementsOffset is initialized later.
+  __ mov(FieldOperand(result, JSArray::kLengthOffset), Immediate(0));
+
+  // If no storage is requested for the elements array just set the empty
+  // fixed array.
+  if (initial_capacity == 0) {
+    __ mov(FieldOperand(result, JSArray::kElementsOffset),
+           Factory::empty_fixed_array());
+    return;
+  }
+
+  // Calculate the location of the elements array and set elements array member
+  // of the JSArray.
+  // result: JSObject
+  // scratch2: start of next object
+  __ lea(scratch1, Operand(result, JSArray::kSize));
+  __ mov(FieldOperand(result, JSArray::kElementsOffset), scratch1);
+
+  // Initialize the FixedArray and fill it with holes. FixedArray length is not
+  // stored as a smi.
+  // result: JSObject
+  // scratch1: elements array
+  // scratch2: start of next object
+  __ mov(FieldOperand(scratch1, JSObject::kMapOffset),
+         Factory::fixed_array_map());
+  __ mov(FieldOperand(scratch1, Array::kLengthOffset),
+         Immediate(initial_capacity));
+
+  // Fill the FixedArray with the hole value. Inline the code if short.
+  // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
+  static const int kLoopUnfoldLimit = 4;
+  ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
+  if (initial_capacity <= kLoopUnfoldLimit) {
+    // Use a scratch register here to have only one reloc info when unfolding
+    // the loop.
+    __ mov(scratch3, Factory::the_hole_value());
+    for (int i = 0; i < initial_capacity; i++) {
+      __ mov(FieldOperand(scratch1,
+                          FixedArray::kHeaderSize + i * kPointerSize),
+             scratch3);
+    }
+  } else {
+    Label loop, entry;
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ mov(Operand(scratch1, 0), Factory::the_hole_value());
+    __ add(Operand(scratch1), Immediate(kPointerSize));
+    __ bind(&entry);
+    __ cmp(scratch1, Operand(scratch2));
+    __ j(below, &loop);
+  }
+}
+
+
+// Allocate a JSArray with the number of elements stored in a register. The
+// register array_function holds the built-in Array function and the register
+// array_size holds the size of the array as a smi. The allocated array is put
+// into the result register, and the beginning and end of the FixedArray
+// elements storage are put into registers elements_array and
+// elements_array_end (see below for when that is not the case). If the
+// parameter fill_with_hole is true the allocated elements backing store is
+// filled with the hole values; otherwise it is left uninitialized. When the
+// backing store is filled the register elements_array is scratched.
+static void AllocateJSArray(MacroAssembler* masm,
+                            Register array_function,  // Array function.
+                            Register array_size,  // As a smi.
+                            Register result,
+                            Register elements_array,
+                            Register elements_array_end,
+                            Register scratch,
+                            bool fill_with_hole,
+                            Label* gc_required) {
+  Label not_empty, allocated;
+
+  // Load the initial map from the array function.
+  __ mov(elements_array,
+         FieldOperand(array_function,
+                      JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check whether an empty sized array is requested.
+  __ test(array_size, Operand(array_size));
+  __ j(not_zero, &not_empty);
+
+  // If an empty array is requested allocate a small elements array anyway. This
+  // keeps the code below free of special casing for the empty array.
+  int size = JSArray::kSize + FixedArray::SizeFor(kPreallocatedArrayElements);
+  __ AllocateInNewSpace(size,
+                        result,
+                        elements_array_end,
+                        scratch,
+                        gc_required,
+                        TAG_OBJECT);
+  __ jmp(&allocated);
+
+  // Allocate the JSArray object together with space for a FixedArray with the
+  // requested elements.
+  __ bind(&not_empty);
+  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
+                        times_half_pointer_size,  // array_size is a smi.
+                        array_size,
+                        result,
+                        elements_array_end,
+                        scratch,
+                        gc_required,
+                        TAG_OBJECT);
+
+  // Allocated the JSArray. Now initialize the fields except for the elements
+  // array.
+  // result: JSObject
+  // elements_array: initial map
+  // elements_array_end: start of next object
+  // array_size: size of array (smi)
+  __ bind(&allocated);
+  __ mov(FieldOperand(result, JSObject::kMapOffset), elements_array);
+  __ mov(elements_array, Factory::empty_fixed_array());
+  __ mov(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
+  // Field JSArray::kElementsOffset is initialized later.
+  __ mov(FieldOperand(result, JSArray::kLengthOffset), array_size);
+
+  // Calculate the location of the elements array and set elements array member
+  // of the JSArray.
+  // result: JSObject
+  // elements_array_end: start of next object
+  // array_size: size of array (smi)
+  __ lea(elements_array, Operand(result, JSArray::kSize));
+  __ mov(FieldOperand(result, JSArray::kElementsOffset), elements_array);
+
+  // Initialize the fixed array. FixedArray length is not stored as a smi.
+  // result: JSObject
+  // elements_array: elements array
+  // elements_array_end: start of next object
+  // array_size: size of array (smi)
+  ASSERT(kSmiTag == 0);
+  __ shr(array_size, kSmiTagSize);  // Convert from smi to value.
+  __ mov(FieldOperand(elements_array, JSObject::kMapOffset),
+         Factory::fixed_array_map());
+  Label not_empty_2, fill_array;
+  __ test(array_size, Operand(array_size));
+  __ j(not_zero, &not_empty_2);
+  // Length of the FixedArray is the number of pre-allocated elements even
+  // though the actual JSArray has length 0.
+  __ mov(FieldOperand(elements_array, Array::kLengthOffset),
+         Immediate(kPreallocatedArrayElements));
+  __ jmp(&fill_array);
+  __ bind(&not_empty_2);
+  // For non-empty JSArrays the length of the FixedArray and the JSArray is the
+  // same.
+  __ mov(FieldOperand(elements_array, Array::kLengthOffset), array_size);
+
+  // Fill the allocated FixedArray with the hole value if requested.
+  // result: JSObject
+  // elements_array: elements array
+  // elements_array_end: start of next object
+  __ bind(&fill_array);
+  if (fill_with_hole) {
+    Label loop, entry;
+    __ mov(scratch, Factory::the_hole_value());
+    __ lea(elements_array, Operand(elements_array,
+                                   FixedArray::kHeaderSize - kHeapObjectTag));
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ mov(Operand(elements_array, 0), scratch);
+    __ add(Operand(elements_array), Immediate(kPointerSize));
+    __ bind(&entry);
+    __ cmp(elements_array, Operand(elements_array_end));
+    __ j(below, &loop);
+  }
+}
+
+
+// Create a new array for the built-in Array function. This function allocates
+// the JSArray object and the FixedArray elements array and initializes these.
+// If the Array cannot be constructed in native code the runtime is called. This
+// function assumes the following state:
+//   edi: constructor (built-in Array function)
+//   eax: argc
+//   esp[0]: return address
+//   esp[4]: last argument
+// This function is used for both construct and normal calls of Array. Whether
+// it is a construct call or not is indicated by the construct_call parameter.
+// The only difference between handling a construct call and a normal call is
+// that for a construct call the constructor function in edi needs to be
+// preserved for entering the generic code. In both cases argc in eax needs to
+// be preserved.
+static void ArrayNativeCode(MacroAssembler* masm,
+                            bool construct_call,
+                            Label *call_generic_code) {
+  Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call;
+
+  // Push the constructor and argc. No need to tag argc as a smi, as there will
+  // be no garbage collection with this on the stack.
+  int push_count = 0;
+  if (construct_call) {
+    push_count++;
+    __ push(edi);
+  }
+  push_count++;
+  __ push(eax);
+
+  // Check for array construction with zero arguments.
+  __ test(eax, Operand(eax));
+  __ j(not_zero, &argc_one_or_more);
+
+  // Handle construction of an empty array.
+  AllocateEmptyJSArray(masm,
+                       edi,
+                       eax,
+                       ebx,
+                       ecx,
+                       edi,
+                       kPreallocatedArrayElements,
+                       &prepare_generic_code_call);
+  __ IncrementCounter(&Counters::array_function_native, 1);
+  __ pop(ebx);
+  if (construct_call) {
+    __ pop(edi);
+  }
+  __ ret(kPointerSize);
+
+  // Check for one argument. Bail out if the argument is not a smi or if it
+  // is negative.
+  __ bind(&argc_one_or_more);
+  __ cmp(eax, 1);
+  __ j(not_equal, &argc_two_or_more);
+  ASSERT(kSmiTag == 0);
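+  // (With kSmiTag == 0, a word is a non-negative smi exactly when both its
+  // sign bit and its smi tag bit are clear, so a single test against this
+  // combined mask covers both checks.)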
+  __ test(Operand(esp, (push_count + 1) * kPointerSize),
+          Immediate(kIntptrSignBit | kSmiTagMask));
+  __ j(not_zero, &prepare_generic_code_call);
+
+  // Handle construction of an empty array of a certain size. Get the size
+  // from the stack and bail out if the size is too large to actually
+  // allocate an elements array.
+  __ mov(edx, Operand(esp, (push_count + 1) * kPointerSize));
+  ASSERT(kSmiTag == 0);
+  __ cmp(edx, JSObject::kInitialMaxFastElementArray << kSmiTagSize);
+  __ j(greater_equal, &prepare_generic_code_call);
+
+  // edx: array_size (smi)
+  // edi: constructor
+  // esp[0]: argc
+  // esp[4]: constructor (only if construct_call)
+  // esp[8]: return address
+  // esp[C]: argument
+  AllocateJSArray(masm,
+                  edi,
+                  edx,
+                  eax,
+                  ebx,
+                  ecx,
+                  edi,
+                  true,
+                  &prepare_generic_code_call);
+  __ IncrementCounter(&Counters::array_function_native, 1);
+  __ pop(ebx);
+  if (construct_call) {
+    __ pop(edi);
+  }
+  __ ret(2 * kPointerSize);
+
+  // Handle construction of an array from a list of arguments.
+  __ bind(&argc_two_or_more);
+  ASSERT(kSmiTag == 0);
+  __ shl(eax, kSmiTagSize);  // Convert argc to a smi.
+  // eax: array_size (smi)
+  // edi: constructor
+  // esp[0]: argc
+  // esp[4]: constructor (only if construct_call)
+  // esp[8]: return address
+  // esp[C]: last argument
+  AllocateJSArray(masm,
+                  edi,
+                  eax,
+                  ebx,
+                  ecx,
+                  edx,
+                  edi,
+                  false,
+                  &prepare_generic_code_call);
+  __ IncrementCounter(&Counters::array_function_native, 1);
+  __ mov(eax, ebx);
+  __ pop(ebx);
+  if (construct_call) {
+    __ pop(edi);
+  }
+  __ push(eax);
+  // eax: JSArray
+  // ebx: argc
+  // edx: elements_array_end (untagged)
+  // esp[0]: JSArray
+  // esp[4]: return address
+  // esp[8]: last argument
+
+  // Location of the last argument
+  __ lea(edi, Operand(esp, 2 * kPointerSize));
+
+  // Location of the first array element (the parameter fill_with_hole to
+  // AllocateJSArray is false, so the FixedArray is returned in ecx).
+  __ lea(edx, Operand(ecx, FixedArray::kHeaderSize - kHeapObjectTag));
+
+  // ebx: argc
+  // edx: location of the first array element
+  // edi: location of the last argument
+  // esp[0]: JSArray
+  // esp[4]: return address
+  // esp[8]: last argument
+  Label loop, entry;
+  __ mov(ecx, ebx);
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
+  __ mov(Operand(edx, 0), eax);
+  __ add(Operand(edx), Immediate(kPointerSize));
+  __ bind(&entry);
+  __ dec(ecx);
+  __ j(greater_equal, &loop);
+
+  // Remove caller arguments from the stack and return.
+  // ebx: argc
+  // esp[0]: JSArray
+  // esp[4]: return address
+  // esp[8]: last argument
+  __ pop(eax);
+  __ pop(ecx);
+  __ lea(esp, Operand(esp, ebx, times_pointer_size, 1 * kPointerSize));
+  __ push(ecx);
+  __ ret(0);
+
+  // Restore argc and constructor before running the generic code.
+  __ bind(&prepare_generic_code_call);
+  __ pop(eax);
+  if (construct_call) {
+    __ pop(edi);
+  }
+  __ jmp(call_generic_code);
+}
+
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : argc
+  //  -- esp[0] : return address
+  //  -- esp[4] : last argument
+  // -----------------------------------
+  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+  // Get the Array function.
+  GenerateLoadArrayFunction(masm, edi);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin Array function should be a map.
+    __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+    // The smi tag check catches both a NULL and a Smi.
+    __ test(ebx, Immediate(kSmiTagMask));
+    __ Assert(not_zero, "Unexpected initial map for Array function");
+    __ CmpObjectType(ebx, MAP_TYPE, ecx);
+    __ Assert(equal, "Unexpected initial map for Array function");
+  }
+
+  // Run the native code for the Array function called as a normal function.
+  ArrayNativeCode(masm, false, &generic_array_code);
+
+  // Jump to the generic array code in case the specialized code cannot handle
+  // the construction.
+  __ bind(&generic_array_code);
+  Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
+  Handle<Code> array_code(code);
+  __ jmp(array_code, RelocInfo::CODE_TARGET);
+}
+
+
+void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : argc
+  //  -- edi : constructor
+  //  -- esp[0] : return address
+  //  -- esp[4] : last argument
+  // -----------------------------------
+  Label generic_constructor;
+
+  if (FLAG_debug_code) {
+    // The array construct code is only set for the builtin Array function,
+    // which always has a map.
+    GenerateLoadArrayFunction(masm, ebx);
+    __ cmp(edi, Operand(ebx));
+    __ Assert(equal, "Unexpected Array function");
+    // Initial map for the builtin Array function should be a map.
+    __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+    // The smi tag check catches both a NULL and a Smi.
+    __ test(ebx, Immediate(kSmiTagMask));
+    __ Assert(not_zero, "Unexpected initial map for Array function");
+    __ CmpObjectType(ebx, MAP_TYPE, ecx);
+    __ Assert(equal, "Unexpected initial map for Array function");
+  }
+
+  // Run the native code for the Array function called as constructor.
+  ArrayNativeCode(masm, true, &generic_constructor);
+
+  // Jump to the generic construct code in case the specialized code cannot
+  // handle the construction.
+  __ bind(&generic_constructor);
+  Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+  Handle<Code> generic_construct_stub(code);
+  __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+}
+
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+  __ push(ebp);
+  __ mov(ebp, Operand(esp));
+
+  // Store the arguments adaptor context sentinel.
+  __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Push the function on the stack.
+  __ push(edi);
+
+  // Preserve the number of arguments on the stack. Must preserve both
+  // eax and ebx because these registers are used when copying the
+  // arguments and the receiver.
+  ASSERT(kSmiTagSize == 1);
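+  // The lea below computes eax + eax*1 + kSmiTag = eax << 1, i.e. the
+  // argument count as a smi, without clobbering eax.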
+  __ lea(ecx, Operand(eax, eax, times_1, kSmiTag));
+  __ push(ecx);
+}
+
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+  // Retrieve the number of arguments from the stack.
+  __ mov(ebx, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+  // Leave the frame.
+  __ leave();
+
+  // Remove caller arguments from the stack.
+  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  __ pop(ecx);
+  __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize));  // 1 ~ receiver
+  __ push(ecx);
+}
+
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : actual number of arguments
+  //  -- ebx : expected number of arguments
+  //  -- edx : code entry to call
+  // -----------------------------------
+
+  Label invoke, dont_adapt_arguments;
+  __ IncrementCounter(&Counters::arguments_adaptors, 1);
+
+  Label enough, too_few;
+  __ cmp(eax, Operand(ebx));
+  __ j(less, &too_few);
+  __ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
+  __ j(equal, &dont_adapt_arguments);
+
+  {  // Enough parameters: Actual >= expected.
+    __ bind(&enough);
+    EnterArgumentsAdaptorFrame(masm);
+
+    // Copy receiver and all expected arguments.
+    const int offset = StandardFrameConstants::kCallerSPOffset;
+    __ lea(eax, Operand(ebp, eax, times_4, offset));
+    __ mov(ecx, -1);  // account for receiver
+
+    Label copy;
+    __ bind(&copy);
+    __ inc(ecx);
+    __ push(Operand(eax, 0));
+    __ sub(Operand(eax), Immediate(kPointerSize));
+    __ cmp(ecx, Operand(ebx));
+    __ j(less, &copy);
+    __ jmp(&invoke);
+  }
+
+  {  // Too few parameters: Actual < expected.
+    __ bind(&too_few);
+    EnterArgumentsAdaptorFrame(masm);
+
+    // Copy receiver and all actual arguments.
+    const int offset = StandardFrameConstants::kCallerSPOffset;
+    __ lea(edi, Operand(ebp, eax, times_4, offset));
+    __ mov(ecx, -1);  // account for receiver
+
+    Label copy;
+    __ bind(&copy);
+    __ inc(ecx);
+    __ push(Operand(edi, 0));
+    __ sub(Operand(edi), Immediate(kPointerSize));
+    __ cmp(ecx, Operand(eax));
+    __ j(less, &copy);
+
+    // Fill remaining expected arguments with undefined values.
+    Label fill;
+    __ bind(&fill);
+    __ inc(ecx);
+    __ push(Immediate(Factory::undefined_value()));
+    __ cmp(ecx, Operand(ebx));
+    __ j(less, &fill);
+
+    // Restore function pointer.
+    __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  }
+
+  // Call the entry point.
+  __ bind(&invoke);
+  __ call(Operand(edx));
+
+  // Leave frame and return.
+  LeaveArgumentsAdaptorFrame(masm);
+  __ ret(0);
+
+  // -------------------------------------------
+  // Don't adapt arguments.
+  // -------------------------------------------
+  __ bind(&dont_adapt_arguments);
+  __ jmp(Operand(edx));
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/ia32/codegen-ia32-inl.h b/src/ia32/codegen-ia32-inl.h
new file mode 100644
index 0000000..44e937a
--- /dev/null
+++ b/src/ia32/codegen-ia32-inl.h
@@ -0,0 +1,56 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_IA32_CODEGEN_IA32_INL_H_
+#define V8_IA32_CODEGEN_IA32_INL_H_
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// Platform-specific inline functions.
+
+void DeferredCode::Jump() { __ jmp(&entry_label_); }
+void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); }
+
+void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
+  GenerateFastMathOp(SIN, args);
+}
+
+
+void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
+  GenerateFastMathOp(COS, args);
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
+
+#endif  // V8_IA32_CODEGEN_IA32_INL_H_
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
new file mode 100644
index 0000000..0e314b9
--- /dev/null
+++ b/src/ia32/codegen-ia32.cc
@@ -0,0 +1,7979 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "debug.h"
+#include "ic-inl.h"
+#include "parser.h"
+#include "register-allocator-inl.h"
+#include "runtime.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// -------------------------------------------------------------------------
+// Platform-specific DeferredCode functions.
+
+void DeferredCode::SaveRegisters() {
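+  // Each entry in registers_ is either kPush (save the register by pushing
+  // it), kIgnore (nothing to save), or an ebp-relative offset at which to
+  // store the register; the kSyncedFlag bit marks values that are already
+  // in memory, for which the store is skipped.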
+  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+    int action = registers_[i];
+    if (action == kPush) {
+      __ push(RegisterAllocator::ToRegister(i));
+    } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
+      __ mov(Operand(ebp, action), RegisterAllocator::ToRegister(i));
+    }
+  }
+}
+
+
+void DeferredCode::RestoreRegisters() {
+  // Restore registers in reverse order due to the stack.
+  for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
+    int action = registers_[i];
+    if (action == kPush) {
+      __ pop(RegisterAllocator::ToRegister(i));
+    } else if (action != kIgnore) {
+      action &= ~kSyncedFlag;
+      __ mov(RegisterAllocator::ToRegister(i), Operand(ebp, action));
+    }
+  }
+}
+
+
+// -------------------------------------------------------------------------
+// CodeGenState implementation.
+
+CodeGenState::CodeGenState(CodeGenerator* owner)
+    : owner_(owner),
+      typeof_state_(NOT_INSIDE_TYPEOF),
+      destination_(NULL),
+      previous_(NULL) {
+  owner_->set_state(this);
+}
+
+
+CodeGenState::CodeGenState(CodeGenerator* owner,
+                           TypeofState typeof_state,
+                           ControlDestination* destination)
+    : owner_(owner),
+      typeof_state_(typeof_state),
+      destination_(destination),
+      previous_(owner->state()) {
+  owner_->set_state(this);
+}
+
+
+CodeGenState::~CodeGenState() {
+  ASSERT(owner_->state() == this);
+  owner_->set_state(previous_);
+}
+
+
+// -------------------------------------------------------------------------
+// CodeGenerator implementation
+
+CodeGenerator::CodeGenerator(int buffer_size,
+                             Handle<Script> script,
+                             bool is_eval)
+    : is_eval_(is_eval),
+      script_(script),
+      deferred_(8),
+      masm_(new MacroAssembler(NULL, buffer_size)),
+      scope_(NULL),
+      frame_(NULL),
+      allocator_(NULL),
+      state_(NULL),
+      loop_nesting_(0),
+      function_return_is_shadowed_(false),
+      in_spilled_code_(false) {
+}
+
+
+// Calling conventions:
+// ebp: caller's frame pointer
+// esp: stack pointer
+// edi: called JS function
+// esi: callee's context
+
+void CodeGenerator::GenCode(FunctionLiteral* fun) {
+  // Record the position for debugging purposes.
+  CodeForFunctionPosition(fun);
+
+  ZoneList<Statement*>* body = fun->body();
+
+  // Initialize state.
+  ASSERT(scope_ == NULL);
+  scope_ = fun->scope();
+  ASSERT(allocator_ == NULL);
+  RegisterAllocator register_allocator(this);
+  allocator_ = &register_allocator;
+  ASSERT(frame_ == NULL);
+  frame_ = new VirtualFrame();
+  set_in_spilled_code(false);
+
+  // Adjust for function-level loop nesting.
+  loop_nesting_ += fun->loop_nesting();
+
+  JumpTarget::set_compiling_deferred_code(false);
+
+#ifdef DEBUG
+  if (strlen(FLAG_stop_at) > 0 &&
+      fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+    frame_->SpillAll();
+    __ int3();
+  }
+#endif
+
+  // New scope to get automatic timing calculation.
+  {  // NOLINT
+    HistogramTimerScope codegen_timer(&Counters::code_generation);
+    CodeGenState state(this);
+
+    // Entry:
+    // Stack: receiver, arguments, return address.
+    // ebp: caller's frame pointer
+    // esp: stack pointer
+    // edi: called JS function
+    // esi: callee's context
+    allocator_->Initialize();
+    frame_->Enter();
+
+    // Allocate space for locals and initialize them.
+    frame_->AllocateStackSlots();
+    // Initialize the function return target after the locals are set
+    // up, because it needs the expected frame height from the frame.
+    function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
+    function_return_is_shadowed_ = false;
+
+    // Allocate the local context if needed.
+    if (scope_->num_heap_slots() > 0) {
+      Comment cmnt(masm_, "[ allocate local context");
+      // Allocate local context.
+      // Get outer context and create a new context based on it.
+      frame_->PushFunction();
+      Result context = frame_->CallRuntime(Runtime::kNewContext, 1);
+
+      // Update context local.
+      frame_->SaveContextRegister();
+
+      // Verify that the runtime call result and esi agree.
+      if (FLAG_debug_code) {
+        __ cmp(context.reg(), Operand(esi));
+        __ Assert(equal, "Runtime::NewContext should end up in esi");
+      }
+    }
+
+    // TODO(1241774): Improve this code:
+    // 1) only needed if we have a context
+    // 2) no need to recompute context ptr every single time
+    // 3) don't copy parameter operand code from SlotOperand!
+    {
+      Comment cmnt2(masm_, "[ copy context parameters into .context");
+
+      // Note that the iteration order is relevant here! If we have the same
+      // parameter twice (e.g., function (x, y, x)) and that parameter
+      // needs to be copied into the context, the value that ends up in the
+      // context must be that of the last argument passed for the parameter.
+      // This is a rare case, so we don't check for it; instead we rely on
+      // the copying order: such a parameter is copied repeatedly into the
+      // same context location and thus the last value is what is seen
+      // inside the function.
+      for (int i = 0; i < scope_->num_parameters(); i++) {
+        Variable* par = scope_->parameter(i);
+        Slot* slot = par->slot();
+        if (slot != NULL && slot->type() == Slot::CONTEXT) {
+          // The use of SlotOperand below is safe in unspilled code
+          // because the slot is guaranteed to be a context slot.
+          //
+          // There are no parameters in the global scope.
+          ASSERT(!scope_->is_global_scope());
+          frame_->PushParameterAt(i);
+          Result value = frame_->Pop();
+          value.ToRegister();
+
+          // SlotOperand loads context.reg() with the context object
+          // stored to, used below in RecordWrite.
+          Result context = allocator_->Allocate();
+          ASSERT(context.is_valid());
+          __ mov(SlotOperand(slot, context.reg()), value.reg());
+          int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+          Result scratch = allocator_->Allocate();
+          ASSERT(scratch.is_valid());
+          frame_->Spill(context.reg());
+          frame_->Spill(value.reg());
+          __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
+        }
+      }
+    }
+
+    // Store the arguments object.  This must happen after context
+    // initialization because the arguments object may be stored in
+    // the context.
+    if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
+      StoreArgumentsObject(true);
+    }
+
+    // Generate code to 'execute' declarations and initialize functions
+    // (source elements). In case of an illegal redeclaration we need to
+    // handle that instead of processing the declarations.
+    if (scope_->HasIllegalRedeclaration()) {
+      Comment cmnt(masm_, "[ illegal redeclarations");
+      scope_->VisitIllegalRedeclaration(this);
+    } else {
+      Comment cmnt(masm_, "[ declarations");
+      ProcessDeclarations(scope_->declarations());
+      // Bail out if a stack-overflow exception occurred when processing
+      // declarations.
+      if (HasStackOverflow()) return;
+    }
+
+    if (FLAG_trace) {
+      frame_->CallRuntime(Runtime::kTraceEnter, 0);
+      // Ignore the return value.
+    }
+    CheckStack();
+
+    // Compile the body of the function in a vanilla state. Don't
+    // bother compiling all the code if the scope has an illegal
+    // redeclaration.
+    if (!scope_->HasIllegalRedeclaration()) {
+      Comment cmnt(masm_, "[ function body");
+#ifdef DEBUG
+      bool is_builtin = Bootstrapper::IsActive();
+      bool should_trace =
+          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
+      if (should_trace) {
+        frame_->CallRuntime(Runtime::kDebugTrace, 0);
+        // Ignore the return value.
+      }
+#endif
+      VisitStatements(body);
+
+      // Handle the return from the function.
+      if (has_valid_frame()) {
+        // If there is a valid frame, control flow can fall off the end of
+        // the body.  In that case there is an implicit return statement.
+        ASSERT(!function_return_is_shadowed_);
+        CodeForReturnPosition(fun);
+        frame_->PrepareForReturn();
+        Result undefined(Factory::undefined_value());
+        if (function_return_.is_bound()) {
+          function_return_.Jump(&undefined);
+        } else {
+          function_return_.Bind(&undefined);
+          GenerateReturnSequence(&undefined);
+        }
+      } else if (function_return_.is_linked()) {
+        // If the return target has dangling jumps to it, then we have not
+        // yet generated the return sequence.  This can happen when (a)
+        // control does not flow off the end of the body so we did not
+        // compile an artificial return statement just above, and (b) there
+        // are return statements in the body but (c) they are all shadowed.
+        Result return_value;
+        function_return_.Bind(&return_value);
+        GenerateReturnSequence(&return_value);
+      }
+    }
+  }
+
+  // Adjust for function-level loop nesting.
+  loop_nesting_ -= fun->loop_nesting();
+
+  // Code generation state must be reset.
+  ASSERT(state_ == NULL);
+  ASSERT(loop_nesting() == 0);
+  ASSERT(!function_return_is_shadowed_);
+  function_return_.Unuse();
+  DeleteFrame();
+
+  // Process any deferred code using the register allocator.
+  if (!HasStackOverflow()) {
+    HistogramTimerScope deferred_timer(&Counters::deferred_code_generation);
+    JumpTarget::set_compiling_deferred_code(true);
+    ProcessDeferred();
+    JumpTarget::set_compiling_deferred_code(false);
+  }
+
+  // There is no need to delete the register allocator, it is a
+  // stack-allocated local.
+  allocator_ = NULL;
+  scope_ = NULL;
+}
+
+
+Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
+  // Currently, this assertion will fail if we try to assign to
+  // a constant variable that is constant because it is read-only
+  // (such as the variable referring to a named function expression).
+  // We need to implement assignments to read-only variables.
+  // Ideally, we should do this during AST generation (by converting
+  // such assignments into expression statements); however, in general
+  // we may not be able to make the decision until past AST generation,
+  // that is when the entire program is known.
+  ASSERT(slot != NULL);
+  int index = slot->index();
+  switch (slot->type()) {
+    case Slot::PARAMETER:
+      return frame_->ParameterAt(index);
+
+    case Slot::LOCAL:
+      return frame_->LocalAt(index);
+
+    case Slot::CONTEXT: {
+      // Follow the context chain if necessary.
+      ASSERT(!tmp.is(esi));  // do not overwrite context register
+      Register context = esi;
+      int chain_length = scope()->ContextChainLength(slot->var()->scope());
+      for (int i = 0; i < chain_length; i++) {
+        // Load the closure.
+        // (All contexts, even 'with' contexts, have a closure,
+        // and it is the same for all contexts inside a function.
+        // There is no need to go to the function context first.)
+        __ mov(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
+        // Load the function context (which is the incoming, outer context).
+        __ mov(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
+        context = tmp;
+      }
+      // We may have a 'with' context now. Get the function context.
+      // (In fact this mov may never be needed, since the scope analysis
+      // may not permit a direct context access in this case and thus we are
+      // always at a function context. However, it is safe to dereference be-
+      // cause the function context of a function context is itself. Before
+      // deleting this mov we should try to create a counter-example first,
+      // though...)
+      __ mov(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
+      return ContextOperand(tmp, index);
+    }
+
+    default:
+      UNREACHABLE();
+      return Operand(eax);
+  }
+}
+
+
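+// Return the operand for a context slot, following the context chain from
+// the current context to the context holding the slot.  Any context on the
+// way that may have been extended by a call to eval is checked for an
+// extension object; if one is present we jump to |slow|, since the
+// extension could shadow the slot.  The extension of the last context in
+// the chain is always checked.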
+Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
+                                                         Result tmp,
+                                                         JumpTarget* slow) {
+  ASSERT(slot->type() == Slot::CONTEXT);
+  ASSERT(tmp.is_register());
+  Register context = esi;
+
+  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
+    if (s->num_heap_slots() > 0) {
+      if (s->calls_eval()) {
+        // Check that extension is NULL.
+        __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
+               Immediate(0));
+        slow->Branch(not_equal, not_taken);
+      }
+      __ mov(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
+      __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
+      context = tmp.reg();
+    }
+  }
+  // Check that last extension is NULL.
+  __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
+  slow->Branch(not_equal, not_taken);
+  __ mov(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
+  return ContextOperand(tmp.reg(), slot->index());
+}
+
+
+// Emit code to load the value of an expression to the top of the
+// frame. If the expression is boolean-valued it may be compiled (or
+// partially compiled) into control flow to the control destination.
+// If force_control is true, control flow is forced.
+void CodeGenerator::LoadCondition(Expression* x,
+                                  TypeofState typeof_state,
+                                  ControlDestination* dest,
+                                  bool force_control) {
+  ASSERT(!in_spilled_code());
+  int original_height = frame_->height();
+
+  { CodeGenState new_state(this, typeof_state, dest);
+    Visit(x);
+
+    // If we hit a stack overflow, we may not have actually visited
+    // the expression.  In that case, we ensure that we have a
+    // valid-looking frame state because we will continue to generate
+    // code as we unwind the C++ stack.
+    //
+    // It's possible to have both a stack overflow and a valid frame
+    // state (eg, a subexpression overflowed, visiting it returned
+    // with a dummied frame state, and visiting this expression
+    // returned with a normal-looking state).
+    if (HasStackOverflow() &&
+        !dest->is_used() &&
+        frame_->height() == original_height) {
+      dest->Goto(true);
+    }
+  }
+
+  if (force_control && !dest->is_used()) {
+    // Convert the TOS value into flow to the control destination.
+    ToBoolean(dest);
+  }
+
+  ASSERT(!(force_control && !dest->is_used()));
+  ASSERT(dest->is_used() || frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::LoadAndSpill(Expression* expression,
+                                 TypeofState typeof_state) {
+  ASSERT(in_spilled_code());
+  set_in_spilled_code(false);
+  Load(expression, typeof_state);
+  frame_->SpillAll();
+  set_in_spilled_code(true);
+}
+
+
+void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  ASSERT(!in_spilled_code());
+  JumpTarget true_target;
+  JumpTarget false_target;
+  ControlDestination dest(&true_target, &false_target, true);
+  LoadCondition(x, typeof_state, &dest, false);
+
+  if (dest.false_was_fall_through()) {
+    // The false target was just bound.
+    JumpTarget loaded;
+    frame_->Push(Factory::false_value());
+    // There may be dangling jumps to the true target.
+    if (true_target.is_linked()) {
+      loaded.Jump();
+      true_target.Bind();
+      frame_->Push(Factory::true_value());
+      loaded.Bind();
+    }
+
+  } else if (dest.is_used()) {
+    // There is true, and possibly false, control flow (with true as
+    // the fall through).
+    JumpTarget loaded;
+    frame_->Push(Factory::true_value());
+    if (false_target.is_linked()) {
+      loaded.Jump();
+      false_target.Bind();
+      frame_->Push(Factory::false_value());
+      loaded.Bind();
+    }
+
+  } else {
+    // We have a valid value on top of the frame, but we still may
+    // have dangling jumps to the true and false targets from nested
+    // subexpressions (eg, the left subexpressions of the
+    // short-circuited boolean operators).
+    ASSERT(has_valid_frame());
+    if (true_target.is_linked() || false_target.is_linked()) {
+      JumpTarget loaded;
+      loaded.Jump();  // Don't lose the current TOS.
+      if (true_target.is_linked()) {
+        true_target.Bind();
+        frame_->Push(Factory::true_value());
+        if (false_target.is_linked()) {
+          loaded.Jump();
+        }
+      }
+      if (false_target.is_linked()) {
+        false_target.Bind();
+        frame_->Push(Factory::false_value());
+      }
+      loaded.Bind();
+    }
+  }
+
+  ASSERT(has_valid_frame());
+  ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::LoadGlobal() {
+  if (in_spilled_code()) {
+    frame_->EmitPush(GlobalObject());
+  } else {
+    Result temp = allocator_->Allocate();
+    __ mov(temp.reg(), GlobalObject());
+    frame_->Push(&temp);
+  }
+}
+
+
+void CodeGenerator::LoadGlobalReceiver() {
+  Result temp = allocator_->Allocate();
+  Register reg = temp.reg();
+  __ mov(reg, GlobalObject());
+  __ mov(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
+  frame_->Push(&temp);
+}
+
+
+// TODO(1241834): Get rid of this function in favor of just using Load, now
+// that we have the INSIDE_TYPEOF typeof state. => Need to handle global
+// variables w/o reference errors elsewhere.
+void CodeGenerator::LoadTypeofExpression(Expression* x) {
+  Variable* variable = x->AsVariableProxy()->AsVariable();
+  if (variable != NULL && !variable->is_this() && variable->is_global()) {
+    // NOTE: This is somewhat nasty. We force the compiler to load
+    // the variable as if through '<global>.<variable>' to make sure we
+    // do not get reference errors.
+    Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
+    Literal key(variable->name());
+    // TODO(1241834): Fetch the position from the variable instead of using
+    // no position.
+    Property property(&global, &key, RelocInfo::kNoPosition);
+    Load(&property);
+  } else {
+    Load(x, INSIDE_TYPEOF);
+  }
+}
+
+
+ArgumentsAllocationMode CodeGenerator::ArgumentsMode() const {
+  if (scope_->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
+  ASSERT(scope_->arguments_shadow() != NULL);
+  // We don't want to do lazy arguments allocation for functions that
+  // have heap-allocated contexts, because it interferes with the
+  // uninitialized const tracking in the context objects.
+  return (scope_->num_heap_slots() > 0)
+      ? EAGER_ARGUMENTS_ALLOCATION
+      : LAZY_ARGUMENTS_ALLOCATION;
+}
+
+
+Result CodeGenerator::StoreArgumentsObject(bool initial) {
+  ArgumentsAllocationMode mode = ArgumentsMode();
+  ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
+
+  Comment cmnt(masm_, "[ store arguments object");
+  if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
+    // When using lazy arguments allocation, we store the hole value
+    // as a sentinel indicating that the arguments object hasn't been
+    // allocated yet.
+    frame_->Push(Factory::the_hole_value());
+  } else {
+    ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+    frame_->PushFunction();
+    frame_->PushReceiverSlotAddress();
+    frame_->Push(Smi::FromInt(scope_->num_parameters()));
+    Result result = frame_->CallStub(&stub, 3);
+    frame_->Push(&result);
+  }
+
+  { Reference shadow_ref(this, scope_->arguments_shadow());
+    Reference arguments_ref(this, scope_->arguments());
+    ASSERT(shadow_ref.is_slot() && arguments_ref.is_slot());
+    // Here we rely on the convenient property that references to slots
+    // take up zero space in the frame (ie, it doesn't matter that the
+    // stored value is actually below the reference on the frame).
+    JumpTarget done;
+    bool skip_arguments = false;
+    if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
+      // We have to skip storing into the arguments slot if it has
+      // already been written to. This can happen if a function
+      // has a local variable named 'arguments'.
+      LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+      Result arguments = frame_->Pop();
+      if (arguments.is_constant()) {
+        // We have to skip updating the arguments object if it has
+        // been assigned a proper value.
+        skip_arguments = !arguments.handle()->IsTheHole();
+      } else {
+        __ cmp(Operand(arguments.reg()), Immediate(Factory::the_hole_value()));
+        arguments.Unuse();
+        done.Branch(not_equal);
+      }
+    }
+    if (!skip_arguments) {
+      arguments_ref.SetValue(NOT_CONST_INIT);
+      if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
+    }
+    shadow_ref.SetValue(NOT_CONST_INIT);
+  }
+  return frame_->Pop();
+}
+
+
+Reference::Reference(CodeGenerator* cgen, Expression* expression)
+    : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
+  cgen->LoadReference(this);
+}
+
+
+Reference::~Reference() {
+  cgen_->UnloadReference(this);
+}
+
+
+void CodeGenerator::LoadReference(Reference* ref) {
+  // References are loaded from both spilled and unspilled code.  Set the
+  // state to unspilled to allow that (and explicitly spill after
+  // construction at the construction sites).
+  bool was_in_spilled_code = in_spilled_code_;
+  in_spilled_code_ = false;
+
+  Comment cmnt(masm_, "[ LoadReference");
+  Expression* e = ref->expression();
+  Property* property = e->AsProperty();
+  Variable* var = e->AsVariableProxy()->AsVariable();
+
+  if (property != NULL) {
+    // The expression is either a property or a variable proxy that rewrites
+    // to a property.
+    Load(property->obj());
+    // We use a named reference if the key is a literal symbol, unless it is
+    // a string that can be legally parsed as an integer.  This is because
+    // otherwise we will not get into the slow case code that handles [] on
+    // String objects.
+    Literal* literal = property->key()->AsLiteral();
+    uint32_t dummy;
+    if (literal != NULL &&
+        literal->handle()->IsSymbol() &&
+        !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
+      ref->set_type(Reference::NAMED);
+    } else {
+      Load(property->key());
+      ref->set_type(Reference::KEYED);
+    }
+  } else if (var != NULL) {
+    // The expression is a variable proxy that does not rewrite to a
+    // property.  Global variables are treated as named property references.
+    if (var->is_global()) {
+      LoadGlobal();
+      ref->set_type(Reference::NAMED);
+    } else {
+      ASSERT(var->slot() != NULL);
+      ref->set_type(Reference::SLOT);
+    }
+  } else {
+    // Anything else is a runtime error.
+    Load(e);
+    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
+  }
+
+  in_spilled_code_ = was_in_spilled_code;
+}
+
+
+void CodeGenerator::UnloadReference(Reference* ref) {
+  // Pop a reference from the stack while preserving TOS.
+  Comment cmnt(masm_, "[ UnloadReference");
+  frame_->Nip(ref->size());
+}
+
+
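+// Stub that converts the value on top of the stack to a boolean.  The
+// caller (ToBoolean below) tests the returned value: zero means false,
+// non-zero means true.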
+class ToBooleanStub: public CodeStub {
+ public:
+  ToBooleanStub() { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Major MajorKey() { return ToBoolean; }
+  int MinorKey() { return 0; }
+};
+
+
+// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
+// convert it to a boolean in the condition code register or jump to
+// 'false_target'/'true_target' as appropriate.
+void CodeGenerator::ToBoolean(ControlDestination* dest) {
+  Comment cmnt(masm_, "[ ToBoolean");
+
+  // The value to convert should be popped from the frame.
+  Result value = frame_->Pop();
+  value.ToRegister();
+  // Fast case checks.
+
+  // 'false' => false.
+  __ cmp(value.reg(), Factory::false_value());
+  dest->false_target()->Branch(equal);
+
+  // 'true' => true.
+  __ cmp(value.reg(), Factory::true_value());
+  dest->true_target()->Branch(equal);
+
+  // 'undefined' => false.
+  __ cmp(value.reg(), Factory::undefined_value());
+  dest->false_target()->Branch(equal);
+
+  // Smi => false iff zero.
+  ASSERT(kSmiTag == 0);
+  __ test(value.reg(), Operand(value.reg()));
+  dest->false_target()->Branch(zero);
+  __ test(value.reg(), Immediate(kSmiTagMask));
+  dest->true_target()->Branch(zero);
+
+  // Call the stub for all other cases.
+  frame_->Push(&value);  // Undo the Pop() from above.
+  ToBooleanStub stub;
+  Result temp = frame_->CallStub(&stub, 1);
+  // Convert the result to a condition code.
+  __ test(temp.reg(), Operand(temp.reg()));
+  temp.Unuse();
+  dest->Split(not_equal);
+}
+
+
+class FloatingPointHelper : public AllStatic {
+ public:
+  // Code pattern for loading a floating point value. Input value must
+  // be either a smi or a heap number object (fp value). Requirements:
+  // operand in register number. Returns operand as floating point number
+  // on FPU stack.
+  static void LoadFloatOperand(MacroAssembler* masm, Register number);
+  // Code pattern for loading floating point values. Input values must
+  // be either smi or heap number objects (fp values). Requirements:
+  // operand_1 on TOS+1, operand_2 on TOS+2. Returns operands as
+  // floating point numbers on FPU stack.
+  static void LoadFloatOperands(MacroAssembler* masm, Register scratch);
+  // Test if operands are smi or number objects (fp). Requirements:
+  // operand_1 in eax, operand_2 in edx; falls through on float
+  // operands, jumps to the non_float label otherwise.
+  static void CheckFloatOperands(MacroAssembler* masm,
+                                 Label* non_float,
+                                 Register scratch);
+  // Test if operands are numbers (smi or HeapNumber objects), and load
+  // them into xmm0 and xmm1 if they are.  Jump to label not_numbers if
+  // either operand is not a number.  Operands are in edx and eax.
+  // Leaves operands unchanged.
+  static void LoadSse2Operands(MacroAssembler* masm, Label* not_numbers);
+  // Allocate a heap number in new space with undefined value.
+  // Returns tagged pointer in eax, or jumps to need_gc if new space is full.
+  static void AllocateHeapNumber(MacroAssembler* masm,
+                                 Label* need_gc,
+                                 Register scratch1,
+                                 Register scratch2,
+                                 Register result);
+};
+
+
+const char* GenericBinaryOpStub::GetName() {
+  switch (op_) {
+    case Token::ADD: return "GenericBinaryOpStub_ADD";
+    case Token::SUB: return "GenericBinaryOpStub_SUB";
+    case Token::MUL: return "GenericBinaryOpStub_MUL";
+    case Token::DIV: return "GenericBinaryOpStub_DIV";
+    case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
+    case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
+    case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
+    case Token::SAR: return "GenericBinaryOpStub_SAR";
+    case Token::SHL: return "GenericBinaryOpStub_SHL";
+    case Token::SHR: return "GenericBinaryOpStub_SHR";
+    default:         return "GenericBinaryOpStub";
+  }
+}
+
+
+// Call the specialized stub for a binary operation.
+class DeferredInlineBinaryOperation: public DeferredCode {
+ public:
+  DeferredInlineBinaryOperation(Token::Value op,
+                                Register dst,
+                                Register left,
+                                Register right,
+                                OverwriteMode mode)
+      : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
+    set_comment("[ DeferredInlineBinaryOperation");
+  }
+
+  virtual void Generate();
+
+ private:
+  Token::Value op_;
+  Register dst_;
+  Register left_;
+  Register right_;
+  OverwriteMode mode_;
+};
+
+
+void DeferredInlineBinaryOperation::Generate() {
+  __ push(left_);
+  __ push(right_);
+  GenericBinaryOpStub stub(op_, mode_, SMI_CODE_INLINED);
+  __ CallStub(&stub);
+  if (!dst_.is(eax)) __ mov(dst_, eax);
+}
+
+
+void CodeGenerator::GenericBinaryOperation(Token::Value op,
+                                           SmiAnalysis* type,
+                                           OverwriteMode overwrite_mode) {
+  Comment cmnt(masm_, "[ BinaryOperation");
+  Comment cmnt_token(masm_, Token::String(op));
+
+  if (op == Token::COMMA) {
+    // Simply discard left value.
+    frame_->Nip(1);
+    return;
+  }
+
+  // Set the flags based on the operation, type and loop nesting level.
+  GenericBinaryFlags flags;
+  switch (op) {
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SHL:
+    case Token::SHR:
+    case Token::SAR:
+      // Bit operations always assume they likely operate on Smis. Still,
+      // only generate the inline Smi check code if this operation is part
+      // of a loop.
+      flags = (loop_nesting() > 0)
+              ? SMI_CODE_INLINED
+              : SMI_CODE_IN_STUB;
+      break;
+
+    default:
+      // By default only inline the Smi check code for likely smis if this
+      // operation is part of a loop.
+      flags = ((loop_nesting() > 0) && type->IsLikelySmi())
+              ? SMI_CODE_INLINED
+              : SMI_CODE_IN_STUB;
+      break;
+  }
+
+  Result right = frame_->Pop();
+  Result left = frame_->Pop();
+
+  if (op == Token::ADD) {
+    bool left_is_string = left.is_constant() && left.handle()->IsString();
+    bool right_is_string = right.is_constant() && right.handle()->IsString();
+    if (left_is_string || right_is_string) {
+      frame_->Push(&left);
+      frame_->Push(&right);
+      Result answer;
+      if (left_is_string) {
+        if (right_is_string) {
+          // TODO(lrn): if both are constant strings
+          // -- do a compile time cons, if allocation during codegen is allowed.
+          answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
+        } else {
+          answer =
+            frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
+        }
+      } else if (right_is_string) {
+        answer =
+          frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
+      }
+      frame_->Push(&answer);
+      return;
+    }
+    // Neither operand is known to be a string.
+  }
+
+  bool left_is_smi = left.is_constant() && left.handle()->IsSmi();
+  bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi();
+  bool right_is_smi = right.is_constant() && right.handle()->IsSmi();
+  bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi();
+  bool generate_no_smi_code = false;  // No smi code at all, inline or in stub.
+
+  if (left_is_smi && right_is_smi) {
+    // Compute the constant result at compile time, and leave it on the frame.
+    int left_int = Smi::cast(*left.handle())->value();
+    int right_int = Smi::cast(*right.handle())->value();
+    if (FoldConstantSmis(op, left_int, right_int)) return;
+  }
+
+  if (left_is_non_smi || right_is_non_smi) {
+    // Set flag so that we go straight to the slow case, with no smi code.
+    generate_no_smi_code = true;
+  } else if (right_is_smi) {
+    ConstantSmiBinaryOperation(op, &left, right.handle(),
+                               type, false, overwrite_mode);
+    return;
+  } else if (left_is_smi) {
+    ConstantSmiBinaryOperation(op, &right, left.handle(),
+                               type, true, overwrite_mode);
+    return;
+  }
+
+  if (flags == SMI_CODE_INLINED && !generate_no_smi_code) {
+    LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
+  } else {
+    frame_->Push(&left);
+    frame_->Push(&right);
+    // If we know the arguments aren't smis, use the binary operation stub
+    // that does not check for the fast smi case.
+    // The same stub is used for NO_SMI_CODE and SMI_CODE_INLINED.
+    if (generate_no_smi_code) {
+      flags = SMI_CODE_INLINED;
+    }
+    GenericBinaryOpStub stub(op, overwrite_mode, flags);
+    Result answer = frame_->CallStub(&stub, 2);
+    frame_->Push(&answer);
+  }
+}
+
+
+bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
+  Object* answer_object = Heap::undefined_value();
+  switch (op) {
+    case Token::ADD:
+      if (Smi::IsValid(left + right)) {
+        answer_object = Smi::FromInt(left + right);
+      }
+      break;
+    case Token::SUB:
+      if (Smi::IsValid(left - right)) {
+        answer_object = Smi::FromInt(left - right);
+      }
+      break;
+    case Token::MUL: {
+        double answer = static_cast<double>(left) * right;
+        if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
+          // If the product is zero and the non-zero factor is negative,
+          // the spec requires us to return floating point negative zero.
+          if (answer != 0 || (left >= 0 && right >= 0)) {
+            answer_object = Smi::FromInt(static_cast<int>(answer));
+          }
+        }
+      }
+      break;
+    case Token::DIV:
+    case Token::MOD:
+      break;
+    case Token::BIT_OR:
+      answer_object = Smi::FromInt(left | right);
+      break;
+    case Token::BIT_AND:
+      answer_object = Smi::FromInt(left & right);
+      break;
+    case Token::BIT_XOR:
+      answer_object = Smi::FromInt(left ^ right);
+      break;
+
+    case Token::SHL: {
+        int shift_amount = right & 0x1F;
+        if (Smi::IsValid(left << shift_amount)) {
+          answer_object = Smi::FromInt(left << shift_amount);
+        }
+        break;
+      }
+    case Token::SHR: {
+        int shift_amount = right & 0x1F;
+        unsigned int unsigned_left = left;
+        unsigned_left >>= shift_amount;
+        if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
+          answer_object = Smi::FromInt(unsigned_left);
+        }
+        break;
+      }
+    case Token::SAR: {
+        int shift_amount = right & 0x1F;
+        unsigned int unsigned_left = left;
+        if (left < 0) {
+          // Perform arithmetic shift of a negative number by
+          // complementing number, logical shifting, complementing again.
+          unsigned_left = ~unsigned_left;
+          unsigned_left >>= shift_amount;
+          unsigned_left = ~unsigned_left;
+        } else {
+          unsigned_left >>= shift_amount;
+        }
+        ASSERT(Smi::IsValid(unsigned_left));  // Converted to signed.
+        answer_object = Smi::FromInt(unsigned_left);  // Converted to signed.
+        break;
+      }
+    default:
+      UNREACHABLE();
+      break;
+  }
+  if (answer_object == Heap::undefined_value()) {
+    return false;
+  }
+  frame_->Push(Handle<Object>(answer_object));
+  return true;
+}
+
+
+// Implements a binary operation using a deferred code object and some
+// inline code to operate on smis quickly.
+void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
+                                             Result* left,
+                                             Result* right,
+                                             OverwriteMode overwrite_mode) {
+  // Special handling of div and mod because they use fixed registers.
+  if (op == Token::DIV || op == Token::MOD) {
+    // We need eax as the quotient register, edx as the remainder
+    // register, neither left nor right in eax or edx, and left copied
+    // to eax.
+    Result quotient;
+    Result remainder;
+    bool left_is_in_eax = false;
+    // Step 1: get eax for quotient.
+    if ((left->is_register() && left->reg().is(eax)) ||
+        (right->is_register() && right->reg().is(eax))) {
+      // One or both are in eax.  Use a fresh non-edx register for
+      // them.
+      Result fresh = allocator_->Allocate();
+      ASSERT(fresh.is_valid());
+      if (fresh.reg().is(edx)) {
+        remainder = fresh;
+        fresh = allocator_->Allocate();
+        ASSERT(fresh.is_valid());
+      }
+      if (left->is_register() && left->reg().is(eax)) {
+        quotient = *left;
+        *left = fresh;
+        left_is_in_eax = true;
+      }
+      if (right->is_register() && right->reg().is(eax)) {
+        quotient = *right;
+        *right = fresh;
+      }
+      __ mov(fresh.reg(), eax);
+    } else {
+      // Neither left nor right is in eax.
+      quotient = allocator_->Allocate(eax);
+    }
+    ASSERT(quotient.is_register() && quotient.reg().is(eax));
+    ASSERT(!(left->is_register() && left->reg().is(eax)));
+    ASSERT(!(right->is_register() && right->reg().is(eax)));
+
+    // Step 2: get edx for remainder if necessary.
+    if (!remainder.is_valid()) {
+      if ((left->is_register() && left->reg().is(edx)) ||
+          (right->is_register() && right->reg().is(edx))) {
+        Result fresh = allocator_->Allocate();
+        ASSERT(fresh.is_valid());
+        if (left->is_register() && left->reg().is(edx)) {
+          remainder = *left;
+          *left = fresh;
+        }
+        if (right->is_register() && right->reg().is(edx)) {
+          remainder = *right;
+          *right = fresh;
+        }
+        __ mov(fresh.reg(), edx);
+      } else {
+        // Neither left nor right is in edx.
+        remainder = allocator_->Allocate(edx);
+      }
+    }
+    ASSERT(remainder.is_register() && remainder.reg().is(edx));
+    ASSERT(!(left->is_register() && left->reg().is(edx)));
+    ASSERT(!(right->is_register() && right->reg().is(edx)));
+
+    left->ToRegister();
+    right->ToRegister();
+    frame_->Spill(eax);
+    frame_->Spill(edx);
+
+    // Check that left and right are smi tagged.
+    DeferredInlineBinaryOperation* deferred =
+        new DeferredInlineBinaryOperation(op,
+                                          (op == Token::DIV) ? eax : edx,
+                                          left->reg(),
+                                          right->reg(),
+                                          overwrite_mode);
+    if (left->reg().is(right->reg())) {
+      __ test(left->reg(), Immediate(kSmiTagMask));
+    } else {
+      // Use the quotient register as a scratch for the tag check.
+      if (!left_is_in_eax) __ mov(eax, left->reg());
+      left_is_in_eax = false;  // About to destroy the value in eax.
+      __ or_(eax, Operand(right->reg()));
+      ASSERT(kSmiTag == 0);  // Adjust test if not the case.
+      __ test(eax, Immediate(kSmiTagMask));
+    }
+    deferred->Branch(not_zero);
+
+    if (!left_is_in_eax) __ mov(eax, left->reg());
+    // Sign extend eax into edx:eax.
+    __ cdq();
+    // Check for 0 divisor.
+    __ test(right->reg(), Operand(right->reg()));
+    deferred->Branch(zero);
+    // Divide edx:eax by the right operand.
+    __ idiv(right->reg());
+
+    // Complete the operation.
+    if (op == Token::DIV) {
+      // Check for negative zero result.  If result is zero, and divisor
+      // is negative, return a floating point negative zero.  The
+      // virtual frame is unchanged in this block, so local control flow
+      // can use a Label rather than a JumpTarget.
+      Label non_zero_result;
+      __ test(left->reg(), Operand(left->reg()));
+      __ j(not_zero, &non_zero_result);
+      __ test(right->reg(), Operand(right->reg()));
+      deferred->Branch(negative);
+      __ bind(&non_zero_result);
+      // Check for the corner case of dividing the most negative smi by
+      // -1. We cannot use the overflow flag, since it is not set by
+      // the idiv instruction.
+      ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+      __ cmp(eax, 0x40000000);
+      deferred->Branch(equal);
+      // Check that the remainder is zero.
+      __ test(edx, Operand(edx));
+      deferred->Branch(not_zero);
+      // Tag the result and store it in the quotient register.
+      ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
+      __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
+      deferred->BindExit();
+      left->Unuse();
+      right->Unuse();
+      frame_->Push(&quotient);
+    } else {
+      ASSERT(op == Token::MOD);
+      // Check for a negative zero result.  If the result is zero, and
+      // the dividend is negative, return a floating point negative
+      // zero.  The frame is unchanged in this block, so local control
+      // flow can use a Label rather than a JumpTarget.
+      Label non_zero_result;
+      __ test(edx, Operand(edx));
+      __ j(not_zero, &non_zero_result, taken);
+      __ test(left->reg(), Operand(left->reg()));
+      deferred->Branch(negative);
+      __ bind(&non_zero_result);
+      deferred->BindExit();
+      left->Unuse();
+      right->Unuse();
+      frame_->Push(&remainder);
+    }
+    return;
+  }
+
+  // Special handling of shift operations because they use fixed
+  // registers.
+  if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
+    // Move left out of ecx if necessary.
+    if (left->is_register() && left->reg().is(ecx)) {
+      *left = allocator_->Allocate();
+      ASSERT(left->is_valid());
+      __ mov(left->reg(), ecx);
+    }
+    right->ToRegister(ecx);
+    left->ToRegister();
+    ASSERT(left->is_register() && !left->reg().is(ecx));
+    ASSERT(right->is_register() && right->reg().is(ecx));
+
+    // We will modify right, it must be spilled.
+    frame_->Spill(ecx);
+
+    // Use a fresh answer register to avoid spilling the left operand.
+    Result answer = allocator_->Allocate();
+    ASSERT(answer.is_valid());
+    // Check that both operands are smis using the answer register as a
+    // temporary.
+    DeferredInlineBinaryOperation* deferred =
+        new DeferredInlineBinaryOperation(op,
+                                          answer.reg(),
+                                          left->reg(),
+                                          ecx,
+                                          overwrite_mode);
+    __ mov(answer.reg(), left->reg());
+    __ or_(answer.reg(), Operand(ecx));
+    __ test(answer.reg(), Immediate(kSmiTagMask));
+    deferred->Branch(not_zero);
+
+    // Untag both operands.
+    __ mov(answer.reg(), left->reg());
+    __ sar(answer.reg(), kSmiTagSize);
+    __ sar(ecx, kSmiTagSize);
+    // Perform the operation.
+    switch (op) {
+      case Token::SAR:
+        __ sar(answer.reg());
+        // No checks of result necessary
+        break;
+      case Token::SHR: {
+        Label result_ok;
+        __ shr(answer.reg());
+        // Check that the *unsigned* result fits in a smi.  Neither of
+        // the two high-order bits can be set:
+        //  * 0x80000000: high bit would be lost when smi tagging.
+        //  * 0x40000000: this number would convert to negative when smi
+        //    tagging.
+        // These two cases can only happen with shifts by 0 or 1 when
+        // handed a valid smi.  If the answer cannot be represented by a
+        // smi, restore the left and right arguments, and jump to slow
+        // case.  The low bit of the left argument may be lost, but only
+        // in a case where it is dropped anyway.
+        __ test(answer.reg(), Immediate(0xc0000000));
+        __ j(zero, &result_ok);
+        ASSERT(kSmiTag == 0);
+        __ shl(ecx, kSmiTagSize);
+        deferred->Jump();
+        __ bind(&result_ok);
+        break;
+      }
+      case Token::SHL: {
+        Label result_ok;
+        __ shl(answer.reg());
+        // Check that the *signed* result fits in a smi.
+        __ cmp(answer.reg(), 0xc0000000);
+        __ j(positive, &result_ok);
+        ASSERT(kSmiTag == 0);
+        __ shl(ecx, kSmiTagSize);
+        deferred->Jump();
+        __ bind(&result_ok);
+        break;
+      }
+      default:
+        UNREACHABLE();
+    }
+    // Smi-tag the result in answer.
+    ASSERT(kSmiTagSize == 1);  // Adjust code if not the case.
+    __ lea(answer.reg(),
+           Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
+    deferred->BindExit();
+    left->Unuse();
+    right->Unuse();
+    frame_->Push(&answer);
+    return;
+  }
+
+  // Handle the other binary operations.
+  left->ToRegister();
+  right->ToRegister();
+  // A newly allocated register answer is used to hold the answer.  The
+  // registers containing left and right are not modified so they don't
+  // need to be spilled in the fast case.
+  Result answer = allocator_->Allocate();
+  ASSERT(answer.is_valid());
+
+  // Perform the smi tag check.
+  DeferredInlineBinaryOperation* deferred =
+      new DeferredInlineBinaryOperation(op,
+                                        answer.reg(),
+                                        left->reg(),
+                                        right->reg(),
+                                        overwrite_mode);
+  if (left->reg().is(right->reg())) {
+    __ test(left->reg(), Immediate(kSmiTagMask));
+  } else {
+    __ mov(answer.reg(), left->reg());
+    __ or_(answer.reg(), Operand(right->reg()));
+    ASSERT(kSmiTag == 0);  // Adjust test if not the case.
+    __ test(answer.reg(), Immediate(kSmiTagMask));
+  }
+  deferred->Branch(not_zero);
+  __ mov(answer.reg(), left->reg());
+  switch (op) {
+    case Token::ADD:
+      __ add(answer.reg(), Operand(right->reg()));  // Add optimistically.
+      deferred->Branch(overflow);
+      break;
+
+    case Token::SUB:
+      __ sub(answer.reg(), Operand(right->reg()));  // Subtract optimistically.
+      deferred->Branch(overflow);
+      break;
+
+    case Token::MUL: {
+      // If the smi tag is 0 we can just leave the tag on one operand.
+      ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
+      // Remove smi tag from the left operand (but keep sign).
+      // Left-hand operand has been copied into answer.
+      __ sar(answer.reg(), kSmiTagSize);
+      // Do multiplication of smis, leaving result in answer.
+      __ imul(answer.reg(), Operand(right->reg()));
+      // Go slow on overflows.
+      deferred->Branch(overflow);
+      // Check for negative zero result.  If product is zero, and one
+      // argument is negative, go to slow case.  The frame is unchanged
+      // in this block, so local control flow can use a Label rather
+      // than a JumpTarget.
+      Label non_zero_result;
+      __ test(answer.reg(), Operand(answer.reg()));
+      __ j(not_zero, &non_zero_result, taken);
+      __ mov(answer.reg(), left->reg());
+      __ or_(answer.reg(), Operand(right->reg()));
+      deferred->Branch(negative);
+      __ xor_(answer.reg(), Operand(answer.reg()));  // Positive 0 is correct.
+      __ bind(&non_zero_result);
+      break;
+    }
+
+    case Token::BIT_OR:
+      __ or_(answer.reg(), Operand(right->reg()));
+      break;
+
+    case Token::BIT_AND:
+      __ and_(answer.reg(), Operand(right->reg()));
+      break;
+
+    case Token::BIT_XOR:
+      __ xor_(answer.reg(), Operand(right->reg()));
+      break;
+
+    default:
+      UNREACHABLE();
+      break;
+  }
+  deferred->BindExit();
+  left->Unuse();
+  right->Unuse();
+  frame_->Push(&answer);
+}
+
+
+// Call the appropriate binary operation stub to compute src op value
+// and leave the result in dst.
+class DeferredInlineSmiOperation: public DeferredCode {
+ public:
+  DeferredInlineSmiOperation(Token::Value op,
+                             Register dst,
+                             Register src,
+                             Smi* value,
+                             OverwriteMode overwrite_mode)
+      : op_(op),
+        dst_(dst),
+        src_(src),
+        value_(value),
+        overwrite_mode_(overwrite_mode) {
+    set_comment("[ DeferredInlineSmiOperation");
+  }
+
+  virtual void Generate();
+
+ private:
+  Token::Value op_;
+  Register dst_;
+  Register src_;
+  Smi* value_;
+  OverwriteMode overwrite_mode_;
+};
+
+
+void DeferredInlineSmiOperation::Generate() {
+  __ push(src_);
+  __ push(Immediate(value_));
+  // For mod we don't generate all the Smi code inline.
+  GenericBinaryOpStub stub(
+      op_,
+      overwrite_mode_,
+      (op_ == Token::MOD) ? SMI_CODE_IN_STUB : SMI_CODE_INLINED);
+  __ CallStub(&stub);
+  if (!dst_.is(eax)) __ mov(dst_, eax);
+}
+
+
+// Call the appropriate binary operation stub to compute value op src
+// and leave the result in dst.
+class DeferredInlineSmiOperationReversed: public DeferredCode {
+ public:
+  DeferredInlineSmiOperationReversed(Token::Value op,
+                                     Register dst,
+                                     Smi* value,
+                                     Register src,
+                                     OverwriteMode overwrite_mode)
+      : op_(op),
+        dst_(dst),
+        value_(value),
+        src_(src),
+        overwrite_mode_(overwrite_mode) {
+    set_comment("[ DeferredInlineSmiOperationReversed");
+  }
+
+  virtual void Generate();
+
+ private:
+  Token::Value op_;
+  Register dst_;
+  Smi* value_;
+  Register src_;
+  OverwriteMode overwrite_mode_;
+};
+
+
+void DeferredInlineSmiOperationReversed::Generate() {
+  __ push(Immediate(value_));
+  __ push(src_);
+  GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED);
+  __ CallStub(&igostub);
+  if (!dst_.is(eax)) __ mov(dst_, eax);
+}
+
+
+// The result of src + value is in dst.  It either overflowed or was not
+// smi tagged.  Undo the speculative addition and call the appropriate
+// specialized stub for add.  The result is left in dst.
+class DeferredInlineSmiAdd: public DeferredCode {
+ public:
+  DeferredInlineSmiAdd(Register dst,
+                       Smi* value,
+                       OverwriteMode overwrite_mode)
+      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+    set_comment("[ DeferredInlineSmiAdd");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register dst_;
+  Smi* value_;
+  OverwriteMode overwrite_mode_;
+};
+
+
+void DeferredInlineSmiAdd::Generate() {
+  // Undo the optimistic add operation and call the shared stub.
+  __ sub(Operand(dst_), Immediate(value_));
+  __ push(dst_);
+  __ push(Immediate(value_));
+  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
+  __ CallStub(&igostub);
+  if (!dst_.is(eax)) __ mov(dst_, eax);
+}
+
+
+// The result of value + src is in dst.  It either overflowed or was not
+// smi tagged.  Undo the speculative addition and call the appropriate
+// specialized stub for add.  The result is left in dst.
+class DeferredInlineSmiAddReversed: public DeferredCode {
+ public:
+  DeferredInlineSmiAddReversed(Register dst,
+                               Smi* value,
+                               OverwriteMode overwrite_mode)
+      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+    set_comment("[ DeferredInlineSmiAddReversed");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register dst_;
+  Smi* value_;
+  OverwriteMode overwrite_mode_;
+};
+
+
+void DeferredInlineSmiAddReversed::Generate() {
+  // Undo the optimistic add operation and call the shared stub.
+  __ sub(Operand(dst_), Immediate(value_));
+  __ push(Immediate(value_));
+  __ push(dst_);
+  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
+  __ CallStub(&igostub);
+  if (!dst_.is(eax)) __ mov(dst_, eax);
+}
+
+
+// The result of src - value is in dst.  It either overflowed or was not
+// smi tagged.  Undo the speculative subtraction and call the
+// appropriate specialized stub for subtract.  The result is left in
+// dst.
+class DeferredInlineSmiSub: public DeferredCode {
+ public:
+  DeferredInlineSmiSub(Register dst,
+                       Smi* value,
+                       OverwriteMode overwrite_mode)
+      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+    set_comment("[ DeferredInlineSmiSub");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register dst_;
+  Smi* value_;
+  OverwriteMode overwrite_mode_;
+};
+
+
+void DeferredInlineSmiSub::Generate() {
+  // Undo the optimistic sub operation and call the shared stub.
+  __ add(Operand(dst_), Immediate(value_));
+  __ push(dst_);
+  __ push(Immediate(value_));
+  GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
+  __ CallStub(&igostub);
+  if (!dst_.is(eax)) __ mov(dst_, eax);
+}
+
+
+void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
+                                               Result* operand,
+                                               Handle<Object> value,
+                                               SmiAnalysis* type,
+                                               bool reversed,
+                                               OverwriteMode overwrite_mode) {
+  // NOTE: This is an attempt to inline (a bit) more of the code for
+  // some possible smi operations (like + and -) when (at least) one
+  // of the operands is a constant smi.
+  // Consumes the argument "operand".
+
+  // TODO(199): Optimize some special cases of operations involving a
+  // smi literal (multiply by 2, shift by 0, etc.).
+  if (IsUnsafeSmi(value)) {
+    Result unsafe_operand(value);
+    if (reversed) {
+      LikelySmiBinaryOperation(op, &unsafe_operand, operand,
+                               overwrite_mode);
+    } else {
+      LikelySmiBinaryOperation(op, operand, &unsafe_operand,
+                               overwrite_mode);
+    }
+    ASSERT(!operand->is_valid());
+    return;
+  }
+
+  // Get the literal value.
+  Smi* smi_value = Smi::cast(*value);
+  int int_value = smi_value->value();
+
+  switch (op) {
+    case Token::ADD: {
+      operand->ToRegister();
+      frame_->Spill(operand->reg());
+
+      // Optimistically add.  Call the specialized add stub if the
+      // result is not a smi or overflows.
+      DeferredCode* deferred = NULL;
+      if (reversed) {
+        deferred = new DeferredInlineSmiAddReversed(operand->reg(),
+                                                    smi_value,
+                                                    overwrite_mode);
+      } else {
+        deferred = new DeferredInlineSmiAdd(operand->reg(),
+                                            smi_value,
+                                            overwrite_mode);
+      }
+      __ add(Operand(operand->reg()), Immediate(value));
+      deferred->Branch(overflow);
+      __ test(operand->reg(), Immediate(kSmiTagMask));
+      deferred->Branch(not_zero);
+      deferred->BindExit();
+      frame_->Push(operand);
+      break;
+    }
+
+    case Token::SUB: {
+      DeferredCode* deferred = NULL;
+      Result answer;  // Only allocate a new register if reversed.
+      if (reversed) {
+        // The reversed case is only hit when the right operand is not a
+        // constant.
+        ASSERT(operand->is_register());
+        answer = allocator()->Allocate();
+        ASSERT(answer.is_valid());
+        __ Set(answer.reg(), Immediate(value));
+        deferred = new DeferredInlineSmiOperationReversed(op,
+                                                          answer.reg(),
+                                                          smi_value,
+                                                          operand->reg(),
+                                                          overwrite_mode);
+        __ sub(answer.reg(), Operand(operand->reg()));
+      } else {
+        operand->ToRegister();
+        frame_->Spill(operand->reg());
+        answer = *operand;
+        deferred = new DeferredInlineSmiSub(operand->reg(),
+                                            smi_value,
+                                            overwrite_mode);
+        __ sub(Operand(operand->reg()), Immediate(value));
+      }
+      deferred->Branch(overflow);
+      __ test(answer.reg(), Immediate(kSmiTagMask));
+      deferred->Branch(not_zero);
+      deferred->BindExit();
+      operand->Unuse();
+      frame_->Push(&answer);
+      break;
+    }
+
+    case Token::SAR:
+      if (reversed) {
+        Result constant_operand(value);
+        LikelySmiBinaryOperation(op, &constant_operand, operand,
+                                 overwrite_mode);
+      } else {
+        // Only the least significant 5 bits of the shift value are used.
+        // In the slow case, this masking is done inside the runtime call.
+        int shift_value = int_value & 0x1f;
+        operand->ToRegister();
+        frame_->Spill(operand->reg());
+        DeferredInlineSmiOperation* deferred =
+            new DeferredInlineSmiOperation(op,
+                                           operand->reg(),
+                                           operand->reg(),
+                                           smi_value,
+                                           overwrite_mode);
+        __ test(operand->reg(), Immediate(kSmiTagMask));
+        deferred->Branch(not_zero);
+        if (shift_value > 0) {
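+          // Shift the tagged value arithmetically, then clear the low bit
+          // so the result is again a correctly tagged smi.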
+          __ sar(operand->reg(), shift_value);
+          __ and_(operand->reg(), ~kSmiTagMask);
+        }
+        deferred->BindExit();
+        frame_->Push(operand);
+      }
+      break;
+
+    case Token::SHR:
+      if (reversed) {
+        Result constant_operand(value);
+        LikelySmiBinaryOperation(op, &constant_operand, operand,
+                                 overwrite_mode);
+      } else {
+        // Only the least significant 5 bits of the shift value are used.
+        // In the slow case, this masking is done inside the runtime call.
+        int shift_value = int_value & 0x1f;
+        operand->ToRegister();
+        Result answer = allocator()->Allocate();
+        ASSERT(answer.is_valid());
+        DeferredInlineSmiOperation* deferred =
+            new DeferredInlineSmiOperation(op,
+                                           answer.reg(),
+                                           operand->reg(),
+                                           smi_value,
+                                           overwrite_mode);
+        __ test(operand->reg(), Immediate(kSmiTagMask));
+        deferred->Branch(not_zero);
+        __ mov(answer.reg(), operand->reg());
+        __ sar(answer.reg(), kSmiTagSize);
+        __ shr(answer.reg(), shift_value);
+        // A negative Smi shifted right by two or more bits ends up in the
+        // positive Smi range, so only smaller shifts need the range check.
+        if (shift_value < 2) {
+          __ test(answer.reg(), Immediate(0xc0000000));
+          deferred->Branch(not_zero);
+        }
+        operand->Unuse();
+        ASSERT(kSmiTagSize == times_2);  // Adjust the code if not true.
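+        // Retag the result as a smi: the lea computes answer * 2 + kSmiTag.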
+        __ lea(answer.reg(),
+               Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
+        deferred->BindExit();
+        frame_->Push(&answer);
+      }
+      break;
+
+    case Token::SHL:
+      if (reversed) {
+        Result constant_operand(value);
+        LikelySmiBinaryOperation(op, &constant_operand, operand,
+                                 overwrite_mode);
+      } else {
+        // Only the least significant 5 bits of the shift value are used.
+        // In the slow case, this masking is done inside the runtime call.
+        int shift_value = int_value & 0x1f;
+        operand->ToRegister();
+        if (shift_value == 0) {
+          // Spill operand so it can be overwritten in the slow case.
+          frame_->Spill(operand->reg());
+          DeferredInlineSmiOperation* deferred =
+              new DeferredInlineSmiOperation(op,
+                                             operand->reg(),
+                                             operand->reg(),
+                                             smi_value,
+                                             overwrite_mode);
+          __ test(operand->reg(), Immediate(kSmiTagMask));
+          deferred->Branch(not_zero);
+          deferred->BindExit();
+          frame_->Push(operand);
+        } else {
+          // Use a fresh temporary for nonzero shift values.
+          Result answer = allocator()->Allocate();
+          ASSERT(answer.is_valid());
+          DeferredInlineSmiOperation* deferred =
+              new DeferredInlineSmiOperation(op,
+                                             answer.reg(),
+                                             operand->reg(),
+                                             smi_value,
+                                             overwrite_mode);
+          __ test(operand->reg(), Immediate(kSmiTagMask));
+          deferred->Branch(not_zero);
+          __ mov(answer.reg(), operand->reg());
+          ASSERT(kSmiTag == 0);  // adjust code if not the case
+          // We do no shifts, only the Smi conversion, if shift_value is 1.
+          if (shift_value > 1) {
+            __ shl(answer.reg(), shift_value - 1);
+          }
+          // Convert int result to Smi, checking that it is in int range.
+          ASSERT(kSmiTagSize == 1);  // adjust code if not the case
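+          // Adding the register to itself doubles the value, applying the
+          // smi tag shift and letting the overflow flag catch results that
+          // do not fit in the smi range.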
+          __ add(answer.reg(), Operand(answer.reg()));
+          deferred->Branch(overflow);
+          deferred->BindExit();
+          operand->Unuse();
+          frame_->Push(&answer);
+        }
+      }
+      break;
+
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND: {
+      operand->ToRegister();
+      frame_->Spill(operand->reg());
+      DeferredCode* deferred = NULL;
+      if (reversed) {
+        deferred = new DeferredInlineSmiOperationReversed(op,
+                                                          operand->reg(),
+                                                          smi_value,
+                                                          operand->reg(),
+                                                          overwrite_mode);
+      } else {
+        deferred = new DeferredInlineSmiOperation(op,
+                                                  operand->reg(),
+                                                  operand->reg(),
+                                                  smi_value,
+                                                  overwrite_mode);
+      }
+      __ test(operand->reg(), Immediate(kSmiTagMask));
+      deferred->Branch(not_zero);
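+      // Bitwise operations on two tagged smis yield a tagged smi, since
+      // the zero tag bits combine to zero.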
+      if (op == Token::BIT_AND) {
+        __ and_(Operand(operand->reg()), Immediate(value));
+      } else if (op == Token::BIT_XOR) {
+        if (int_value != 0) {
+          __ xor_(Operand(operand->reg()), Immediate(value));
+        }
+      } else {
+        ASSERT(op == Token::BIT_OR);
+        if (int_value != 0) {
+          __ or_(Operand(operand->reg()), Immediate(value));
+        }
+      }
+      deferred->BindExit();
+      frame_->Push(operand);
+      break;
+    }
+
+    // Generate inline code for mod of powers of 2 and negative powers of 2.
+    case Token::MOD:
+      if (!reversed &&
+          int_value != 0 &&
+          (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
+        operand->ToRegister();
+        frame_->Spill(operand->reg());
+        DeferredCode* deferred = new DeferredInlineSmiOperation(op,
+                                                                operand->reg(),
+                                                                operand->reg(),
+                                                                smi_value,
+                                                                overwrite_mode);
+        // Check for negative or non-Smi left hand side.
+        __ test(operand->reg(), Immediate(kSmiTagMask | 0x80000000));
+        deferred->Branch(not_zero);
+        if (int_value < 0) int_value = -int_value;
+        if (int_value == 1) {
+          __ mov(operand->reg(), Immediate(Smi::FromInt(0)));
+        } else {
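+          // With a zero smi tag, masking the tagged value with
+          // (power_of_2 << kSmiTagSize) - 1 leaves the tagged remainder.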
+          __ and_(operand->reg(), (int_value << kSmiTagSize) - 1);
+        }
+        deferred->BindExit();
+        frame_->Push(operand);
+        break;
+      }
+      // Fall through if we did not find a power of 2 on the right hand side!
+
+    default: {
+      Result constant_operand(value);
+      if (reversed) {
+        LikelySmiBinaryOperation(op, &constant_operand, operand,
+                                 overwrite_mode);
+      } else {
+        LikelySmiBinaryOperation(op, operand, &constant_operand,
+                                 overwrite_mode);
+      }
+      break;
+    }
+  }
+  ASSERT(!operand->is_valid());
+}
+
+
+void CodeGenerator::Comparison(Condition cc,
+                               bool strict,
+                               ControlDestination* dest) {
+  // Strict only makes sense for equality comparisons.
+  ASSERT(!strict || cc == equal);
+
+  Result left_side;
+  Result right_side;
+  // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
+  if (cc == greater || cc == less_equal) {
+    cc = ReverseCondition(cc);
+    left_side = frame_->Pop();
+    right_side = frame_->Pop();
+  } else {
+    right_side = frame_->Pop();
+    left_side = frame_->Pop();
+  }
+  ASSERT(cc == less || cc == equal || cc == greater_equal);
+
+  // If either side is a constant smi, optimize the comparison.
+  bool left_side_constant_smi =
+      left_side.is_constant() && left_side.handle()->IsSmi();
+  bool right_side_constant_smi =
+      right_side.is_constant() && right_side.handle()->IsSmi();
+  bool left_side_constant_null =
+      left_side.is_constant() && left_side.handle()->IsNull();
+  bool right_side_constant_null =
+      right_side.is_constant() && right_side.handle()->IsNull();
+
+  if (left_side_constant_smi || right_side_constant_smi) {
+    if (left_side_constant_smi && right_side_constant_smi) {
+      // Trivial case, comparing two constants.
+      int left_value = Smi::cast(*left_side.handle())->value();
+      int right_value = Smi::cast(*right_side.handle())->value();
+      switch (cc) {
+        case less:
+          dest->Goto(left_value < right_value);
+          break;
+        case equal:
+          dest->Goto(left_value == right_value);
+          break;
+        case greater_equal:
+          dest->Goto(left_value >= right_value);
+          break;
+        default:
+          UNREACHABLE();
+      }
+    } else {  // Only one side is a constant Smi.
+      // If left side is a constant Smi, reverse the operands.
+      // Since one side is a constant Smi, conversion order does not matter.
+      if (left_side_constant_smi) {
+        Result temp = left_side;
+        left_side = right_side;
+        right_side = temp;
+        cc = ReverseCondition(cc);
+        // This may reintroduce greater or less_equal as the value of cc.
+        // CompareStub and the inline code both support all values of cc.
+      }
+      // Implement comparison against a constant Smi, inlining the case
+      // where both sides are Smis.
+      left_side.ToRegister();
+
+      // Here we split control flow to the stub call and inlined cases
+      // before finally splitting it to the control destination.  We use
+      // a jump target and branching to duplicate the virtual frame at
+      // the first split.  We manually handle the off-frame references
+      // by reconstituting them on the non-fall-through path.
+      JumpTarget is_smi;
+      Register left_reg = left_side.reg();
+      Handle<Object> right_val = right_side.handle();
+      __ test(left_side.reg(), Immediate(kSmiTagMask));
+      is_smi.Branch(zero, taken);
+
+      // Set up and call the compare stub.
+      CompareStub stub(cc, strict);
+      Result result = frame_->CallStub(&stub, &left_side, &right_side);
+      result.ToRegister();
+      __ cmp(result.reg(), 0);
+      result.Unuse();
+      dest->true_target()->Branch(cc);
+      dest->false_target()->Jump();
+
+      is_smi.Bind();
+      left_side = Result(left_reg);
+      right_side = Result(right_val);
+      // Test smi equality and comparison by signed int comparison.
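+      // Smi tagging preserves signed ordering, so the tagged values can be
+      // compared directly.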
+      if (IsUnsafeSmi(right_side.handle())) {
+        right_side.ToRegister();
+        __ cmp(left_side.reg(), Operand(right_side.reg()));
+      } else {
+        __ cmp(Operand(left_side.reg()), Immediate(right_side.handle()));
+      }
+      left_side.Unuse();
+      right_side.Unuse();
+      dest->Split(cc);
+    }
+  } else if (cc == equal &&
+             (left_side_constant_null || right_side_constant_null)) {
+    // To make null checks efficient, we check if either the left side or
+    // the right side is the constant 'null'.
+    // If so, we optimize the code by inlining a null check instead of
+    // calling the (very) general runtime routine for checking equality.
+    Result operand = left_side_constant_null ? right_side : left_side;
+    right_side.Unuse();
+    left_side.Unuse();
+    operand.ToRegister();
+    __ cmp(operand.reg(), Factory::null_value());
+    if (strict) {
+      operand.Unuse();
+      dest->Split(equal);
+    } else {
+      // The 'null' value is only equal to 'undefined' if using non-strict
+      // comparisons.
+      dest->true_target()->Branch(equal);
+      __ cmp(operand.reg(), Factory::undefined_value());
+      dest->true_target()->Branch(equal);
+      __ test(operand.reg(), Immediate(kSmiTagMask));
+      dest->false_target()->Branch(equal);
+
+      // It can be an undetectable object.
+      // Use a scratch register in preference to spilling operand.reg().
+      Result temp = allocator()->Allocate();
+      ASSERT(temp.is_valid());
+      __ mov(temp.reg(),
+             FieldOperand(operand.reg(), HeapObject::kMapOffset));
+      __ movzx_b(temp.reg(),
+                 FieldOperand(temp.reg(), Map::kBitFieldOffset));
+      __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
+      temp.Unuse();
+      operand.Unuse();
+      dest->Split(not_zero);
+    }
+  } else {  // Neither side is a constant Smi or null.
+    // If either side is a non-smi constant, skip the smi check.
+    bool known_non_smi =
+        (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
+        (right_side.is_constant() && !right_side.handle()->IsSmi());
+    left_side.ToRegister();
+    right_side.ToRegister();
+
+    if (known_non_smi) {
+      // When non-smi, call out to the compare stub.
+      CompareStub stub(cc, strict);
+      Result answer = frame_->CallStub(&stub, &left_side, &right_side);
+      if (cc == equal) {
+        __ test(answer.reg(), Operand(answer.reg()));
+      } else {
+        __ cmp(answer.reg(), 0);
+      }
+      answer.Unuse();
+      dest->Split(cc);
+    } else {
+      // Here we split control flow to the stub call and inlined cases
+      // before finally splitting it to the control destination.  We use
+      // a jump target and branching to duplicate the virtual frame at
+      // the first split.  We manually handle the off-frame references
+      // by reconstituting them on the non-fall-through path.
+      JumpTarget is_smi;
+      Register left_reg = left_side.reg();
+      Register right_reg = right_side.reg();
+
+      Result temp = allocator_->Allocate();
+      ASSERT(temp.is_valid());
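+      // Check both operands at once: or-ing the values leaves the smi tag
+      // bit clear only if both operands are smis.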
+      __ mov(temp.reg(), left_side.reg());
+      __ or_(temp.reg(), Operand(right_side.reg()));
+      __ test(temp.reg(), Immediate(kSmiTagMask));
+      temp.Unuse();
+      is_smi.Branch(zero, taken);
+      // When non-smi, call out to the compare stub.
+      CompareStub stub(cc, strict);
+      Result answer = frame_->CallStub(&stub, &left_side, &right_side);
+      if (cc == equal) {
+        __ test(answer.reg(), Operand(answer.reg()));
+      } else {
+        __ cmp(answer.reg(), 0);
+      }
+      answer.Unuse();
+      dest->true_target()->Branch(cc);
+      dest->false_target()->Jump();
+
+      is_smi.Bind();
+      left_side = Result(left_reg);
+      right_side = Result(right_reg);
+      __ cmp(left_side.reg(), Operand(right_side.reg()));
+      right_side.Unuse();
+      left_side.Unuse();
+      dest->Split(cc);
+    }
+  }
+}
+
+
+class CallFunctionStub: public CodeStub {
+ public:
+  CallFunctionStub(int argc, InLoopFlag in_loop)
+      : argc_(argc), in_loop_(in_loop) { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  int argc_;
+  InLoopFlag in_loop_;
+
+#ifdef DEBUG
+  void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
+#endif
+
+  Major MajorKey() { return CallFunction; }
+  int MinorKey() { return argc_; }
+  InLoopFlag InLoop() { return in_loop_; }
+};
+
+
+// Call the function just below TOS on the stack with the given
+// arguments. The receiver is the TOS.
+void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
+                                      int position) {
+  // Push the arguments ("left-to-right") on the stack.
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Load(args->at(i));
+  }
+
+  // Record the position for debugging purposes.
+  CodeForSourcePosition(position);
+
+  // Use the shared code stub to call the function.
+  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+  CallFunctionStub call_function(arg_count, in_loop);
+  Result answer = frame_->CallStub(&call_function, arg_count + 1);
+  // Restore context and replace function on the stack with the
+  // result of the stub invocation.
+  frame_->RestoreContextRegister();
+  frame_->SetElementAt(0, &answer);
+}
+
+
+void CodeGenerator::CallApplyLazy(Property* apply,
+                                  Expression* receiver,
+                                  VariableProxy* arguments,
+                                  int position) {
+  ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
+  ASSERT(arguments->IsArguments());
+
+  JumpTarget slow, done;
+
+  // Load the apply function onto the stack. This will usually
+  // give us a megamorphic load site. Not super, but it works.
+  Reference ref(this, apply);
+  ref.GetValue(NOT_INSIDE_TYPEOF);
+  ASSERT(ref.type() == Reference::NAMED);
+
+  // Load the receiver and the existing arguments object onto the
+  // expression stack. Avoid allocating the arguments object here.
+  Load(receiver);
+  LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+
+  // Emit the source position information after having loaded the
+  // receiver and the arguments.
+  CodeForSourcePosition(position);
+
+  // Check if the arguments object has been lazily allocated
+  // already. If so, just use that instead of copying the arguments
+  // from the stack. This also deals with cases where a local variable
+  // named 'arguments' has been introduced.
+  frame_->Dup();
+  Result probe = frame_->Pop();
+  bool try_lazy = true;
+  if (probe.is_constant()) {
+    try_lazy = probe.handle()->IsTheHole();
+  } else {
+    __ cmp(Operand(probe.reg()), Immediate(Factory::the_hole_value()));
+    probe.Unuse();
+    slow.Branch(not_equal);
+  }
+
+  if (try_lazy) {
+    JumpTarget build_args;
+
+    // Get rid of the arguments object probe.
+    frame_->Drop();
+
+    // Before messing with the execution stack, we sync all
+    // elements. This is bound to happen anyway because we're
+    // about to call a function.
+    frame_->SyncRange(0, frame_->element_count() - 1);
+
+    // Check that the receiver really is a JavaScript object.
+    { frame_->PushElementAt(0);
+      Result receiver = frame_->Pop();
+      receiver.ToRegister();
+      __ test(receiver.reg(), Immediate(kSmiTagMask));
+      build_args.Branch(zero);
+      Result tmp = allocator_->Allocate();
+      // We allow all JSObjects including JSFunctions.  As long as
+      // JS_FUNCTION_TYPE is the last instance type and it is right
+      // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
+      // bound.
+      ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+      ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+      __ CmpObjectType(receiver.reg(), FIRST_JS_OBJECT_TYPE, tmp.reg());
+      build_args.Branch(less);
+    }
+
+    // Verify that we're invoking Function.prototype.apply.
+    { frame_->PushElementAt(1);
+      Result apply = frame_->Pop();
+      apply.ToRegister();
+      __ test(apply.reg(), Immediate(kSmiTagMask));
+      build_args.Branch(zero);
+      Result tmp = allocator_->Allocate();
+      __ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
+      build_args.Branch(not_equal);
+      __ mov(tmp.reg(),
+             FieldOperand(apply.reg(), JSFunction::kSharedFunctionInfoOffset));
+      Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
+      __ cmp(FieldOperand(tmp.reg(), SharedFunctionInfo::kCodeOffset),
+             Immediate(apply_code));
+      build_args.Branch(not_equal);
+    }
+
+    // Get the function receiver from the stack. Check that it
+    // really is a function.
+    __ mov(edi, Operand(esp, 2 * kPointerSize));
+    __ test(edi, Immediate(kSmiTagMask));
+    build_args.Branch(zero);
+    __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+    build_args.Branch(not_equal);
+
+    // Copy the arguments to this function possibly from the
+    // adaptor frame below it.
+    Label invoke, adapted;
+    __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+    __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+    __ cmp(Operand(ecx),
+           Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+    __ j(equal, &adapted);
+
+    // No arguments adaptor frame. Copy fixed number of arguments.
+    __ mov(eax, Immediate(scope_->num_parameters()));
+    for (int i = 0; i < scope_->num_parameters(); i++) {
+      __ push(frame_->ParameterAt(i));
+    }
+    __ jmp(&invoke);
+
+    // Arguments adaptor frame present. Copy arguments from there, but
+    // avoid copying too many arguments to avoid stack overflows.
+    __ bind(&adapted);
+    static const uint32_t kArgumentsLimit = 1 * KB;
+    __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+    __ shr(eax, kSmiTagSize);
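+    // eax now holds the untagged argument count; keep it for InvokeFunction
+    // below and use ecx as the loop counter while copying the arguments.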
+    __ mov(ecx, Operand(eax));
+    __ cmp(eax, kArgumentsLimit);
+    build_args.Branch(above);
+
+    // Loop through the arguments pushing them onto the execution
+    // stack. We don't inform the virtual frame of the push, so we don't
+    // have to worry about getting rid of the elements from the virtual
+    // frame.
+    Label loop;
+    __ bind(&loop);
+    __ test(ecx, Operand(ecx));
+    __ j(zero, &invoke);
+    __ push(Operand(edx, ecx, times_4, 1 * kPointerSize));
+    __ dec(ecx);
+    __ jmp(&loop);
+
+    // Invoke the function. The virtual frame knows about the receiver
+    // so make sure to forget that explicitly.
+    __ bind(&invoke);
+    ParameterCount actual(eax);
+    __ InvokeFunction(edi, actual, CALL_FUNCTION);
+    frame_->Forget(1);
+    Result result = allocator()->Allocate(eax);
+    frame_->SetElementAt(0, &result);
+    done.Jump();
+
+    // Slow case: allocate the arguments object since we know it isn't
+    // there, and fall through to the slow case where we call
+    // Function.prototype.apply.
+    build_args.Bind();
+    Result arguments_object = StoreArgumentsObject(false);
+    frame_->Push(&arguments_object);
+    slow.Bind();
+  }
+
+  // Flip the apply function and the function to call on the stack, so
+  // the function looks like the receiver of the apply call. This way,
+  // the generic Function.prototype.apply implementation can deal with
+  // the call like it usually does.
+  Result a2 = frame_->Pop();
+  Result a1 = frame_->Pop();
+  Result ap = frame_->Pop();
+  Result fn = frame_->Pop();
+  frame_->Push(&ap);
+  frame_->Push(&fn);
+  frame_->Push(&a1);
+  frame_->Push(&a2);
+  CallFunctionStub call_function(2, NOT_IN_LOOP);
+  Result res = frame_->CallStub(&call_function, 3);
+  frame_->Push(&res);
+
+  // All done. Restore context register after call.
+  if (try_lazy) done.Bind();
+  frame_->RestoreContextRegister();
+}
+
+
+class DeferredStackCheck: public DeferredCode {
+ public:
+  DeferredStackCheck() {
+    set_comment("[ DeferredStackCheck");
+  }
+
+  virtual void Generate();
+};
+
+
+void DeferredStackCheck::Generate() {
+  StackCheckStub stub;
+  __ CallStub(&stub);
+}
+
+
+void CodeGenerator::CheckStack() {
+  if (FLAG_check_stack) {
+    DeferredStackCheck* deferred = new DeferredStackCheck;
+    ExternalReference stack_guard_limit =
+        ExternalReference::address_of_stack_guard_limit();
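+    // If esp has grown below the stack guard limit, the deferred code calls
+    // the stack check stub.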
+    __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
+    deferred->Branch(below);
+    deferred->BindExit();
+  }
+}
+
+
+void CodeGenerator::VisitAndSpill(Statement* statement) {
+  ASSERT(in_spilled_code());
+  set_in_spilled_code(false);
+  Visit(statement);
+  if (frame_ != NULL) {
+    frame_->SpillAll();
+  }
+  set_in_spilled_code(true);
+}
+
+
+void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
+  ASSERT(in_spilled_code());
+  set_in_spilled_code(false);
+  VisitStatements(statements);
+  if (frame_ != NULL) {
+    frame_->SpillAll();
+  }
+  set_in_spilled_code(true);
+}
+
+
+void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
+  ASSERT(!in_spilled_code());
+  for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
+    Visit(statements->at(i));
+  }
+}
+
+
+void CodeGenerator::VisitBlock(Block* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ Block");
+  CodeForStatementPosition(node);
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  VisitStatements(node->statements());
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
+  node->break_target()->Unuse();
+}
+
+
+void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+  // Call the runtime to declare the globals.  The inevitable call
+  // will sync frame elements to memory anyway, so we do it eagerly to
+  // allow us to push the arguments directly into place.
+  frame_->SyncRange(0, frame_->element_count() - 1);
+
+  frame_->EmitPush(Immediate(pairs));
+  frame_->EmitPush(esi);  // The context is the second argument.
+  frame_->EmitPush(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
+  Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
+  // Return value is ignored.
+}
+
+
+void CodeGenerator::VisitDeclaration(Declaration* node) {
+  Comment cmnt(masm_, "[ Declaration");
+  Variable* var = node->proxy()->var();
+  ASSERT(var != NULL);  // must have been resolved
+  Slot* slot = var->slot();
+
+  // If it was not possible to allocate the variable at compile time,
+  // we need to "declare" it at runtime to make sure it actually
+  // exists in the local context.
+  if (slot != NULL && slot->type() == Slot::LOOKUP) {
+    // Variables with a "LOOKUP" slot were introduced as non-locals
+    // during variable resolution and must have mode DYNAMIC.
+    ASSERT(var->is_dynamic());
+    // For now, just do a runtime call.  Sync the virtual frame eagerly
+    // so we can simply push the arguments into place.
+    frame_->SyncRange(0, frame_->element_count() - 1);
+    frame_->EmitPush(esi);
+    frame_->EmitPush(Immediate(var->name()));
+    // Declaration nodes are always introduced in one of two modes.
+    ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
+    PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
+    frame_->EmitPush(Immediate(Smi::FromInt(attr)));
+    // Push initial value, if any.
+    // Note: For variables we must not push an initial value (such as
+    // 'undefined') because we may have a (legal) redeclaration and we
+    // must not destroy the current value.
+    if (node->mode() == Variable::CONST) {
+      frame_->EmitPush(Immediate(Factory::the_hole_value()));
+    } else if (node->fun() != NULL) {
+      Load(node->fun());
+    } else {
+      frame_->EmitPush(Immediate(Smi::FromInt(0)));  // no initial value!
+    }
+    Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
+    // Ignore the return value (declarations are statements).
+    return;
+  }
+
+  ASSERT(!var->is_global());
+
+  // If we have a function or a constant, we need to initialize the variable.
+  Expression* val = NULL;
+  if (node->mode() == Variable::CONST) {
+    val = new Literal(Factory::the_hole_value());
+  } else {
+    val = node->fun();  // NULL if we don't have a function
+  }
+
+  if (val != NULL) {
+    {
+      // Set the initial value.
+      Reference target(this, node->proxy());
+      Load(val);
+      target.SetValue(NOT_CONST_INIT);
+      // The reference is removed from the stack (preserving TOS) when
+      // it goes out of scope.
+    }
+    // Get rid of the assigned value (declarations are statements).
+    frame_->Drop();
+  }
+}
+
+
+void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ ExpressionStatement");
+  CodeForStatementPosition(node);
+  Expression* expression = node->expression();
+  expression->MarkAsStatement();
+  Load(expression);
+  // Remove the lingering expression result from the top of stack.
+  frame_->Drop();
+}
+
+
+void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "// EmptyStatement");
+  CodeForStatementPosition(node);
+  // nothing to do
+}
+
+
+void CodeGenerator::VisitIfStatement(IfStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ IfStatement");
+  // Generate different code depending on which parts of the if statement
+  // are present or not.
+  bool has_then_stm = node->HasThenStatement();
+  bool has_else_stm = node->HasElseStatement();
+
+  CodeForStatementPosition(node);
+  JumpTarget exit;
+  if (has_then_stm && has_else_stm) {
+    JumpTarget then;
+    JumpTarget else_;
+    ControlDestination dest(&then, &else_, true);
+    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+
+    if (dest.false_was_fall_through()) {
+      // The else target was bound, so we compile the else part first.
+      Visit(node->else_statement());
+
+      // We may have dangling jumps to the then part.
+      if (then.is_linked()) {
+        if (has_valid_frame()) exit.Jump();
+        then.Bind();
+        Visit(node->then_statement());
+      }
+    } else {
+      // The then target was bound, so we compile the then part first.
+      Visit(node->then_statement());
+
+      if (else_.is_linked()) {
+        if (has_valid_frame()) exit.Jump();
+        else_.Bind();
+        Visit(node->else_statement());
+      }
+    }
+
+  } else if (has_then_stm) {
+    ASSERT(!has_else_stm);
+    JumpTarget then;
+    ControlDestination dest(&then, &exit, true);
+    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+
+    if (dest.false_was_fall_through()) {
+      // The exit label was bound.  We may have dangling jumps to the
+      // then part.
+      if (then.is_linked()) {
+        exit.Unuse();
+        exit.Jump();
+        then.Bind();
+        Visit(node->then_statement());
+      }
+    } else {
+      // The then label was bound.
+      Visit(node->then_statement());
+    }
+
+  } else if (has_else_stm) {
+    ASSERT(!has_then_stm);
+    JumpTarget else_;
+    ControlDestination dest(&exit, &else_, false);
+    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+
+    if (dest.true_was_fall_through()) {
+      // The exit label was bound.  We may have dangling jumps to the
+      // else part.
+      if (else_.is_linked()) {
+        exit.Unuse();
+        exit.Jump();
+        else_.Bind();
+        Visit(node->else_statement());
+      }
+    } else {
+      // The else label was bound.
+      Visit(node->else_statement());
+    }
+
+  } else {
+    ASSERT(!has_then_stm && !has_else_stm);
+    // We only care about the condition's side effects (not its value
+    // or control flow effect).  LoadCondition is called without
+    // forcing control flow.
+    ControlDestination dest(&exit, &exit, true);
+    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, false);
+    if (!dest.is_used()) {
+      // We got a value on the frame rather than (or in addition to)
+      // control flow.
+      frame_->Drop();
+    }
+  }
+
+  if (exit.is_linked()) {
+    exit.Bind();
+  }
+}
+
+
+void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ ContinueStatement");
+  CodeForStatementPosition(node);
+  node->target()->continue_target()->Jump();
+}
+
+
+void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ BreakStatement");
+  CodeForStatementPosition(node);
+  node->target()->break_target()->Jump();
+}
+
+
+void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ ReturnStatement");
+
+  CodeForStatementPosition(node);
+  Load(node->expression());
+  Result return_value = frame_->Pop();
+  if (function_return_is_shadowed_) {
+    function_return_.Jump(&return_value);
+  } else {
+    frame_->PrepareForReturn();
+    if (function_return_.is_bound()) {
+      // If the function return label is already bound we reuse the
+      // code by jumping to the return site.
+      function_return_.Jump(&return_value);
+    } else {
+      function_return_.Bind(&return_value);
+      GenerateReturnSequence(&return_value);
+    }
+  }
+}
+
+
+void CodeGenerator::GenerateReturnSequence(Result* return_value) {
+  // The return value is a live (but not currently reference counted)
+  // reference to eax.  This is safe because the current frame does not
+  // contain a reference to eax (it is prepared for the return by spilling
+  // all registers).
+  if (FLAG_trace) {
+    frame_->Push(return_value);
+    *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
+  }
+  return_value->ToRegister(eax);
+
+  // Add a label for checking the size of the code used for returning.
+  Label check_exit_codesize;
+  masm_->bind(&check_exit_codesize);
+
+  // Leave the frame and return popping the arguments and the
+  // receiver.
+  frame_->Exit();
+  masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
+  DeleteFrame();
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Check that the size of the code used for returning matches what is
+  // expected by the debugger.
+  ASSERT_EQ(Debug::kIa32JSReturnSequenceLength,
+            masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+#endif
+}
+
+
+void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ WithEnterStatement");
+  CodeForStatementPosition(node);
+  Load(node->expression());
+  Result context;
+  if (node->is_catch_block()) {
+    context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
+  } else {
+    context = frame_->CallRuntime(Runtime::kPushContext, 1);
+  }
+
+  // Update context local.
+  frame_->SaveContextRegister();
+
+  // Verify that the runtime call result and esi agree.
+  if (FLAG_debug_code) {
+    __ cmp(context.reg(), Operand(esi));
+    __ Assert(equal, "Runtime::NewContext should end up in esi");
+  }
+}
+
+
+void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ WithExitStatement");
+  CodeForStatementPosition(node);
+  // Pop context.
+  __ mov(esi, ContextOperand(esi, Context::PREVIOUS_INDEX));
+  // Update context local.
+  frame_->SaveContextRegister();
+}
+
+
+void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ SwitchStatement");
+  CodeForStatementPosition(node);
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+  // Compile the switch value.
+  Load(node->tag());
+
+  ZoneList<CaseClause*>* cases = node->cases();
+  int length = cases->length();
+  CaseClause* default_clause = NULL;
+
+  JumpTarget next_test;
+  // Compile the case label expressions and comparisons.  Exit early
+  // if a comparison is unconditionally true.  The target next_test is
+  // bound before the loop in order to indicate control flow to the
+  // first comparison.
+  next_test.Bind();
+  for (int i = 0; i < length && !next_test.is_unused(); i++) {
+    CaseClause* clause = cases->at(i);
+    // The default is not a test, but remember it for later.
+    if (clause->is_default()) {
+      default_clause = clause;
+      continue;
+    }
+
+    Comment cmnt(masm_, "[ Case comparison");
+    // We recycle the same target next_test for each test.  Bind it if
+    // the previous test has not done so and then unuse it for the
+    // loop.
+    if (next_test.is_linked()) {
+      next_test.Bind();
+    }
+    next_test.Unuse();
+
+    // Duplicate the switch value.
+    frame_->Dup();
+
+    // Compile the label expression.
+    Load(clause->label());
+
+    // Compare and branch to the body if true or the next test if
+    // false.  Prefer the next test as a fall through.
+    ControlDestination dest(clause->body_target(), &next_test, false);
+    Comparison(equal, true, &dest);
+
+    // If the comparison fell through to the true target, jump to the
+    // actual body.
+    if (dest.true_was_fall_through()) {
+      clause->body_target()->Unuse();
+      clause->body_target()->Jump();
+    }
+  }
+
+  // If there was control flow to a next test from the last one
+  // compiled, compile a jump to the default or break target.
+  if (!next_test.is_unused()) {
+    if (next_test.is_linked()) {
+      next_test.Bind();
+    }
+    // Drop the switch value.
+    frame_->Drop();
+    if (default_clause != NULL) {
+      default_clause->body_target()->Jump();
+    } else {
+      node->break_target()->Jump();
+    }
+  }
+
+  // The last instruction emitted was a jump, either to the default
+  // clause or the break target, or else to a case body from the loop
+  // that compiles the tests.
+  ASSERT(!has_valid_frame());
+  // Compile case bodies as needed.
+  for (int i = 0; i < length; i++) {
+    CaseClause* clause = cases->at(i);
+
+    // There are two ways to reach the body: from the corresponding
+    // test or as the fall through of the previous body.
+    if (clause->body_target()->is_linked() || has_valid_frame()) {
+      if (clause->body_target()->is_linked()) {
+        if (has_valid_frame()) {
+          // If we have both a jump to the test and a fall through, put
+          // a jump on the fall through path to avoid the dropping of
+          // the switch value on the test path.  The exception is the
+          // default which has already had the switch value dropped.
+          if (clause->is_default()) {
+            clause->body_target()->Bind();
+          } else {
+            JumpTarget body;
+            body.Jump();
+            clause->body_target()->Bind();
+            frame_->Drop();
+            body.Bind();
+          }
+        } else {
+          // No fall through to worry about.
+          clause->body_target()->Bind();
+          if (!clause->is_default()) {
+            frame_->Drop();
+          }
+        }
+      } else {
+        // Otherwise, we have only fall through.
+        ASSERT(has_valid_frame());
+      }
+
+      // We are now prepared to compile the body.
+      Comment cmnt(masm_, "[ Case body");
+      VisitStatements(clause->statements());
+    }
+    clause->body_target()->Unuse();
+  }
+
+  // We may not have a valid frame here so bind the break target only
+  // if needed.
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
+  node->break_target()->Unuse();
+}
+
+
+void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ LoopStatement");
+  CodeForStatementPosition(node);
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+  // Simple condition analysis.  ALWAYS_TRUE and ALWAYS_FALSE represent a
+  // known result for the test expression, with no side effects.
+  enum { ALWAYS_TRUE, ALWAYS_FALSE, DONT_KNOW } info = DONT_KNOW;
+  if (node->cond() == NULL) {
+    ASSERT(node->type() == LoopStatement::FOR_LOOP);
+    info = ALWAYS_TRUE;
+  } else {
+    Literal* lit = node->cond()->AsLiteral();
+    if (lit != NULL) {
+      if (lit->IsTrue()) {
+        info = ALWAYS_TRUE;
+      } else if (lit->IsFalse()) {
+        info = ALWAYS_FALSE;
+      }
+    }
+  }
+
+  switch (node->type()) {
+    case LoopStatement::DO_LOOP: {
+      JumpTarget body(JumpTarget::BIDIRECTIONAL);
+      IncrementLoopNesting();
+
+      // Label the top of the loop for the backward jump if necessary.
+      if (info == ALWAYS_TRUE) {
+        // Use the continue target.
+        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+        node->continue_target()->Bind();
+      } else if (info == ALWAYS_FALSE) {
+        // No need to label it.
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+      } else {
+        // Continue is the test, so use the backward body target.
+        ASSERT(info == DONT_KNOW);
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+        body.Bind();
+      }
+
+      CheckStack();  // TODO(1222600): ignore if body contains calls.
+      Visit(node->body());
+
+      // Compile the test.
+      if (info == ALWAYS_TRUE) {
+        // If control flow can fall off the end of the body, jump back
+        // to the top and bind the break target at the exit.
+        if (has_valid_frame()) {
+          node->continue_target()->Jump();
+        }
+        if (node->break_target()->is_linked()) {
+          node->break_target()->Bind();
+        }
+
+      } else if (info == ALWAYS_FALSE) {
+        // We may have had continues or breaks in the body.
+        if (node->continue_target()->is_linked()) {
+          node->continue_target()->Bind();
+        }
+        if (node->break_target()->is_linked()) {
+          node->break_target()->Bind();
+        }
+
+      } else {
+        ASSERT(info == DONT_KNOW);
+        // We have to compile the test expression if it can be reached by
+        // control flow falling out of the body or via continue.
+        if (node->continue_target()->is_linked()) {
+          node->continue_target()->Bind();
+        }
+        if (has_valid_frame()) {
+          ControlDestination dest(&body, node->break_target(), false);
+          LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+        }
+        if (node->break_target()->is_linked()) {
+          node->break_target()->Bind();
+        }
+      }
+      break;
+    }
+
+    case LoopStatement::WHILE_LOOP: {
+      // Do not duplicate conditions that may have function literal
+      // subexpressions.  This can cause us to compile the function
+      // literal twice.
+      bool test_at_bottom = !node->may_have_function_literal();
+
+      IncrementLoopNesting();
+
+      // If the condition is always false and has no side effects, we
+      // do not need to compile anything.
+      if (info == ALWAYS_FALSE) break;
+
+      JumpTarget body;
+      if (test_at_bottom) {
+        body.set_direction(JumpTarget::BIDIRECTIONAL);
+      }
+
+      // Based on the condition analysis, compile the test as necessary.
+      if (info == ALWAYS_TRUE) {
+        // We will not compile the test expression.  Label the top of
+        // the loop with the continue target.
+        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+        node->continue_target()->Bind();
+      } else {
+        ASSERT(info == DONT_KNOW);  // ALWAYS_FALSE cannot reach here.
+        if (test_at_bottom) {
+          // Continue is the test at the bottom, no need to label the
+          // test at the top.  The body is a backward target.
+          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+        } else {
+          // Label the test at the top as the continue target.  The
+          // body is a forward-only target.
+          node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+          node->continue_target()->Bind();
+        }
+        // Compile the test with the body as the true target and
+        // preferred fall-through and with the break target as the
+        // false target.
+        ControlDestination dest(&body, node->break_target(), true);
+        LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+
+        if (dest.false_was_fall_through()) {
+          // If we got the break target as fall-through, the test may
+          // have been unconditionally false (if there are no jumps to
+          // the body).
+          if (!body.is_linked()) break;
+
+          // Otherwise, jump around the body on the fall through and
+          // then bind the body target.
+          node->break_target()->Unuse();
+          node->break_target()->Jump();
+          body.Bind();
+        }
+      }
+
+      CheckStack();  // TODO(1222600): ignore if body contains calls.
+      Visit(node->body());
+
+      // Based on the condition analysis, compile the backward jump as
+      // necessary.
+      if (info == ALWAYS_TRUE) {
+        // The loop body has been labeled with the continue target.
+        if (has_valid_frame()) {
+          node->continue_target()->Jump();
+        }
+      } else {
+        ASSERT(info == DONT_KNOW);  // ALWAYS_FALSE cannot reach here.
+        if (test_at_bottom) {
+          // If we have chosen to recompile the test at the bottom,
+          // then it is the continue target.
+          if (node->continue_target()->is_linked()) {
+            node->continue_target()->Bind();
+          }
+          if (has_valid_frame()) {
+            // The break target is the fall-through (body is a backward
+            // jump from here and thus an invalid fall-through).
+            ControlDestination dest(&body, node->break_target(), false);
+            LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+          }
+        } else {
+          // If we have chosen not to recompile the test at the
+          // bottom, jump back to the one at the top.
+          if (has_valid_frame()) {
+            node->continue_target()->Jump();
+          }
+        }
+      }
+
+      // The break target may be already bound (by the condition), or
+      // there may not be a valid frame.  Bind it only if needed.
+      if (node->break_target()->is_linked()) {
+        node->break_target()->Bind();
+      }
+      break;
+    }
+
+    case LoopStatement::FOR_LOOP: {
+      // Do not duplicate conditions that may have function literal
+      // subexpressions.  This can cause us to compile the function
+      // literal twice.
+      bool test_at_bottom = !node->may_have_function_literal();
+
+      // Compile the init expression if present.
+      if (node->init() != NULL) {
+        Visit(node->init());
+      }
+
+      IncrementLoopNesting();
+
+      // If the condition is always false and has no side effects, we
+      // do not need to compile anything else.
+      if (info == ALWAYS_FALSE) break;
+
+      // Target for backward edge if no test at the bottom, otherwise
+      // unused.
+      JumpTarget loop(JumpTarget::BIDIRECTIONAL);
+
+      // Target for backward edge if there is a test at the bottom,
+      // otherwise used as target for test at the top.
+      JumpTarget body;
+      if (test_at_bottom) {
+        body.set_direction(JumpTarget::BIDIRECTIONAL);
+      }
+
+      // Based on the condition analysis, compile the test as necessary.
+      if (info == ALWAYS_TRUE) {
+        // We will not compile the test expression.  Label the top of
+        // the loop.
+        if (node->next() == NULL) {
+          // Use the continue target if there is no update expression.
+          node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+          node->continue_target()->Bind();
+        } else {
+          // Otherwise use the backward loop target.
+          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+          loop.Bind();
+        }
+      } else {
+        ASSERT(info == DONT_KNOW);
+        if (test_at_bottom) {
+          // Continue is either the update expression or the test at
+          // the bottom, no need to label the test at the top.
+          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+        } else if (node->next() == NULL) {
+          // We are not recompiling the test at the bottom and there
+          // is no update expression.
+          node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+          node->continue_target()->Bind();
+        } else {
+          // We are not recompiling the test at the bottom and there
+          // is an update expression.
+          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+          loop.Bind();
+        }
+
+        // Compile the test with the body as the true target and
+        // preferred fall-through and with the break target as the
+        // false target.
+        ControlDestination dest(&body, node->break_target(), true);
+        LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+
+        if (dest.false_was_fall_through()) {
+          // If we got the break target as fall-through, the test may
+          // have been unconditionally false (if there are no jumps to
+          // the body).
+          if (!body.is_linked()) break;
+
+          // Otherwise, jump around the body on the fall through and
+          // then bind the body target.
+          node->break_target()->Unuse();
+          node->break_target()->Jump();
+          body.Bind();
+        }
+      }
+
+      CheckStack();  // TODO(1222600): ignore if body contains calls.
+      Visit(node->body());
+
+      // If there is an update expression, compile it if necessary.
+      if (node->next() != NULL) {
+        if (node->continue_target()->is_linked()) {
+          node->continue_target()->Bind();
+        }
+
+        // Control can reach the update by falling out of the body or
+        // by a continue.
+        if (has_valid_frame()) {
+          // Record the source position of the statement as this code
+          // which is after the code for the body actually belongs to
+          // the loop statement and not the body.
+          CodeForStatementPosition(node);
+          Visit(node->next());
+        }
+      }
+
+      // Based on the condition analysis, compile the backward jump as
+      // necessary.
+      if (info == ALWAYS_TRUE) {
+        if (has_valid_frame()) {
+          if (node->next() == NULL) {
+            node->continue_target()->Jump();
+          } else {
+            loop.Jump();
+          }
+        }
+      } else {
+        ASSERT(info == DONT_KNOW);  // ALWAYS_FALSE cannot reach here.
+        if (test_at_bottom) {
+          if (node->continue_target()->is_linked()) {
+            // We can have dangling jumps to the continue target if
+            // there was no update expression.
+            node->continue_target()->Bind();
+          }
+          // Control can reach the test at the bottom by falling out
+          // of the body, by a continue in the body, or from the
+          // update expression.
+          if (has_valid_frame()) {
+            // The break target is the fall-through (body is a
+            // backward jump from here).
+            ControlDestination dest(&body, node->break_target(), false);
+            LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+          }
+        } else {
+          // Otherwise, jump back to the test at the top.
+          if (has_valid_frame()) {
+            if (node->next() == NULL) {
+              node->continue_target()->Jump();
+            } else {
+              loop.Jump();
+            }
+          }
+        }
+      }
+
+      // The break target may be already bound (by the condition), or
+      // there may not be a valid frame.  Bind it only if needed.
+      if (node->break_target()->is_linked()) {
+        node->break_target()->Bind();
+      }
+      break;
+    }
+  }
+
+  DecrementLoopNesting();
+  node->continue_target()->Unuse();
+  node->break_target()->Unuse();
+}
+
+
+void CodeGenerator::VisitForInStatement(ForInStatement* node) {
+  ASSERT(!in_spilled_code());
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ ForInStatement");
+  CodeForStatementPosition(node);
+
+  JumpTarget primitive;
+  JumpTarget jsobject;
+  JumpTarget fixed_array;
+  JumpTarget entry(JumpTarget::BIDIRECTIONAL);
+  JumpTarget end_del_check;
+  JumpTarget exit;
+
+  // Get the object to enumerate over (converted to JSObject).
+  LoadAndSpill(node->enumerable());
+
+  // Both SpiderMonkey and kjs ignore null and undefined in contrast
+  // to the specification.  12.6.4 mandates a call to ToObject.
+  frame_->EmitPop(eax);
+
+  // eax: value to be iterated over
+  __ cmp(eax, Factory::undefined_value());
+  exit.Branch(equal);
+  __ cmp(eax, Factory::null_value());
+  exit.Branch(equal);
+
+  // Stack layout in body:
+  // [iteration counter (smi)] <- slot 0
+  // [length of array]         <- slot 1
+  // [FixedArray]              <- slot 2
+  // [Map or 0]                <- slot 3
+  // [Object]                  <- slot 4
+
+  // Check if enumerable is already a JSObject
+  // eax: value to be iterated over
+  __ test(eax, Immediate(kSmiTagMask));
+  primitive.Branch(zero);
+  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+  __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+  jsobject.Branch(above_equal);
+
+  primitive.Bind();
+  frame_->EmitPush(eax);
+  frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
+  // function call returns the value in eax, which is where we want it below
+
+  jsobject.Bind();
+  // Get the set of properties (as a FixedArray or Map).
+  // eax: value to be iterated over
+  frame_->EmitPush(eax);  // push the object being iterated over (slot 4)
+
+  frame_->EmitPush(eax);  // push the Object (slot 4) for the runtime call
+  frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+  // If we got a Map, we can do a fast modification check.
+  // Otherwise, we got a FixedArray, and we have to do a slow check.
+  // eax: map or fixed array (result from call to
+  // Runtime::kGetPropertyNamesFast)
+  __ mov(edx, Operand(eax));
+  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+  __ cmp(ecx, Factory::meta_map());
+  fixed_array.Branch(not_equal);
+
+  // Get enum cache
+  // eax: map (result from call to Runtime::kGetPropertyNamesFast)
+  __ mov(ecx, Operand(eax));
+  __ mov(ecx, FieldOperand(ecx, Map::kInstanceDescriptorsOffset));
+  // Get the bridge array held in the enumeration index field.
+  __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
+  // Get the cache from the bridge array.
+  __ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+  frame_->EmitPush(eax);  // <- slot 3
+  frame_->EmitPush(edx);  // <- slot 2
+  __ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
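+  // Convert the length to a smi so it can be compared against the smi
+  // iteration counter below.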
+  __ shl(eax, kSmiTagSize);
+  frame_->EmitPush(eax);  // <- slot 1
+  frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 0
+  entry.Jump();
+
+  fixed_array.Bind();
+  // eax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
+  frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 3
+  frame_->EmitPush(eax);  // <- slot 2
+
+  // Push the length of the array and the initial index onto the stack.
+  __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
+  __ shl(eax, kSmiTagSize);
+  frame_->EmitPush(eax);  // <- slot 1
+  frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 0
+
+  // Condition.
+  entry.Bind();
+  // Grab the current frame's height for the break and continue
+  // targets only after all the state is pushed on the frame.
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+  __ mov(eax, frame_->ElementAt(0));  // load the current count
+  __ cmp(eax, frame_->ElementAt(1));  // compare to the array length
+  node->break_target()->Branch(above_equal);
+
+  // Get the i'th entry of the array.
+  __ mov(edx, frame_->ElementAt(2));
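+  // The index in eax is a smi (value << kSmiTagSize), so scaling it by 2
+  // produces the byte offset index * kPointerSize.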
+  __ mov(ebx, Operand(edx, eax, times_2,
+                      FixedArray::kHeaderSize - kHeapObjectTag));
+
+  // Get the expected map from the stack or a zero map in the permanent
+  // slow case.
+  // eax: current iteration count
+  // ebx: i'th entry of the enum cache
+  __ mov(edx, frame_->ElementAt(3));
+  // Check if the expected map still matches that of the enumerable.
+  // If not, we have to filter the key.
+  // eax: current iteration count
+  // ebx: i'th entry of the enum cache
+  // edx: expected map value
+  __ mov(ecx, frame_->ElementAt(4));
+  __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
+  __ cmp(ecx, Operand(edx));
+  end_del_check.Branch(equal);
+
+  // Convert the entry to a string (or null if it isn't a property anymore).
+  frame_->EmitPush(frame_->ElementAt(4));  // push enumerable
+  frame_->EmitPush(ebx);  // push entry
+  frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
+  __ mov(ebx, Operand(eax));
+
+  // If the property has been removed while iterating, we just skip it.
+  __ cmp(ebx, Factory::null_value());
+  node->continue_target()->Branch(equal);
+
+  end_del_check.Bind();
+  // Store the entry in the 'each' expression and take another spin in the
+  // loop.  ebx: i'th entry of the enum cache (or string thereof)
+  frame_->EmitPush(ebx);
+  { Reference each(this, node->each());
+    // Loading a reference may leave the frame in an unspilled state.
+    frame_->SpillAll();
+    if (!each.is_illegal()) {
+      if (each.size() > 0) {
+        frame_->EmitPush(frame_->ElementAt(each.size()));
+      }
+      // If the reference was to a slot we rely on the convenient property
+      // that it doesn't matter whether a value (eg, ebx pushed above) is
+      // right on top of or right underneath a zero-sized reference.
+      each.SetValue(NOT_CONST_INIT);
+      if (each.size() > 0) {
+        // It's safe to pop the value lying on top of the reference before
+        // unloading the reference itself (which preserves the top of stack,
+        // ie, now the topmost value of the non-zero sized reference), since
+        // we will discard the top of stack after unloading the reference
+        // anyway.
+        frame_->Drop();
+      }
+    }
+  }
+  // Unloading a reference may leave the frame in an unspilled state.
+  frame_->SpillAll();
+
+  // Discard the i'th entry pushed above or else the remainder of the
+  // reference, whichever is currently on top of the stack.
+  frame_->Drop();
+
+  // Body.
+  CheckStack();  // TODO(1222600): ignore if body contains calls.
+  VisitAndSpill(node->body());
+
+  // Next.  Reestablish a spilled frame in case we are coming here via
+  // a continue in the body.
+  node->continue_target()->Bind();
+  frame_->SpillAll();
+  frame_->EmitPop(eax);
+  __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+  frame_->EmitPush(eax);
+  entry.Jump();
+
+  // Cleanup.  No need to spill because VirtualFrame::Drop is safe for
+  // any frame.
+  node->break_target()->Bind();
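+  // Drop the five for-in state slots: counter, length, fixed array, map,
+  // and the object being iterated over.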
+  frame_->Drop(5);
+
+  // Exit.
+  exit.Bind();
+
+  node->continue_target()->Unuse();
+  node->break_target()->Unuse();
+}
+
+
+void CodeGenerator::VisitTryCatch(TryCatch* node) {
+  ASSERT(!in_spilled_code());
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ TryCatch");
+  CodeForStatementPosition(node);
+
+  JumpTarget try_block;
+  JumpTarget exit;
+
+  try_block.Call();
+  // --- Catch block ---
+  frame_->EmitPush(eax);
+
+  // Store the caught exception in the catch variable.
+  { Reference ref(this, node->catch_var());
+    ASSERT(ref.is_slot());
+    // Load the exception to the top of the stack.  Here we make use of the
+    // convenient property that it doesn't matter whether a value is
+    // immediately on top of or underneath a zero-sized reference.
+    ref.SetValue(NOT_CONST_INIT);
+  }
+
+  // Remove the exception from the stack.
+  frame_->Drop();
+
+  VisitStatementsAndSpill(node->catch_block()->statements());
+  if (has_valid_frame()) {
+    exit.Jump();
+  }
+
+  // --- Try block ---
+  try_block.Bind();
+
+  frame_->PushTryHandler(TRY_CATCH_HANDLER);
+  int handler_height = frame_->height();
+
+  // Shadow the jump targets for all escapes from the try block, including
+  // returns.  During shadowing, the original target is hidden as the
+  // ShadowTarget and operations on the original actually affect the
+  // shadowing target.
+  //
+  // We should probably try to unify the escaping targets and the return
+  // target.
+  int nof_escapes = node->escaping_targets()->length();
+  List<ShadowTarget*> shadows(1 + nof_escapes);
+
+  // Add the shadow target for the function return.
+  static const int kReturnShadowIndex = 0;
+  shadows.Add(new ShadowTarget(&function_return_));
+  bool function_return_was_shadowed = function_return_is_shadowed_;
+  function_return_is_shadowed_ = true;
+  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
+
+  // Add the remaining shadow targets.
+  for (int i = 0; i < nof_escapes; i++) {
+    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
+  }
+
+  // Generate code for the statements in the try block.
+  VisitStatementsAndSpill(node->try_block()->statements());
+
+  // Stop the introduced shadowing and count the number of required unlinks.
+  // After shadowing stops, the original targets are unshadowed and the
+  // ShadowTargets represent the formerly shadowing targets.
+  bool has_unlinks = false;
+  for (int i = 0; i < shadows.length(); i++) {
+    shadows[i]->StopShadowing();
+    has_unlinks = has_unlinks || shadows[i]->is_linked();
+  }
+  function_return_is_shadowed_ = function_return_was_shadowed;
+
+  // Get an external reference to the handler address.
+  ExternalReference handler_address(Top::k_handler_address);
+
+  // Make sure that there's nothing left on the stack above the
+  // handler structure.
+  if (FLAG_debug_code) {
+    __ mov(eax, Operand::StaticVariable(handler_address));
+    __ cmp(esp, Operand(eax));
+    __ Assert(equal, "stack pointer should point to top handler");
+  }
+
+  // If we can fall off the end of the try block, unlink from try chain.
+  if (has_valid_frame()) {
+    // The next handler address is on top of the frame.  Unlink from
+    // the handler list and drop the rest of this handler from the
+    // frame.
+    ASSERT(StackHandlerConstants::kNextOffset == 0);
+    frame_->EmitPop(Operand::StaticVariable(handler_address));
+    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+    if (has_unlinks) {
+      exit.Jump();
+    }
+  }
+
+  // Generate unlink code for the (formerly) shadowing targets that
+  // have been jumped to.  Deallocate each shadow target.
+  Result return_value;
+  for (int i = 0; i < shadows.length(); i++) {
+    if (shadows[i]->is_linked()) {
+      // Unlink from try chain; be careful not to destroy the TOS if
+      // there is one.
+      if (i == kReturnShadowIndex) {
+        shadows[i]->Bind(&return_value);
+        return_value.ToRegister(eax);
+      } else {
+        shadows[i]->Bind();
+      }
+      // Because we can be jumping here (to spilled code) from
+      // unspilled code, we need to reestablish a spilled frame at
+      // this block.
+      frame_->SpillAll();
+
+      // Reload sp from the top handler, because some statements that we
+      // break from (eg, for...in) may have left stuff on the stack.
+      __ mov(esp, Operand::StaticVariable(handler_address));
+      frame_->Forget(frame_->height() - handler_height);
+
+      ASSERT(StackHandlerConstants::kNextOffset == 0);
+      frame_->EmitPop(Operand::StaticVariable(handler_address));
+      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+
+      if (i == kReturnShadowIndex) {
+        if (!function_return_is_shadowed_) frame_->PrepareForReturn();
+        shadows[i]->other_target()->Jump(&return_value);
+      } else {
+        shadows[i]->other_target()->Jump();
+      }
+    }
+  }
+
+  exit.Bind();
+}
+
+
+void CodeGenerator::VisitTryFinally(TryFinally* node) {
+  ASSERT(!in_spilled_code());
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ TryFinally");
+  CodeForStatementPosition(node);
+
+  // State: Used to keep track of reason for entering the finally
+  // block. Should probably be extended to hold information for
+  // break/continue from within the try block.
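+  // JUMPING is used as a base value: a state of JUMPING + i means control
+  // left the try block through the i'th shadowing target.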
+  enum { FALLING, THROWING, JUMPING };
+
+  JumpTarget try_block;
+  JumpTarget finally_block;
+
+  try_block.Call();
+
+  frame_->EmitPush(eax);
+  // In case of thrown exceptions, this is where we continue.
+  __ Set(ecx, Immediate(Smi::FromInt(THROWING)));
+  finally_block.Jump();
+
+  // --- Try block ---
+  try_block.Bind();
+
+  frame_->PushTryHandler(TRY_FINALLY_HANDLER);
+  int handler_height = frame_->height();
+
+  // Shadow the jump targets for all escapes from the try block, including
+  // returns.  During shadowing, the original target is hidden as the
+  // ShadowTarget and operations on the original actually affect the
+  // shadowing target.
+  //
+  // We should probably try to unify the escaping targets and the return
+  // target.
+  int nof_escapes = node->escaping_targets()->length();
+  List<ShadowTarget*> shadows(1 + nof_escapes);
+
+  // Add the shadow target for the function return.
+  static const int kReturnShadowIndex = 0;
+  shadows.Add(new ShadowTarget(&function_return_));
+  bool function_return_was_shadowed = function_return_is_shadowed_;
+  function_return_is_shadowed_ = true;
+  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
+
+  // Add the remaining shadow targets.
+  for (int i = 0; i < nof_escapes; i++) {
+    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
+  }
+
+  // Generate code for the statements in the try block.
+  VisitStatementsAndSpill(node->try_block()->statements());
+
+  // Stop the introduced shadowing and count the number of required unlinks.
+  // After shadowing stops, the original targets are unshadowed and the
+  // ShadowTargets represent the formerly shadowing targets.
+  int nof_unlinks = 0;
+  for (int i = 0; i < shadows.length(); i++) {
+    shadows[i]->StopShadowing();
+    if (shadows[i]->is_linked()) nof_unlinks++;
+  }
+  function_return_is_shadowed_ = function_return_was_shadowed;
+
+  // Get an external reference to the handler address.
+  ExternalReference handler_address(Top::k_handler_address);
+
+  // If we can fall off the end of the try block, unlink from the try
+  // chain and set the state on the frame to FALLING.
+  if (has_valid_frame()) {
+    // The next handler address is on top of the frame.
+    ASSERT(StackHandlerConstants::kNextOffset == 0);
+    frame_->EmitPop(Operand::StaticVariable(handler_address));
+    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+
+    // Fake a top of stack value (unneeded when FALLING) and set the
+    // state in ecx, then jump around the unlink blocks if any.
+    frame_->EmitPush(Immediate(Factory::undefined_value()));
+    __ Set(ecx, Immediate(Smi::FromInt(FALLING)));
+    if (nof_unlinks > 0) {
+      finally_block.Jump();
+    }
+  }
+
+  // Generate code to unlink and set the state for the (formerly)
+  // shadowing targets that have been jumped to.
+  for (int i = 0; i < shadows.length(); i++) {
+    if (shadows[i]->is_linked()) {
+      // If we have come from the shadowed return, the return value is
+      // on the virtual frame.  We must preserve it until it is
+      // pushed.
+      if (i == kReturnShadowIndex) {
+        Result return_value;
+        shadows[i]->Bind(&return_value);
+        return_value.ToRegister(eax);
+      } else {
+        shadows[i]->Bind();
+      }
+      // Because we can be jumping here (to spilled code) from
+      // unspilled code, we need to reestablish a spilled frame at
+      // this block.
+      frame_->SpillAll();
+
+      // Reload sp from the top handler, because some statements that
+      // we break from (eg, for...in) may have left stuff on the
+      // stack.
+      __ mov(esp, Operand::StaticVariable(handler_address));
+      frame_->Forget(frame_->height() - handler_height);
+
+      // Unlink this handler and drop it from the frame.
+      ASSERT(StackHandlerConstants::kNextOffset == 0);
+      frame_->EmitPop(Operand::StaticVariable(handler_address));
+      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+
+      if (i == kReturnShadowIndex) {
+        // If this target shadowed the function return, materialize
+        // the return value on the stack.
+        frame_->EmitPush(eax);
+      } else {
+        // Fake TOS for targets that shadowed breaks and continues.
+        frame_->EmitPush(Immediate(Factory::undefined_value()));
+      }
+      __ Set(ecx, Immediate(Smi::FromInt(JUMPING + i)));
+      if (--nof_unlinks > 0) {
+        // If this is not the last unlink block, jump around the next.
+        finally_block.Jump();
+      }
+    }
+  }
+
+  // --- Finally block ---
+  finally_block.Bind();
+
+  // Push the state on the stack.
+  frame_->EmitPush(ecx);
+
+  // We keep two elements on the stack - the (possibly faked) result
+  // and the state - while evaluating the finally block.
+  //
+  // Generate code for the statements in the finally block.
+  VisitStatementsAndSpill(node->finally_block()->statements());
+
+  if (has_valid_frame()) {
+    // Restore state and return value or faked TOS.
+    frame_->EmitPop(ecx);
+    frame_->EmitPop(eax);
+  }
+
+  // Generate code to jump to the right destination for all used
+  // formerly shadowing targets.  Deallocate each shadow target.
+  for (int i = 0; i < shadows.length(); i++) {
+    if (has_valid_frame() && shadows[i]->is_bound()) {
+      BreakTarget* original = shadows[i]->other_target();
+      __ cmp(Operand(ecx), Immediate(Smi::FromInt(JUMPING + i)));
+      if (i == kReturnShadowIndex) {
+        // The return value is (already) in eax.
+        Result return_value = allocator_->Allocate(eax);
+        ASSERT(return_value.is_valid());
+        if (function_return_is_shadowed_) {
+          original->Branch(equal, &return_value);
+        } else {
+          // Branch around the preparation for return which may emit
+          // code.
+          JumpTarget skip;
+          skip.Branch(not_equal);
+          frame_->PrepareForReturn();
+          original->Jump(&return_value);
+          skip.Bind();
+        }
+      } else {
+        original->Branch(equal);
+      }
+    }
+  }
+
+  if (has_valid_frame()) {
+    // Check if we need to rethrow the exception.
+    JumpTarget exit;
+    __ cmp(Operand(ecx), Immediate(Smi::FromInt(THROWING)));
+    exit.Branch(not_equal);
+
+    // Rethrow exception.
+    frame_->EmitPush(eax);  // undo pop from above
+    frame_->CallRuntime(Runtime::kReThrow, 1);
+
+    // Done.
+    exit.Bind();
+  }
+}
+
+
+void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ DebuggerStatement");
+  CodeForStatementPosition(node);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Spill everything, even constants, to the frame.
+  frame_->SpillAll();
+  frame_->CallRuntime(Runtime::kDebugBreak, 0);
+  // Ignore the return value.
+#endif
+}
+
+
+void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
+  // Call the runtime to instantiate the function boilerplate object.
+  // The inevitable call will sync frame elements to memory anyway, so
+  // we do it eagerly to allow us to push the arguments directly into
+  // place.
+  ASSERT(boilerplate->IsBoilerplate());
+  frame_->SyncRange(0, frame_->element_count() - 1);
+
+  // Push the boilerplate on the stack.
+  frame_->EmitPush(Immediate(boilerplate));
+
+  // Create a new closure.
+  frame_->EmitPush(esi);
+  Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
+  frame_->Push(&result);
+}
+
+
+void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
+  Comment cmnt(masm_, "[ FunctionLiteral");
+
+  // Build the function boilerplate and instantiate it.
+  Handle<JSFunction> boilerplate = BuildBoilerplate(node);
+  // Check for stack-overflow exception.
+  if (HasStackOverflow()) return;
+  InstantiateBoilerplate(boilerplate);
+}
+
+
+void CodeGenerator::VisitFunctionBoilerplateLiteral(
+    FunctionBoilerplateLiteral* node) {
+  Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
+  InstantiateBoilerplate(node->boilerplate());
+}
+
+
+void CodeGenerator::VisitConditional(Conditional* node) {
+  Comment cmnt(masm_, "[ Conditional");
+  JumpTarget then;
+  JumpTarget else_;
+  JumpTarget exit;
+  ControlDestination dest(&then, &else_, true);
+  LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+
+  if (dest.false_was_fall_through()) {
+    // The else target was bound, so we compile the else part first.
+    Load(node->else_expression(), typeof_state());
+
+    if (then.is_linked()) {
+      exit.Jump();
+      then.Bind();
+      Load(node->then_expression(), typeof_state());
+    }
+  } else {
+    // The then target was bound, so we compile the then part first.
+    Load(node->then_expression(), typeof_state());
+
+    if (else_.is_linked()) {
+      exit.Jump();
+      else_.Bind();
+      Load(node->else_expression(), typeof_state());
+    }
+  }
+
+  exit.Bind();
+}
+
+
+void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
+  if (slot->type() == Slot::LOOKUP) {
+    ASSERT(slot->var()->is_dynamic());
+
+    JumpTarget slow;
+    JumpTarget done;
+    Result value;
+
+    // Generate fast-case code for variables that might be shadowed by
+    // eval-introduced variables.  Eval is used a lot without
+    // introducing variables.  In those cases, we do not want to
+    // perform a runtime call for all variables in the scope
+    // containing the eval.
+    if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
+      value = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
+      // If there was no control flow to slow, we can exit early.
+      if (!slow.is_linked()) {
+        frame_->Push(&value);
+        return;
+      }
+
+      done.Jump(&value);
+
+    } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
+      Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+      // Only generate the fast case for locals that rewrite to slots.
+      // This rules out argument loads.
+      if (potential_slot != NULL) {
+        // Allocate a fresh register to use as a temp in
+        // ContextSlotOperandCheckExtensions and to hold the result
+        // value.
+        value = allocator_->Allocate();
+        ASSERT(value.is_valid());
+        __ mov(value.reg(),
+               ContextSlotOperandCheckExtensions(potential_slot,
+                                                 value,
+                                                 &slow));
+        if (potential_slot->var()->mode() == Variable::CONST) {
+          __ cmp(value.reg(), Factory::the_hole_value());
+          done.Branch(not_equal, &value);
+          __ mov(value.reg(), Factory::undefined_value());
+        }
+        // There is always control flow to slow from
+        // ContextSlotOperandCheckExtensions so we have to jump around
+        // it.
+        done.Jump(&value);
+      }
+    }
+
+    slow.Bind();
+    // A runtime call is inevitable.  We eagerly sync frame elements
+    // to memory so that we can push the arguments directly into place
+    // on top of the frame.
+    frame_->SyncRange(0, frame_->element_count() - 1);
+    frame_->EmitPush(esi);
+    frame_->EmitPush(Immediate(slot->var()->name()));
+    if (typeof_state == INSIDE_TYPEOF) {
+      value =
+          frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+    } else {
+      value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
+    }
+
+    done.Bind(&value);
+    frame_->Push(&value);
+
+  } else if (slot->var()->mode() == Variable::CONST) {
+    // Const slots may contain 'the hole' value (the constant hasn't been
+    // initialized yet) which needs to be converted into the 'undefined'
+    // value.
+    //
+    // We currently spill the virtual frame because constants use the
+    // potentially unsafe direct-frame access of SlotOperand.
+    VirtualFrame::SpilledScope spilled_scope;
+    Comment cmnt(masm_, "[ Load const");
+    JumpTarget exit;
+    __ mov(ecx, SlotOperand(slot, ecx));
+    __ cmp(ecx, Factory::the_hole_value());
+    exit.Branch(not_equal);
+    __ mov(ecx, Factory::undefined_value());
+    exit.Bind();
+    frame_->EmitPush(ecx);
+
+  } else if (slot->type() == Slot::PARAMETER) {
+    frame_->PushParameterAt(slot->index());
+
+  } else if (slot->type() == Slot::LOCAL) {
+    frame_->PushLocalAt(slot->index());
+
+  } else {
+    // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
+    // here.
+    //
+    // The use of SlotOperand below is safe for an unspilled frame
+    // because it will always be a context slot.
+    ASSERT(slot->type() == Slot::CONTEXT);
+    Result temp = allocator_->Allocate();
+    ASSERT(temp.is_valid());
+    __ mov(temp.reg(), SlotOperand(slot, temp.reg()));
+    frame_->Push(&temp);
+  }
+}
+
+
+void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
+                                                  TypeofState state) {
+  LoadFromSlot(slot, state);
+
+  // Bail out quickly if we're not using lazy arguments allocation.
+  if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
+
+  // ... or if the slot isn't a non-parameter arguments slot.
+  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
+
+  // Pop the loaded value from the stack.
+  Result value = frame_->Pop();
+
+  // If the loaded value is a constant, we know whether the arguments
+  // object has been lazily loaded yet.
+  if (value.is_constant()) {
+    if (value.handle()->IsTheHole()) {
+      Result arguments = StoreArgumentsObject(false);
+      frame_->Push(&arguments);
+    } else {
+      frame_->Push(&value);
+    }
+    return;
+  }
+
+  // The loaded value is in a register. If it is the sentinel that
+  // indicates that we haven't loaded the arguments object yet, we
+  // need to do it now.
+  JumpTarget exit;
+  __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
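+  // Push the value back on the frame before branching so that both paths
+  // reach the exit target with the same frame layout.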
+  frame_->Push(&value);
+  exit.Branch(not_equal);
+  Result arguments = StoreArgumentsObject(false);
+  frame_->SetElementAt(0, &arguments);
+  exit.Bind();
+}
+
+
+Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
+    Slot* slot,
+    TypeofState typeof_state,
+    JumpTarget* slow) {
+  // Check that no extension objects have been created by calls to
+  // eval from the current scope to the global scope.
+  Register context = esi;
+  Result tmp = allocator_->Allocate();
+  ASSERT(tmp.is_valid());  // All non-reserved registers were available.
+
+  Scope* s = scope();
+  while (s != NULL) {
+    if (s->num_heap_slots() > 0) {
+      if (s->calls_eval()) {
+        // Check that extension is NULL.
+        __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
+               Immediate(0));
+        slow->Branch(not_equal, not_taken);
+      }
+      // Load next context in chain.
+      __ mov(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
+      __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
+      context = tmp.reg();
+    }
+    // If no outer scope calls eval, we do not need to check more
+    // context extensions.  If we have reached an eval scope, we check
+    // all extensions from this point.
+    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+    s = s->outer_scope();
+  }
+
+  if (s != NULL && s->is_eval_scope()) {
+    // Loop up the context chain.  There is no frame effect so it is
+    // safe to use raw labels here.
+    Label next, fast;
+    if (!context.is(tmp.reg())) {
+      __ mov(tmp.reg(), context);
+    }
+    __ bind(&next);
+    // Terminate at global context.
+    __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
+           Immediate(Factory::global_context_map()));
+    __ j(equal, &fast);
+    // Check that extension is NULL.
+    __ cmp(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
+    slow->Branch(not_equal, not_taken);
+    // Load next context in chain.
+    __ mov(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
+    __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
+    __ jmp(&next);
+    __ bind(&fast);
+  }
+  tmp.Unuse();
+
+  // All extension objects were empty and it is safe to use a global
+  // load IC call.
+  LoadGlobal();
+  frame_->Push(slot->var()->name());
+  RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
+                         ? RelocInfo::CODE_TARGET
+                         : RelocInfo::CODE_TARGET_CONTEXT;
+  Result answer = frame_->CallLoadIC(mode);
+  // A test eax instruction following the call signals that the inobject
+  // property case was inlined.  Ensure that there is not a test eax
+  // instruction here.
+  __ nop();
+  // Discard the global object. The result is in answer.
+  frame_->Drop();
+  return answer;
+}
+
+
+void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
+  if (slot->type() == Slot::LOOKUP) {
+    ASSERT(slot->var()->is_dynamic());
+
+    // For now, just do a runtime call.  Since the call is inevitable,
+    // we eagerly sync the virtual frame so we can directly push the
+    // arguments into place.
+    frame_->SyncRange(0, frame_->element_count() - 1);
+
+    frame_->EmitPush(esi);
+    frame_->EmitPush(Immediate(slot->var()->name()));
+
+    Result value;
+    if (init_state == CONST_INIT) {
+      // Same as the case for a normal store, but ignores attribute
+      // (e.g. READ_ONLY) of context slot so that we can initialize const
+      // properties (introduced via eval("const foo = (some expr);")). Also,
+      // uses the current function context instead of the top context.
+      //
+      // Note that we must declare foo upon entry of eval(), via a context
+      // slot declaration, but we cannot initialize it at the same time,
+      // because the const declaration may be at the end of the eval code
+      // (sigh...) and the const variable may have been used before that
+      // point (where its value is 'undefined').  Thus, we can only do the
+      // initialization when we actually encounter the expression and when
+      // the expression operands are defined and valid, which is why it is
+      // split into two operations: declaration of the context slot followed
+      // by initialization.
+      value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+    } else {
+      value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
+    }
+    // Storing a variable must keep the (new) value on the expression
+    // stack. This is necessary for compiling chained assignment
+    // expressions.
+    frame_->Push(&value);
+
+  } else {
+    ASSERT(!slot->var()->is_dynamic());
+
+    JumpTarget exit;
+    if (init_state == CONST_INIT) {
+      ASSERT(slot->var()->mode() == Variable::CONST);
+      // Only the first const initialization must be executed (the slot
+      // still contains 'the hole' value). When the assignment is executed,
+      // the code is identical to a normal store (see below).
+      //
+      // We spill the frame in the code below because the direct-frame
+      // access of SlotOperand is potentially unsafe with an unspilled
+      // frame.
+      VirtualFrame::SpilledScope spilled_scope;
+      Comment cmnt(masm_, "[ Init const");
+      __ mov(ecx, SlotOperand(slot, ecx));
+      __ cmp(ecx, Factory::the_hole_value());
+      exit.Branch(not_equal);
+    }
+
+    // We must execute the store.  Storing a variable must keep the (new)
+    // value on the stack. This is necessary for compiling assignment
+    // expressions.
+    //
+    // Note: We will reach here even with slot->var()->mode() ==
+    // Variable::CONST because of const declarations which will initialize
+    // consts to 'the hole' value and by doing so, end up calling this code.
+    if (slot->type() == Slot::PARAMETER) {
+      frame_->StoreToParameterAt(slot->index());
+    } else if (slot->type() == Slot::LOCAL) {
+      frame_->StoreToLocalAt(slot->index());
+    } else {
+      // The other slot types (LOOKUP and GLOBAL) cannot reach here.
+      //
+      // The use of SlotOperand below is safe for an unspilled frame
+      // because the slot is a context slot.
+      ASSERT(slot->type() == Slot::CONTEXT);
+      frame_->Dup();
+      Result value = frame_->Pop();
+      value.ToRegister();
+      Result start = allocator_->Allocate();
+      ASSERT(start.is_valid());
+      __ mov(SlotOperand(slot, start.reg()), value.reg());
+      // RecordWrite may destroy the value registers.
+      //
+      // TODO(204): Avoid actually spilling when the value is not
+      // needed (probably the common case).
+      frame_->Spill(value.reg());
+      int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+      Result temp = allocator_->Allocate();
+      ASSERT(temp.is_valid());
+      __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
+      // The results start, value, and temp are all released when they go
+      // out of scope.
+    }
+
+    exit.Bind();
+  }
+}
+
+
+void CodeGenerator::VisitSlot(Slot* node) {
+  Comment cmnt(masm_, "[ Slot");
+  LoadFromSlotCheckForArguments(node, typeof_state());
+}
+
+
+void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
+  Comment cmnt(masm_, "[ VariableProxy");
+  Variable* var = node->var();
+  Expression* expr = var->rewrite();
+  if (expr != NULL) {
+    Visit(expr);
+  } else {
+    ASSERT(var->is_global());
+    Reference ref(this, node);
+    ref.GetValue(typeof_state());
+  }
+}
+
+
+void CodeGenerator::VisitLiteral(Literal* node) {
+  Comment cmnt(masm_, "[ Literal");
+  frame_->Push(node->handle());
+}
+
+
+void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
+  ASSERT(target.is_valid());
+  ASSERT(value->IsSmi());
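+  // Load the smi in two halves so the full 32-bit value never appears as a
+  // single immediate in the generated code.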
+  int bits = reinterpret_cast<int>(*value);
+  __ Set(target, Immediate(bits & 0x0000FFFF));
+  __ xor_(target, bits & 0xFFFF0000);
+}
+
+
+bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
+  if (!value->IsSmi()) return false;
+  int int_value = Smi::cast(*value)->value();
+  return !is_intn(int_value, kMaxSmiInlinedBits);
+}
+
+
+// Materialize the regexp literal 'node' in the literals array
+// 'literals' of the function.  Leave the regexp boilerplate in
+// 'boilerplate'.
+class DeferredRegExpLiteral: public DeferredCode {
+ public:
+  DeferredRegExpLiteral(Register boilerplate,
+                        Register literals,
+                        RegExpLiteral* node)
+      : boilerplate_(boilerplate), literals_(literals), node_(node) {
+    set_comment("[ DeferredRegExpLiteral");
+  }
+
+  void Generate();
+
+ private:
+  Register boilerplate_;
+  Register literals_;
+  RegExpLiteral* node_;
+};
+
+
+void DeferredRegExpLiteral::Generate() {
+  // Since the entry is undefined we call the runtime system to
+  // compute the literal.
+  // Literal array (0).
+  __ push(literals_);
+  // Literal index (1).
+  __ push(Immediate(Smi::FromInt(node_->literal_index())));
+  // RegExp pattern (2).
+  __ push(Immediate(node_->pattern()));
+  // RegExp flags (3).
+  __ push(Immediate(node_->flags()));
+  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+  if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
+}
+
+
+void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
+  Comment cmnt(masm_, "[ RegExp Literal");
+
+  // Retrieve the literals array and check the allocated entry.  Begin
+  // with a writable copy of the function of this activation in a
+  // register.
+  frame_->PushFunction();
+  Result literals = frame_->Pop();
+  literals.ToRegister();
+  frame_->Spill(literals.reg());
+
+  // Load the literals array of the function.
+  __ mov(literals.reg(),
+         FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
+
+  // Load the literal at the ast saved index.
+  Result boilerplate = allocator_->Allocate();
+  ASSERT(boilerplate.is_valid());
+  int literal_offset =
+      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
+  __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
+
+  // Check whether we need to materialize the RegExp object.  If so,
+  // jump to the deferred code passing the literals array.
+  DeferredRegExpLiteral* deferred =
+      new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
+  __ cmp(boilerplate.reg(), Factory::undefined_value());
+  deferred->Branch(equal);
+  deferred->BindExit();
+  literals.Unuse();
+
+  // Push the boilerplate object.
+  frame_->Push(&boilerplate);
+}
+
+
+// Materialize the object literal 'node' in the literals array
+// 'literals' of the function.  Leave the object boilerplate in
+// 'boilerplate'.
+class DeferredObjectLiteral: public DeferredCode {
+ public:
+  DeferredObjectLiteral(Register boilerplate,
+                        Register literals,
+                        ObjectLiteral* node)
+      : boilerplate_(boilerplate), literals_(literals), node_(node) {
+    set_comment("[ DeferredObjectLiteral");
+  }
+
+  void Generate();
+
+ private:
+  Register boilerplate_;
+  Register literals_;
+  ObjectLiteral* node_;
+};
+
+
+void DeferredObjectLiteral::Generate() {
+  // Since the entry is undefined we call the runtime system to
+  // compute the literal.
+  // Literal array (0).
+  __ push(literals_);
+  // Literal index (1).
+  __ push(Immediate(Smi::FromInt(node_->literal_index())));
+  // Constant properties (2).
+  __ push(Immediate(node_->constant_properties()));
+  __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+  if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
+}
+
+
+void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
+  Comment cmnt(masm_, "[ ObjectLiteral");
+
+  // Retrieve the literals array and check the allocated entry.  Begin
+  // with a writable copy of the function of this activation in a
+  // register.
+  frame_->PushFunction();
+  Result literals = frame_->Pop();
+  literals.ToRegister();
+  frame_->Spill(literals.reg());
+
+  // Load the literals array of the function.
+  __ mov(literals.reg(),
+         FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
+
+  // Load the literal at the ast saved index.
+  Result boilerplate = allocator_->Allocate();
+  ASSERT(boilerplate.is_valid());
+  int literal_offset =
+      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
+  __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
+
+  // Check whether we need to materialize the object literal boilerplate.
+  // If so, jump to the deferred code passing the literals array.
+  DeferredObjectLiteral* deferred =
+      new DeferredObjectLiteral(boilerplate.reg(), literals.reg(), node);
+  __ cmp(boilerplate.reg(), Factory::undefined_value());
+  deferred->Branch(equal);
+  deferred->BindExit();
+  literals.Unuse();
+
+  // Push the boilerplate object.
+  frame_->Push(&boilerplate);
+  // Clone the boilerplate object.
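+  // A literal of depth 1 contains no nested object or array literals, so a
+  // shallow clone of the boilerplate is sufficient.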
+  Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
+  if (node->depth() == 1) {
+    clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
+  }
+  Result clone = frame_->CallRuntime(clone_function_id, 1);
+  // Push the newly cloned literal object as the result.
+  frame_->Push(&clone);
+
+  for (int i = 0; i < node->properties()->length(); i++) {
+    ObjectLiteral::Property* property = node->properties()->at(i);
+    switch (property->kind()) {
+      case ObjectLiteral::Property::CONSTANT:
+        break;
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+        if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
+        // else fall through.
+      case ObjectLiteral::Property::COMPUTED: {
+        Handle<Object> key(property->key()->handle());
+        if (key->IsSymbol()) {
+          // Duplicate the object as the IC receiver.
+          frame_->Dup();
+          Load(property->value());
+          frame_->Push(key);
+          Result ignored = frame_->CallStoreIC();
+          // Drop the duplicated receiver and ignore the result.
+          frame_->Drop();
+          break;
+        }
+        // Fall through
+      }
+      case ObjectLiteral::Property::PROTOTYPE: {
+        // Duplicate the object as an argument to the runtime call.
+        frame_->Dup();
+        Load(property->key());
+        Load(property->value());
+        Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 3);
+        // Ignore the result.
+        break;
+      }
+      case ObjectLiteral::Property::SETTER: {
+        // Duplicate the object as an argument to the runtime call.
+        frame_->Dup();
+        Load(property->key());
+        frame_->Push(Smi::FromInt(1));
+        Load(property->value());
+        Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
+        // Ignore the result.
+        break;
+      }
+      case ObjectLiteral::Property::GETTER: {
+        // Duplicate the object as an argument to the runtime call.
+        frame_->Dup();
+        Load(property->key());
+        frame_->Push(Smi::FromInt(0));
+        Load(property->value());
+        Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
+        // Ignore the result.
+        break;
+      }
+      default: UNREACHABLE();
+    }
+  }
+}
+
+
+// Materialize the array literal 'node' in the literals array 'literals'
+// of the function.  Leave the array boilerplate in 'boilerplate'.
+class DeferredArrayLiteral: public DeferredCode {
+ public:
+  DeferredArrayLiteral(Register boilerplate,
+                       Register literals,
+                       ArrayLiteral* node)
+      : boilerplate_(boilerplate), literals_(literals), node_(node) {
+    set_comment("[ DeferredArrayLiteral");
+  }
+
+  void Generate();
+
+ private:
+  Register boilerplate_;
+  Register literals_;
+  ArrayLiteral* node_;
+};
+
+
+void DeferredArrayLiteral::Generate() {
+  // Since the entry is undefined we call the runtime system to
+  // compute the literal.
+  // Literal array (0).
+  __ push(literals_);
+  // Literal index (1).
+  __ push(Immediate(Smi::FromInt(node_->literal_index())));
+  // Constant elements (2).
+  __ push(Immediate(node_->literals()));
+  __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
+  if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
+}
+
+
+void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
+  Comment cmnt(masm_, "[ ArrayLiteral");
+
+  // Retrieve the literals array and check the allocated entry.  Begin
+  // with a writable copy of the function of this activation in a
+  // register.
+  frame_->PushFunction();
+  Result literals = frame_->Pop();
+  literals.ToRegister();
+  frame_->Spill(literals.reg());
+
+  // Load the literals array of the function.
+  __ mov(literals.reg(),
+         FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
+
+  // Load the literal at the ast saved index.
+  Result boilerplate = allocator_->Allocate();
+  ASSERT(boilerplate.is_valid());
+  int literal_offset =
+      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
+  __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
+
+  // Check whether we need to materialize the array literal boilerplate.
+  // If so, jump to the deferred code passing the literals array.
+  DeferredArrayLiteral* deferred =
+      new DeferredArrayLiteral(boilerplate.reg(), literals.reg(), node);
+  __ cmp(boilerplate.reg(), Factory::undefined_value());
+  deferred->Branch(equal);
+  deferred->BindExit();
+  literals.Unuse();
+
+  // Push the resulting array literal boilerplate on the stack.
+  frame_->Push(&boilerplate);
+  // Clone the boilerplate object.
+  Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
+  if (node->depth() == 1) {
+    clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
+  }
+  Result clone = frame_->CallRuntime(clone_function_id, 1);
+  // Push the newly cloned literal object as the result.
+  frame_->Push(&clone);
+
+  // Generate code to set the elements in the array that are not
+  // literals.
+  for (int i = 0; i < node->values()->length(); i++) {
+    Expression* value = node->values()->at(i);
+
+    // If value is a literal the property value is already set in the
+    // boilerplate object.
+    if (value->AsLiteral() != NULL) continue;
+    // If value is a materialized literal the property value is already set
+    // in the boilerplate object if it is simple.
+    if (CompileTimeValue::IsCompileTimeValue(value)) continue;
+
+    // The property must be set by generated code.
+    Load(value);
+
+    // Get the property value off the stack.
+    Result prop_value = frame_->Pop();
+    prop_value.ToRegister();
+
+    // Fetch the array literal while leaving a copy on the stack and
+    // use it to get the elements array.
+    frame_->Dup();
+    Result elements = frame_->Pop();
+    elements.ToRegister();
+    frame_->Spill(elements.reg());
+    // Get the elements array.
+    __ mov(elements.reg(),
+           FieldOperand(elements.reg(), JSObject::kElementsOffset));
+
+    // Write to the indexed properties array.
+    int offset = i * kPointerSize + FixedArray::kHeaderSize;
+    __ mov(FieldOperand(elements.reg(), offset), prop_value.reg());
+
+    // Update the write barrier for the array address.
+    frame_->Spill(prop_value.reg());  // Overwritten by the write barrier.
+    Result scratch = allocator_->Allocate();
+    ASSERT(scratch.is_valid());
+    __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
+  }
+}
+
+
+void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
+  ASSERT(!in_spilled_code());
+  // Call runtime routine to allocate the catch extension object and
+  // assign the exception value to the catch variable.
+  Comment cmnt(masm_, "[ CatchExtensionObject");
+  Load(node->key());
+  Load(node->value());
+  Result result =
+      frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
+  frame_->Push(&result);
+}
+
+
+void CodeGenerator::VisitAssignment(Assignment* node) {
+  Comment cmnt(masm_, "[ Assignment");
+
+  { Reference target(this, node->target());
+    if (target.is_illegal()) {
+      // Fool the virtual frame into thinking that we left the assignment's
+      // value on the frame.
+      frame_->Push(Smi::FromInt(0));
+      return;
+    }
+    Variable* var = node->target()->AsVariableProxy()->AsVariable();
+
+    if (node->starts_initialization_block()) {
+      ASSERT(target.type() == Reference::NAMED ||
+             target.type() == Reference::KEYED);
+      // Change to slow case in the beginning of an initialization
+      // block to avoid the quadratic behavior of repeatedly adding
+      // fast properties.
+
+      // The receiver is the argument to the runtime call.  It is the
+      // first value pushed when the reference was loaded to the
+      // frame.
+      frame_->PushElementAt(target.size() - 1);
+      Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
+    }
+    if (node->op() == Token::ASSIGN ||
+        node->op() == Token::INIT_VAR ||
+        node->op() == Token::INIT_CONST) {
+      Load(node->value());
+
+    } else {
+      Literal* literal = node->value()->AsLiteral();
+      bool overwrite_value =
+          (node->value()->AsBinaryOperation() != NULL &&
+           node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+      Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
+      // There are two easy-to-test cases where the target is not read by
+      // the right-hand side: the right-hand side is a literal, or the
+      // right-hand side is a different variable.  TakeValue invalidates
+      // the target, with an implicit promise that it will be written to
+      // again before it is read.
+      if (literal != NULL || (right_var != NULL && right_var != var)) {
+        target.TakeValue(NOT_INSIDE_TYPEOF);
+      } else {
+        target.GetValue(NOT_INSIDE_TYPEOF);
+      }
+      Load(node->value());
+      GenericBinaryOperation(node->binary_op(),
+                             node->type(),
+                             overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+    }
+
+    if (var != NULL &&
+        var->mode() == Variable::CONST &&
+        node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
+      // Assignment ignored - leave the value on the stack.
+    } else {
+      CodeForSourcePosition(node->position());
+      if (node->op() == Token::INIT_CONST) {
+        // Dynamic constant initializations must use the function context
+        // and initialize the actual constant declared. Dynamic variable
+        // initializations are simply assignments and use SetValue.
+        target.SetValue(CONST_INIT);
+      } else {
+        target.SetValue(NOT_CONST_INIT);
+      }
+      if (node->ends_initialization_block()) {
+        ASSERT(target.type() == Reference::NAMED ||
+               target.type() == Reference::KEYED);
+        // End of initialization block. Revert to fast case.  The
+        // argument to the runtime call is the receiver, which is the
+        // first value pushed as part of the reference, which is below
+        // the lhs value.
+        frame_->PushElementAt(target.size());
+        Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
+      }
+    }
+  }
+}
+
+
+void CodeGenerator::VisitThrow(Throw* node) {
+  Comment cmnt(masm_, "[ Throw");
+  Load(node->exception());
+  Result result = frame_->CallRuntime(Runtime::kThrow, 1);
+  frame_->Push(&result);
+}
+
+
+void CodeGenerator::VisitProperty(Property* node) {
+  Comment cmnt(masm_, "[ Property");
+  Reference property(this, node);
+  property.GetValue(typeof_state());
+}
+
+
+void CodeGenerator::VisitCall(Call* node) {
+  Comment cmnt(masm_, "[ Call");
+
+  Expression* function = node->expression();
+  ZoneList<Expression*>* args = node->arguments();
+
+  // Check if the function is a variable or a property.
+  Variable* var = function->AsVariableProxy()->AsVariable();
+  Property* property = function->AsProperty();
+
+  // ------------------------------------------------------------------------
+  // Fast-case: Use inline caching.
+  // ---
+  // According to ECMA-262, section 11.2.3, page 44, the function to call
+  // must be resolved after the arguments have been evaluated. The IC code
+  // automatically handles this by loading the arguments before the function
+  // is resolved in cache misses (this also holds for megamorphic calls).
+  // ------------------------------------------------------------------------
+
+  if (var != NULL && var->is_possibly_eval()) {
+    // ----------------------------------
+    // JavaScript example: 'eval(arg)'  // eval is not known to be shadowed
+    // ----------------------------------
+
+    // In a call to eval, we first call %ResolvePossiblyDirectEval to
+    // resolve the function we need to call and the receiver of the
+    // call.  Then we call the resolved function using the given
+    // arguments.
+
+    // Prepare the stack for the call to the resolved function.
+    Load(function);
+
+    // Allocate a frame slot for the receiver.
+    frame_->Push(Factory::undefined_value());
+    int arg_count = args->length();
+    for (int i = 0; i < arg_count; i++) {
+      Load(args->at(i));
+    }
+
+    // Prepare the stack for the call to ResolvePossiblyDirectEval.
+    frame_->PushElementAt(arg_count + 1);
+    if (arg_count > 0) {
+      frame_->PushElementAt(arg_count);
+    } else {
+      frame_->Push(Factory::undefined_value());
+    }
+
+    // Resolve the call.
+    Result result =
+        frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
+
+    // Touch up the stack with the right values for the function and the
+    // receiver.  Use a scratch register to avoid destroying the result.
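+    // The runtime call returns the resolved function at element 0 and the
+    // receiver at element 1 of the result.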
+    Result scratch = allocator_->Allocate();
+    ASSERT(scratch.is_valid());
+    __ mov(scratch.reg(), FieldOperand(result.reg(), FixedArray::kHeaderSize));
+    frame_->SetElementAt(arg_count + 1, &scratch);
+
+    // We can reuse the result register now.
+    frame_->Spill(result.reg());
+    __ mov(result.reg(),
+           FieldOperand(result.reg(), FixedArray::kHeaderSize + kPointerSize));
+    frame_->SetElementAt(arg_count, &result);
+
+    // Call the function.
+    CodeForSourcePosition(node->position());
+    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+    CallFunctionStub call_function(arg_count, in_loop);
+    result = frame_->CallStub(&call_function, arg_count + 1);
+
+    // Restore the context and overwrite the function on the stack with
+    // the result.
+    frame_->RestoreContextRegister();
+    frame_->SetElementAt(0, &result);
+
+  } else if (var != NULL && !var->is_this() && var->is_global()) {
+    // ----------------------------------
+    // JavaScript example: 'foo(1, 2, 3)'  // foo is global
+    // ----------------------------------
+
+    // Push the name of the function and the receiver onto the stack.
+    frame_->Push(var->name());
+
+    // Pass the global object as the receiver and let the IC stub
+    // patch the stack to use the global proxy as 'this' in the
+    // invoked function.
+    LoadGlobal();
+
+    // Load the arguments.
+    int arg_count = args->length();
+    for (int i = 0; i < arg_count; i++) {
+      Load(args->at(i));
+    }
+
+    // Call the IC initialization code.
+    CodeForSourcePosition(node->position());
+    Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
+                                       arg_count,
+                                       loop_nesting());
+    frame_->RestoreContextRegister();
+    // Replace the function on the stack with the result.
+    frame_->SetElementAt(0, &result);
+
+  } else if (var != NULL && var->slot() != NULL &&
+             var->slot()->type() == Slot::LOOKUP) {
+    // ----------------------------------
+    // JavaScript example: 'with (obj) foo(1, 2, 3)'  // foo is in obj
+    // ----------------------------------
+
+    // Load the function from the context.  Sync the frame so we can
+    // push the arguments directly into place.
+    frame_->SyncRange(0, frame_->element_count() - 1);
+    frame_->EmitPush(esi);
+    frame_->EmitPush(Immediate(var->name()));
+    frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
+    // The runtime call returns a pair of values in eax and edx.  The
+    // looked-up function is in eax and the receiver is in edx.  These
+    // register references are not ref counted here.  We spill them
+    // eagerly since they are arguments to an inevitable call (and are
+    // not sharable by the arguments).
+    ASSERT(!allocator()->is_used(eax));
+    frame_->EmitPush(eax);
+
+    // Load the receiver.
+    ASSERT(!allocator()->is_used(edx));
+    frame_->EmitPush(edx);
+
+    // Call the function.
+    CallWithArguments(args, node->position());
+
+  } else if (property != NULL) {
+    // Check if the key is a literal string.
+    Literal* literal = property->key()->AsLiteral();
+
+    if (literal != NULL && literal->handle()->IsSymbol()) {
+      // ------------------------------------------------------------------
+      // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
+      // ------------------------------------------------------------------
+
+      Handle<String> name = Handle<String>::cast(literal->handle());
+
+      if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
+          name->IsEqualTo(CStrVector("apply")) &&
+          args->length() == 2 &&
+          args->at(1)->AsVariableProxy() != NULL &&
+          args->at(1)->AsVariableProxy()->IsArguments()) {
+        // Use the optimized Function.prototype.apply that avoids
+        // materializing the lazily allocated arguments object.
+        CallApplyLazy(property,
+                      args->at(0),
+                      args->at(1)->AsVariableProxy(),
+                      node->position());
+
+      } else {
+        // Push the name of the function and the receiver onto the stack.
+        frame_->Push(name);
+        Load(property->obj());
+
+        // Load the arguments.
+        int arg_count = args->length();
+        for (int i = 0; i < arg_count; i++) {
+          Load(args->at(i));
+        }
+
+        // Call the IC initialization code.
+        CodeForSourcePosition(node->position());
+        Result result =
+            frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count,
+                               loop_nesting());
+        frame_->RestoreContextRegister();
+        // Replace the function on the stack with the result.
+        frame_->SetElementAt(0, &result);
+      }
+
+    } else {
+      // -------------------------------------------
+      // JavaScript example: 'array[index](1, 2, 3)'
+      // -------------------------------------------
+
+      // Load the function to call from the property through a reference.
+      Reference ref(this, property);
+      ref.GetValue(NOT_INSIDE_TYPEOF);
+
+      // Pass receiver to called function.
+      if (property->is_synthetic()) {
+        // Use global object as receiver.
+        LoadGlobalReceiver();
+      } else {
+        // The reference's size is non-negative.
+        frame_->PushElementAt(ref.size());
+      }
+
+      // Call the function.
+      CallWithArguments(args, node->position());
+    }
+
+  } else {
+    // ----------------------------------
+    // JavaScript example: 'foo(1, 2, 3)'  // foo is not global
+    // ----------------------------------
+
+    // Load the function.
+    Load(function);
+
+    // Pass the global proxy as the receiver.
+    LoadGlobalReceiver();
+
+    // Call the function.
+    CallWithArguments(args, node->position());
+  }
+}
+
+
+void CodeGenerator::VisitCallNew(CallNew* node) {
+  Comment cmnt(masm_, "[ CallNew");
+
+  // According to ECMA-262, section 11.2.2, page 44, the function
+  // expression in new calls must be evaluated before the
+  // arguments. This is different from ordinary calls, where the
+  // actual function to call is resolved after the arguments have been
+  // evaluated.
+
+  // Compute function to call and use the global object as the
+  // receiver. There is no need to use the global proxy here because
+  // it will always be replaced with a newly allocated object.
+  Load(node->expression());
+  LoadGlobal();
+
+  // Push the arguments ("left-to-right") on the stack.
+  ZoneList<Expression*>* args = node->arguments();
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Load(args->at(i));
+  }
+
+  // Call the construct call builtin that handles allocation and
+  // constructor invocation.
+  CodeForSourcePosition(node->position());
+  Result result = frame_->CallConstructor(arg_count);
+  // Replace the function on the stack with the result.
+  frame_->SetElementAt(0, &result);
+}
+
+
+void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  Load(args->at(0));
+  Result value = frame_->Pop();
+  value.ToRegister();
+  ASSERT(value.is_valid());
+  __ test(value.reg(), Immediate(kSmiTagMask));
+  value.Unuse();
+  destination()->Split(zero);
+}
+
+
+void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
+  // Conditionally generate a log call.
+  // Args:
+  //   0 (literal string): The type of logging (corresponds to the flags).
+  //     This is used to determine whether or not to generate the log call.
+  //   1 (string): Format string.  Access the string at argument index 2
+  //     with '%2s' (see Logger::LogRuntime for all the formats).
+  //   2 (array): Arguments to the format string.
+  ASSERT_EQ(args->length(), 3);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (ShouldGenerateLog(args->at(0))) {
+    Load(args->at(1));
+    Load(args->at(2));
+    frame_->CallRuntime(Runtime::kLog, 2);
+  }
+#endif
+  // Finally, we're expected to leave a value on the top of the stack.
+  frame_->Push(Factory::undefined_value());
+}
+
+
+void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  Load(args->at(0));
+  Result value = frame_->Pop();
+  value.ToRegister();
+  ASSERT(value.is_valid());
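+  // A non-negative smi has both the smi tag bit and the sign bit clear,
+  // so a single test against (kSmiTagMask | 0x80000000) checks both.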
+  __ test(value.reg(), Immediate(kSmiTagMask | 0x80000000));
+  value.Unuse();
+  destination()->Split(zero);
+}
+
+
+// This generates code that performs a charCodeAt() call or returns
+// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
+// It can handle flat and sliced strings, 8 and 16 bit characters and
+// cons strings where the answer is found in the left hand branch of the
+// cons.  The slow case will flatten the string, which will ensure that
+// the answer is in the left hand side the next time around.
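+// JavaScript example: '"abc".charCodeAt(1)' takes the fast path and
+// yields 98 when the receiver is a flat string and the index is an
+// in-range smi.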
+void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
+  Comment cmnt(masm_, "[ GenerateFastCharCodeAt");
+  ASSERT(args->length() == 2);
+
+  Label slow_case;
+  Label end;
+  Label not_a_flat_string;
+  Label a_cons_string;
+  Label try_again_with_new_string;
+  Label ascii_string;
+  Label got_char_code;
+
+  Load(args->at(0));
+  Load(args->at(1));
+  Result index = frame_->Pop();
+  Result object = frame_->Pop();
+
+  // Get register ecx to use as shift amount later.
+  Result shift_amount;
+  if (object.is_register() && object.reg().is(ecx)) {
+    Result fresh = allocator_->Allocate();
+    shift_amount = object;
+    object = fresh;
+    __ mov(object.reg(), ecx);
+  }
+  if (index.is_register() && index.reg().is(ecx)) {
+    Result fresh = allocator_->Allocate();
+    shift_amount = index;
+    index = fresh;
+    __ mov(index.reg(), ecx);
+  }
+  // There could be references to ecx in the frame. Allocating will
+  // spill them, otherwise spill explicitly.
+  if (shift_amount.is_valid()) {
+    frame_->Spill(ecx);
+  } else {
+    shift_amount = allocator()->Allocate(ecx);
+  }
+  ASSERT(shift_amount.is_register());
+  ASSERT(shift_amount.reg().is(ecx));
+  ASSERT(allocator_->count(ecx) == 1);
+
+  // We will mutate the index register and possibly the object register.
+  // The case where they are somehow the same register is handled
+  // because we only mutate them in the case where the receiver is a
+  // heap object and the index is not (i.e. the index is a smi).
+  object.ToRegister();
+  index.ToRegister();
+  frame_->Spill(object.reg());
+  frame_->Spill(index.reg());
+
+  // We need a single extra temporary register.
+  Result temp = allocator()->Allocate();
+  ASSERT(temp.is_valid());
+
+  // There is no virtual frame effect from here up to the final result
+  // push.
+
+  // If the receiver is a smi trigger the slow case.
+  ASSERT(kSmiTag == 0);
+  __ test(object.reg(), Immediate(kSmiTagMask));
+  __ j(zero, &slow_case);
+
+  // If the index is negative or non-smi trigger the slow case.
+  ASSERT(kSmiTag == 0);
+  __ test(index.reg(), Immediate(kSmiTagMask | 0x80000000));
+  __ j(not_zero, &slow_case);
+  // Untag the index.
+  __ sar(index.reg(), kSmiTagSize);
+
+  __ bind(&try_again_with_new_string);
+  // Fetch the instance type of the receiver into ecx.
+  __ mov(ecx, FieldOperand(object.reg(), HeapObject::kMapOffset));
+  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+  // If the receiver is not a string trigger the slow case.
+  __ test(ecx, Immediate(kIsNotStringMask));
+  __ j(not_zero, &slow_case);
+
+  // Here we make assumptions about the tag values and the shifts needed.
+  // See the comment in objects.h.
+  ASSERT(kLongStringTag == 0);
+  ASSERT(kMediumStringTag + String::kLongLengthShift ==
+         String::kMediumLengthShift);
+  ASSERT(kShortStringTag + String::kLongLengthShift ==
+         String::kShortLengthShift);
+  __ and_(ecx, kStringSizeMask);
+  __ add(Operand(ecx), Immediate(String::kLongLengthShift));
+  // Fetch the length field into the temporary register.
+  __ mov(temp.reg(), FieldOperand(object.reg(), String::kLengthOffset));
+  __ shr(temp.reg());  // The shift amount in ecx is an implicit operand.
+  // Check for index out of range.
+  __ cmp(index.reg(), Operand(temp.reg()));
+  __ j(greater_equal, &slow_case);
+  // Reload the instance type (into the temp register this time).
+  __ mov(temp.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
+  __ movzx_b(temp.reg(), FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
+
+  // We need special handling for non-flat strings.
+  ASSERT(kSeqStringTag == 0);
+  __ test(temp.reg(), Immediate(kStringRepresentationMask));
+  __ j(not_zero, &not_a_flat_string);
+  // Check for 1-byte or 2-byte string.
+  __ test(temp.reg(), Immediate(kStringEncodingMask));
+  __ j(not_zero, &ascii_string);
+
+  // 2-byte string.
+  // Load the 2-byte character code into the temp register.
+  __ movzx_w(temp.reg(), FieldOperand(object.reg(),
+                                      index.reg(),
+                                      times_2,
+                                      SeqTwoByteString::kHeaderSize));
+  __ jmp(&got_char_code);
+
+  // ASCII string.
+  __ bind(&ascii_string);
+  // Load the byte into the temp register.
+  __ movzx_b(temp.reg(), FieldOperand(object.reg(),
+                                      index.reg(),
+                                      times_1,
+                                      SeqAsciiString::kHeaderSize));
+  __ bind(&got_char_code);
+  ASSERT(kSmiTag == 0);
+  __ shl(temp.reg(), kSmiTagSize);
+  __ jmp(&end);
+
+  // Handle non-flat strings.
+  __ bind(&not_a_flat_string);
+  __ and_(temp.reg(), kStringRepresentationMask);
+  __ cmp(temp.reg(), kConsStringTag);
+  __ j(equal, &a_cons_string);
+  __ cmp(temp.reg(), kSlicedStringTag);
+  __ j(not_equal, &slow_case);
+
+  // SlicedString.
+  // Add the offset to the index and trigger the slow case on overflow.
+  __ add(index.reg(), FieldOperand(object.reg(), SlicedString::kStartOffset));
+  __ j(overflow, &slow_case);
+  // Getting the underlying string is done by running the cons string code.
+
+  // ConsString.
+  __ bind(&a_cons_string);
+  // Get the first of the two strings.  Both sliced and cons strings
+  // store their source string at the same offset.
+  ASSERT(SlicedString::kBufferOffset == ConsString::kFirstOffset);
+  __ mov(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
+  __ jmp(&try_again_with_new_string);
+
+  __ bind(&slow_case);
+  // Move the undefined value into the result register, which will
+  // trigger the slow case.
+  __ Set(temp.reg(), Immediate(Factory::undefined_value()));
+
+  __ bind(&end);
+  frame_->Push(&temp);
+}
+
+
+void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  Load(args->at(0));
+  Result value = frame_->Pop();
+  value.ToRegister();
+  ASSERT(value.is_valid());
+  __ test(value.reg(), Immediate(kSmiTagMask));
+  destination()->false_target()->Branch(equal);
+  // It is a heap object - get map.
+  Result temp = allocator()->Allocate();
+  ASSERT(temp.is_valid());
+  // Check if the object is a JS array or not.
+  __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, temp.reg());
+  value.Unuse();
+  temp.Unuse();
+  destination()->Split(equal);
+}
+
+
+void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+
+  // Get the frame pointer for the calling frame.
+  Result fp = allocator()->Allocate();
+  __ mov(fp.reg(), Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+
+  // Skip the arguments adaptor frame if it exists.
+  Label check_frame_marker;
+  __ cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
+         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(not_equal, &check_frame_marker);
+  __ mov(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
+
+  // Check the marker in the calling frame.
+  __ bind(&check_frame_marker);
+  __ cmp(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
+         Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
+  fp.Unuse();
+  destination()->Split(equal);
+}
+
+
+void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+  // ArgumentsAccessStub takes the parameter count as an input argument
+  // in register eax.  Create a constant result for it.
+  Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
+  // Call the shared stub to get the arguments.length.
+  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
+  Result result = frame_->CallStub(&stub, &count);
+  frame_->Push(&result);
+}
+
+
+void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  JumpTarget leave, null, function, non_function_constructor;
+  Load(args->at(0));  // Load the object.
+  Result obj = frame_->Pop();
+  obj.ToRegister();
+  frame_->Spill(obj.reg());
+
+  // If the object is a smi, we return null.
+  __ test(obj.reg(), Immediate(kSmiTagMask));
+  null.Branch(zero);
+
+  // Check that the object is a JS object but take special care of JS
+  // functions to make sure they have 'Function' as their class.
+  { Result tmp = allocator()->Allocate();
+    __ mov(obj.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
+    __ movzx_b(tmp.reg(), FieldOperand(obj.reg(), Map::kInstanceTypeOffset));
+    __ cmp(tmp.reg(), FIRST_JS_OBJECT_TYPE);
+    null.Branch(less);
+
+    // As long as JS_FUNCTION_TYPE is the last instance type and it is
+    // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+    // LAST_JS_OBJECT_TYPE.
+    ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+    ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+    __ cmp(tmp.reg(), JS_FUNCTION_TYPE);
+    function.Branch(equal);
+  }
+
+  // Check if the constructor in the map is a function.
+  { Result tmp = allocator()->Allocate();
+    __ mov(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
+    __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, tmp.reg());
+    non_function_constructor.Branch(not_equal);
+  }
+
+  // The map register now contains the constructor function. Grab the
+  // instance class name from there.
+  __ mov(obj.reg(),
+         FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
+  __ mov(obj.reg(),
+         FieldOperand(obj.reg(), SharedFunctionInfo::kInstanceClassNameOffset));
+  frame_->Push(&obj);
+  leave.Jump();
+
+  // Functions have class 'Function'.
+  function.Bind();
+  frame_->Push(Factory::function_class_symbol());
+  leave.Jump();
+
+  // Objects with a non-function constructor have class 'Object'.
+  non_function_constructor.Bind();
+  frame_->Push(Factory::Object_symbol());
+  leave.Jump();
+
+  // Non-JS objects have class null.
+  null.Bind();
+  frame_->Push(Factory::null_value());
+
+  // All done.
+  leave.Bind();
+}
+
+
+void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  JumpTarget leave;
+  Load(args->at(0));  // Load the object.
+  frame_->Dup();
+  Result object = frame_->Pop();
+  object.ToRegister();
+  ASSERT(object.is_valid());
+  // if (object->IsSmi()) return object.
+  __ test(object.reg(), Immediate(kSmiTagMask));
+  leave.Branch(zero, taken);
+  // It is a heap object - get map.
+  Result temp = allocator()->Allocate();
+  ASSERT(temp.is_valid());
+  // if (!object->IsJSValue()) return object.
+  __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
+  leave.Branch(not_equal, not_taken);
+  __ mov(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
+  object.Unuse();
+  frame_->SetElementAt(0, &temp);
+  leave.Bind();
+}
+
+
+void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 2);
+  JumpTarget leave;
+  Load(args->at(0));  // Load the object.
+  Load(args->at(1));  // Load the value.
+  Result value = frame_->Pop();
+  Result object = frame_->Pop();
+  value.ToRegister();
+  object.ToRegister();
+
+  // if (object->IsSmi()) return value.
+  __ test(object.reg(), Immediate(kSmiTagMask));
+  leave.Branch(zero, &value, taken);
+
+  // It is a heap object - get its map.
+  Result scratch = allocator_->Allocate();
+  ASSERT(scratch.is_valid());
+  // if (!object->IsJSValue()) return value.
+  __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
+  leave.Branch(not_equal, &value, not_taken);
+
+  // Store the value.
+  __ mov(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
+  // Update the write barrier.  Save the value as it will be
+  // overwritten by the write barrier code and is needed afterward.
+  Result duplicate_value = allocator_->Allocate();
+  ASSERT(duplicate_value.is_valid());
+  __ mov(duplicate_value.reg(), value.reg());
+  // The object register is also overwritten by the write barrier and
+  // possibly aliased in the frame.
+  frame_->Spill(object.reg());
+  __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
+                 scratch.reg());
+  object.Unuse();
+  scratch.Unuse();
+  duplicate_value.Unuse();
+
+  // Leave.
+  leave.Bind(&value);
+  frame_->Push(&value);
+}
+
+
+void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  // ArgumentsAccessStub expects the key in edx and the formal
+  // parameter count in eax.
+  Load(args->at(0));
+  Result key = frame_->Pop();
+  // Explicitly create a constant result.
+  Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
+  // Call the shared stub to get arguments[key].
+  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+  Result result = frame_->CallStub(&stub, &key, &count);
+  frame_->Push(&result);
+}
+
+
+void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 2);
+
+  // Load the two objects into registers and perform the comparison.
+  Load(args->at(0));
+  Load(args->at(1));
+  Result right = frame_->Pop();
+  Result left = frame_->Pop();
+  right.ToRegister();
+  left.ToRegister();
+  __ cmp(right.reg(), Operand(left.reg()));
+  right.Unuse();
+  left.Unuse();
+  destination()->Split(equal);
+}
+
+
+void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+  ASSERT(kSmiTag == 0);  // EBP value is aligned, so it should look like Smi.
+  Result ebp_as_smi = allocator_->Allocate();
+  ASSERT(ebp_as_smi.is_valid());
+  __ mov(ebp_as_smi.reg(), Operand(ebp));
+  frame_->Push(&ebp_as_smi);
+}
+
+
+void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+  frame_->SpillAll();
+
+  // Make sure the frame is aligned like the OS expects.
+  static const int kFrameAlignment = OS::ActivationFrameAlignment();
+  if (kFrameAlignment > 0) {
+    ASSERT(IsPowerOf2(kFrameAlignment));
+    __ mov(edi, Operand(esp));  // Save in callee-saved register.
+    __ and_(esp, -kFrameAlignment);
+  }
+
+  // Call V8::RandomPositiveSmi().
+  __ call(FUNCTION_ADDR(V8::RandomPositiveSmi), RelocInfo::RUNTIME_ENTRY);
+
+  // Restore stack pointer from callee-saved register edi.
+  if (kFrameAlignment > 0) {
+    __ mov(esp, Operand(edi));
+  }
+
+  Result result = allocator_->Allocate(eax);
+  frame_->Push(&result);
+}
+
+
+void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
+  JumpTarget done;
+  JumpTarget call_runtime;
+  ASSERT(args->length() == 1);
+
+  // Load number and duplicate it.
+  Load(args->at(0));
+  frame_->Dup();
+
+  // Get the number into an unaliased register and load it onto the
+  // floating point stack, still leaving one copy on the frame.
+  Result number = frame_->Pop();
+  number.ToRegister();
+  frame_->Spill(number.reg());
+  FloatingPointHelper::LoadFloatOperand(masm_, number.reg());
+  number.Unuse();
+
+  // Perform the operation on the number.
+  switch (op) {
+    case SIN:
+      __ fsin();
+      break;
+    case COS:
+      __ fcos();
+      break;
+  }
+
+  // Go slow case if argument to operation is out of range.
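+  // (fsin/fcos set the C2 flag in the FPU status word when the operand
+  // is out of range; after fnstsw/sahf, C2 shows up as the parity flag.)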
+  Result eax_reg = allocator_->Allocate(eax);
+  ASSERT(eax_reg.is_valid());
+  __ fnstsw_ax();
+  __ sahf();
+  eax_reg.Unuse();
+  call_runtime.Branch(parity_even, not_taken);
+
+  // Allocate heap number for result if possible.
+  Result scratch1 = allocator()->Allocate();
+  Result scratch2 = allocator()->Allocate();
+  Result heap_number = allocator()->Allocate();
+  FloatingPointHelper::AllocateHeapNumber(masm_,
+                                          call_runtime.entry_label(),
+                                          scratch1.reg(),
+                                          scratch2.reg(),
+                                          heap_number.reg());
+  scratch1.Unuse();
+  scratch2.Unuse();
+
+  // Store the result in the allocated heap number.
+  __ fstp_d(FieldOperand(heap_number.reg(), HeapNumber::kValueOffset));
+  // Replace the extra copy of the argument with the result.
+  frame_->SetElementAt(0, &heap_number);
+  done.Jump();
+
+  call_runtime.Bind();
+  // Free ST(0) which was not popped before calling into the runtime.
+  __ ffree(0);
+  Result answer;
+  switch (op) {
+    case SIN:
+      answer = frame_->CallRuntime(Runtime::kMath_sin, 1);
+      break;
+    case COS:
+      answer = frame_->CallRuntime(Runtime::kMath_cos, 1);
+      break;
+  }
+  frame_->Push(&answer);
+  done.Bind();
+}
+
+
+void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
+  if (CheckForInlineRuntimeCall(node)) {
+    return;
+  }
+
+  ZoneList<Expression*>* args = node->arguments();
+  Comment cmnt(masm_, "[ CallRuntime");
+  Runtime::Function* function = node->function();
+
+  if (function == NULL) {
+    // Prepare stack for calling JS runtime function.
+    frame_->Push(node->name());
+    // Push the builtins object found in the current global object.
+    Result temp = allocator()->Allocate();
+    ASSERT(temp.is_valid());
+    __ mov(temp.reg(), GlobalObject());
+    __ mov(temp.reg(), FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
+    frame_->Push(&temp);
+  }
+
+  // Push the arguments ("left-to-right").
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Load(args->at(i));
+  }
+
+  if (function == NULL) {
+    // Call the JS runtime function.
+    Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
+                                       arg_count,
+                                       loop_nesting_);
+    frame_->RestoreContextRegister();
+    frame_->SetElementAt(0, &answer);
+  } else {
+    // Call the C runtime function.
+    Result answer = frame_->CallRuntime(function, arg_count);
+    frame_->Push(&answer);
+  }
+}
+
+
+void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
+  // Note that because of NOT and an optimization in comparison of a typeof
+  // expression to a literal string, this function can fail to leave a value
+  // on top of the frame or in the cc register.
+  Comment cmnt(masm_, "[ UnaryOperation");
+
+  Token::Value op = node->op();
+
+  if (op == Token::NOT) {
+    // Swap the true and false targets but keep the same actual label
+    // as the fall through.
+    destination()->Invert();
+    LoadCondition(node->expression(), NOT_INSIDE_TYPEOF, destination(), true);
+    // Swap the labels back.
+    destination()->Invert();
+
+  } else if (op == Token::DELETE) {
+    Property* property = node->expression()->AsProperty();
+    if (property != NULL) {
+      Load(property->obj());
+      Load(property->key());
+      Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2);
+      frame_->Push(&answer);
+      return;
+    }
+
+    Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
+    if (variable != NULL) {
+      Slot* slot = variable->slot();
+      if (variable->is_global()) {
+        LoadGlobal();
+        frame_->Push(variable->name());
+        Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
+                                              CALL_FUNCTION, 2);
+        frame_->Push(&answer);
+        return;
+
+      } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+        // Call the runtime to look up the context holding the named
+        // variable.  Sync the virtual frame eagerly so we can push the
+        // arguments directly into place.
+        frame_->SyncRange(0, frame_->element_count() - 1);
+        frame_->EmitPush(esi);
+        frame_->EmitPush(Immediate(variable->name()));
+        Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
+        ASSERT(context.is_register());
+        frame_->EmitPush(context.reg());
+        context.Unuse();
+        frame_->EmitPush(Immediate(variable->name()));
+        Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
+                                              CALL_FUNCTION, 2);
+        frame_->Push(&answer);
+        return;
+      }
+
+      // Default: The result of deleting non-global variables that were
+      // not dynamically introduced is false.
+      frame_->Push(Factory::false_value());
+
+    } else {
+      // Default: Result of deleting expressions is true.
+      Load(node->expression());  // may have side-effects
+      frame_->SetElementAt(0, Factory::true_value());
+    }
+
+  } else if (op == Token::TYPEOF) {
+    // Special case for loading the typeof expression; see comment on
+    // LoadTypeofExpression().
+    LoadTypeofExpression(node->expression());
+    Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
+    frame_->Push(&answer);
+
+  } else if (op == Token::VOID) {
+    Expression* expression = node->expression();
+    if (expression && expression->AsLiteral() && (
+        expression->AsLiteral()->IsTrue() ||
+        expression->AsLiteral()->IsFalse() ||
+        expression->AsLiteral()->handle()->IsNumber() ||
+        expression->AsLiteral()->handle()->IsString() ||
+        expression->AsLiteral()->handle()->IsJSRegExp() ||
+        expression->AsLiteral()->IsNull())) {
+      // Omit evaluating the value of the primitive literal.
+      // It will be discarded anyway, and can have no side effect.
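+      // JavaScript example: 'void 0' compiles to a single push of the
+      // undefined value.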
+      frame_->Push(Factory::undefined_value());
+    } else {
+      Load(node->expression());
+      frame_->SetElementAt(0, Factory::undefined_value());
+    }
+
+  } else {
+    Load(node->expression());
+    switch (op) {
+      case Token::SUB: {
+        bool overwrite =
+            (node->AsBinaryOperation() != NULL &&
+             node->AsBinaryOperation()->ResultOverwriteAllowed());
+        UnarySubStub stub(overwrite);
+        // TODO(1222589): remove dependency of TOS being cached inside stub
+        Result operand = frame_->Pop();
+        Result answer = frame_->CallStub(&stub, &operand);
+        frame_->Push(&answer);
+        break;
+      }
+
+      case Token::BIT_NOT: {
+        // Smi check.
+        JumpTarget smi_label;
+        JumpTarget continue_label;
+        Result operand = frame_->Pop();
+        operand.ToRegister();
+        __ test(operand.reg(), Immediate(kSmiTagMask));
+        smi_label.Branch(zero, &operand, taken);
+
+        frame_->Push(&operand);  // undo popping of TOS
+        Result answer = frame_->InvokeBuiltin(Builtins::BIT_NOT,
+                                              CALL_FUNCTION, 1);
+
+        continue_label.Jump(&answer);
+        smi_label.Bind(&answer);
+        answer.ToRegister();
+        frame_->Spill(answer.reg());
+        __ not_(answer.reg());
+        __ and_(answer.reg(), ~kSmiTagMask);  // Remove inverted smi-tag.
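+        // (Bitwise NOT flips the zero smi tag bit to one; masking it off
+        // again leaves the correctly tagged smi for the inverted value.)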
+        continue_label.Bind(&answer);
+        frame_->Push(&answer);
+        break;
+      }
+
+      case Token::ADD: {
+        // Smi check.
+        JumpTarget continue_label;
+        Result operand = frame_->Pop();
+        operand.ToRegister();
+        __ test(operand.reg(), Immediate(kSmiTagMask));
+        continue_label.Branch(zero, &operand, taken);
+
+        frame_->Push(&operand);
+        Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
+                                              CALL_FUNCTION, 1);
+
+        continue_label.Bind(&answer);
+        frame_->Push(&answer);
+        break;
+      }
+
+      default:
+        // NOT, DELETE, TYPEOF, and VOID are handled outside the
+        // switch.
+        UNREACHABLE();
+    }
+  }
+}
+
+
+// The value in dst was optimistically incremented or decremented.  The
+// result overflowed or was not smi tagged.  Undo the operation, call
+// into the runtime to convert the argument to a number, and call the
+// specialized add or subtract stub.  The result is left in dst.
+class DeferredPrefixCountOperation: public DeferredCode {
+ public:
+  DeferredPrefixCountOperation(Register dst, bool is_increment)
+      : dst_(dst), is_increment_(is_increment) {
+    set_comment("[ DeferredCountOperation");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register dst_;
+  bool is_increment_;
+};
+
+
+void DeferredPrefixCountOperation::Generate() {
+  // Undo the optimistic smi operation.
+  if (is_increment_) {
+    __ sub(Operand(dst_), Immediate(Smi::FromInt(1)));
+  } else {
+    __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
+  }
+  __ push(dst_);
+  __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+  __ push(eax);
+  __ push(Immediate(Smi::FromInt(1)));
+  if (is_increment_) {
+    __ CallRuntime(Runtime::kNumberAdd, 2);
+  } else {
+    __ CallRuntime(Runtime::kNumberSub, 2);
+  }
+  if (!dst_.is(eax)) __ mov(dst_, eax);
+}
+
+
+// The value in dst was optimistically incremented or decremented.  The
+// result overflowed or was not smi tagged.  Undo the operation and call
+// into the runtime to convert the argument to a number.  Update the
+// original value in old.  Call the runtime add or subtract function.
+// The result is left in dst.
+class DeferredPostfixCountOperation: public DeferredCode {
+ public:
+  DeferredPostfixCountOperation(Register dst, Register old, bool is_increment)
+      : dst_(dst), old_(old), is_increment_(is_increment) {
+    set_comment("[ DeferredCountOperation");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register dst_;
+  Register old_;
+  bool is_increment_;
+};
+
+
+void DeferredPostfixCountOperation::Generate() {
+  // Undo the optimistic smi operation.
+  if (is_increment_) {
+    __ sub(Operand(dst_), Immediate(Smi::FromInt(1)));
+  } else {
+    __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
+  }
+  __ push(dst_);
+  __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+
+  // Save the result of ToNumber to use as the old value.
+  __ push(eax);
+
+  // Call the runtime for the addition or subtraction.
+  __ push(eax);
+  __ push(Immediate(Smi::FromInt(1)));
+  if (is_increment_) {
+    __ CallRuntime(Runtime::kNumberAdd, 2);
+  } else {
+    __ CallRuntime(Runtime::kNumberSub, 2);
+  }
+  if (!dst_.is(eax)) __ mov(dst_, eax);
+  __ pop(old_);
+}
+
+
+void CodeGenerator::VisitCountOperation(CountOperation* node) {
+  Comment cmnt(masm_, "[ CountOperation");
+
+  bool is_postfix = node->is_postfix();
+  bool is_increment = node->op() == Token::INC;
+
+  Variable* var = node->expression()->AsVariableProxy()->AsVariable();
+  bool is_const = (var != NULL && var->mode() == Variable::CONST);
+
+  // Postfix operations need a stack slot under the reference to hold
+  // the old value while the new value is being stored.  This is so that
+  // in the case that storing the new value requires a call, the old
+  // value will be in the frame to be spilled.
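+  // JavaScript example: in 'o[key]++' the store may go through a call to
+  // the keyed store IC, so the old value is kept alive in a frame slot
+  // across that call.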
+  if (is_postfix) frame_->Push(Smi::FromInt(0));
+
+  { Reference target(this, node->expression());
+    if (target.is_illegal()) {
+      // Spoof the virtual frame to have the expected height (one higher
+      // than on entry).
+      if (!is_postfix) frame_->Push(Smi::FromInt(0));
+      return;
+    }
+    target.TakeValue(NOT_INSIDE_TYPEOF);
+
+    Result new_value = frame_->Pop();
+    new_value.ToRegister();
+
+    Result old_value;  // Only allocated in the postfix case.
+    if (is_postfix) {
+      // Allocate a temporary to preserve the old value.
+      old_value = allocator_->Allocate();
+      ASSERT(old_value.is_valid());
+      __ mov(old_value.reg(), new_value.reg());
+    }
+    // Ensure the new value is writable.
+    frame_->Spill(new_value.reg());
+
+    // In order to combine the overflow and the smi tag check, we need
+    // to be able to allocate a byte register.  We attempt to do so
+    // without spilling.  If we fail, we will generate separate overflow
+    // and smi tag checks.
+    //
+    // We allocate and clear the temporary byte register before
+    // performing the count operation since clearing the register using
+    // xor will clear the overflow flag.
+    Result tmp = allocator_->AllocateByteRegisterWithoutSpilling();
+    if (tmp.is_valid()) {
+      __ Set(tmp.reg(), Immediate(0));
+    }
+
+    DeferredCode* deferred = NULL;
+    if (is_postfix) {
+      deferred = new DeferredPostfixCountOperation(new_value.reg(),
+                                                   old_value.reg(),
+                                                   is_increment);
+    } else {
+      deferred = new DeferredPrefixCountOperation(new_value.reg(),
+                                                  is_increment);
+    }
+
+    if (is_increment) {
+      __ add(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
+    } else {
+      __ sub(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
+    }
+
+    // If the count operation didn't overflow and the result is a valid
+    // smi, we're done. Otherwise, we jump to the deferred slow-case
+    // code.
+    if (tmp.is_valid()) {
+      // We combine the overflow and the smi tag check if we could
+      // successfully allocate a temporary byte register.
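+      // setcc leaves 1 in the byte register on overflow; or-ing in the
+      // new value then makes the smi tag bit nonzero if either overflow
+      // occurred or the result is not a smi.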
+      __ setcc(overflow, tmp.reg());
+      __ or_(Operand(tmp.reg()), new_value.reg());
+      __ test(tmp.reg(), Immediate(kSmiTagMask));
+      tmp.Unuse();
+      deferred->Branch(not_zero);
+    } else {
+      // Otherwise we test separately for overflow and smi tag.
+      deferred->Branch(overflow);
+      __ test(new_value.reg(), Immediate(kSmiTagMask));
+      deferred->Branch(not_zero);
+    }
+    deferred->BindExit();
+
+    // Postfix: store the old value in the allocated slot under the
+    // reference.
+    if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
+
+    frame_->Push(&new_value);
+    // Non-constant: update the reference.
+    if (!is_const) target.SetValue(NOT_CONST_INIT);
+  }
+
+  // Postfix: drop the new value and use the old.
+  if (is_postfix) frame_->Drop();
+}
+
+
+void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+  // Note that due to an optimization in comparison operations (typeof
+  // compared to a string literal), we can evaluate a binary expression such
+  // as AND or OR and not leave a value on the frame or in the cc register.
+  Comment cmnt(masm_, "[ BinaryOperation");
+  Token::Value op = node->op();
+
+  // According to ECMA-262 section 11.11, page 58, the binary logical
+  // operators must yield the result of one of the two expressions
+  // before any ToBoolean() conversions. This means that the value
+  // produced by a && or || operator is not necessarily a boolean.
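+  // JavaScript example: 's || "fallback"' yields either the value of s
+  // itself or the string "fallback", never a boolean.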
+
+  // NOTE: If the left hand side produces a materialized value (not
+  // control flow), we force the right hand side to do the same. This
+  // is necessary because we assume that if we get control flow on the
+  // last path out of an expression we got it on all paths.
+  if (op == Token::AND) {
+    JumpTarget is_true;
+    ControlDestination dest(&is_true, destination()->false_target(), true);
+    LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
+
+    if (dest.false_was_fall_through()) {
+      // The current false target was used as the fall-through.  If
+      // there are no dangling jumps to is_true then the left
+      // subexpression was unconditionally false.  Otherwise we have
+      // paths where we do have to evaluate the right subexpression.
+      if (is_true.is_linked()) {
+        // We need to compile the right subexpression.  If the jump to
+        // the current false target was a forward jump then we have a
+        // valid frame, we have just bound the false target, and we
+        // have to jump around the code for the right subexpression.
+        if (has_valid_frame()) {
+          destination()->false_target()->Unuse();
+          destination()->false_target()->Jump();
+        }
+        is_true.Bind();
+        // The left subexpression compiled to control flow, so the
+        // right one is free to do so as well.
+        LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+      } else {
+        // We have actually just jumped to or bound the current false
+        // target but the current control destination is not marked as
+        // used.
+        destination()->Use(false);
+      }
+
+    } else if (dest.is_used()) {
+      // The left subexpression compiled to control flow (and is_true
+      // was just bound), so the right is free to do so as well.
+      LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+
+    } else {
+      // We have a materialized value on the frame, so we exit with
+      // one on all paths.  There are possibly also jumps to is_true
+      // from nested subexpressions.
+      JumpTarget pop_and_continue;
+      JumpTarget exit;
+
+      // Avoid popping the result if it converts to 'false' using the
+      // standard ToBoolean() conversion as described in ECMA-262,
+      // section 9.2, page 30.
+      //
+      // Duplicate the TOS value. The duplicate will be popped by
+      // ToBoolean.
+      frame_->Dup();
+      ControlDestination dest(&pop_and_continue, &exit, true);
+      ToBoolean(&dest);
+
+      // Pop the result of evaluating the first part.
+      frame_->Drop();
+
+      // Compile right side expression.
+      is_true.Bind();
+      Load(node->right());
+
+      // Exit (always with a materialized value).
+      exit.Bind();
+    }
+
+  } else if (op == Token::OR) {
+    JumpTarget is_false;
+    ControlDestination dest(destination()->true_target(), &is_false, false);
+    LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
+
+    if (dest.true_was_fall_through()) {
+      // The current true target was used as the fall-through.  If
+      // there are no dangling jumps to is_false then the left
+      // subexpression was unconditionally true.  Otherwise we have
+      // paths where we do have to evaluate the right subexpression.
+      if (is_false.is_linked()) {
+        // We need to compile the right subexpression.  If the jump to
+        // the current true target was a forward jump then we have a
+        // valid frame, we have just bound the true target, and we
+        // have to jump around the code for the right subexpression.
+        if (has_valid_frame()) {
+          destination()->true_target()->Unuse();
+          destination()->true_target()->Jump();
+        }
+        is_false.Bind();
+        // The left subexpression compiled to control flow, so the
+        // right one is free to do so as well.
+        LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+      } else {
+        // We have just jumped to or bound the current true target but
+        // the current control destination is not marked as used.
+        destination()->Use(true);
+      }
+
+    } else if (dest.is_used()) {
+      // The left subexpression compiled to control flow (and is_false
+      // was just bound), so the right is free to do so as well.
+      LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+
+    } else {
+      // We have a materialized value on the frame, so we exit with
+      // one on all paths.  There are possibly also jumps to is_false
+      // from nested subexpressions.
+      JumpTarget pop_and_continue;
+      JumpTarget exit;
+
+      // Avoid popping the result if it converts to 'true' using the
+      // standard ToBoolean() conversion as described in ECMA-262,
+      // section 9.2, page 30.
+      //
+      // Duplicate the TOS value. The duplicate will be popped by
+      // ToBoolean.
+      frame_->Dup();
+      ControlDestination dest(&exit, &pop_and_continue, false);
+      ToBoolean(&dest);
+
+      // Pop the result of evaluating the first part.
+      frame_->Drop();
+
+      // Compile right side expression.
+      is_false.Bind();
+      Load(node->right());
+
+      // Exit (always with a materialized value).
+      exit.Bind();
+    }
+
+  } else {
+    // NOTE: The code below assumes that the slow cases (calls to runtime)
+    // never return a constant/immutable object.
+    OverwriteMode overwrite_mode = NO_OVERWRITE;
+    if (node->left()->AsBinaryOperation() != NULL &&
+        node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) {
+      overwrite_mode = OVERWRITE_LEFT;
+    } else if (node->right()->AsBinaryOperation() != NULL &&
+               node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) {
+      overwrite_mode = OVERWRITE_RIGHT;
+    }
+
+    Load(node->left());
+    Load(node->right());
+    GenericBinaryOperation(node->op(), node->type(), overwrite_mode);
+  }
+}
+
+
+void CodeGenerator::VisitThisFunction(ThisFunction* node) {
+  frame_->PushFunction();
+}
+
+
+void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
+  Comment cmnt(masm_, "[ CompareOperation");
+
+  // Get the expressions from the node.
+  Expression* left = node->left();
+  Expression* right = node->right();
+  Token::Value op = node->op();
+  // To make typeof testing for natives implemented in JavaScript really
+  // efficient, we generate special code for expressions of the form:
+  // 'typeof <expression> == <string>'.
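+  // JavaScript example: 'typeof x == "number"' compiles to a smi tag
+  // test and a heap number map check instead of materializing the type
+  // string and comparing it.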
+  UnaryOperation* operation = left->AsUnaryOperation();
+  if ((op == Token::EQ || op == Token::EQ_STRICT) &&
+      (operation != NULL && operation->op() == Token::TYPEOF) &&
+      (right->AsLiteral() != NULL &&
+       right->AsLiteral()->handle()->IsString())) {
+    Handle<String> check(String::cast(*right->AsLiteral()->handle()));
+
+    // Load the operand and move it to a register.
+    LoadTypeofExpression(operation->expression());
+    Result answer = frame_->Pop();
+    answer.ToRegister();
+
+    if (check->Equals(Heap::number_symbol())) {
+      __ test(answer.reg(), Immediate(kSmiTagMask));
+      destination()->true_target()->Branch(zero);
+      frame_->Spill(answer.reg());
+      __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+      __ cmp(answer.reg(), Factory::heap_number_map());
+      answer.Unuse();
+      destination()->Split(equal);
+
+    } else if (check->Equals(Heap::string_symbol())) {
+      __ test(answer.reg(), Immediate(kSmiTagMask));
+      destination()->false_target()->Branch(zero);
+
+      // It can be an undetectable string object.
+      Result temp = allocator()->Allocate();
+      ASSERT(temp.is_valid());
+      __ mov(temp.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+      __ movzx_b(temp.reg(), FieldOperand(temp.reg(), Map::kBitFieldOffset));
+      __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
+      destination()->false_target()->Branch(not_zero);
+      __ mov(temp.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+      __ movzx_b(temp.reg(),
+                 FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
+      __ cmp(temp.reg(), FIRST_NONSTRING_TYPE);
+      temp.Unuse();
+      answer.Unuse();
+      destination()->Split(less);
+
+    } else if (check->Equals(Heap::boolean_symbol())) {
+      __ cmp(answer.reg(), Factory::true_value());
+      destination()->true_target()->Branch(equal);
+      __ cmp(answer.reg(), Factory::false_value());
+      answer.Unuse();
+      destination()->Split(equal);
+
+    } else if (check->Equals(Heap::undefined_symbol())) {
+      __ cmp(answer.reg(), Factory::undefined_value());
+      destination()->true_target()->Branch(equal);
+
+      __ test(answer.reg(), Immediate(kSmiTagMask));
+      destination()->false_target()->Branch(zero);
+
+      // It can be an undetectable object.
+      frame_->Spill(answer.reg());
+      __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+      __ movzx_b(answer.reg(),
+                 FieldOperand(answer.reg(), Map::kBitFieldOffset));
+      __ test(answer.reg(), Immediate(1 << Map::kIsUndetectable));
+      answer.Unuse();
+      destination()->Split(not_zero);
+
+    } else if (check->Equals(Heap::function_symbol())) {
+      __ test(answer.reg(), Immediate(kSmiTagMask));
+      destination()->false_target()->Branch(zero);
+      frame_->Spill(answer.reg());
+      __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
+      answer.Unuse();
+      destination()->Split(equal);
+
+    } else if (check->Equals(Heap::object_symbol())) {
+      __ test(answer.reg(), Immediate(kSmiTagMask));
+      destination()->false_target()->Branch(zero);
+      __ cmp(answer.reg(), Factory::null_value());
+      destination()->true_target()->Branch(equal);
+
+      // It can be an undetectable object.
+      Result map = allocator()->Allocate();
+      ASSERT(map.is_valid());
+      __ mov(map.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+      __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kBitFieldOffset));
+      __ test(map.reg(), Immediate(1 << Map::kIsUndetectable));
+      destination()->false_target()->Branch(not_zero);
+      __ mov(map.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+      __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
+      __ cmp(map.reg(), FIRST_JS_OBJECT_TYPE);
+      destination()->false_target()->Branch(less);
+      __ cmp(map.reg(), LAST_JS_OBJECT_TYPE);
+      answer.Unuse();
+      map.Unuse();
+      destination()->Split(less_equal);
+    } else {
+      // Uncommon case: typeof testing against a string literal that is
+      // never returned from the typeof operator.
+      answer.Unuse();
+      destination()->Goto(false);
+    }
+    return;
+  }
+
+  Condition cc = no_condition;
+  bool strict = false;
+  switch (op) {
+    case Token::EQ_STRICT:
+      strict = true;
+      // Fall through
+    case Token::EQ:
+      cc = equal;
+      break;
+    case Token::LT:
+      cc = less;
+      break;
+    case Token::GT:
+      cc = greater;
+      break;
+    case Token::LTE:
+      cc = less_equal;
+      break;
+    case Token::GTE:
+      cc = greater_equal;
+      break;
+    case Token::IN: {
+      Load(left);
+      Load(right);
+      Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
+      frame_->Push(&answer);  // push the result
+      return;
+    }
+    case Token::INSTANCEOF: {
+      Load(left);
+      Load(right);
+      InstanceofStub stub;
+      Result answer = frame_->CallStub(&stub, 2);
+      answer.ToRegister();
+      __ test(answer.reg(), Operand(answer.reg()));
+      answer.Unuse();
+      destination()->Split(zero);
+      return;
+    }
+    default:
+      UNREACHABLE();
+  }
+  Load(left);
+  Load(right);
+  Comparison(cc, strict, destination());
+}
+
+
+#ifdef DEBUG
+bool CodeGenerator::HasValidEntryRegisters() {
+  return (allocator()->count(eax) == (frame()->is_used(eax) ? 1 : 0))
+      && (allocator()->count(ebx) == (frame()->is_used(ebx) ? 1 : 0))
+      && (allocator()->count(ecx) == (frame()->is_used(ecx) ? 1 : 0))
+      && (allocator()->count(edx) == (frame()->is_used(edx) ? 1 : 0))
+      && (allocator()->count(edi) == (frame()->is_used(edi) ? 1 : 0));
+}
+#endif
+
+
+// Emit a LoadIC call to get the value from receiver and leave it in
+// dst.  The receiver register is restored after the call.
+class DeferredReferenceGetNamedValue: public DeferredCode {
+ public:
+  DeferredReferenceGetNamedValue(Register dst,
+                                 Register receiver,
+                                 Handle<String> name)
+      : dst_(dst), receiver_(receiver), name_(name) {
+    set_comment("[ DeferredReferenceGetNamedValue");
+  }
+
+  virtual void Generate();
+
+  Label* patch_site() { return &patch_site_; }
+
+ private:
+  Label patch_site_;
+  Register dst_;
+  Register receiver_;
+  Handle<String> name_;
+};
+
+
+void DeferredReferenceGetNamedValue::Generate() {
+  __ push(receiver_);
+  __ Set(ecx, Immediate(name_));
+  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+  __ call(ic, RelocInfo::CODE_TARGET);
+  // The call must be followed by a test eax instruction to indicate
+  // that the inobject property case was inlined.
+  //
+  // Store the delta to the map check instruction here in the test
+  // instruction.  Use masm_-> instead of the __ macro since the
+  // latter can't return a value.
+  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+  // Here we use masm_-> instead of the __ macro because this is the
+  // instruction that gets patched and coverage code gets in the way.
+  masm_->test(eax, Immediate(-delta_to_patch_site));
+  __ IncrementCounter(&Counters::named_load_inline_miss, 1);
+
+  if (!dst_.is(eax)) __ mov(dst_, eax);
+  __ pop(receiver_);
+}
+
+
+class DeferredReferenceGetKeyedValue: public DeferredCode {
+ public:
+  explicit DeferredReferenceGetKeyedValue(Register dst,
+                                          Register receiver,
+                                          Register key,
+                                          bool is_global)
+      : dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) {
+    set_comment("[ DeferredReferenceGetKeyedValue");
+  }
+
+  virtual void Generate();
+
+  Label* patch_site() { return &patch_site_; }
+
+ private:
+  Label patch_site_;
+  Register dst_;
+  Register receiver_;
+  Register key_;
+  bool is_global_;
+};
+
+
+void DeferredReferenceGetKeyedValue::Generate() {
+  __ push(receiver_);  // First IC argument.
+  __ push(key_);       // Second IC argument.
+
+  // Calculate the delta from the IC call instruction to the map check
+  // cmp instruction in the inlined version.  This delta is stored in
+  // a test(eax, delta) instruction after the call so that we can find
+  // it in the IC initialization code and patch the cmp instruction.
+  // This means that we cannot allow test instructions after calls to
+  // KeyedLoadIC stubs in other places.
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+  RelocInfo::Mode mode = is_global_
+                         ? RelocInfo::CODE_TARGET_CONTEXT
+                         : RelocInfo::CODE_TARGET;
+  __ call(ic, mode);
+  // The delta from the start of the map-compare instruction to the
+  // test instruction.  We use masm_-> directly here instead of the __
+  // macro because the macro sometimes uses macro expansion to turn
+  // into something that can't return a value.  This is encountered
+  // when doing generated code coverage tests.
+  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+  // Here we use masm_-> instead of the __ macro because this is the
+  // instruction that gets patched and coverage code gets in the way.
+  masm_->test(eax, Immediate(-delta_to_patch_site));
+  __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
+
+  if (!dst_.is(eax)) __ mov(dst_, eax);
+  __ pop(key_);
+  __ pop(receiver_);
+}
+
+
+class DeferredReferenceSetKeyedValue: public DeferredCode {
+ public:
+  DeferredReferenceSetKeyedValue(Register value,
+                                 Register key,
+                                 Register receiver)
+      : value_(value), key_(key), receiver_(receiver) {
+    set_comment("[ DeferredReferenceSetKeyedValue");
+  }
+
+  virtual void Generate();
+
+  Label* patch_site() { return &patch_site_; }
+
+ private:
+  Register value_;
+  Register key_;
+  Register receiver_;
+  Label patch_site_;
+};
+
+
+void DeferredReferenceSetKeyedValue::Generate() {
+  __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
+  // Push receiver and key arguments on the stack.
+  __ push(receiver_);
+  __ push(key_);
+  // Move value argument to eax as expected by the IC stub.
+  if (!value_.is(eax)) __ mov(eax, value_);
+  // Call the IC stub.
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  __ call(ic, RelocInfo::CODE_TARGET);
+  // The delta from the start of the map-compare instruction to the
+  // test instruction.  We use masm_-> directly here instead of the
+  // __ macro because the macro sometimes uses macro expansion to turn
+  // into something that can't return a value.  This is encountered
+  // when doing generated code coverage tests.
+  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+  // Here we use masm_-> instead of the __ macro because this is the
+  // instruction that gets patched and coverage code gets in the way.
+  masm_->test(eax, Immediate(-delta_to_patch_site));
+  // Restore value (returned from store IC), key and receiver
+  // registers.
+  if (!value_.is(eax)) __ mov(value_, eax);
+  __ pop(key_);
+  __ pop(receiver_);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+Handle<String> Reference::GetName() {
+  ASSERT(type_ == NAMED);
+  Property* property = expression_->AsProperty();
+  if (property == NULL) {
+    // Global variable reference treated as a named property reference.
+    VariableProxy* proxy = expression_->AsVariableProxy();
+    ASSERT(proxy->AsVariable() != NULL);
+    ASSERT(proxy->AsVariable()->is_global());
+    return proxy->name();
+  } else {
+    Literal* raw_name = property->key()->AsLiteral();
+    ASSERT(raw_name != NULL);
+    return Handle<String>(String::cast(*raw_name->handle()));
+  }
+}
+
+
+void Reference::GetValue(TypeofState typeof_state) {
+  ASSERT(!cgen_->in_spilled_code());
+  ASSERT(cgen_->HasValidEntryRegisters());
+  ASSERT(!is_illegal());
+  MacroAssembler* masm = cgen_->masm();
+
+  // Record the source position for the property load.
+  Property* property = expression_->AsProperty();
+  if (property != NULL) {
+    cgen_->CodeForSourcePosition(property->position());
+  }
+
+  switch (type_) {
+    case SLOT: {
+      Comment cmnt(masm, "[ Load from Slot");
+      Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+      ASSERT(slot != NULL);
+      cgen_->LoadFromSlotCheckForArguments(slot, typeof_state);
+      break;
+    }
+
+    case NAMED: {
+      // TODO(1241834): Make sure that it is safe to ignore the
+      // distinction between expressions in a typeof and not in a
+      // typeof. If there is a chance that reference errors can be
+      // thrown below, we must distinguish between the two kinds of
+      // loads (typeof expression loads must not throw a reference
+      // error).
+      Variable* var = expression_->AsVariableProxy()->AsVariable();
+      bool is_global = var != NULL;
+      ASSERT(!is_global || var->is_global());
+
+      // Do not inline the inobject property case for loads from the global
+      // object.  Also do not inline for unoptimized code.  This saves time
+      // in the code generator.  Unoptimized code is toplevel code or code
+      // that is not in a loop.
+      if (is_global ||
+          cgen_->scope()->is_global_scope() ||
+          cgen_->loop_nesting() == 0) {
+        Comment cmnt(masm, "[ Load from named Property");
+        cgen_->frame()->Push(GetName());
+
+        RelocInfo::Mode mode = is_global
+                               ? RelocInfo::CODE_TARGET_CONTEXT
+                               : RelocInfo::CODE_TARGET;
+        Result answer = cgen_->frame()->CallLoadIC(mode);
+        // A test eax instruction following the call signals that the
+        // inobject property case was inlined.  Ensure that there is not
+        // a test eax instruction here.
+        __ nop();
+        cgen_->frame()->Push(&answer);
+      } else {
+        // Inline the inobject property case.
+        Comment cmnt(masm, "[ Inlined named property load");
+        Result receiver = cgen_->frame()->Pop();
+        receiver.ToRegister();
+
+        Result value = cgen_->allocator()->Allocate();
+        ASSERT(value.is_valid());
+        DeferredReferenceGetNamedValue* deferred =
+            new DeferredReferenceGetNamedValue(value.reg(),
+                                               receiver.reg(),
+                                               GetName());
+
+        // Check that the receiver is a heap object.
+        __ test(receiver.reg(), Immediate(kSmiTagMask));
+        deferred->Branch(zero);
+
+        __ bind(deferred->patch_site());
+        // This is the map check instruction that will be patched (so we can't
+        // use the double underscore macro that may insert instructions).
+        // Initially use an invalid map to force a failure.
+        masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+                  Immediate(Factory::null_value()));
+        // This branch is always a forwards branch so it's always a fixed
+        // size which allows the assert below to succeed and patching to work.
+        deferred->Branch(not_equal);
+
+        // The delta from the patch label to the load offset must be
+        // statically known.
+        ASSERT(masm->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
+               LoadIC::kOffsetToLoadInstruction);
+        // The initial (invalid) offset has to be large enough to force
+        // a 32-bit instruction encoding to allow patching with an
+        // arbitrary offset.  Use kMaxInt (minus kHeapObjectTag).
+        int offset = kMaxInt;
+        masm->mov(value.reg(), FieldOperand(receiver.reg(), offset));
+
+        __ IncrementCounter(&Counters::named_load_inline, 1);
+        deferred->BindExit();
+        cgen_->frame()->Push(&receiver);
+        cgen_->frame()->Push(&value);
+      }
+      break;
+    }
+
+    case KEYED: {
+      // TODO(1241834): Make sure that it is safe to ignore the
+      // distinction between expressions in a typeof and not in a typeof.
+      Comment cmnt(masm, "[ Load from keyed Property");
+      Variable* var = expression_->AsVariableProxy()->AsVariable();
+      bool is_global = var != NULL;
+      ASSERT(!is_global || var->is_global());
+
+      // Inline array load code if inside of a loop.  We do not know
+      // the receiver map yet, so we initially generate the code with
+      // a check against an invalid map.  In the inline cache code, we
+      // patch the map check if appropriate.
+      if (cgen_->loop_nesting() > 0) {
+        Comment cmnt(masm, "[ Inlined load from keyed Property");
+
+        Result key = cgen_->frame()->Pop();
+        Result receiver = cgen_->frame()->Pop();
+        key.ToRegister();
+        receiver.ToRegister();
+
+        // Use a fresh temporary to load the elements without destroying
+        // the receiver which is needed for the deferred slow case.
+        Result elements = cgen_->allocator()->Allocate();
+        ASSERT(elements.is_valid());
+
+        // Use a fresh temporary for the index and later the loaded
+        // value.
+        Result index = cgen_->allocator()->Allocate();
+        ASSERT(index.is_valid());
+
+        DeferredReferenceGetKeyedValue* deferred =
+            new DeferredReferenceGetKeyedValue(index.reg(),
+                                               receiver.reg(),
+                                               key.reg(),
+                                               is_global);
+
+        // Check that the receiver is not a smi (only needed if this
+        // is not a load from the global context) and that it has the
+        // expected map.
+        if (!is_global) {
+          __ test(receiver.reg(), Immediate(kSmiTagMask));
+          deferred->Branch(zero);
+        }
+
+        // Initially, use an invalid map. The map is patched in the IC
+        // initialization code.
+        __ bind(deferred->patch_site());
+        // Use masm-> here instead of the double underscore macro since extra
+        // coverage code can interfere with the patching.
+        masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+                  Immediate(Factory::null_value()));
+        deferred->Branch(not_equal);
+
+        // Check that the key is a smi.
+        __ test(key.reg(), Immediate(kSmiTagMask));
+        deferred->Branch(not_zero);
+
+        // Get the elements array from the receiver and check that it
+        // is not a dictionary.
+        __ mov(elements.reg(),
+               FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+        __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
+               Immediate(Factory::fixed_array_map()));
+        deferred->Branch(not_equal);
+
+        // Shift the key to get the actual index value and check that
+        // it is within bounds.
+        __ mov(index.reg(), key.reg());
+        __ sar(index.reg(), kSmiTagSize);
+        __ cmp(index.reg(),
+               FieldOperand(elements.reg(), FixedArray::kLengthOffset));
+        deferred->Branch(above_equal);
+
+        // Load and check that the result is not the hole.  We could
+        // reuse the index or elements register for the value.
+        //
+        // TODO(206): Consider whether it makes sense to try some
+        // heuristic about which register to reuse.  For example, if
+        // one is eax, then we can reuse that one because the value
+        // coming from the deferred code will be in eax.
+        Result value = index;
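+        // The index has been untagged above, so scale it by the pointer
+        // size (times_4).  The heap object tag is subtracted explicitly
+        // because elements holds a tagged pointer and FieldOperand is not
+        // used here.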
+        __ mov(value.reg(), Operand(elements.reg(),
+                                    index.reg(),
+                                    times_4,
+                                    FixedArray::kHeaderSize - kHeapObjectTag));
+        elements.Unuse();
+        index.Unuse();
+        __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
+        deferred->Branch(equal);
+        __ IncrementCounter(&Counters::keyed_load_inline, 1);
+
+        deferred->BindExit();
+        // Restore the receiver and key to the frame and push the
+        // result on top of it.
+        cgen_->frame()->Push(&receiver);
+        cgen_->frame()->Push(&key);
+        cgen_->frame()->Push(&value);
+
+      } else {
+        Comment cmnt(masm, "[ Load from keyed Property");
+        RelocInfo::Mode mode = is_global
+                               ? RelocInfo::CODE_TARGET_CONTEXT
+                               : RelocInfo::CODE_TARGET;
+        Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
+        // Make sure that we do not have a test instruction after the
+        // call.  A test instruction after the call is used to
+        // indicate that we have generated an inline version of the
+        // keyed load.  The explicit nop instruction is here because
+        // the push that follows might be peep-hole optimized away.
+        __ nop();
+        cgen_->frame()->Push(&answer);
+      }
+      break;
+    }
+
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void Reference::TakeValue(TypeofState typeof_state) {
+  // For non-constant frame-allocated slots, we invalidate the value in the
+  // slot.  For all others, we fall back on GetValue.
+  ASSERT(!cgen_->in_spilled_code());
+  ASSERT(!is_illegal());
+  if (type_ != SLOT) {
+    GetValue(typeof_state);
+    return;
+  }
+
+  Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+  ASSERT(slot != NULL);
+  if (slot->type() == Slot::LOOKUP ||
+      slot->type() == Slot::CONTEXT ||
+      slot->var()->mode() == Variable::CONST ||
+      slot->is_arguments()) {
+    GetValue(typeof_state);
+    return;
+  }
+
+  // Only non-constant, frame-allocated parameters and locals can
+  // reach here. Be careful not to use the optimizations for arguments
+  // object access since it may not have been initialized yet.
+  ASSERT(!slot->is_arguments());
+  if (slot->type() == Slot::PARAMETER) {
+    cgen_->frame()->TakeParameterAt(slot->index());
+  } else {
+    ASSERT(slot->type() == Slot::LOCAL);
+    cgen_->frame()->TakeLocalAt(slot->index());
+  }
+}
+
+
+void Reference::SetValue(InitState init_state) {
+  ASSERT(cgen_->HasValidEntryRegisters());
+  ASSERT(!is_illegal());
+  MacroAssembler* masm = cgen_->masm();
+  switch (type_) {
+    case SLOT: {
+      Comment cmnt(masm, "[ Store to Slot");
+      Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+      ASSERT(slot != NULL);
+      cgen_->StoreToSlot(slot, init_state);
+      break;
+    }
+
+    case NAMED: {
+      Comment cmnt(masm, "[ Store to named Property");
+      cgen_->frame()->Push(GetName());
+      Result answer = cgen_->frame()->CallStoreIC();
+      cgen_->frame()->Push(&answer);
+      break;
+    }
+
+    case KEYED: {
+      Comment cmnt(masm, "[ Store to keyed Property");
+
+      // Generate inlined version of the keyed store if the code is in
+      // a loop and the key is likely to be a smi.
+      Property* property = expression()->AsProperty();
+      ASSERT(property != NULL);
+      SmiAnalysis* key_smi_analysis = property->key()->type();
+
+      if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
+        Comment cmnt(masm, "[ Inlined store to keyed Property");
+
+        // Get the receiver, key and value into registers.
+        Result value = cgen_->frame()->Pop();
+        Result key = cgen_->frame()->Pop();
+        Result receiver = cgen_->frame()->Pop();
+
+        Result tmp = cgen_->allocator_->Allocate();
+        ASSERT(tmp.is_valid());
+
+        // Determine whether the value is a constant before putting it
+        // in a register.
+        bool value_is_constant = value.is_constant();
+
+        // Make sure that value, key and receiver are in registers.
+        value.ToRegister();
+        key.ToRegister();
+        receiver.ToRegister();
+
+        DeferredReferenceSetKeyedValue* deferred =
+            new DeferredReferenceSetKeyedValue(value.reg(),
+                                               key.reg(),
+                                               receiver.reg());
+
+        // Check that the value is a smi if it is not a constant.  We
+        // can skip the write barrier for smis and constants.
+        if (!value_is_constant) {
+          __ test(value.reg(), Immediate(kSmiTagMask));
+          deferred->Branch(not_zero);
+        }
+
+        // Check that the key is a non-negative smi.
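+        // The mask combines the smi tag bit and the sign bit, so a single
+        // test rejects both non-smis and negative keys.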
+        __ test(key.reg(), Immediate(kSmiTagMask | 0x80000000));
+        deferred->Branch(not_zero);
+
+        // Check that the receiver is not a smi.
+        __ test(receiver.reg(), Immediate(kSmiTagMask));
+        deferred->Branch(zero);
+
+        // Check that the receiver is a JSArray.
+        __ mov(tmp.reg(),
+               FieldOperand(receiver.reg(), HeapObject::kMapOffset));
+        __ movzx_b(tmp.reg(),
+                   FieldOperand(tmp.reg(), Map::kInstanceTypeOffset));
+        __ cmp(tmp.reg(), JS_ARRAY_TYPE);
+        deferred->Branch(not_equal);
+
+        // Check that the key is within bounds.  Both the key and the
+        // length of the JSArray are smis.
+        __ cmp(key.reg(),
+               FieldOperand(receiver.reg(), JSArray::kLengthOffset));
+        deferred->Branch(greater_equal);
+
+        // Get the elements array from the receiver and check that it
+        // is not a dictionary.
+        __ mov(tmp.reg(),
+               FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+        // Bind the deferred code patch site to be able to locate the
+        // fixed array map comparison.  When debugging, we patch this
+        // comparison to always fail so that we will hit the IC call
+        // in the deferred code which will allow the debugger to
+        // break for fast case stores.
+        __ bind(deferred->patch_site());
+        __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
+               Immediate(Factory::fixed_array_map()));
+        deferred->Branch(not_equal);
+
+        // Store the value.
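+        // The key is still a tagged smi (index << 1), so a times_2 scale
+        // yields index * kPointerSize.  The heap object tag is subtracted
+        // explicitly because tmp holds a tagged elements pointer.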
+        __ mov(Operand(tmp.reg(),
+                       key.reg(),
+                       times_2,
+                       FixedArray::kHeaderSize - kHeapObjectTag),
+               value.reg());
+        __ IncrementCounter(&Counters::keyed_store_inline, 1);
+
+        deferred->BindExit();
+
+        cgen_->frame()->Push(&receiver);
+        cgen_->frame()->Push(&key);
+        cgen_->frame()->Push(&value);
+      } else {
+        Result answer = cgen_->frame()->CallKeyedStoreIC();
+        // Make sure that we do not have a test instruction after the
+        // call.  A test instruction after the call is used to
+        // indicate that we have generated an inline version of the
+        // keyed store.
+        __ nop();
+        cgen_->frame()->Push(&answer);
+      }
+      break;
+    }
+
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
+void ToBooleanStub::Generate(MacroAssembler* masm) {
+  Label false_result, true_result, not_string;
+  __ mov(eax, Operand(esp, 1 * kPointerSize));
+
+  // 'null' => false.
+  __ cmp(eax, Factory::null_value());
+  __ j(equal, &false_result);
+
+  // Get the map and type of the heap object.
+  __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
+
+  // Undetectable => false.
+  __ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
+  __ and_(ebx, 1 << Map::kIsUndetectable);
+  __ j(not_zero, &false_result);
+
+  // JavaScript object => true.
+  __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+  __ j(above_equal, &true_result);
+
+  // String value => false iff empty.
+  __ cmp(ecx, FIRST_NONSTRING_TYPE);
+  __ j(above_equal, &not_string);
+  __ and_(ecx, kStringSizeMask);
+  __ cmp(ecx, kShortStringTag);
+  __ j(not_equal, &true_result);  // Empty string is always short.
+  __ mov(edx, FieldOperand(eax, String::kLengthOffset));
+  __ shr(edx, String::kShortLengthShift);
+  __ j(zero, &false_result);
+  __ jmp(&true_result);
+
+  __ bind(&not_string);
+  // HeapNumber => false iff +0, -0, or NaN.
+  __ cmp(edx, Factory::heap_number_map());
+  __ j(not_equal, &true_result);
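+  // Load 0.0 and the value onto the FPU stack and compare them with
+  // fucompp.  A NaN yields an unordered result which also sets C3, so
+  // after sahf the single zero jump covers +0, -0 and NaN alike.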
+  __ fldz();
+  __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+  __ fucompp();
+  __ push(eax);
+  __ fnstsw_ax();
+  __ sahf();
+  __ pop(eax);
+  __ j(zero, &false_result);
+  // Fall through to |true_result|.
+
+  // Return 1/0 for true/false in eax.
+  __ bind(&true_result);
+  __ mov(eax, 1);
+  __ ret(1 * kPointerSize);
+  __ bind(&false_result);
+  __ mov(eax, 0);
+  __ ret(1 * kPointerSize);
+}
+
+
+void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
+  // Perform fast-case smi code for the operation (eax <op> ebx) and
+  // leave result in register eax.
+
+  // Prepare the smi check of both operands by or'ing them together
+  // before checking against the smi mask.
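+  // With kSmiTag == 0 a smi has its low bit clear, so if either operand is
+  // a heap object the or'ed value has the tag bit set and the smi check
+  // below branches to the slow case.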
+  __ mov(ecx, Operand(ebx));
+  __ or_(ecx, Operand(eax));
+
+  switch (op_) {
+    case Token::ADD:
+      __ add(eax, Operand(ebx));  // add optimistically
+      __ j(overflow, slow, not_taken);
+      break;
+
+    case Token::SUB:
+      __ sub(eax, Operand(ebx));  // subtract optimistically
+      __ j(overflow, slow, not_taken);
+      break;
+
+    case Token::DIV:
+    case Token::MOD:
+      // Sign extend eax into edx:eax.
+      __ cdq();
+      // Check for 0 divisor.
+      __ test(ebx, Operand(ebx));
+      __ j(zero, slow, not_taken);
+      break;
+
+    default:
+      // Fall-through to smi check.
+      break;
+  }
+
+  // Perform the actual smi check.
+  ASSERT(kSmiTag == 0);  // adjust zero check if not the case
+  __ test(ecx, Immediate(kSmiTagMask));
+  __ j(not_zero, slow, not_taken);
+
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+      // Do nothing here.
+      break;
+
+    case Token::MUL:
+      // If the smi tag is 0 we can just leave the tag on one operand.
+      ASSERT(kSmiTag == 0);  // adjust code below if not the case
+      // Remove tag from one of the operands (but keep sign).
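+      // A smi is value << 1, so after untagging eax the product
+      // x * (y << 1) == (x * y) << 1 is already a correctly tagged smi.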
+      __ sar(eax, kSmiTagSize);
+      // Do multiplication.
+      __ imul(eax, Operand(ebx));  // multiplication of smis; result in eax
+      // Go slow on overflows.
+      __ j(overflow, slow, not_taken);
+      // Check for negative zero result.
+      __ NegativeZeroTest(eax, ecx, slow);  // use ecx = x | y
+      break;
+
+    case Token::DIV:
+      // Divide edx:eax by ebx.
+      __ idiv(ebx);
+      // Check for the corner case of dividing the most negative smi
+      // by -1. We cannot use the overflow flag, since it is not set
+      // by the idiv instruction.
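+      // The operands are still tagged, so the quotient is already untagged;
+      // the only quotient that overflows the smi range is
+      // -2^30 / -1 == 2^30 == 0x40000000, hence the compare below.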
+      ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+      __ cmp(eax, 0x40000000);
+      __ j(equal, slow);
+      // Check for negative zero result.
+      __ NegativeZeroTest(eax, ecx, slow);  // use ecx = x | y
+      // Check that the remainder is zero.
+      __ test(edx, Operand(edx));
+      __ j(not_zero, slow);
+      // Tag the result and store it in register eax.
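+      // With kSmiTag == 0 tagging is a left shift by one, which the lea
+      // below computes as eax + eax.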
+      ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
+      __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
+      break;
+
+    case Token::MOD:
+      // Divide edx:eax by ebx.
+      __ idiv(ebx);
+      // Check for negative zero result.
+      __ NegativeZeroTest(edx, ecx, slow);  // use ecx = x | y
+      // Move remainder to register eax.
+      __ mov(eax, Operand(edx));
+      break;
+
+    case Token::BIT_OR:
+      __ or_(eax, Operand(ebx));
+      break;
+
+    case Token::BIT_AND:
+      __ and_(eax, Operand(ebx));
+      break;
+
+    case Token::BIT_XOR:
+      __ xor_(eax, Operand(ebx));
+      break;
+
+    case Token::SHL:
+    case Token::SHR:
+    case Token::SAR:
+      // Move the second operand into register ecx.
+      __ mov(ecx, Operand(ebx));
+      // Remove tags from operands (but keep sign).
+      __ sar(eax, kSmiTagSize);
+      __ sar(ecx, kSmiTagSize);
+      // Perform the operation.
+      switch (op_) {
+        case Token::SAR:
+          __ sar(eax);
+          // No checks of result necessary
+          break;
+        case Token::SHR:
+          __ shr(eax);
+          // Check that the *unsigned* result fits in a smi.
+          // Neither of the two high-order bits can be set:
+          // - 0x80000000: high bit would be lost when smi tagging.
+          // - 0x40000000: this number would convert to negative when
+          //   smi tagging.
+          // These two cases can only happen with shifts by 0 or 1 when
+          // handed a valid smi.
+          __ test(eax, Immediate(0xc0000000));
+          __ j(not_zero, slow, not_taken);
+          break;
+        case Token::SHL:
+          __ shl(eax);
+          // Check that the *signed* result fits in a smi.
+          __ cmp(eax, 0xc0000000);
+          __ j(sign, slow, not_taken);
+          break;
+        default:
+          UNREACHABLE();
+      }
+      // Tag the result and store it in register eax.
+      ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
+      __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
+      break;
+
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
+  Label call_runtime;
+
+  if (flags_ == SMI_CODE_IN_STUB) {
+    // The fast case smi code wasn't inlined in the stub caller
+    // code. Generate it here to speed up common operations.
+    Label slow;
+    __ mov(ebx, Operand(esp, 1 * kPointerSize));  // get y
+    __ mov(eax, Operand(esp, 2 * kPointerSize));  // get x
+    GenerateSmiCode(masm, &slow);
+    __ ret(2 * kPointerSize);  // remove both operands
+
+    // Too bad. The fast case smi code didn't succeed.
+    __ bind(&slow);
+  }
+
+  // Setup registers.
+  __ mov(eax, Operand(esp, 1 * kPointerSize));  // get y
+  __ mov(edx, Operand(esp, 2 * kPointerSize));  // get x
+
+  // Floating point case.
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV: {
+      // eax: y
+      // edx: x
+
+      if (CpuFeatures::IsSupported(CpuFeatures::SSE2)) {
+        CpuFeatures::Scope use_sse2(CpuFeatures::SSE2);
+        FloatingPointHelper::LoadSse2Operands(masm, &call_runtime);
+
+        switch (op_) {
+          case Token::ADD: __ addsd(xmm0, xmm1); break;
+          case Token::SUB: __ subsd(xmm0, xmm1); break;
+          case Token::MUL: __ mulsd(xmm0, xmm1); break;
+          case Token::DIV: __ divsd(xmm0, xmm1); break;
+          default: UNREACHABLE();
+        }
+        // Allocate a heap number, if needed.
+        Label skip_allocation;
+        switch (mode_) {
+          case OVERWRITE_LEFT:
+            __ mov(eax, Operand(edx));
+            // Fall through!
+          case OVERWRITE_RIGHT:
+            // If the argument in eax is already an object, we skip the
+            // allocation of a heap number.
+            __ test(eax, Immediate(kSmiTagMask));
+            __ j(not_zero, &skip_allocation, not_taken);
+            // Fall through!
+          case NO_OVERWRITE:
+            FloatingPointHelper::AllocateHeapNumber(masm,
+                                                    &call_runtime,
+                                                    ecx,
+                                                    edx,
+                                                    eax);
+            __ bind(&skip_allocation);
+            break;
+          default: UNREACHABLE();
+        }
+        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        __ ret(2 * kPointerSize);
+
+      } else {  // SSE2 not available, use FPU.
+        FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
+        // Allocate a heap number, if needed.
+        Label skip_allocation;
+        switch (mode_) {
+          case OVERWRITE_LEFT:
+            __ mov(eax, Operand(edx));
+            // Fall through!
+          case OVERWRITE_RIGHT:
+            // If the argument in eax is already an object, we skip the
+            // allocation of a heap number.
+            __ test(eax, Immediate(kSmiTagMask));
+            __ j(not_zero, &skip_allocation, not_taken);
+            // Fall through!
+          case NO_OVERWRITE:
+            FloatingPointHelper::AllocateHeapNumber(masm,
+                                                    &call_runtime,
+                                                    ecx,
+                                                    edx,
+                                                    eax);
+            __ bind(&skip_allocation);
+            break;
+          default: UNREACHABLE();
+        }
+        FloatingPointHelper::LoadFloatOperands(masm, ecx);
+
+        switch (op_) {
+          case Token::ADD: __ faddp(1); break;
+          case Token::SUB: __ fsubp(1); break;
+          case Token::MUL: __ fmulp(1); break;
+          case Token::DIV: __ fdivp(1); break;
+          default: UNREACHABLE();
+        }
+        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        __ ret(2 * kPointerSize);
+      }
+    }
+    case Token::MOD: {
+      // For MOD we go directly to runtime in the non-smi case.
+      break;
+    }
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SAR:
+    case Token::SHL:
+    case Token::SHR: {
+      FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
+      FloatingPointHelper::LoadFloatOperands(masm, ecx);
+
+      Label skip_allocation, non_smi_result, operand_conversion_failure;
+
+      // Reserve space for converted numbers.
+      __ sub(Operand(esp), Immediate(2 * kPointerSize));
+
+      if (use_sse3_) {
+        // Truncate the operands to 32-bit integers and check for
+        // exceptions in doing so.
+        CpuFeatures::Scope scope(CpuFeatures::SSE3);
+        __ fisttp_s(Operand(esp, 0 * kPointerSize));
+        __ fisttp_s(Operand(esp, 1 * kPointerSize));
+        __ fnstsw_ax();
+        __ test(eax, Immediate(1));
+        __ j(not_zero, &operand_conversion_failure);
+      } else {
+        // Check if right operand is int32.
+        __ fist_s(Operand(esp, 0 * kPointerSize));
+        __ fild_s(Operand(esp, 0 * kPointerSize));
+        __ fucompp();
+        __ fnstsw_ax();
+        __ sahf();
+        __ j(not_zero, &operand_conversion_failure);
+        __ j(parity_even, &operand_conversion_failure);
+
+        // Check if left operand is int32.
+        __ fist_s(Operand(esp, 1 * kPointerSize));
+        __ fild_s(Operand(esp, 1 * kPointerSize));
+        __ fucompp();
+        __ fnstsw_ax();
+        __ sahf();
+        __ j(not_zero, &operand_conversion_failure);
+        __ j(parity_even, &operand_conversion_failure);
+      }
+
+      // Get int32 operands and perform bitop.
+      __ pop(ecx);
+      __ pop(eax);
+      switch (op_) {
+        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
+        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
+        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+        case Token::SAR: __ sar(eax); break;
+        case Token::SHL: __ shl(eax); break;
+        case Token::SHR: __ shr(eax); break;
+        default: UNREACHABLE();
+      }
+      if (op_ == Token::SHR) {
+        // Check if result is non-negative and fits in a smi.
+        __ test(eax, Immediate(0xc0000000));
+        __ j(not_zero, &non_smi_result);
+      } else {
+        // Check if result fits in a smi.
+        __ cmp(eax, 0xc0000000);
+        __ j(negative, &non_smi_result);
+      }
+      // Tag smi result and return.
+      ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
+      __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
+      __ ret(2 * kPointerSize);
+
+      // All ops except SHR return a signed int32 that we load in a HeapNumber.
+      if (op_ != Token::SHR) {
+        __ bind(&non_smi_result);
+        // Allocate a heap number if needed.
+        __ mov(ebx, Operand(eax));  // ebx: result
+        switch (mode_) {
+          case OVERWRITE_LEFT:
+          case OVERWRITE_RIGHT:
+            // If the operand was an object, we skip the
+            // allocation of a heap number.
+            __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
+                                1 * kPointerSize : 2 * kPointerSize));
+            __ test(eax, Immediate(kSmiTagMask));
+            __ j(not_zero, &skip_allocation, not_taken);
+            // Fall through!
+          case NO_OVERWRITE:
+            FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
+                                                    ecx, edx, eax);
+            __ bind(&skip_allocation);
+            break;
+          default: UNREACHABLE();
+        }
+        // Store the result in the HeapNumber and return.
+        __ mov(Operand(esp, 1 * kPointerSize), ebx);
+        __ fild_s(Operand(esp, 1 * kPointerSize));
+        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        __ ret(2 * kPointerSize);
+      }
+
+      // Clear the FPU exception flag and reset the stack before calling
+      // the runtime system.
+      __ bind(&operand_conversion_failure);
+      __ add(Operand(esp), Immediate(2 * kPointerSize));
+      if (use_sse3_) {
+        // If we've used the SSE3 instructions for truncating the
+        // floating point values to integers and it failed, we have a
+        // pending #IA exception. Clear it.
+        __ fnclex();
+      } else {
+        // The non-SSE3 variant does early bailout if the right
+        // operand isn't a 32-bit integer, so we may have a single
+        // value on the FPU stack we need to get rid of.
+        __ ffree(0);
+      }
+
+      // SHR should return uint32 - go to runtime for non-smi/negative result.
+      if (op_ == Token::SHR) {
+        __ bind(&non_smi_result);
+      }
+      __ mov(eax, Operand(esp, 1 * kPointerSize));
+      __ mov(edx, Operand(esp, 2 * kPointerSize));
+      break;
+    }
+    default: UNREACHABLE(); break;
+  }
+
+  // If all else fails, use the runtime system to get the correct
+  // result.
+  __ bind(&call_runtime);
+  switch (op_) {
+    case Token::ADD: {
+      // Test for string arguments before calling runtime.
+      Label not_strings, both_strings, not_string1, string1;
+      Result answer;
+      __ mov(eax, Operand(esp, 2 * kPointerSize));  // First argument.
+      __ mov(edx, Operand(esp, 1 * kPointerSize));  // Second argument.
+      __ test(eax, Immediate(kSmiTagMask));
+      __ j(zero, &not_string1);
+      __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, eax);
+      __ j(above_equal, &not_string1);
+
+      // First argument is a string, test second.
+      __ test(edx, Immediate(kSmiTagMask));
+      __ j(zero, &string1);
+      __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, edx);
+      __ j(above_equal, &string1);
+
+      // First and second argument are strings.
+      __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
+
+      // Only first argument is a string.
+      __ bind(&string1);
+      __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
+
+      // First argument was not a string, test second.
+      __ bind(&not_string1);
+      __ test(edx, Immediate(kSmiTagMask));
+      __ j(zero, &not_strings);
+      __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, edx);
+      __ j(above_equal, &not_strings);
+
+      // Only second argument is a string.
+      __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+
+      __ bind(&not_strings);
+      // Neither argument is a string.
+      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+      break;
+    }
+    case Token::SUB:
+      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+      break;
+    case Token::MUL:
+      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+      break;
+    case Token::DIV:
+      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+      break;
+    case Token::MOD:
+      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+      break;
+    case Token::BIT_OR:
+      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+      break;
+    case Token::BIT_AND:
+      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+      break;
+    case Token::BIT_XOR:
+      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+      break;
+    case Token::SAR:
+      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+      break;
+    case Token::SHL:
+      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+      break;
+    case Token::SHR:
+      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
+                                             Label* need_gc,
+                                             Register scratch1,
+                                             Register scratch2,
+                                             Register result) {
+  // Allocate heap number in new space.
+  __ AllocateInNewSpace(HeapNumber::kSize,
+                        result,
+                        scratch1,
+                        scratch2,
+                        need_gc,
+                        TAG_OBJECT);
+
+  // Set the map.
+  __ mov(FieldOperand(result, HeapObject::kMapOffset),
+         Immediate(Factory::heap_number_map()));
+}
+
+
+void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
+                                           Register number) {
+  Label load_smi, done;
+
+  __ test(number, Immediate(kSmiTagMask));
+  __ j(zero, &load_smi, not_taken);
+  __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
+  __ jmp(&done);
+
+  __ bind(&load_smi);
+  __ sar(number, kSmiTagSize);
+  __ push(number);
+  __ fild_s(Operand(esp, 0));
+  __ pop(number);
+
+  __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadSse2Operands(MacroAssembler* masm,
+                                           Label* not_numbers) {
+  Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
+  // Load operand in edx into xmm0, or branch to not_numbers.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &load_smi_edx, not_taken);  // Argument in edx is a smi.
+  __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Factory::heap_number_map());
+  __ j(not_equal, not_numbers);  // Argument in edx is not a number.
+  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+  __ bind(&load_eax);
+  // Load operand in eax into xmm1, or branch to not_numbers.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &load_smi_eax, not_taken);  // Argument in eax is a smi.
+  __ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::heap_number_map());
+  __ j(equal, &load_float_eax);
+  __ jmp(not_numbers);  // Argument in eax is not a number.
+  __ bind(&load_smi_edx);
+  __ sar(edx, 1);  // Untag smi before converting to float.
+  __ cvtsi2sd(xmm0, Operand(edx));
+  __ shl(edx, 1);  // Retag smi for heap number overwriting test.
+  __ jmp(&load_eax);
+  __ bind(&load_smi_eax);
+  __ sar(eax, 1);  // Untag smi before converting to float.
+  __ cvtsi2sd(xmm1, Operand(eax));
+  __ shl(eax, 1);  // Retag smi for heap number overwriting test.
+  __ jmp(&done);
+  __ bind(&load_float_eax);
+  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+  __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
+                                            Register scratch) {
+  Label load_smi_1, load_smi_2, done_load_1, done;
+  __ mov(scratch, Operand(esp, 2 * kPointerSize));
+  __ test(scratch, Immediate(kSmiTagMask));
+  __ j(zero, &load_smi_1, not_taken);
+  __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
+  __ bind(&done_load_1);
+
+  __ mov(scratch, Operand(esp, 1 * kPointerSize));
+  __ test(scratch, Immediate(kSmiTagMask));
+  __ j(zero, &load_smi_2, not_taken);
+  __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
+  __ jmp(&done);
+
+  __ bind(&load_smi_1);
+  __ sar(scratch, kSmiTagSize);
+  __ push(scratch);
+  __ fild_s(Operand(esp, 0));
+  __ pop(scratch);
+  __ jmp(&done_load_1);
+
+  __ bind(&load_smi_2);
+  __ sar(scratch, kSmiTagSize);
+  __ push(scratch);
+  __ fild_s(Operand(esp, 0));
+  __ pop(scratch);
+
+  __ bind(&done);
+}
+
+
+void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
+                                             Label* non_float,
+                                             Register scratch) {
+  Label test_other, done;
+  // Jump to non_float unless both operands are smis or heap numbers.
+  // The scratch register is only used as a temporary for the map loads.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &test_other, not_taken);  // argument in edx is OK
+  __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
+  __ cmp(scratch, Factory::heap_number_map());
+  __ j(not_equal, non_float);  // argument in edx is not a number -> NaN
+
+  __ bind(&test_other);
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &done);  // argument in eax is OK
+  __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
+  __ cmp(scratch, Factory::heap_number_map());
+  __ j(not_equal, non_float);  // argument in eax is not a number -> NaN
+
+  // Fall-through: Both operands are numbers.
+  __ bind(&done);
+}
+
+
+void UnarySubStub::Generate(MacroAssembler* masm) {
+  Label undo;
+  Label slow;
+  Label done;
+  Label try_float;
+
+  // Check whether the value is a smi.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(not_zero, &try_float, not_taken);
+
+  // Enter runtime system if the value of the expression is zero
+  // to make sure that we switch between 0 and -0.
+  __ test(eax, Operand(eax));
+  __ j(zero, &slow, not_taken);
+
+  // The value of the expression is a smi that is not zero.  Try
+  // optimistic subtraction '0 - value'.
+  __ mov(edx, Operand(eax));
+  __ Set(eax, Immediate(0));
+  __ sub(eax, Operand(edx));
+  __ j(overflow, &undo, not_taken);
+
+  // If result is a smi we are done.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &done, taken);
+
+  // Restore eax and enter runtime system.
+  __ bind(&undo);
+  __ mov(eax, Operand(edx));
+
+  // Enter runtime system.
+  __ bind(&slow);
+  __ pop(ecx);  // pop return address
+  __ push(eax);
+  __ push(ecx);  // push return address
+  __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+
+  // Try floating point case.
+  __ bind(&try_float);
+  __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ cmp(edx, Factory::heap_number_map());
+  __ j(not_equal, &slow);
+  if (overwrite_) {
+    __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
+    __ xor_(edx, HeapNumber::kSignMask);  // Flip sign.
+    __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
+  } else {
+    __ mov(edx, Operand(eax));
+    // edx: operand
+    FloatingPointHelper::AllocateHeapNumber(masm, &undo, ebx, ecx, eax);
+    // eax: allocated 'empty' number
+    __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
+    __ xor_(ecx, HeapNumber::kSignMask);  // Flip sign.
+    __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
+    __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
+    __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
+  }
+
+  __ bind(&done);
+
+  __ StubReturn(1);
+}
+
+
+void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
+  // Check if the calling frame is an arguments adaptor frame.
+  Label adaptor;
+  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(equal, &adaptor);
+
+  // Nothing to do: The formal number of parameters has already been
+  // passed in register eax by the calling function. Just return it.
+  __ ret(0);
+
+  // Arguments adaptor case: Read the arguments length from the
+  // adaptor frame and return it.
+  __ bind(&adaptor);
+  __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ ret(0);
+}
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+  // The key is in edx and the parameter count is in eax.
+
+  // The displacement is used for skipping the frame pointer on the
+  // stack. It is the offset of the last parameter (if any) relative
+  // to the frame pointer.
+  static const int kDisplacement = 1 * kPointerSize;
+
+  // Check that the key is a smi.
+  Label slow;
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(not_zero, &slow, not_taken);
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label adaptor;
+  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
+  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(equal, &adaptor);
+
+  // Check index against formal parameters count limit passed in
+  // through register eax. Use unsigned comparison to get negative
+  // check for free.
+  __ cmp(edx, Operand(eax));
+  __ j(above_equal, &slow, not_taken);
+
+  // Read the argument from the stack and return it.
+  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);  // shifting code depends on this
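+  // Both the parameter count in eax and the key in edx are smis, i.e.
+  // value << 1, so a times_2 scale turns them into byte offsets of
+  // value * kPointerSize.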
+  __ lea(ebx, Operand(ebp, eax, times_2, 0));
+  __ neg(edx);
+  __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
+  __ ret(0);
+
+  // Arguments adaptor case: Check index against actual arguments
+  // limit found in the arguments adaptor frame. Use unsigned
+  // comparison to get negative check for free.
+  __ bind(&adaptor);
+  __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ cmp(edx, Operand(ecx));
+  __ j(above_equal, &slow, not_taken);
+
+  // Read the argument from the stack and return it.
+  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);  // shifting code depends on this
+  __ lea(ebx, Operand(ebx, ecx, times_2, 0));
+  __ neg(edx);
+  __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
+  __ ret(0);
+
+  // Slow-case: Handle non-smi or out-of-bounds access to arguments
+  // by calling the runtime system.
+  __ bind(&slow);
+  __ pop(ebx);  // Return address.
+  __ push(edx);
+  __ push(ebx);
+  __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+  // The displacement is used for skipping the return address and the
+  // frame pointer on the stack. It is the offset of the last
+  // parameter (if any) relative to the frame pointer.
+  static const int kDisplacement = 2 * kPointerSize;
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label runtime;
+  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(not_equal, &runtime);
+
+  // Patch the arguments.length and the parameters pointer.
+  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ mov(Operand(esp, 1 * kPointerSize), ecx);
+  __ lea(edx, Operand(edx, ecx, times_2, kDisplacement));
+  __ mov(Operand(esp, 2 * kPointerSize), edx);
+
+  // Do the runtime call to allocate the arguments object.
+  __ bind(&runtime);
+  __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
+}
+
+
+void CompareStub::Generate(MacroAssembler* masm) {
+  Label call_builtin, done;
+
+  // NOTICE! This code is only reached after a smi-fast-case check, so
+  // it is certain that at least one operand isn't a smi.
+
+  if (cc_ == equal) {  // Both strict and non-strict.
+    Label slow;  // Fallthrough label.
+    // Equality is almost reflexive (everything but NaN), so start by testing
+    // for "identity and not NaN".
+    {
+      Label not_identical;
+      __ cmp(eax, Operand(edx));
+      __ j(not_equal, &not_identical);
+      // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+      // so we do the second best thing - test it ourselves.
+
+      Label return_equal;
+      Label heap_number;
+      // If it's not a heap number, then return equal.
+      __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+             Immediate(Factory::heap_number_map()));
+      __ j(equal, &heap_number);
+      __ bind(&return_equal);
+      __ Set(eax, Immediate(0));
+      __ ret(0);
+
+      __ bind(&heap_number);
+      // It is a heap number, so return non-equal if it's NaN and equal if it's
+      // not NaN.
+      // The representation of NaN values has all exponent bits (52..62) set,
+      // and not all mantissa bits (0..51) clear.
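+      // In the upper word, bit 31 is the sign, bits 30..20 the exponent and
+      // bits 19..0 the top of the mantissa, so an all-ones exponent masks
+      // to 0x7ff00000.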
+      // Read top bits of double representation (second word of value).
+      __ mov(eax, FieldOperand(edx, HeapNumber::kExponentOffset));
+      // Test that exponent bits are all set.
+      __ not_(eax);
+      __ test(eax, Immediate(0x7ff00000));
+      __ j(not_zero, &return_equal);
+      __ not_(eax);
+
+      // Shift out flag and all exponent bits, retaining only mantissa.
+      __ shl(eax, 12);
+      // Or with all low-bits of mantissa.
+      __ or_(eax, FieldOperand(edx, HeapNumber::kMantissaOffset));
+      // Return zero (equal) if all bits in the mantissa are zero (it's an
+      // Infinity) and non-zero if not (it's a NaN).
+      __ ret(0);
+
+      __ bind(&not_identical);
+    }
+
+    // If we're doing a strict equality comparison, we don't have to do
+    // type conversion, so we generate code to do fast comparison for objects
+    // and oddballs. Non-smi numbers and strings still go through the usual
+    // slow-case code.
+    if (strict_) {
+      // If either is a Smi (we know that not both are), then they can only
+      // be equal if the other is a HeapNumber. If so, use the slow case.
+      {
+        Label not_smis;
+        ASSERT_EQ(0, kSmiTag);
+        ASSERT_EQ(0, Smi::FromInt(0));
+        __ mov(ecx, Immediate(kSmiTagMask));
+        __ and_(ecx, Operand(eax));
+        __ test(ecx, Operand(edx));
+        __ j(not_zero, &not_smis);
+        // One operand is a smi.
+
+        // Check whether the non-smi is a heap number.
+        ASSERT_EQ(1, kSmiTagMask);
+        // ecx still holds eax & kSmiTagMask, which is either zero or one.
+        __ sub(Operand(ecx), Immediate(0x01));
+        __ mov(ebx, edx);
+        __ xor_(ebx, Operand(eax));
+        __ and_(ebx, Operand(ecx));  // ebx holds either 0 or eax ^ edx.
+        __ xor_(ebx, Operand(eax));
+        // if eax was smi, ebx is now edx, else eax.
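+        // This is a branch-free select: ecx is an all-ones mask exactly
+        // when eax is the smi, so ((eax ^ edx) & ecx) ^ eax picks the
+        // non-smi operand.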
+
+        // Check if the non-smi operand is a heap number.
+        __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+               Immediate(Factory::heap_number_map()));
+        // If heap number, handle it in the slow case.
+        __ j(equal, &slow);
+        // Return non-equal (ebx is not zero)
+        __ mov(eax, ebx);
+        __ ret(0);
+
+        __ bind(&not_smis);
+      }
+
+      // If either operand is a JSObject or an oddball value, then they are not
+      // equal since their pointers are different.
+      // There is no test for undetectability in strict equality.
+
+      // Get the type of the first operand.
+      __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+      __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+
+      // If the first object is a JS object, we have done pointer comparison.
+      ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+      Label first_non_object;
+      __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+      __ j(less, &first_non_object);
+
+      // Return non-zero (eax is not zero)
+      Label return_not_equal;
+      ASSERT(kHeapObjectTag != 0);
+      __ bind(&return_not_equal);
+      __ ret(0);
+
+      __ bind(&first_non_object);
+      // Check for oddballs: true, false, null, undefined.
+      __ cmp(ecx, ODDBALL_TYPE);
+      __ j(equal, &return_not_equal);
+
+      __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+      __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+
+      __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+      __ j(greater_equal, &return_not_equal);
+
+      // Check for oddballs: true, false, null, undefined.
+      __ cmp(ecx, ODDBALL_TYPE);
+      __ j(equal, &return_not_equal);
+
+      // Fall through to the general case.
+    }
+    __ bind(&slow);
+  }
+
+  // Push arguments below the return address.
+  __ pop(ecx);
+  __ push(eax);
+  __ push(edx);
+  __ push(ecx);
+
+  // Inlined floating point compare.
+  // Call builtin if operands are not floating point or smi.
+  Label check_for_symbols;
+  Label unordered;
+  if (CpuFeatures::IsSupported(CpuFeatures::SSE2)) {
+    CpuFeatures::Scope use_sse2(CpuFeatures::SSE2);
+    CpuFeatures::Scope use_cmov(CpuFeatures::CMOV);
+
+    FloatingPointHelper::LoadSse2Operands(masm, &check_for_symbols);
+    __ comisd(xmm0, xmm1);
+
+    // Jump to builtin for NaN.
+    __ j(parity_even, &unordered, not_taken);
+    __ mov(eax, 0);  // equal
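+    // comisd sets ZF/CF like an unsigned compare, so the cmovs below use
+    // the above/below conditions to pick the greater/less results.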
+    __ mov(ecx, Immediate(Smi::FromInt(1)));
+    __ cmov(above, eax, Operand(ecx));
+    __ mov(ecx, Immediate(Smi::FromInt(-1)));
+    __ cmov(below, eax, Operand(ecx));
+    __ ret(2 * kPointerSize);
+  } else {
+    FloatingPointHelper::CheckFloatOperands(masm, &check_for_symbols, ebx);
+    FloatingPointHelper::LoadFloatOperands(masm, ecx);
+    __ FCmp();
+
+    // Jump to builtin for NaN.
+    __ j(parity_even, &unordered, not_taken);
+
+    Label below_lbl, above_lbl;
+    // Return a result of -1, 0, or 1, to indicate result of comparison.
+    __ j(below, &below_lbl, not_taken);
+    __ j(above, &above_lbl, not_taken);
+
+    __ xor_(eax, Operand(eax));  // equal
+    // Both arguments were pushed in case a runtime call was needed.
+    __ ret(2 * kPointerSize);
+
+    __ bind(&below_lbl);
+    __ mov(eax, Immediate(Smi::FromInt(-1)));
+    __ ret(2 * kPointerSize);
+
+    __ bind(&above_lbl);
+    __ mov(eax, Immediate(Smi::FromInt(1)));
+    __ ret(2 * kPointerSize);  // eax, edx were pushed
+  }
+  // If one of the numbers was NaN, then the result is always false.
+  // The cc is never not-equal.
+  __ bind(&unordered);
+  ASSERT(cc_ != not_equal);
+  if (cc_ == less || cc_ == less_equal) {
+    __ mov(eax, Immediate(Smi::FromInt(1)));
+  } else {
+    __ mov(eax, Immediate(Smi::FromInt(-1)));
+  }
+  __ ret(2 * kPointerSize);  // eax, edx were pushed
+
+  // Fast negative check for symbol-to-symbol equality.
+  __ bind(&check_for_symbols);
+  if (cc_ == equal) {
+    BranchIfNonSymbol(masm, &call_builtin, eax, ecx);
+    BranchIfNonSymbol(masm, &call_builtin, edx, ecx);
+
+    // We've already checked for object identity, so if both operands
+    // are symbols they aren't equal. Register eax already holds a
+    // non-zero value, which indicates not equal, so just return.
+    __ ret(2 * kPointerSize);
+  }
+
+  __ bind(&call_builtin);
+  // must swap argument order
+  __ pop(ecx);
+  __ pop(edx);
+  __ pop(eax);
+  __ push(edx);
+  __ push(eax);
+
+  // Figure out which native to call and setup the arguments.
+  Builtins::JavaScript builtin;
+  if (cc_ == equal) {
+    builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+  } else {
+    builtin = Builtins::COMPARE;
+    int ncr;  // NaN compare result
+    if (cc_ == less || cc_ == less_equal) {
+      ncr = GREATER;
+    } else {
+      ASSERT(cc_ == greater || cc_ == greater_equal);  // remaining cases
+      ncr = LESS;
+    }
+    __ push(Immediate(Smi::FromInt(ncr)));
+  }
+
+  // Restore return address on the stack.
+  __ push(ecx);
+
+  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+  // tagged as a small integer.
+  __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+}
+
+
+void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
+                                    Label* label,
+                                    Register object,
+                                    Register scratch) {
+  __ test(object, Immediate(kSmiTagMask));
+  __ j(zero, label);
+  __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
+  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+  __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
+  __ cmp(scratch, kSymbolTag | kStringTag);
+  __ j(not_equal, label);
+}
+
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+  // Because builtins always remove the receiver from the stack, we
+  // have to fake one to avoid underflowing the stack. The receiver
+  // must be inserted below the return address on the stack so we
+  // temporarily store that in a register.
+  __ pop(eax);
+  __ push(Immediate(Smi::FromInt(0)));
+  __ push(eax);
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1, 1);
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+  Label slow;
+
+  // Get the function to call from the stack.
+  // +2 ~ receiver, return address
+  __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
+
+  // Check that the function really is a JavaScript function.
+  __ test(edi, Immediate(kSmiTagMask));
+  __ j(zero, &slow, not_taken);
+  // Goto slow case if we do not have a function.
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+  __ j(not_equal, &slow, not_taken);
+
+  // Fast-case: Just invoke the function.
+  ParameterCount actual(argc_);
+  __ InvokeFunction(edi, actual, JUMP_FUNCTION);
+
+  // Slow-case: Non-function called.
+  __ bind(&slow);
+  __ Set(eax, Immediate(argc_));
+  __ Set(ebx, Immediate(0));
+  __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
+  Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+  __ jmp(adaptor, RelocInfo::CODE_TARGET);
+}
+
+
+int CEntryStub::MinorKey() {
+  ASSERT(result_size_ <= 2);
+  // Result returned in eax, or eax+edx if result_size_ is 2.
+  return 0;
+}
+
+
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+  // eax holds the exception.
+
+  // Adjust this code if not the case.
+  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+  // Drop the sp to the top of the handler.
+  ExternalReference handler_address(Top::k_handler_address);
+  __ mov(esp, Operand::StaticVariable(handler_address));
+
+  // Restore next handler and frame pointer, discard handler state.
+  ASSERT(StackHandlerConstants::kNextOffset == 0);
+  __ pop(Operand::StaticVariable(handler_address));
+  ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
+  __ pop(ebp);
+  __ pop(edx);  // Remove state.
+
+  // Before returning we restore the context from the frame pointer if
+  // not NULL.  The frame pointer is NULL in the exception handler of
+  // a JS entry frame.
+  __ xor_(esi, Operand(esi));  // Tentatively set context pointer to NULL.
+  Label skip;
+  __ cmp(ebp, 0);
+  __ j(equal, &skip, not_taken);
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  __ bind(&skip);
+
+  ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+  __ ret(0);
+}
+
+
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+                              Label* throw_normal_exception,
+                              Label* throw_termination_exception,
+                              Label* throw_out_of_memory_exception,
+                              StackFrame::Type frame_type,
+                              bool do_gc,
+                              bool always_allocate_scope) {
+  // eax: result parameter for PerformGC, if any
+  // ebx: pointer to C function  (C callee-saved)
+  // ebp: frame pointer  (restored after C call)
+  // esp: stack pointer  (restored after C call)
+  // edi: number of arguments including receiver  (C callee-saved)
+  // esi: pointer to the first argument (C callee-saved)
+
+  if (do_gc) {
+    __ mov(Operand(esp, 0 * kPointerSize), eax);  // Result.
+    __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
+  }
+
+  ExternalReference scope_depth =
+      ExternalReference::heap_always_allocate_scope_depth();
+  if (always_allocate_scope) {
+    __ inc(Operand::StaticVariable(scope_depth));
+  }
+
+  // Call C function.
+  __ mov(Operand(esp, 0 * kPointerSize), edi);  // argc.
+  __ mov(Operand(esp, 1 * kPointerSize), esi);  // argv.
+  __ call(Operand(ebx));
+  // Result is in eax or edx:eax - do not destroy these registers!
+
+  if (always_allocate_scope) {
+    __ dec(Operand::StaticVariable(scope_depth));
+  }
+
+  // Make sure we're not trying to return 'the hole' from the runtime
+  // call as this may lead to crashes in the IC code later.
+  if (FLAG_debug_code) {
+    Label okay;
+    __ cmp(eax, Factory::the_hole_value());
+    __ j(not_equal, &okay);
+    __ int3();
+    __ bind(&okay);
+  }
+
+  // Check for failure result.
+  Label failure_returned;
+  ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+  __ lea(ecx, Operand(eax, 1));
+  // Lower 2 bits of ecx are 0 iff eax has failure tag.
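+  // A failure has both of its low tag bits set (see the ASSERT above), so
+  // adding one clears them; smis and heap object pointers keep at least
+  // one of those bits set after the increment.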
+  __ test(ecx, Immediate(kFailureTagMask));
+  __ j(zero, &failure_returned, not_taken);
+
+  // Exit the JavaScript to C++ exit frame.
+  __ LeaveExitFrame(frame_type);
+  __ ret(0);
+
+  // Handling of failure.
+  __ bind(&failure_returned);
+
+  Label retry;
+  // If the returned exception is RETRY_AFTER_GC continue at retry label
+  ASSERT(Failure::RETRY_AFTER_GC == 0);
+  __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
+  __ j(zero, &retry, taken);
+
+  // Special handling of out of memory exceptions.
+  __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
+  __ j(equal, throw_out_of_memory_exception);
+
+  // Retrieve the pending exception and clear the variable.
+  ExternalReference pending_exception_address(Top::k_pending_exception_address);
+  __ mov(eax, Operand::StaticVariable(pending_exception_address));
+  __ mov(edx,
+         Operand::StaticVariable(ExternalReference::the_hole_value_location()));
+  __ mov(Operand::StaticVariable(pending_exception_address), edx);
+
+  // Special handling of termination exceptions which are uncatchable
+  // by javascript code.
+  __ cmp(eax, Factory::termination_exception());
+  __ j(equal, throw_termination_exception);
+
+  // Handle normal exception.
+  __ jmp(throw_normal_exception);
+
+  // Retry.
+  __ bind(&retry);
+}
+
+
+void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
+                                          UncatchableExceptionType type) {
+  // Adjust this code if not the case.
+  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+  // Drop sp to the top stack handler.
+  ExternalReference handler_address(Top::k_handler_address);
+  __ mov(esp, Operand::StaticVariable(handler_address));
+
+  // Unwind the handlers until the ENTRY handler is found.
+  Label loop, done;
+  __ bind(&loop);
+  // Load the type of the current stack handler.
+  const int kStateOffset = StackHandlerConstants::kStateOffset;
+  __ cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY));
+  __ j(equal, &done);
+  // Fetch the next handler in the list.
+  const int kNextOffset = StackHandlerConstants::kNextOffset;
+  __ mov(esp, Operand(esp, kNextOffset));
+  __ jmp(&loop);
+  __ bind(&done);
+
+  // Set the top handler address to next handler past the current ENTRY handler.
+  ASSERT(StackHandlerConstants::kNextOffset == 0);
+  __ pop(Operand::StaticVariable(handler_address));
+
+  if (type == OUT_OF_MEMORY) {
+    // Set external caught exception to false.
+    ExternalReference external_caught(Top::k_external_caught_exception_address);
+    __ mov(eax, false);
+    __ mov(Operand::StaticVariable(external_caught), eax);
+
+    // Set pending exception and eax to out of memory exception.
+    ExternalReference pending_exception(Top::k_pending_exception_address);
+    __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
+    __ mov(Operand::StaticVariable(pending_exception), eax);
+  }
+
+  // Clear the context pointer.
+  __ xor_(esi, Operand(esi));
+
+  // Restore fp from handler and discard handler state.
+  ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
+  __ pop(ebp);
+  __ pop(edx);  // State.
+
+  ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+  __ ret(0);
+}
+
+
+void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
+  // eax: number of arguments including receiver
+  // ebx: pointer to C function  (C callee-saved)
+  // ebp: frame pointer  (restored after C call)
+  // esp: stack pointer  (restored after C call)
+  // esi: current context (C callee-saved)
+  // edi: JS function of the caller (C callee-saved)
+
+  // NOTE: Invocations of builtins may return failure objects instead
+  // of a proper result. The builtin entry handles this by performing
+  // a garbage collection and retrying the builtin (twice).
+
+  StackFrame::Type frame_type = is_debug_break ?
+      StackFrame::EXIT_DEBUG :
+      StackFrame::EXIT;
+
+  // Enter the exit frame that transitions from JavaScript to C++.
+  __ EnterExitFrame(frame_type);
+
+  // eax: result parameter for PerformGC, if any (setup below)
+  // ebx: pointer to builtin function  (C callee-saved)
+  // ebp: frame pointer  (restored after C call)
+  // esp: stack pointer  (restored after C call)
+  // edi: number of arguments including receiver (C callee-saved)
+  // esi: argv pointer (C callee-saved)
+
+  Label throw_normal_exception;
+  Label throw_termination_exception;
+  Label throw_out_of_memory_exception;
+
+  // Call into the runtime system.
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_termination_exception,
+               &throw_out_of_memory_exception,
+               frame_type,
+               false,
+               false);
+
+  // Do space-specific GC and retry runtime call.
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_termination_exception,
+               &throw_out_of_memory_exception,
+               frame_type,
+               true,
+               false);
+
+  // Do full GC and retry runtime call one final time.
+  Failure* failure = Failure::InternalError();
+  __ mov(eax, Immediate(reinterpret_cast<int32_t>(failure)));
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_termination_exception,
+               &throw_out_of_memory_exception,
+               frame_type,
+               true,
+               true);
+
+  __ bind(&throw_out_of_memory_exception);
+  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+
+  __ bind(&throw_termination_exception);
+  GenerateThrowUncatchable(masm, TERMINATION);
+
+  __ bind(&throw_normal_exception);
+  GenerateThrowTOS(masm);
+}
+
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+  Label invoke, exit;
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  Label not_outermost_js, not_outermost_js_2;
+#endif
+
+  // Setup frame.
+  __ push(ebp);
+  __ mov(ebp, Operand(esp));
+
+  // Push marker in two places.
+  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+  __ push(Immediate(Smi::FromInt(marker)));  // context slot
+  __ push(Immediate(Smi::FromInt(marker)));  // function slot
+  // Save callee-saved registers (C calling conventions).
+  __ push(edi);
+  __ push(esi);
+  __ push(ebx);
+
+  // Save copies of the top frame descriptor on the stack.
+  ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
+  __ push(Operand::StaticVariable(c_entry_fp));
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // If this is the outermost JS call, set js_entry_sp value.
+  ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
+  __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
+  __ j(not_equal, &not_outermost_js);
+  __ mov(Operand::StaticVariable(js_entry_sp), ebp);
+  __ bind(&not_outermost_js);
+#endif
+
+  // Call a faked try-block that does the invoke.
+  __ call(&invoke);
+
+  // Caught exception: Store result (exception) in the pending
+  // exception field in the JSEnv and return a failure sentinel.
+  ExternalReference pending_exception(Top::k_pending_exception_address);
+  __ mov(Operand::StaticVariable(pending_exception), eax);
+  __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
+  __ jmp(&exit);
+
+  // Invoke: Link this frame into the handler chain.
+  __ bind(&invoke);
+  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+
+  // Clear any pending exceptions.
+  __ mov(edx,
+         Operand::StaticVariable(ExternalReference::the_hole_value_location()));
+  __ mov(Operand::StaticVariable(pending_exception), edx);
+
+  // Fake a receiver (NULL).
+  __ push(Immediate(0));  // receiver
+
+  // Invoke the function by calling through JS entry trampoline
+  // builtin and pop the faked function when we return. Notice that we
+  // cannot store a reference to the trampoline code directly in this
+  // stub, because the builtin stubs may not have been generated yet.
+  if (is_construct) {
+    ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
+    __ mov(edx, Immediate(construct_entry));
+  } else {
+    ExternalReference entry(Builtins::JSEntryTrampoline);
+    __ mov(edx, Immediate(entry));
+  }
+  __ mov(edx, Operand(edx, 0));  // deref address
+  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+  __ call(Operand(edx));
+
+  // Unlink this frame from the handler chain.
+  __ pop(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
+  // Pop next_sp.
+  __ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // If current EBP value is the same as js_entry_sp value, it means that
+  // the current function is the outermost.
+  __ cmp(ebp, Operand::StaticVariable(js_entry_sp));
+  __ j(not_equal, &not_outermost_js_2);
+  __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
+  __ bind(&not_outermost_js_2);
+#endif
+
+  // Restore the top frame descriptor from the stack.
+  __ bind(&exit);
+  __ pop(Operand::StaticVariable(ExternalReference(Top::k_c_entry_fp_address)));
+
+  // Restore callee-saved registers (C calling conventions).
+  __ pop(ebx);
+  __ pop(esi);
+  __ pop(edi);
+  __ add(Operand(esp), Immediate(2 * kPointerSize));  // remove markers
+
+  // Restore frame pointer and return.
+  __ pop(ebp);
+  __ ret(0);
+}
+
+
+void InstanceofStub::Generate(MacroAssembler* masm) {
+  // Get the object - go slow case if it's a smi.
+  Label slow;
+  __ mov(eax, Operand(esp, 2 * kPointerSize));  // 2 ~ return address, function
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &slow, not_taken);
+
+  // Check that the left-hand side is a JS object.
+  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));  // eax - object map
+  __ movzx_b(ecx, FieldOperand(eax, Map::kInstanceTypeOffset));  // ecx - type
+  __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+  __ j(less, &slow, not_taken);
+  __ cmp(ecx, LAST_JS_OBJECT_TYPE);
+  __ j(greater, &slow, not_taken);
+
+  // Get the prototype of the function.
+  __ mov(edx, Operand(esp, 1 * kPointerSize));  // 1 ~ return address
+  __ TryGetFunctionPrototype(edx, ebx, ecx, &slow);
+
+  // Check that the function prototype is a JS object.
+  __ test(ebx, Immediate(kSmiTagMask));
+  __ j(zero, &slow, not_taken);
+  __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
+  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+  __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+  __ j(less, &slow, not_taken);
+  __ cmp(ecx, LAST_JS_OBJECT_TYPE);
+  __ j(greater, &slow, not_taken);
+
+  // Register mapping: eax is object map and ebx is function prototype.
+  __ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset));
+
+  // Loop through the prototype chain looking for the function prototype.
+  Label loop, is_instance, is_not_instance;
+  __ bind(&loop);
+  __ cmp(ecx, Operand(ebx));
+  __ j(equal, &is_instance);
+  __ cmp(Operand(ecx), Immediate(Factory::null_value()));
+  __ j(equal, &is_not_instance);
+  __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
+  __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset));
+  __ jmp(&loop);
+
+  __ bind(&is_instance);
+  __ Set(eax, Immediate(0));
+  __ ret(2 * kPointerSize);
+
+  __ bind(&is_not_instance);
+  __ Set(eax, Immediate(Smi::FromInt(1)));
+  __ ret(2 * kPointerSize);
+
+  // Slow-case: Go through the JavaScript implementation.
+  __ bind(&slow);
+  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
+}
+
+
+int CompareStub::MinorKey() {
+  // Encode the two parameters in a unique 16 bit value.
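+  // For example (assuming the ia32 condition code equal == 4), a strict
+  // equality comparison encodes as (4 << 1) | 1 == 9.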
+  ASSERT(static_cast<unsigned>(cc_) < (1 << 15));
+  return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
+}
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
new file mode 100644
index 0000000..142a5a1
--- /dev/null
+++ b/src/ia32/codegen-ia32.h
@@ -0,0 +1,664 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_CODEGEN_IA32_H_
+#define V8_IA32_CODEGEN_IA32_H_
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations
+class DeferredCode;
+class RegisterAllocator;
+class RegisterFile;
+
+enum InitState { CONST_INIT, NOT_CONST_INIT };
+enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+
+
+// -------------------------------------------------------------------------
+// Reference support
+
+// A reference is a C++ stack-allocated object that keeps an ECMA
+// reference on the execution stack while in scope. For variables
+// the reference is empty, indicating that it isn't necessary to
+// store state on the stack for keeping track of references to those.
+// For properties, we keep either one (named) or two (indexed) values
+// on the execution stack to represent the reference.
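+// For example (illustrative), a named load such as o.x keeps one value (the
+// receiver) on the execution stack, while a keyed load such as o[i] keeps
+// two (the receiver and the key).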
+
+class Reference BASE_EMBEDDED {
+ public:
+  // The values of the types are important; see size().
+  enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
+  Reference(CodeGenerator* cgen, Expression* expression);
+  ~Reference();
+
+  Expression* expression() const { return expression_; }
+  Type type() const { return type_; }
+  void set_type(Type value) {
+    ASSERT(type_ == ILLEGAL);
+    type_ = value;
+  }
+
+  // The size the reference takes up on the stack.
+  int size() const { return (type_ == ILLEGAL) ? 0 : type_; }
+
+  bool is_illegal() const { return type_ == ILLEGAL; }
+  bool is_slot() const { return type_ == SLOT; }
+  bool is_property() const { return type_ == NAMED || type_ == KEYED; }
+
+  // Return the name.  Only valid for named property references.
+  Handle<String> GetName();
+
+  // Generate code to push the value of the reference on top of the
+  // expression stack.  The reference is expected to be already on top of
+  // the expression stack, and it is left in place with its value above it.
+  void GetValue(TypeofState typeof_state);
+
+  // Like GetValue except that the slot is expected to be written to before
+  // being read from again.  The value of the reference may be invalidated,
+  // causing subsequent attempts to read it to fail.
+  void TakeValue(TypeofState typeof_state);
+
+  // Generate code to store the value on top of the expression stack in the
+  // reference.  The reference is expected to be immediately below the value
+  // on the expression stack.  The stored value is left in place (with the
+  // reference intact below it) to support chained assignments.
+  void SetValue(InitState init_state);
+
+ private:
+  CodeGenerator* cgen_;
+  Expression* expression_;
+  Type type_;
+};
+
+
+// -------------------------------------------------------------------------
+// Control destinations.
+
+// A control destination encapsulates a pair of jump targets and a
+// flag indicating which one is the preferred fall-through.  The
+// preferred fall-through must be unbound, the other may be already
+// bound (ie, a backward target).
+//
+// The true and false targets may be jumped to unconditionally or
+// control may split conditionally.  Unconditional jumping and
+// splitting should be emitted in tail position (as the last thing
+// when compiling an expression) because they can cause either label
+// to be bound or the non-fall through to be jumped to leaving an
+// invalid virtual frame.
+//
+// The labels in the control destination can be extracted and
+// manipulated normally without affecting the state of the
+// destination.
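+//
+// A sketch of typical use (hypothetical names, not taken from this file):
+//
+//   JumpTarget if_true;
+//   JumpTarget if_false;
+//   ControlDestination dest(&if_true, &if_false, true);
+//   ... emit a comparison ...
+//   dest.Split(equal);  // branch to if_false on not_equal, bind if_true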
+
+class ControlDestination BASE_EMBEDDED {
+ public:
+  ControlDestination(JumpTarget* true_target,
+                     JumpTarget* false_target,
+                     bool true_is_fall_through)
+      : true_target_(true_target),
+        false_target_(false_target),
+        true_is_fall_through_(true_is_fall_through),
+        is_used_(false) {
+    ASSERT(true_is_fall_through ? !true_target->is_bound()
+                                : !false_target->is_bound());
+  }
+
+  // Accessors for the jump targets.  Directly jumping or branching to
+  // or binding the targets will not update the destination's state.
+  JumpTarget* true_target() const { return true_target_; }
+  JumpTarget* false_target() const { return false_target_; }
+
+  // True if the destination has been jumped to unconditionally or
+  // control has been split to both targets.  This predicate does not
+  // test whether the targets have been extracted and manipulated as
+  // raw jump targets.
+  bool is_used() const { return is_used_; }
+
+  // True if the destination is used and the true target (respectively
+  // false target) was the fall through.  If the target is backward,
+  // "fall through" included jumping unconditionally to it.
+  bool true_was_fall_through() const {
+    return is_used_ && true_is_fall_through_;
+  }
+
+  bool false_was_fall_through() const {
+    return is_used_ && !true_is_fall_through_;
+  }
+
+  // Emit a branch to one of the true or false targets, and bind the
+  // other target.  Because this binds the fall-through target, it
+  // should be emitted in tail position (as the last thing when
+  // compiling an expression).
+  void Split(Condition cc) {
+    ASSERT(!is_used_);
+    if (true_is_fall_through_) {
+      false_target_->Branch(NegateCondition(cc));
+      true_target_->Bind();
+    } else {
+      true_target_->Branch(cc);
+      false_target_->Bind();
+    }
+    is_used_ = true;
+  }
+
+  // Emit an unconditional jump in tail position, to the true target
+  // (if the argument is true) or the false target.  The "jump" will
+  // actually bind the jump target if it is forward, jump to it if it
+  // is backward.
+  void Goto(bool where) {
+    ASSERT(!is_used_);
+    JumpTarget* target = where ? true_target_ : false_target_;
+    if (target->is_bound()) {
+      target->Jump();
+    } else {
+      target->Bind();
+    }
+    is_used_ = true;
+    true_is_fall_through_ = where;
+  }
+
+  // Mark this jump target as used as if Goto had been called, but
+  // without generating a jump or binding a label (the control effect
+  // should have already happened).  This is used when the left
+  // subexpression of one of the short-circuit boolean operators is
+  // compiled.
+  void Use(bool where) {
+    ASSERT(!is_used_);
+    ASSERT((where ? true_target_ : false_target_)->is_bound());
+    is_used_ = true;
+    true_is_fall_through_ = where;
+  }
+
+  // Swap the true and false targets but keep the same actual label as
+  // the fall through.  This is used when compiling negated
+  // expressions, where we want to swap the targets but preserve the
+  // state.
+  void Invert() {
+    JumpTarget* temp_target = true_target_;
+    true_target_ = false_target_;
+    false_target_ = temp_target;
+
+    true_is_fall_through_ = !true_is_fall_through_;
+  }
+
+ private:
+  // True and false jump targets.
+  JumpTarget* true_target_;
+  JumpTarget* false_target_;
+
+  // Before using the destination: true if the true target is the
+  // preferred fall through, false if the false target is.  After
+  // using the destination: true if the true target was actually used
+  // as the fall through, false if the false target was.
+  bool true_is_fall_through_;
+
+  // True if the Split or Goto functions have been called.
+  bool is_used_;
+};
+
+
+// -------------------------------------------------------------------------
+// Code generation state
+
+// The state is passed down the AST by the code generator (and back up, in
+// the form of the state of the jump target pair).  It is threaded through
+// the call stack.  Constructing a state implicitly pushes it on the owning
+// code generator's stack of states, and destroying one implicitly pops it.
+//
+// The code generator state is only used for expressions, so statements have
+// the initial state.
+
+class CodeGenState BASE_EMBEDDED {
+ public:
+  // Create an initial code generator state.  Destroying the initial state
+  // leaves the code generator with a NULL state.
+  explicit CodeGenState(CodeGenerator* owner);
+
+  // Create a code generator state based on a code generator's current
+  // state.  The new state may or may not be inside a typeof, and has its
+  // own control destination.
+  CodeGenState(CodeGenerator* owner,
+               TypeofState typeof_state,
+               ControlDestination* destination);
+
+  // Destroy a code generator state and restore the owning code generator's
+  // previous state.
+  ~CodeGenState();
+
+  // Accessors for the state.
+  TypeofState typeof_state() const { return typeof_state_; }
+  ControlDestination* destination() const { return destination_; }
+
+ private:
+  // The owning code generator.
+  CodeGenerator* owner_;
+
+  // A flag indicating whether we are compiling the immediate subexpression
+  // of a typeof expression.
+  TypeofState typeof_state_;
+
+  // A control destination in case the expression has a control-flow
+  // effect.
+  ControlDestination* destination_;
+
+  // The previous state of the owning code generator, restored when
+  // this state is destroyed.
+  CodeGenState* previous_;
+};
+
+
+// -------------------------------------------------------------------------
+// Arguments allocation mode
+
+enum ArgumentsAllocationMode {
+  NO_ARGUMENTS_ALLOCATION,
+  EAGER_ARGUMENTS_ALLOCATION,
+  LAZY_ARGUMENTS_ALLOCATION
+};
+
+
+// -------------------------------------------------------------------------
+// CodeGenerator
+
+class CodeGenerator: public AstVisitor {
+ public:
+  // Takes a function literal and generates code for it. This function
+  // should only be called by compiler.cc.
+  static Handle<Code> MakeCode(FunctionLiteral* fun,
+                               Handle<Script> script,
+                               bool is_eval);
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  static bool ShouldGenerateLog(Expression* type);
+#endif
+
+  static void SetFunctionInfo(Handle<JSFunction> fun,
+                              FunctionLiteral* lit,
+                              bool is_toplevel,
+                              Handle<Script> script);
+
+  // Accessors
+  MacroAssembler* masm() { return masm_; }
+
+  VirtualFrame* frame() const { return frame_; }
+
+  bool has_valid_frame() const { return frame_ != NULL; }
+
+  // Set the virtual frame to be new_frame, with non-frame register
+  // reference counts given by non_frame_registers.  The non-frame
+  // register reference counts of the old frame are returned in
+  // non_frame_registers.
+  void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
+
+  void DeleteFrame();
+
+  RegisterAllocator* allocator() const { return allocator_; }
+
+  CodeGenState* state() { return state_; }
+  void set_state(CodeGenState* state) { state_ = state; }
+
+  void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
+
+  bool in_spilled_code() const { return in_spilled_code_; }
+  void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
+
+ private:
+  // Construction/Destruction
+  CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
+  virtual ~CodeGenerator() { delete masm_; }
+
+  // Accessors
+  Scope* scope() const { return scope_; }
+  bool is_eval() { return is_eval_; }
+
+  // Generating deferred code.
+  void ProcessDeferred();
+
+  // State
+  TypeofState typeof_state() const { return state_->typeof_state(); }
+  ControlDestination* destination() const { return state_->destination(); }
+
+  // Track loop nesting level.
+  int loop_nesting() const { return loop_nesting_; }
+  void IncrementLoopNesting() { loop_nesting_++; }
+  void DecrementLoopNesting() { loop_nesting_--; }
+
+  // Node visitors.
+  void VisitStatements(ZoneList<Statement*>* statements);
+
+#define DEF_VISIT(type) \
+  void Visit##type(type* node);
+  AST_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+  // Visit a statement and then spill the virtual frame if control flow can
+  // reach the end of the statement (ie, it does not exit via break,
+  // continue, return, or throw).  This function is used temporarily while
+  // the code generator is being transformed.
+  void VisitAndSpill(Statement* statement);
+
+  // Visit a list of statements and then spill the virtual frame if control
+  // flow can reach the end of the list.
+  void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
+
+  // Main code generation function
+  void GenCode(FunctionLiteral* fun);
+
+  // Generate the return sequence code.  Should be called no more than
+  // once per compiled function, immediately after binding the return
+  // target (which cannot be done more than once).
+  void GenerateReturnSequence(Result* return_value);
+
+  // Returns the arguments allocation mode.
+  ArgumentsAllocationMode ArgumentsMode() const;
+
+  // Store the arguments object and allocate it if necessary.
+  Result StoreArgumentsObject(bool initial);
+
+  // The following are used by class Reference.
+  void LoadReference(Reference* ref);
+  void UnloadReference(Reference* ref);
+
+  Operand ContextOperand(Register context, int index) const {
+    return Operand(context, Context::SlotOffset(index));
+  }
+
+  Operand SlotOperand(Slot* slot, Register tmp);
+
+  Operand ContextSlotOperandCheckExtensions(Slot* slot,
+                                            Result tmp,
+                                            JumpTarget* slow);
+
+  // Expressions
+  Operand GlobalObject() const {
+    return ContextOperand(esi, Context::GLOBAL_INDEX);
+  }
+
+  void LoadCondition(Expression* x,
+                     TypeofState typeof_state,
+                     ControlDestination* destination,
+                     bool force_control);
+  void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+  void LoadGlobal();
+  void LoadGlobalReceiver();
+
+  // Generate code to push the value of an expression on top of the frame
+  // and then spill the frame fully to memory.  This function is used
+  // temporarily while the code generator is being transformed.
+  void LoadAndSpill(Expression* expression,
+                    TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+
+  // Read a value from a slot and leave it on top of the expression stack.
+  void LoadFromSlot(Slot* slot, TypeofState typeof_state);
+  void LoadFromSlotCheckForArguments(Slot* slot, TypeofState typeof_state);
+  Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
+                                           TypeofState typeof_state,
+                                           JumpTarget* slow);
+
+  // Store the value on top of the expression stack into a slot, leaving the
+  // value in place.
+  void StoreToSlot(Slot* slot, InitState init_state);
+
+  // Special code for typeof expressions: Unfortunately, we must
+  // be careful when loading the expression in 'typeof'
+  // expressions. We are not allowed to throw reference errors for
+  // non-existing properties of the global object, so we must make it
+  // look like an explicit property access, instead of an access
+  // through the context chain.
+  void LoadTypeofExpression(Expression* x);
+
+  // Translate the value on top of the frame into control flow to the
+  // control destination.
+  void ToBoolean(ControlDestination* destination);
+
+  void GenericBinaryOperation(
+      Token::Value op,
+      SmiAnalysis* type,
+      OverwriteMode overwrite_mode);
+
+  // If possible, combine two constant smi values using op to produce
+  // a smi result, and push it on the virtual frame, all at compile time.
+  // Returns true if it succeeds.  Otherwise it has no effect.
+  bool FoldConstantSmis(Token::Value op, int left, int right);
+
+  // Emit code to perform a binary operation on a constant
+  // smi and a likely smi.  Consumes the Result *operand.
+  void ConstantSmiBinaryOperation(Token::Value op,
+                                  Result* operand,
+                                  Handle<Object> constant_operand,
+                                  SmiAnalysis* type,
+                                  bool reversed,
+                                  OverwriteMode overwrite_mode);
+
+  // Emit code to perform a binary operation on two likely smis.
+  // The code to handle smi arguments is produced inline.
+  // Consumes the Results *left and *right.
+  void LikelySmiBinaryOperation(Token::Value op,
+                                Result* left,
+                                Result* right,
+                                OverwriteMode overwrite_mode);
+
+  void Comparison(Condition cc,
+                  bool strict,
+                  ControlDestination* destination);
+
+  // To prevent long attacker-controlled byte sequences, integer constants
+  // from the JavaScript source are loaded in two parts if they are larger
+  // than 16 bits.
+  static const int kMaxSmiInlinedBits = 16;
+  bool IsUnsafeSmi(Handle<Object> value);
+  // Load an integer constant x into a register target using
+  // at most 16 bits of user-controlled data per assembly operation.
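+  // As an illustrative sketch (not necessarily the exact sequence emitted),
+  // a constant such as 0x12345678 could be materialized as
+  //   mov target, 0x12340000
+  //   xor target, 0x00005678
+  // so that no single instruction carries more than 16 attacker-chosen bits.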
+  void LoadUnsafeSmi(Register target, Handle<Object> value);
+
+  void CallWithArguments(ZoneList<Expression*>* arguments, int position);
+
+  // Use an optimized version of Function.prototype.apply that avoids
+  // allocating the arguments object and just copies the arguments
+  // from the stack.
+  void CallApplyLazy(Property* apply,
+                     Expression* receiver,
+                     VariableProxy* arguments,
+                     int position);
+
+  void CheckStack();
+
+  struct InlineRuntimeLUT {
+    void (CodeGenerator::*method)(ZoneList<Expression*>*);
+    const char* name;
+  };
+
+  static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
+  bool CheckForInlineRuntimeCall(CallRuntime* node);
+  static bool PatchInlineRuntimeEntry(Handle<String> name,
+                                      const InlineRuntimeLUT& new_entry,
+                                      InlineRuntimeLUT* old_entry);
+
+  Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
+  void ProcessDeclarations(ZoneList<Declaration*>* declarations);
+
+  Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
+
+  // Declare global variables and functions in the given array of
+  // name/value pairs.
+  void DeclareGlobals(Handle<FixedArray> pairs);
+
+  // Instantiate the function boilerplate.
+  void InstantiateBoilerplate(Handle<JSFunction> boilerplate);
+
+  // Support for type checks.
+  void GenerateIsSmi(ZoneList<Expression*>* args);
+  void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
+  void GenerateIsArray(ZoneList<Expression*>* args);
+
+  // Support for construct call checks.
+  void GenerateIsConstructCall(ZoneList<Expression*>* args);
+
+  // Support for arguments.length and arguments[?].
+  void GenerateArgumentsLength(ZoneList<Expression*>* args);
+  void GenerateArgumentsAccess(ZoneList<Expression*>* args);
+
+  // Support for accessing the class and value fields of an object.
+  void GenerateClassOf(ZoneList<Expression*>* args);
+  void GenerateValueOf(ZoneList<Expression*>* args);
+  void GenerateSetValueOf(ZoneList<Expression*>* args);
+
+  // Fast support for charCodeAt(n).
+  void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
+
+  // Fast support for object equality testing.
+  void GenerateObjectEquals(ZoneList<Expression*>* args);
+
+  void GenerateLog(ZoneList<Expression*>* args);
+
+  void GenerateGetFramePointer(ZoneList<Expression*>* args);
+
+  // Fast support for Math.random().
+  void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
+
+  // Fast support for Math.sin and Math.cos.
+  enum MathOp { SIN, COS };
+  void GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args);
+  inline void GenerateMathSin(ZoneList<Expression*>* args);
+  inline void GenerateMathCos(ZoneList<Expression*>* args);
+
+  // Methods used to indicate the source position that code is being
+  // generated for.  Source positions are collected by the assembler and
+  // emitted with the relocation information.
+  void CodeForFunctionPosition(FunctionLiteral* fun);
+  void CodeForReturnPosition(FunctionLiteral* fun);
+  void CodeForStatementPosition(Statement* stmt);
+  void CodeForSourcePosition(int pos);
+
+#ifdef DEBUG
+  // True if the registers are valid for entry to a block.  There should
+  // be no frame-external references to (non-reserved) registers.
+  bool HasValidEntryRegisters();
+#endif
+
+  bool is_eval_;  // Tells whether code is generated for eval.
+  Handle<Script> script_;
+  ZoneList<DeferredCode*> deferred_;
+
+  // Assembler
+  MacroAssembler* masm_;  // to generate code
+
+  // Code generation state
+  Scope* scope_;
+  VirtualFrame* frame_;
+  RegisterAllocator* allocator_;
+  CodeGenState* state_;
+  int loop_nesting_;
+
+  // Jump targets.
+  // The target of the return from the function.
+  BreakTarget function_return_;
+
+  // True if the function return is shadowed (ie, jumping to the target
+  // function_return_ does not jump to the true function return, but rather
+  // to some unlinking code).
+  bool function_return_is_shadowed_;
+
+  // True when we are in code that expects the virtual frame to be fully
+  // spilled.  Some virtual frame functions are disabled in DEBUG builds when
+  // called from spilled code, because they do not leave the virtual frame
+  // in a spilled state.
+  bool in_spilled_code_;
+
+  static InlineRuntimeLUT kInlineRuntimeLUT[];
+
+  friend class VirtualFrame;
+  friend class JumpTarget;
+  friend class Reference;
+  friend class Result;
+
+  friend class CodeGeneratorPatcher;  // Used in test-log-stack-tracer.cc
+
+  DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
+};
+
+
+// Flag that indicates where the code that handles smi arguments should be
+// placed: in the stub, inlined, or omitted entirely.
+enum GenericBinaryFlags {
+  SMI_CODE_IN_STUB,
+  SMI_CODE_INLINED
+};
+
+
+class GenericBinaryOpStub: public CodeStub {
+ public:
+  GenericBinaryOpStub(Token::Value op,
+                      OverwriteMode mode,
+                      GenericBinaryFlags flags)
+      : op_(op), mode_(mode), flags_(flags) {
+    use_sse3_ = CpuFeatures::IsSupported(CpuFeatures::SSE3);
+    ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+  }
+
+  void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+
+ private:
+  Token::Value op_;
+  OverwriteMode mode_;
+  GenericBinaryFlags flags_;
+  bool use_sse3_;
+
+  const char* GetName();
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
+           Token::String(op_),
+           static_cast<int>(mode_),
+           static_cast<int>(flags_));
+  }
+#endif
+
+  // Minor key encoding in 16 bits FSOOOOOOOOOOOOMM.
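+  // Reading right to left: bits 0..1 are the overwrite mode (MM), bits 2..13
+  // the operation token (O...O), bit 14 the SSE3 flag (S) and bit 15 the
+  // generic binary flags (F), matching the BitFields below.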
+  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+  class OpBits: public BitField<Token::Value, 2, 12> {};
+  class SSE3Bits: public BitField<bool, 14, 1> {};
+  class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
+
+  Major MajorKey() { return GenericBinaryOp; }
+  int MinorKey() {
+    // Encode the parameters in a unique 16 bit value.
+    return OpBits::encode(op_)
+           | ModeBits::encode(mode_)
+           | FlagBits::encode(flags_)
+           | SSE3Bits::encode(use_sse3_);
+  }
+  void Generate(MacroAssembler* masm);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_IA32_CODEGEN_IA32_H_
diff --git a/src/ia32/cpu-ia32.cc b/src/ia32/cpu-ia32.cc
new file mode 100644
index 0000000..2107ad9
--- /dev/null
+++ b/src/ia32/cpu-ia32.cc
@@ -0,0 +1,79 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// CPU specific code for ia32 independent of OS goes here.
+
+#ifdef __GNUC__
+#include "third_party/valgrind/valgrind.h"
+#endif
+
+#include "v8.h"
+
+#include "cpu.h"
+#include "macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+void CPU::Setup() {
+  CpuFeatures::Probe();
+}
+
+
+void CPU::FlushICache(void* start, size_t size) {
+  // No need to flush the instruction cache on Intel. On Intel, instruction
+  // cache flushing is only necessary when multiple cores are running the
+  // same code simultaneously. V8 (and JavaScript) is single threaded, and
+  // when code is patched on an Intel CPU the core performing the patching
+  // will have its own instruction cache updated automatically.
+
+  // If flushing of the instruction cache becomes necessary Windows has the
+  // API function FlushInstructionCache.
+
+  // By default, valgrind only checks the stack for writes that might need to
+  // invalidate already cached translated code.  This leads to random
+  // instability because code patches or moves sometimes go unnoticed.  One
+  // solution is to run valgrind with --smc-check=all, but this comes at a big
+  // performance cost.  We can notify valgrind to invalidate its cache.
+#ifdef VALGRIND_DISCARD_TRANSLATIONS
+  VALGRIND_DISCARD_TRANSLATIONS(start, size);
+#endif
+}
+
+
+void CPU::DebugBreak() {
+#ifdef _MSC_VER
+  // To avoid depending on the Visual Studio runtime, the following code can
+  // be used instead:
+  //   __asm { int 3 }
+  __debugbreak();
+#else
+  asm("int $3");
+#endif
+}
+
+} }  // namespace v8::internal
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
new file mode 100644
index 0000000..7e0dfd1
--- /dev/null
+++ b/src/ia32/debug-ia32.cc
@@ -0,0 +1,208 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "debug.h"
+
+
+namespace v8 {
+namespace internal {
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+
+bool BreakLocationIterator::IsDebugBreakAtReturn() {
+  return Debug::IsDebugBreakAtReturn(rinfo());
+}
+
+
+// Patch the JS frame exit code with a debug break call. See
+// CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-ia32.cc
+// for the precise return instruction sequence.
+void BreakLocationIterator::SetDebugBreakAtReturn() {
+  ASSERT(Debug::kIa32JSReturnSequenceLength >=
+         Debug::kIa32CallInstructionLength);
+  rinfo()->PatchCodeWithCall(Debug::debug_break_return()->entry(),
+      Debug::kIa32JSReturnSequenceLength - Debug::kIa32CallInstructionLength);
+}
+
+
+// Restore the JS frame exit code.
+void BreakLocationIterator::ClearDebugBreakAtReturn() {
+  rinfo()->PatchCode(original_rinfo()->pc(),
+                     Debug::kIa32JSReturnSequenceLength);
+}
+
+
+// A debug break in the frame exit code is identified by the JS frame exit code
+// having been patched with a call instruction.
+bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
+  ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+  return rinfo->IsCallInstruction();
+}
+
+
+#define __ ACCESS_MASM(masm)
+
+
+static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
+                                          RegList pointer_regs,
+                                          bool convert_call_to_jmp) {
+  // Save the content of all general purpose registers in memory. This copy
+  // is later pushed onto the JS expression stack of the fake JS frame that
+  // is generated, and also onto the C frame generated on top of that. In
+  // the JS frame ONLY the registers containing pointers are pushed on the
+  // expression stack, which lets the GC update these pointers so that they
+  // have the correct value when execution returns from the debugger.
+  __ SaveRegistersToMemory(kJSCallerSaved);
+
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Store the registers containing object pointers on the expression stack to
+  // make sure that these are correctly updated during GC.
+  __ PushRegistersFromMemory(pointer_regs);
+
+#ifdef DEBUG
+  __ RecordComment("// Calling from debug break to runtime - come in - over");
+#endif
+  __ Set(eax, Immediate(0));  // no arguments
+  __ mov(ebx, Immediate(ExternalReference::debug_break()));
+
+  CEntryDebugBreakStub ceb;
+  __ CallStub(&ceb);
+
+  // Restore the register values containing object pointers from the expression
+  // stack in the reverse order from which they were pushed.
+  __ PopRegistersToMemory(pointer_regs);
+
+  // Get rid of the internal frame.
+  __ LeaveInternalFrame();
+
+  // If this call did not replace a call but patched other code then there will
+  // be an unwanted return address left on the stack. Here we get rid of that.
+  if (convert_call_to_jmp) {
+    __ pop(eax);
+  }
+
+  // Finally restore all registers.
+  __ RestoreRegistersFromMemory(kJSCallerSaved);
+
+  // Now that the break point has been handled, resume normal execution by
+  // jumping to the target address intended by the caller, which was
+  // overwritten by the address of DebugBreakXXX.
+  ExternalReference after_break_target =
+      ExternalReference(Debug_Address::AfterBreakTarget());
+  __ jmp(Operand::StaticVariable(after_break_target));
+}
+
+
+void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+  // Register state for IC load call (from ic-ia32.cc).
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, ecx.bit(), false);
+}
+
+
+void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+  // Register state for IC store call (from ic-ia32.cc).
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : name
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit(), false);
+}
+
+
+void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+  // Register state for keyed IC load call (from ic-ia32.cc).
+  // ----------- S t a t e -------------
+  //  No registers used on entry.
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, 0, false);
+}
+
+
+void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+  // Register state for keyed IC store call (from ic-ia32.cc).
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  // -----------------------------------
+  // Register eax contains an object that needs to be pushed on the
+  // expression stack of the fake JS frame.
+  Generate_DebugBreakCallHelper(masm, eax.bit(), false);
+}
+
+
+void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
+  // Register state for IC call (from ic-ia32.cc).
+  // ----------- S t a t e -------------
+  //  -- eax: number of arguments
+  // -----------------------------------
+  // The number of arguments in eax is not smi encoded.
+  Generate_DebugBreakCallHelper(masm, 0, false);
+}
+
+
+void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
+  // Register state for construct call (from codegen-ia32.cc).
+  // eax is the actual number of arguments, not encoded as a smi; see the
+  // comment above the IC call.
+  // ----------- S t a t e -------------
+  //  -- eax: number of arguments
+  // -----------------------------------
+  // The number of arguments in eax is not smi encoded.
+  Generate_DebugBreakCallHelper(masm, 0, false);
+}
+
+
+void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
+  // Register state just before return from JS function (from codegen-ia32.cc).
+  // ----------- S t a t e -------------
+  //  -- eax: return value
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, eax.bit(), true);
+}
+
+
+void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
+  // Register state for stub CallFunction (from CallFunctionStub in ic-ia32.cc).
+  // ----------- S t a t e -------------
+  //  No registers used on entry.
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, 0, false);
+}
+
+
+#undef __
+
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
+} }  // namespace v8::internal
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
new file mode 100644
index 0000000..458844e
--- /dev/null
+++ b/src/ia32/disasm-ia32.cc
@@ -0,0 +1,1202 @@
+// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdarg.h>
+
+#include "v8.h"
+#include "disasm.h"
+
+namespace disasm {
+
+enum OperandOrder {
+  UNSET_OP_ORDER = 0,
+  REG_OPER_OP_ORDER,
+  OPER_REG_OP_ORDER
+};
+
+
+//------------------------------------------------------------------
+// Tables
+//------------------------------------------------------------------
+struct ByteMnemonic {
+  int b;  // -1 terminates, otherwise must be in range (0..255)
+  const char* mnem;
+  OperandOrder op_order_;
+};
+
+
+static ByteMnemonic two_operands_instr[] = {
+  {0x03, "add", REG_OPER_OP_ORDER},
+  {0x21, "and", OPER_REG_OP_ORDER},
+  {0x23, "and", REG_OPER_OP_ORDER},
+  {0x3B, "cmp", REG_OPER_OP_ORDER},
+  {0x8D, "lea", REG_OPER_OP_ORDER},
+  {0x09, "or", OPER_REG_OP_ORDER},
+  {0x0B, "or", REG_OPER_OP_ORDER},
+  {0x1B, "sbb", REG_OPER_OP_ORDER},
+  {0x29, "sub", OPER_REG_OP_ORDER},
+  {0x2B, "sub", REG_OPER_OP_ORDER},
+  {0x85, "test", REG_OPER_OP_ORDER},
+  {0x31, "xor", OPER_REG_OP_ORDER},
+  {0x33, "xor", REG_OPER_OP_ORDER},
+  {0x87, "xchg", REG_OPER_OP_ORDER},
+  {0x8A, "mov_b", REG_OPER_OP_ORDER},
+  {0x8B, "mov", REG_OPER_OP_ORDER},
+  {-1, "", UNSET_OP_ORDER}
+};
+
+
+static ByteMnemonic zero_operands_instr[] = {
+  {0xC3, "ret", UNSET_OP_ORDER},
+  {0xC9, "leave", UNSET_OP_ORDER},
+  {0x90, "nop", UNSET_OP_ORDER},
+  {0xF4, "hlt", UNSET_OP_ORDER},
+  {0xCC, "int3", UNSET_OP_ORDER},
+  {0x60, "pushad", UNSET_OP_ORDER},
+  {0x61, "popad", UNSET_OP_ORDER},
+  {0x9C, "pushfd", UNSET_OP_ORDER},
+  {0x9D, "popfd", UNSET_OP_ORDER},
+  {0x9E, "sahf", UNSET_OP_ORDER},
+  {0x99, "cdq", UNSET_OP_ORDER},
+  {0x9B, "fwait", UNSET_OP_ORDER},
+  {-1, "", UNSET_OP_ORDER}
+};
+
+
+static ByteMnemonic call_jump_instr[] = {
+  {0xE8, "call", UNSET_OP_ORDER},
+  {0xE9, "jmp", UNSET_OP_ORDER},
+  {-1, "", UNSET_OP_ORDER}
+};
+
+
+static ByteMnemonic short_immediate_instr[] = {
+  {0x05, "add", UNSET_OP_ORDER},
+  {0x0D, "or", UNSET_OP_ORDER},
+  {0x15, "adc", UNSET_OP_ORDER},
+  {0x25, "and", UNSET_OP_ORDER},
+  {0x2D, "sub", UNSET_OP_ORDER},
+  {0x35, "xor", UNSET_OP_ORDER},
+  {0x3D, "cmp", UNSET_OP_ORDER},
+  {-1, "", UNSET_OP_ORDER}
+};
+
+
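+// Mnemonics indexed by the low nibble of the condition code; e.g. the short
+// jump opcode 0x74 and the near jump sequence 0x0F 0x84 both have condition
+// 0x4 and disassemble as "jz".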
+static const char* jump_conditional_mnem[] = {
+  /*0*/ "jo", "jno", "jc", "jnc",
+  /*4*/ "jz", "jnz", "jna", "ja",
+  /*8*/ "js", "jns", "jpe", "jpo",
+  /*12*/ "jl", "jnl", "jng", "jg"
+};
+
+
+static const char* set_conditional_mnem[] = {
+  /*0*/ "seto", "setno", "setc", "setnc",
+  /*4*/ "setz", "setnz", "setna", "seta",
+  /*8*/ "sets", "setns", "setpe", "setpo",
+  /*12*/ "setl", "setnl", "setng", "setg"
+};
+
+
+enum InstructionType {
+  NO_INSTR,
+  ZERO_OPERANDS_INSTR,
+  TWO_OPERANDS_INSTR,
+  JUMP_CONDITIONAL_SHORT_INSTR,
+  REGISTER_INSTR,
+  MOVE_REG_INSTR,
+  CALL_JUMP_INSTR,
+  SHORT_IMMEDIATE_INSTR
+};
+
+
+struct InstructionDesc {
+  const char* mnem;
+  InstructionType type;
+  OperandOrder op_order_;
+};
+
+
+class InstructionTable {
+ public:
+  InstructionTable();
+  const InstructionDesc& Get(byte x) const { return instructions_[x]; }
+
+ private:
+  InstructionDesc instructions_[256];
+  void Clear();
+  void Init();
+  void CopyTable(ByteMnemonic bm[], InstructionType type);
+  void SetTableRange(InstructionType type,
+                     byte start,
+                     byte end,
+                     const char* mnem);
+  void AddJumpConditionalShort();
+};
+
+
+InstructionTable::InstructionTable() {
+  Clear();
+  Init();
+}
+
+
+void InstructionTable::Clear() {
+  for (int i = 0; i < 256; i++) {
+    instructions_[i].mnem = "";
+    instructions_[i].type = NO_INSTR;
+    instructions_[i].op_order_ = UNSET_OP_ORDER;
+  }
+}
+
+
+void InstructionTable::Init() {
+  CopyTable(two_operands_instr, TWO_OPERANDS_INSTR);
+  CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR);
+  CopyTable(call_jump_instr, CALL_JUMP_INSTR);
+  CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR);
+  AddJumpConditionalShort();
+  SetTableRange(REGISTER_INSTR, 0x40, 0x47, "inc");
+  SetTableRange(REGISTER_INSTR, 0x48, 0x4F, "dec");
+  SetTableRange(REGISTER_INSTR, 0x50, 0x57, "push");
+  SetTableRange(REGISTER_INSTR, 0x58, 0x5F, "pop");
+  SetTableRange(REGISTER_INSTR, 0x91, 0x97, "xchg eax,");  // 0x90 is nop.
+  SetTableRange(MOVE_REG_INSTR, 0xB8, 0xBF, "mov");
+}
+
+
+void InstructionTable::CopyTable(ByteMnemonic bm[], InstructionType type) {
+  for (int i = 0; bm[i].b >= 0; i++) {
+    InstructionDesc* id = &instructions_[bm[i].b];
+    id->mnem = bm[i].mnem;
+    id->op_order_ = bm[i].op_order_;
+    assert(id->type == NO_INSTR);  // Information must not be entered twice.
+    id->type = type;
+  }
+}
+
+
+void InstructionTable::SetTableRange(InstructionType type,
+                                     byte start,
+                                     byte end,
+                                     const char* mnem) {
+  for (byte b = start; b <= end; b++) {
+    InstructionDesc* id = &instructions_[b];
+    assert(id->type == NO_INSTR);  // Information must not be entered twice.
+    id->mnem = mnem;
+    id->type = type;
+  }
+}
+
+
+void InstructionTable::AddJumpConditionalShort() {
+  for (byte b = 0x70; b <= 0x7F; b++) {
+    InstructionDesc* id = &instructions_[b];
+    assert(id->type == NO_INSTR);  // Information must not be entered twice.
+    id->mnem = jump_conditional_mnem[b & 0x0F];
+    id->type = JUMP_CONDITIONAL_SHORT_INSTR;
+  }
+}
+
+
+static InstructionTable instruction_table;
+
+
+// The IA32 disassembler implementation.
+class DisassemblerIA32 {
+ public:
+  DisassemblerIA32(const NameConverter& converter,
+                   bool abort_on_unimplemented = true)
+      : converter_(converter),
+        tmp_buffer_pos_(0),
+        abort_on_unimplemented_(abort_on_unimplemented) {
+    tmp_buffer_[0] = '\0';
+  }
+
+  virtual ~DisassemblerIA32() {}
+
+  // Writes one disassembled instruction into 'buffer' (0-terminated).
+  // Returns the length of the disassembled machine instruction in bytes.
+  int InstructionDecode(v8::internal::Vector<char> buffer, byte* instruction);
+
+ private:
+  const NameConverter& converter_;
+  v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
+  unsigned int tmp_buffer_pos_;
+  bool abort_on_unimplemented_;
+
+
+  enum {
+    eax = 0,
+    ecx = 1,
+    edx = 2,
+    ebx = 3,
+    esp = 4,
+    ebp = 5,
+    esi = 6,
+    edi = 7
+  };
+
+
+  const char* NameOfCPURegister(int reg) const {
+    return converter_.NameOfCPURegister(reg);
+  }
+
+
+  const char* NameOfByteCPURegister(int reg) const {
+    return converter_.NameOfByteCPURegister(reg);
+  }
+
+
+  const char* NameOfXMMRegister(int reg) const {
+    return converter_.NameOfXMMRegister(reg);
+  }
+
+
+  const char* NameOfAddress(byte* addr) const {
+    return converter_.NameOfAddress(addr);
+  }
+
+
+  // Disassembler helper functions.
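+  // get_modrm splits a ModR/M byte into its three fields; for example 0xC8
+  // (binary 11 001 000) decodes as mod == 3, regop == 1, rm == 0.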
+  static void get_modrm(byte data, int* mod, int* regop, int* rm) {
+    *mod = (data >> 6) & 3;
+    *regop = (data & 0x38) >> 3;
+    *rm = data & 7;
+  }
+
+
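+  // get_sib splits a SIB byte into its three fields; for example 0x98
+  // (binary 10 011 000) decodes as scale == 2, index == 3, base == 0.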
+  static void get_sib(byte data, int* scale, int* index, int* base) {
+    *scale = (data >> 6) & 3;
+    *index = (data >> 3) & 7;
+    *base = data & 7;
+  }
+
+  typedef const char* (DisassemblerIA32::*RegisterNameMapping)(int reg) const;
+
+  int PrintRightOperandHelper(byte* modrmp, RegisterNameMapping register_name);
+  int PrintRightOperand(byte* modrmp);
+  int PrintRightByteOperand(byte* modrmp);
+  int PrintOperands(const char* mnem, OperandOrder op_order, byte* data);
+  int PrintImmediateOp(byte* data);
+  int F7Instruction(byte* data);
+  int D1D3C1Instruction(byte* data);
+  int JumpShort(byte* data);
+  int JumpConditional(byte* data, const char* comment);
+  int JumpConditionalShort(byte* data, const char* comment);
+  int SetCC(byte* data);
+  int FPUInstruction(byte* data);
+  void AppendToBuffer(const char* format, ...);
+
+
+  void UnimplementedInstruction() {
+    if (abort_on_unimplemented_) {
+      UNIMPLEMENTED();
+    } else {
+      AppendToBuffer("'Unimplemented Instruction'");
+    }
+  }
+};
+
+
+void DisassemblerIA32::AppendToBuffer(const char* format, ...) {
+  v8::internal::Vector<char> buf = tmp_buffer_ + tmp_buffer_pos_;
+  va_list args;
+  va_start(args, format);
+  int result = v8::internal::OS::VSNPrintF(buf, format, args);
+  va_end(args);
+  tmp_buffer_pos_ += result;
+}
+
+int DisassemblerIA32::PrintRightOperandHelper(
+    byte* modrmp,
+    RegisterNameMapping register_name) {
+  int mod, regop, rm;
+  get_modrm(*modrmp, &mod, &regop, &rm);
+  switch (mod) {
+    case 0:
+      if (rm == ebp) {
+        int32_t disp = *reinterpret_cast<int32_t*>(modrmp+1);
+        AppendToBuffer("[0x%x]", disp);
+        return 5;
+      } else if (rm == esp) {
+        byte sib = *(modrmp + 1);
+        int scale, index, base;
+        get_sib(sib, &scale, &index, &base);
+        if (index == esp && base == esp && scale == 0 /*times_1*/) {
+          AppendToBuffer("[%s]", (this->*register_name)(rm));
+          return 2;
+        } else if (base == ebp) {
+          int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
+          AppendToBuffer("[%s*%d+0x%x]",
+                         (this->*register_name)(index),
+                         1 << scale,
+                         disp);
+          return 6;
+        } else if (index != esp && base != ebp) {
+          // [base+index*scale]
+          AppendToBuffer("[%s+%s*%d]",
+                         (this->*register_name)(base),
+                         (this->*register_name)(index),
+                         1 << scale);
+          return 2;
+        } else {
+          UnimplementedInstruction();
+          return 1;
+        }
+      } else {
+        AppendToBuffer("[%s]", (this->*register_name)(rm));
+        return 1;
+      }
+      break;
+    case 1:  // fall through
+    case 2:
+      if (rm == esp) {
+        byte sib = *(modrmp + 1);
+        int scale, index, base;
+        get_sib(sib, &scale, &index, &base);
+        int disp =
+            mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 2) : *(modrmp + 2);
+        if (index == base && index == rm /*esp*/ && scale == 0 /*times_1*/) {
+          AppendToBuffer("[%s+0x%x]", (this->*register_name)(rm), disp);
+        } else {
+          AppendToBuffer("[%s+%s*%d+0x%x]",
+                         (this->*register_name)(base),
+                         (this->*register_name)(index),
+                         1 << scale,
+                         disp);
+        }
+        return mod == 2 ? 6 : 3;
+      } else {
+        // No sib.
+        int disp =
+            mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 1) : *(modrmp + 1);
+        AppendToBuffer("[%s+0x%x]", (this->*register_name)(rm), disp);
+        return mod == 2 ? 5 : 2;
+      }
+      break;
+    case 3:
+      AppendToBuffer("%s", (this->*register_name)(rm));
+      return 1;
+    default:
+      UnimplementedInstruction();
+      return 1;
+  }
+  UNREACHABLE();
+}
+
+
+int DisassemblerIA32::PrintRightOperand(byte* modrmp) {
+  return PrintRightOperandHelper(modrmp, &DisassemblerIA32::NameOfCPURegister);
+}
+
+
+int DisassemblerIA32::PrintRightByteOperand(byte* modrmp) {
+  return PrintRightOperandHelper(modrmp,
+                                 &DisassemblerIA32::NameOfByteCPURegister);
+}
+
+
+// Returns number of bytes used including the current *data.
+// Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
+int DisassemblerIA32::PrintOperands(const char* mnem,
+                                    OperandOrder op_order,
+                                    byte* data) {
+  byte modrm = *data;
+  int mod, regop, rm;
+  get_modrm(modrm, &mod, &regop, &rm);
+  int advance = 0;
+  switch (op_order) {
+    case REG_OPER_OP_ORDER: {
+      AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop));
+      advance = PrintRightOperand(data);
+      break;
+    }
+    case OPER_REG_OP_ORDER: {
+      AppendToBuffer("%s ", mnem);
+      advance = PrintRightOperand(data);
+      AppendToBuffer(",%s", NameOfCPURegister(regop));
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+  return advance;
+}
+
+
+// Returns number of bytes used by machine instruction, including *data byte.
+// Writes immediate instructions to 'tmp_buffer_'.
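+// The 0x02 bit of the opcode distinguishes the sign-extended 8-bit immediate
+// form (e.g. 0x83) from the 32-bit immediate form (e.g. 0x81) of the group-1
+// instructions.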
+int DisassemblerIA32::PrintImmediateOp(byte* data) {
+  bool sign_extension_bit = (*data & 0x02) != 0;
+  byte modrm = *(data+1);
+  int mod, regop, rm;
+  get_modrm(modrm, &mod, &regop, &rm);
+  const char* mnem = "Imm???";
+  switch (regop) {
+    case 0: mnem = "add"; break;
+    case 1: mnem = "or"; break;
+    case 2: mnem = "adc"; break;
+    case 4: mnem = "and"; break;
+    case 5: mnem = "sub"; break;
+    case 6: mnem = "xor"; break;
+    case 7: mnem = "cmp"; break;
+    default: UnimplementedInstruction();
+  }
+  AppendToBuffer("%s ", mnem);
+  int count = PrintRightOperand(data+1);
+  if (sign_extension_bit) {
+    AppendToBuffer(",0x%x", *(data + 1 + count));
+    return 1 + count + 1 /*int8*/;
+  } else {
+    AppendToBuffer(",0x%x", *reinterpret_cast<int32_t*>(data + 1 + count));
+    return 1 + count + 4 /*int32_t*/;
+  }
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerIA32::F7Instruction(byte* data) {
+  assert(*data == 0xF7);
+  byte modrm = *(data+1);
+  int mod, regop, rm;
+  get_modrm(modrm, &mod, &regop, &rm);
+  if (mod == 3 && regop != 0) {
+    const char* mnem = NULL;
+    switch (regop) {
+      case 2: mnem = "not"; break;
+      case 3: mnem = "neg"; break;
+      case 4: mnem = "mul"; break;
+      case 7: mnem = "idiv"; break;
+      default: UnimplementedInstruction();
+    }
+    AppendToBuffer("%s %s", mnem, NameOfCPURegister(rm));
+    return 2;
+  } else if (mod == 3 && regop == eax) {
+    int32_t imm = *reinterpret_cast<int32_t*>(data+2);
+    AppendToBuffer("test %s,0x%x", NameOfCPURegister(rm), imm);
+    return 6;
+  } else if (regop == eax) {
+    AppendToBuffer("test ");
+    int count = PrintRightOperand(data+1);
+    int32_t imm = *reinterpret_cast<int32_t*>(data+1+count);
+    AppendToBuffer(",0x%x", imm);
+    return 1+count+4 /*int32_t*/;
+  } else {
+    UnimplementedInstruction();
+    return 2;
+  }
+}
+
+int DisassemblerIA32::D1D3C1Instruction(byte* data) {
+  byte op = *data;
+  assert(op == 0xD1 || op == 0xD3 || op == 0xC1);
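+  // Shift/rotate group: 0xD1 shifts by 1, 0xC1 by an 8-bit immediate and
+  // 0xD3 by the count in CL.  The reg field selects the operation; the
+  // register constants double as its values (edx == 2 -> rcl,
+  // esp == 4 -> shl, ebp == 5 -> shr, edi == 7 -> sar).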
+  byte modrm = *(data+1);
+  int mod, regop, rm;
+  get_modrm(modrm, &mod, &regop, &rm);
+  int imm8 = -1;
+  int num_bytes = 2;
+  if (mod == 3) {
+    const char* mnem = NULL;
+    if (op == 0xD1) {
+      imm8 = 1;
+      switch (regop) {
+        case edx: mnem = "rcl"; break;
+        case edi: mnem = "sar"; break;
+        case esp: mnem = "shl"; break;
+        default: UnimplementedInstruction();
+      }
+    } else if (op == 0xC1) {
+      imm8 = *(data+2);
+      num_bytes = 3;
+      switch (regop) {
+        case edx: mnem = "rcl"; break;
+        case esp: mnem = "shl"; break;
+        case ebp: mnem = "shr"; break;
+        case edi: mnem = "sar"; break;
+        default: UnimplementedInstruction();
+      }
+    } else if (op == 0xD3) {
+      switch (regop) {
+        case esp: mnem = "shl"; break;
+        case ebp: mnem = "shr"; break;
+        case edi: mnem = "sar"; break;
+        default: UnimplementedInstruction();
+      }
+    }
+    assert(mnem != NULL);
+    AppendToBuffer("%s %s,", mnem, NameOfCPURegister(rm));
+    if (imm8 > 0) {
+      AppendToBuffer("%d", imm8);
+    } else {
+      AppendToBuffer("cl");
+    }
+  } else {
+    UnimplementedInstruction();
+  }
+  return num_bytes;
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerIA32::JumpShort(byte* data) {
+  assert(*data == 0xEB);
+  byte b = *(data+1);
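+  // The 8-bit displacement is relative to the end of this 2-byte instruction.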
+  byte* dest = data + static_cast<int8_t>(b) + 2;
+  AppendToBuffer("jmp %s", NameOfAddress(dest));
+  return 2;
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerIA32::JumpConditional(byte* data, const char* comment) {
+  assert(*data == 0x0F);
+  byte cond = *(data+1) & 0x0F;
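+  // The 32-bit displacement is relative to the end of this 6-byte
+  // instruction (0x0F, 0x80 | cond, imm32).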
+  byte* dest = data + *reinterpret_cast<int32_t*>(data+2) + 6;
+  const char* mnem = jump_conditional_mnem[cond];
+  AppendToBuffer("%s %s", mnem, NameOfAddress(dest));
+  if (comment != NULL) {
+    AppendToBuffer(", %s", comment);
+  }
+  return 6;  // includes 0x0F
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerIA32::JumpConditionalShort(byte* data, const char* comment) {
+  byte cond = *data & 0x0F;
+  byte b = *(data+1);
+  byte* dest = data + static_cast<int8_t>(b) + 2;
+  const char* mnem = jump_conditional_mnem[cond];
+  AppendToBuffer("%s %s", mnem, NameOfAddress(dest));
+  if (comment != NULL) {
+    AppendToBuffer(", %s", comment);
+  }
+  return 2;
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerIA32::SetCC(byte* data) {
+  assert(*data == 0x0F);
+  byte cond = *(data+1) & 0x0F;
+  const char* mnem = set_conditional_mnem[cond];
+  AppendToBuffer("%s ", mnem);
+  PrintRightByteOperand(data+2);
+  return 3;  // includes 0x0F
+}
+
+
+// Returns number of bytes used, including *data.
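+// Decodes only the x87 floating point instructions emitted by the code
+// generator; the escape opcode (0xD8..0xDF) selects the group and the
+// second byte or the ModR/M reg field selects the operation.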
+int DisassemblerIA32::FPUInstruction(byte* data) {
+  byte b1 = *data;
+  byte b2 = *(data + 1);
+  if (b1 == 0xD9) {
+    const char* mnem = NULL;
+    switch (b2) {
+      case 0xE8: mnem = "fld1"; break;
+      case 0xEE: mnem = "fldz"; break;
+      case 0xE1: mnem = "fabs"; break;
+      case 0xE0: mnem = "fchs"; break;
+      case 0xF8: mnem = "fprem"; break;
+      case 0xF5: mnem = "fprem1"; break;
+      case 0xF7: mnem = "fincstp"; break;
+      case 0xE4: mnem = "ftst"; break;
+    }
+    if (mnem != NULL) {
+      AppendToBuffer("%s", mnem);
+      return 2;
+    } else if ((b2 & 0xF8) == 0xC8) {
+      AppendToBuffer("fxch st%d", b2 & 0x7);
+      return 2;
+    } else {
+      int mod, regop, rm;
+      get_modrm(*(data+1), &mod, &regop, &rm);
+      const char* mnem = "?";
+      switch (regop) {
+        case eax: mnem = "fld_s"; break;
+        case ebx: mnem = "fstp_s"; break;
+        default: UnimplementedInstruction();
+      }
+      AppendToBuffer("%s ", mnem);
+      int count = PrintRightOperand(data + 1);
+      return count + 1;
+    }
+  } else if (b1 == 0xDD) {
+    if ((b2 & 0xF8) == 0xC0) {
+      AppendToBuffer("ffree st%d", b2 & 0x7);
+      return 2;
+    } else {
+      int mod, regop, rm;
+      get_modrm(*(data+1), &mod, &regop, &rm);
+      const char* mnem = "?";
+      switch (regop) {
+        case eax: mnem = "fld_d"; break;
+        case ebx: mnem = "fstp_d"; break;
+        default: UnimplementedInstruction();
+      }
+      AppendToBuffer("%s ", mnem);
+      int count = PrintRightOperand(data + 1);
+      return count + 1;
+    }
+  } else if (b1 == 0xDB) {
+    int mod, regop, rm;
+    get_modrm(*(data+1), &mod, &regop, &rm);
+    const char* mnem = "?";
+    switch (regop) {
+      case eax: mnem = "fild_s"; break;
+      case edx: mnem = "fist_s"; break;
+      case ebx: mnem = "fistp_s"; break;
+      default: UnimplementedInstruction();
+    }
+    AppendToBuffer("%s ", mnem);
+    int count = PrintRightOperand(data + 1);
+    return count + 1;
+  } else if (b1 == 0xDF) {
+    if (b2 == 0xE0) {
+      AppendToBuffer("fnstsw_ax");
+      return 2;
+    }
+    int mod, regop, rm;
+    get_modrm(*(data+1), &mod, &regop, &rm);
+    const char* mnem = "?";
+    switch (regop) {
+      case ebp: mnem = "fild_d"; break;
+      case edi: mnem = "fistp_d"; break;
+      default: UnimplementedInstruction();
+    }
+    AppendToBuffer("%s ", mnem);
+    int count = PrintRightOperand(data + 1);
+    return count + 1;
+  } else if (b1 == 0xDC || b1 == 0xDE) {
+    bool is_pop = (b1 == 0xDE);
+    if (is_pop && b2 == 0xD9) {
+      AppendToBuffer("fcompp");
+      return 2;
+    }
+    const char* mnem = "FP0xDC";
+    switch (b2 & 0xF8) {
+      case 0xC0: mnem = "fadd"; break;
+      case 0xE8: mnem = "fsub"; break;
+      case 0xC8: mnem = "fmul"; break;
+      case 0xF8: mnem = "fdiv"; break;
+      default: UnimplementedInstruction();
+    }
+    AppendToBuffer("%s%s st%d", mnem, is_pop ? "p" : "", b2 & 0x7);
+    return 2;
+  } else if (b1 == 0xDA && b2 == 0xE9) {
+    const char* mnem = "fucompp";
+    AppendToBuffer("%s", mnem);
+    return 2;
+  }
+  AppendToBuffer("Unknown FP instruction");
+  return 2;
+}
+
+
+// Mnemonics for the opcode byte that follows a 0x0F prefix.
+// Returns NULL if the instruction is not handled here.
+static const char* F0Mnem(byte f0byte) {
+  switch (f0byte) {
+    case 0xA2: return "cpuid";
+    case 0x31: return "rdtsc";
+    case 0xBE: return "movsx_b";
+    case 0xBF: return "movsx_w";
+    case 0xB6: return "movzx_b";
+    case 0xB7: return "movzx_w";
+    case 0xAF: return "imul";
+    case 0xA5: return "shld";
+    case 0xAD: return "shrd";
+    case 0xAB: return "bts";
+    default: return NULL;
+  }
+}
+
+
+// Disassembles the instruction at '*instr' and writes it into 'out_buffer'.
+int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
+                                        byte* instr) {
+  tmp_buffer_pos_ = 0;  // starting to write at position 0
+  byte* data = instr;
+  // Check for hints.
+  const char* branch_hint = NULL;
+  // We use these two prefixes only as branch prediction hints.
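+  // (0x3E and 0x2E are the ds/cs segment override prefixes, reused on
+  // conditional branches as static taken/not-taken hints.)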
+  if (*data == 0x3E /*ds*/) {
+    branch_hint = "predicted taken";
+    data++;
+  } else if (*data == 0x2E /*cs*/) {
+    branch_hint = "predicted not taken";
+    data++;
+  }
+  bool processed = true;  // Will be set to false if the current instruction
+                          // is not in the instruction table.
+  const InstructionDesc& idesc = instruction_table.Get(*data);
+  switch (idesc.type) {
+    case ZERO_OPERANDS_INSTR:
+      AppendToBuffer(idesc.mnem);
+      data++;
+      break;
+
+    case TWO_OPERANDS_INSTR:
+      data++;
+      data += PrintOperands(idesc.mnem, idesc.op_order_, data);
+      break;
+
+    case JUMP_CONDITIONAL_SHORT_INSTR:
+      data += JumpConditionalShort(data, branch_hint);
+      break;
+
+    case REGISTER_INSTR:
+      AppendToBuffer("%s %s", idesc.mnem, NameOfCPURegister(*data & 0x07));
+      data++;
+      break;
+
+    case MOVE_REG_INSTR: {
+      byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1));
+      AppendToBuffer("mov %s,%s",
+                     NameOfCPURegister(*data & 0x07),
+                     NameOfAddress(addr));
+      data += 5;
+      break;
+    }
+
+    case CALL_JUMP_INSTR: {
+      byte* addr = data + *reinterpret_cast<int32_t*>(data+1) + 5;
+      AppendToBuffer("%s %s", idesc.mnem, NameOfAddress(addr));
+      data += 5;
+      break;
+    }
+
+    case SHORT_IMMEDIATE_INSTR: {
+      byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1));
+      AppendToBuffer("%s eax, %s", idesc.mnem, NameOfAddress(addr));
+      data += 5;
+      break;
+    }
+
+    case NO_INSTR:
+      processed = false;
+      break;
+
+    default:
+      UNIMPLEMENTED();  // This type is not implemented.
+  }
+  //----------------------------
+  if (!processed) {
+    switch (*data) {
+      case 0xC2:
+        AppendToBuffer("ret 0x%x", *reinterpret_cast<uint16_t*>(data+1));
+        data += 3;
+        break;
+
+      case 0x69:  // fall through
+      case 0x6B:
+        { int mod, regop, rm;
+          get_modrm(*(data+1), &mod, &regop, &rm);
+          int32_t imm =
+              *data == 0x6B ? *(data+2) : *reinterpret_cast<int32_t*>(data+2);
+          AppendToBuffer("imul %s,%s,0x%x",
+                         NameOfCPURegister(regop),
+                         NameOfCPURegister(rm),
+                         imm);
+          data += 2 + (*data == 0x6B ? 1 : 4);
+        }
+        break;
+
+      case 0xF6:
+        { int mod, regop, rm;
+          get_modrm(*(data+1), &mod, &regop, &rm);
+          if (mod == 3 && regop == eax) {
+            AppendToBuffer("test_b %s,%d", NameOfCPURegister(rm), *(data+2));
+          } else {
+            UnimplementedInstruction();
+          }
+          data += 3;
+        }
+        break;
+
+      case 0x81:  // fall through
+      case 0x83:  // 0x81 with sign extension bit set
+        data += PrintImmediateOp(data);
+        break;
+
+      case 0x0F:
+        { byte f0byte = *(data+1);
+          const char* f0mnem = F0Mnem(f0byte);
+          if (f0byte == 0xA2 || f0byte == 0x31) {
+            AppendToBuffer("%s", f0mnem);
+            data += 2;
+          } else if ((f0byte & 0xF0) == 0x80) {
+            data += JumpConditional(data, branch_hint);
+          } else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
+                     f0byte == 0xB7 || f0byte == 0xAF) {
+            data += 2;
+            data += PrintOperands(f0mnem, REG_OPER_OP_ORDER, data);
+          } else if ((f0byte & 0xF0) == 0x90) {
+            data += SetCC(data);
+          } else {
+            data += 2;
+            if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
+              // shrd, shld, bts
+              AppendToBuffer("%s ", f0mnem);
+              int mod, regop, rm;
+              get_modrm(*data, &mod, &regop, &rm);
+              data += PrintRightOperand(data);
+              if (f0byte == 0xAB) {
+                AppendToBuffer(",%s", NameOfCPURegister(regop));
+              } else {
+                AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
+              }
+            } else {
+              UnimplementedInstruction();
+            }
+          }
+        }
+        break;
+
+      case 0x8F:
+        { data++;
+          int mod, regop, rm;
+          get_modrm(*data, &mod, &regop, &rm);
+          if (regop == eax) {
+            AppendToBuffer("pop ");
+            data += PrintRightOperand(data);
+          }
+        }
+        break;
+
+      case 0xFF:
+        { data++;
+          int mod, regop, rm;
+          get_modrm(*data, &mod, &regop, &rm);
+          const char* mnem = NULL;
+          switch (regop) {
+            case esi: mnem = "push"; break;
+            case eax: mnem = "inc"; break;
+            case ecx: mnem = "dec"; break;
+            case edx: mnem = "call"; break;
+            case esp: mnem = "jmp"; break;
+            default: mnem = "???";
+          }
+          AppendToBuffer("%s ", mnem);
+          data += PrintRightOperand(data);
+        }
+        break;
+
+      case 0xC7:  // imm32, fall through
+      case 0xC6:  // imm8
+        { bool is_byte = *data == 0xC6;
+          data++;
+          AppendToBuffer("%s ", is_byte ? "mov_b" : "mov");
+          data += PrintRightOperand(data);
+          int32_t imm = is_byte ? *data : *reinterpret_cast<int32_t*>(data);
+          AppendToBuffer(",0x%x", imm);
+          data += is_byte ? 1 : 4;
+        }
+        break;
+
+      case 0x80:
+        { data++;
+          AppendToBuffer("%s ", "cmpb");
+          data += PrintRightOperand(data);
+          int32_t imm = *data;
+          AppendToBuffer(",0x%x", imm);
+          data++;
+        }
+        break;
+
+      case 0x88:  // 8bit, fall through
+      case 0x89:  // 32bit
+        { bool is_byte = *data == 0x88;
+          int mod, regop, rm;
+          data++;
+          get_modrm(*data, &mod, &regop, &rm);
+          AppendToBuffer("%s ", is_byte ? "mov_b" : "mov");
+          data += PrintRightOperand(data);
+          AppendToBuffer(",%s", NameOfCPURegister(regop));
+        }
+        break;
+
+      case 0x66:  // prefix
+        data++;
+        if (*data == 0x8B) {
+          data++;
+          data += PrintOperands("mov_w", REG_OPER_OP_ORDER, data);
+        } else if (*data == 0x89) {
+          data++;
+          int mod, regop, rm;
+          get_modrm(*data, &mod, &regop, &rm);
+          AppendToBuffer("mov_w ");
+          data += PrintRightOperand(data);
+          AppendToBuffer(",%s", NameOfCPURegister(regop));
+        } else {
+          UnimplementedInstruction();
+        }
+        break;
+
+      case 0xFE:
+        { data++;
+          int mod, regop, rm;
+          get_modrm(*data, &mod, &regop, &rm);
+          if (mod == 3 && regop == ecx) {
+            AppendToBuffer("dec_b %s", NameOfCPURegister(rm));
+          } else {
+            UnimplementedInstruction();
+          }
+          data++;
+        }
+        break;
+
+      case 0x68:
+        AppendToBuffer("push 0x%x", *reinterpret_cast<int32_t*>(data+1));
+        data += 5;
+        break;
+
+      case 0x6A:
+        AppendToBuffer("push 0x%x", *reinterpret_cast<int8_t*>(data + 1));
+        data += 2;
+        break;
+
+      case 0xA8:
+        AppendToBuffer("test al,0x%x", *reinterpret_cast<uint8_t*>(data+1));
+        data += 2;
+        break;
+
+      case 0xA9:
+        AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data+1));
+        data += 5;
+        break;
+
+      case 0xD1:  // fall through
+      case 0xD3:  // fall through
+      case 0xC1:
+        data += D1D3C1Instruction(data);
+        break;
+
+      case 0xD9:  // fall through
+      case 0xDA:  // fall through
+      case 0xDB:  // fall through
+      case 0xDC:  // fall through
+      case 0xDD:  // fall through
+      case 0xDE:  // fall through
+      case 0xDF:
+        data += FPUInstruction(data);
+        break;
+
+      case 0xEB:
+        data += JumpShort(data);
+        break;
+
+      case 0xF2:
+        if (*(data+1) == 0x0F) {
+          byte b2 = *(data+2);
+          if (b2 == 0x11) {
+            AppendToBuffer("movsd ");
+            data += 3;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            data += PrintRightOperand(data);
+            AppendToBuffer(",%s", NameOfXMMRegister(regop));
+          } else if (b2 == 0x10) {
+            data += 3;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("movsd %s,", NameOfXMMRegister(regop));
+            data += PrintRightOperand(data);
+          } else {
+            const char* mnem = "?";
+            switch (b2) {
+              case 0x2A: mnem = "cvtsi2sd"; break;
+              case 0x58: mnem = "addsd"; break;
+              case 0x59: mnem = "mulsd"; break;
+              case 0x5C: mnem = "subsd"; break;
+              case 0x5E: mnem = "divsd"; break;
+            }
+            data += 3;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            if (b2 == 0x2A) {
+              AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
+              data += PrintRightOperand(data);
+            } else {
+              AppendToBuffer("%s %s,%s",
+                             mnem,
+                             NameOfXMMRegister(regop),
+                             NameOfXMMRegister(rm));
+              data++;
+            }
+          }
+        } else {
+          UnimplementedInstruction();
+        }
+        break;
+
+      case 0xF3:
+        if (*(data+1) == 0x0F && *(data+2) == 0x2C) {
+          data += 3;
+          data += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, data);
+        } else {
+          UnimplementedInstruction();
+        }
+        break;
+
+      case 0xF7:
+        data += F7Instruction(data);
+        break;
+
+      default:
+        UnimplementedInstruction();
+    }
+  }
+
+  if (tmp_buffer_pos_ < sizeof tmp_buffer_) {
+    tmp_buffer_[tmp_buffer_pos_] = '\0';
+  }
+
+  int instr_len = data - instr;
+  ASSERT(instr_len > 0);  // Ensure progress.
+
+  int outp = 0;
+  // Instruction bytes.
+  for (byte* bp = instr; bp < data; bp++) {
+    outp += v8::internal::OS::SNPrintF(out_buffer + outp,
+                                       "%02x",
+                                       *bp);
+  }
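+  // Pad with two spaces per remaining byte slot so the disassembled text
+  // starts in a fixed column.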
+  for (int i = 6 - instr_len; i >= 0; i--) {
+    outp += v8::internal::OS::SNPrintF(out_buffer + outp,
+                                       "  ");
+  }
+
+  outp += v8::internal::OS::SNPrintF(out_buffer + outp,
+                                     " %s",
+                                     tmp_buffer_.start());
+  return instr_len;
+}
+
+
+//------------------------------------------------------------------------------
+
+
+static const char* cpu_regs[8] = {
+  "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"
+};
+
+
+static const char* byte_cpu_regs[8] = {
+  "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh"
+};
+
+
+static const char* xmm_regs[8] = {
+  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
+};
+
+
+const char* NameConverter::NameOfAddress(byte* addr) const {
+  static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
+  v8::internal::OS::SNPrintF(tmp_buffer, "%p", addr);
+  return tmp_buffer.start();
+}
+
+
+const char* NameConverter::NameOfConstant(byte* addr) const {
+  return NameOfAddress(addr);
+}
+
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+  if (0 <= reg && reg < 8) return cpu_regs[reg];
+  return "noreg";
+}
+
+
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+  if (0 <= reg && reg < 8) return byte_cpu_regs[reg];
+  return "noreg";
+}
+
+
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+  if (0 <= reg && reg < 8) return xmm_regs[reg];
+  return "noxmmreg";
+}
+
+
+const char* NameConverter::NameInCode(byte* addr) const {
+  // IA32 does not embed debug strings at the moment.
+  UNREACHABLE();
+  return "";
+}
+
+
+//------------------------------------------------------------------------------
+
+Disassembler::Disassembler(const NameConverter& converter)
+    : converter_(converter) {}
+
+
+Disassembler::~Disassembler() {}
+
+
+int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
+                                    byte* instruction) {
+  DisassemblerIA32 d(converter_, false /*do not crash if unimplemented*/);
+  return d.InstructionDecode(buffer, instruction);
+}
+
+
+// The IA-32 assembler does not currently use constant pools.
+int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
+
+
+/*static*/ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
+  NameConverter converter;
+  Disassembler d(converter);
+  for (byte* pc = begin; pc < end;) {
+    v8::internal::EmbeddedVector<char, 128> buffer;
+    buffer[0] = '\0';
+    byte* prev_pc = pc;
+    pc += d.InstructionDecode(buffer, pc);
+    fprintf(f, "%p", prev_pc);
+    fprintf(f, "    ");
+
+    for (byte* bp = prev_pc; bp < pc; bp++) {
+      fprintf(f, "%02x",  *bp);
+    }
+    for (int i = 6 - (pc - prev_pc); i >= 0; i--) {
+      fprintf(f, "  ");
+    }
+    fprintf(f, "  %s\n", buffer.start());
+  }
+}
+
+
+}  // namespace disasm
diff --git a/src/ia32/frames-ia32.cc b/src/ia32/frames-ia32.cc
new file mode 100644
index 0000000..dea439f
--- /dev/null
+++ b/src/ia32/frames-ia32.cc
@@ -0,0 +1,116 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "frames-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+StackFrame::Type StackFrame::ComputeType(State* state) {
+  ASSERT(state->fp != NULL);
+  if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
+    return ARGUMENTS_ADAPTOR;
+  }
+  // The marker and function offsets overlap. If the marker isn't a
+  // smi then the frame is a JavaScript frame -- and the marker is
+  // really the function.
+  const int offset = StandardFrameConstants::kMarkerOffset;
+  Object* marker = Memory::Object_at(state->fp + offset);
+  if (!marker->IsSmi()) return JAVA_SCRIPT;
+  return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
+}
+
+
+StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
+  if (fp == 0) return NONE;
+  // Compute the stack pointer.
+  Address sp = Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
+  // Fill in the state.
+  state->fp = fp;
+  state->sp = sp;
+  state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
+  // Determine frame type.
+  if (Memory::Address_at(fp + ExitFrameConstants::kDebugMarkOffset) != 0) {
+    return EXIT_DEBUG;
+  } else {
+    return EXIT;
+  }
+}
+
+
+void ExitFrame::Iterate(ObjectVisitor* v) const {
+  // Exit frames on IA-32 do not contain any pointers. The arguments
+  // are traversed as part of the expression stack of the calling
+  // frame.
+}
+
+
+int JavaScriptFrame::GetProvidedParametersCount() const {
+  return ComputeParametersCount();
+}
+
+
+Address JavaScriptFrame::GetCallerStackPointer() const {
+  int arguments;
+  if (Heap::gc_state() != Heap::NOT_IN_GC || disable_heap_access_) {
+    // The arguments for cooked frames are traversed as if they were
+    // expression stack elements of the calling frame. The reason for
+    // this rather strange decision is that we cannot access the
+    // function during mark-compact GCs when the stack is cooked.
+    // In fact accessing heap objects (like function->shared() below)
+    // at all during GC is problematic.
+    arguments = 0;
+  } else {
+    // Compute the number of arguments by getting the number of formal
+    // parameters of the function. We must remember to take the
+    // receiver into account (+1).
+    JSFunction* function = JSFunction::cast(this->function());
+    arguments = function->shared()->formal_parameter_count() + 1;
+  }
+  const int offset = StandardFrameConstants::kCallerSPOffset;
+  return fp() + offset + (arguments * kPointerSize);
+}
+
+
+Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
+  const int arguments = Smi::cast(GetExpression(0))->value();
+  const int offset = StandardFrameConstants::kCallerSPOffset;
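+  // The caller pushed the receiver in addition to the arguments, hence the +1.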
+  return fp() + offset + (arguments + 1) * kPointerSize;
+}
+
+
+Address InternalFrame::GetCallerStackPointer() const {
+  // Internal frames have no arguments. The stack pointer of the
+  // caller is at a fixed offset from the frame pointer.
+  return fp() + StandardFrameConstants::kCallerSPOffset;
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/ia32/frames-ia32.h b/src/ia32/frames-ia32.h
new file mode 100644
index 0000000..3a7c86b
--- /dev/null
+++ b/src/ia32/frames-ia32.h
@@ -0,0 +1,135 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_FRAMES_IA32_H_
+#define V8_IA32_FRAMES_IA32_H_
+
+namespace v8 {
+namespace internal {
+
+
+// Register lists
+// Note that the bit values must match those used in actual instruction encoding
+static const int kNumRegs = 8;
+
+
+// Caller-saved registers
+static const RegList kJSCallerSaved =
+  1 << 0 |  // eax
+  1 << 1 |  // ecx
+  1 << 2 |  // edx
+  1 << 3 |  // ebx - used as a caller-saved register in JavaScript code
+  1 << 7;   // edi - callee function
+
+static const int kNumJSCallerSaved = 5;
+
+typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
+
+// ----------------------------------------------------
+
+
+class StackHandlerConstants : public AllStatic {
+ public:
+  static const int kNextOffset  = 0 * kPointerSize;
+  static const int kFPOffset    = 1 * kPointerSize;
+  static const int kStateOffset = 2 * kPointerSize;
+  static const int kPCOffset    = 3 * kPointerSize;
+
+  static const int kSize = kPCOffset + kPointerSize;
+};
+
+
+class EntryFrameConstants : public AllStatic {
+ public:
+  static const int kCallerFPOffset      = -6 * kPointerSize;
+
+  static const int kFunctionArgOffset   = +3 * kPointerSize;
+  static const int kReceiverArgOffset   = +4 * kPointerSize;
+  static const int kArgcOffset          = +5 * kPointerSize;
+  static const int kArgvOffset          = +6 * kPointerSize;
+};
+
+
+class ExitFrameConstants : public AllStatic {
+ public:
+  static const int kDebugMarkOffset = -2 * kPointerSize;
+  static const int kSPOffset        = -1 * kPointerSize;
+
+  static const int kCallerFPOffset =  0 * kPointerSize;
+  static const int kCallerPCOffset = +1 * kPointerSize;
+
+  // FP-relative displacement of the caller's SP.  It points just
+  // below the saved PC.
+  static const int kCallerSPDisplacement = +2 * kPointerSize;
+};
+
+
+class StandardFrameConstants : public AllStatic {
+ public:
+  static const int kExpressionsOffset = -3 * kPointerSize;
+  static const int kMarkerOffset      = -2 * kPointerSize;
+  static const int kContextOffset     = -1 * kPointerSize;
+  static const int kCallerFPOffset    =  0 * kPointerSize;
+  static const int kCallerPCOffset    = +1 * kPointerSize;
+  static const int kCallerSPOffset    = +2 * kPointerSize;
+};
+
+
+class JavaScriptFrameConstants : public AllStatic {
+ public:
+  // FP-relative.
+  static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
+  static const int kSavedRegistersOffset = +2 * kPointerSize;
+  static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+
+  // Caller SP-relative.
+  static const int kParam0Offset   = -2 * kPointerSize;
+  static const int kReceiverOffset = -1 * kPointerSize;
+};
+
+
+class ArgumentsAdaptorFrameConstants : public AllStatic {
+ public:
+  static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+class InternalFrameConstants : public AllStatic {
+ public:
+  static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+inline Object* JavaScriptFrame::function_slot_object() const {
+  const int offset = JavaScriptFrameConstants::kFunctionOffset;
+  return Memory::Object_at(fp() + offset);
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_IA32_FRAMES_IA32_H_
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
new file mode 100644
index 0000000..f7369a8
--- /dev/null
+++ b/src/ia32/ic-ia32.cc
@@ -0,0 +1,1042 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "ic-inl.h"
+#include "runtime.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+#define __ ACCESS_MASM(masm)
+
+
+// Helper function used to load a property from a dictionary backing store.
+// This function may return false negatives, so the code at miss_label must
+// always perform a complete (non-inlined) property load as a backup.
+// This function is safe to call if the receiver has fast properties or if
+// name is not a symbol; in either case it jumps to miss_label.
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
+                                   Register r0, Register r1, Register r2,
+                                   Register name) {
+  // Register use:
+  //
+  // r0   - used to hold the property dictionary.
+  //
+  // r1   - initially the receiver
+  //      - used for the index into the property dictionary
+  //      - holds the result on exit.
+  //
+  // r2   - used to hold the capacity of the property dictionary.
+  //
+  // name - holds the name of the property and is unchanged.
+
+  Label done;
+
+  // Check for the absence of an interceptor.
+  // Load the map into r0.
+  __ mov(r0, FieldOperand(r1, JSObject::kMapOffset));
+  // Test the has_named_interceptor bit in the map.
+  __ test(FieldOperand(r0, Map::kInstanceAttributesOffset),
+          Immediate(1 << (Map::kHasNamedInterceptor + (3 * 8))));
+
+  // Jump to miss if the interceptor bit is set.
+  __ j(not_zero, miss_label, not_taken);
+
+  // Bail out if we have a JS global proxy object.
+  __ movzx_b(r0, FieldOperand(r0, Map::kInstanceTypeOffset));
+  __ cmp(r0, JS_GLOBAL_PROXY_TYPE);
+  __ j(equal, miss_label, not_taken);
+
+  // Possible work-around for http://crbug.com/16276.
+  __ cmp(r0, JS_GLOBAL_OBJECT_TYPE);
+  __ j(equal, miss_label, not_taken);
+  __ cmp(r0, JS_BUILTINS_OBJECT_TYPE);
+  __ j(equal, miss_label, not_taken);
+
+  // Check that the properties array is a dictionary.
+  __ mov(r0, FieldOperand(r1, JSObject::kPropertiesOffset));
+  __ cmp(FieldOperand(r0, HeapObject::kMapOffset),
+         Immediate(Factory::hash_table_map()));
+  __ j(not_equal, miss_label);
+
+  // Compute the capacity mask.
+  const int kCapacityOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kCapacityIndex * kPointerSize;
+  __ mov(r2, FieldOperand(r0, kCapacityOffset));
+  __ shr(r2, kSmiTagSize);  // convert smi to int
+  __ dec(r2);
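+  // The dictionary capacity is a power of two, so capacity - 1 is the
+  // probe mask.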
+
+  // Generate an unrolled loop that performs a few probes before
+  // giving up. Measurements done on Gmail indicate that 2 probes
+  // cover ~93% of loads from dictionaries.
+  static const int kProbes = 4;
+  const int kElementsStartOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
+  for (int i = 0; i < kProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    __ mov(r1, FieldOperand(name, String::kLengthOffset));
+    __ shr(r1, String::kHashShift);
+    if (i > 0) {
+      __ add(Operand(r1), Immediate(StringDictionary::GetProbeOffset(i)));
+    }
+    __ and_(r1, Operand(r2));
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    __ lea(r1, Operand(r1, r1, times_2, 0));  // r1 = r1 * 3
+
+    // Check if the key is identical to the name.
+    __ cmp(name,
+           Operand(r0, r1, times_4, kElementsStartOffset - kHeapObjectTag));
+    if (i != kProbes - 1) {
+      __ j(equal, &done, taken);
+    } else {
+      __ j(not_equal, miss_label, not_taken);
+    }
+  }
+
+  // Check that the value is a normal property.
+  __ bind(&done);
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  __ test(Operand(r0, r1, times_4, kDetailsOffset - kHeapObjectTag),
+          Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
+  __ j(not_zero, miss_label, not_taken);
+
+  // Get the value at the masked, scaled index.
+  const int kValueOffset = kElementsStartOffset + kPointerSize;
+  __ mov(r1, Operand(r0, r1, times_4, kValueOffset - kHeapObjectTag));
+}
+
+
+// Helper function used to check that a value is either not an object
+// or is loaded if it is an object.
+static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss,
+                                           Register value, Register scratch) {
+  Label done;
+  // Check if the value is a Smi.
+  __ test(value, Immediate(kSmiTagMask));
+  __ j(zero, &done, not_taken);
+  // Check if the object has been loaded.
+  __ mov(scratch, FieldOperand(value, JSFunction::kMapOffset));
+  __ mov(scratch, FieldOperand(scratch, Map::kBitField2Offset));
+  __ test(scratch, Immediate(1 << Map::kNeedsLoading));
+  __ j(not_zero, miss, not_taken);
+  __ bind(&done);
+}
+
+
+// The offset from the inlined patch site to the start of the
+// inlined load instruction.  It is 7 bytes (test eax, imm) plus
+// 6 bytes (jne slow_label).
+const int LoadIC::kOffsetToLoadInstruction = 13;
+
+
+void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+
+  Label miss;
+
+  __ mov(eax, Operand(esp, kPointerSize));
+
+  StubCompiler::GenerateLoadArrayLength(masm, eax, edx, &miss);
+  __ bind(&miss);
+  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
+}
+
+
+void LoadIC::GenerateStringLength(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+
+  Label miss;
+
+  __ mov(eax, Operand(esp, kPointerSize));
+
+  StubCompiler::GenerateLoadStringLength(masm, eax, edx, &miss);
+  __ bind(&miss);
+  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
+}
+
+
+void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+
+  Label miss;
+
+  __ mov(eax, Operand(esp, kPointerSize));
+
+  StubCompiler::GenerateLoadFunctionPrototype(masm, eax, edx, ebx, &miss);
+  __ bind(&miss);
+  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
+}
+
+
+#ifdef DEBUG
+// For use in assert below.
+static int TenToThe(int exponent) {
+  ASSERT(exponent <= 9);
+  ASSERT(exponent >= 1);
+  int answer = 10;
+  for (int i = 1; i < exponent; i++) answer *= 10;
+  return answer;
+}
+#endif
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  Label slow, check_string, index_int, index_string, check_pixel_array;
+
+  // Load name and receiver.
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ mov(ecx, Operand(esp, 2 * kPointerSize));
+
+  // Check that the object isn't a smi.
+  __ test(ecx, Immediate(kSmiTagMask));
+  __ j(zero, &slow, not_taken);
+
+  // Get the map of the receiver.
+  __ mov(edx, FieldOperand(ecx, HeapObject::kMapOffset));
+  // Check that the receiver does not require access checks.  We need
+  // to check this explicitly since this generic stub does not perform
+  // map checks.
+  __ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
+  __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+  __ j(not_zero, &slow, not_taken);
+  // Check that the object is some kind of JS object EXCEPT JS Value type.
+  // In the case that the object is a value-wrapper object,
+  // we enter the runtime system to make sure that indexing
+  // into string objects works as intended.
+  ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+  __ movzx_b(edx, FieldOperand(edx, Map::kInstanceTypeOffset));
+  __ cmp(edx, JS_OBJECT_TYPE);
+  __ j(less, &slow, not_taken);
+  // Check that the key is a smi.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(not_zero, &check_string, not_taken);
+  __ sar(eax, kSmiTagSize);
+  // Get the elements array of the object.
+  __ bind(&index_int);
+  __ mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
+  // Check that the object is in fast mode (not dictionary).
+  __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+         Immediate(Factory::fixed_array_map()));
+  __ j(not_equal, &check_pixel_array);
+  // Check that the key (index) is within bounds.
+  __ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
+  __ j(above_equal, &slow);
+  // Fast case: Do the load.
+  __ mov(eax,
+         Operand(ecx, eax, times_4, FixedArray::kHeaderSize - kHeapObjectTag));
+  __ cmp(Operand(eax), Immediate(Factory::the_hole_value()));
+  // In case the loaded value is the_hole we have to consult GetProperty
+  // to ensure the prototype chain is searched.
+  __ j(equal, &slow);
+  __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
+  __ ret(0);
+
+  // Check whether the elements is a pixel array.
+  // eax: untagged index
+  // ecx: elements array
+  __ bind(&check_pixel_array);
+  __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+         Immediate(Factory::pixel_array_map()));
+  __ j(not_equal, &slow);
+  __ cmp(eax, FieldOperand(ecx, PixelArray::kLengthOffset));
+  __ j(above_equal, &slow);
+  __ mov(ecx, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
+  __ movzx_b(eax, Operand(ecx, eax, times_1, 0));
+  __ shl(eax, kSmiTagSize);
+  __ ret(0);
+
+
+  // Slow case: Load name and receiver from stack and jump to runtime.
+  __ bind(&slow);
+  __ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
+  KeyedLoadIC::Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+
+  __ bind(&check_string);
+  // The key is not a smi.
+  // Is it a string?
+  __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
+  __ j(above_equal, &slow);
+  // Is the string an array index, with cached numeric value?
+  __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
+  __ test(ebx, Immediate(String::kIsArrayIndexMask));
+  __ j(not_zero, &index_string, not_taken);
+
+  // If the string is a symbol, do a quick inline probe of the receiver's
+  // dictionary, if it exists.
+  __ movzx_b(ebx, FieldOperand(edx, Map::kInstanceTypeOffset));
+  __ test(ebx, Immediate(kIsSymbolMask));
+  __ j(zero, &slow, not_taken);
+  // Probe the dictionary leaving result in ecx.
+  GenerateDictionaryLoad(masm, &slow, ebx, ecx, edx, eax);
+  GenerateCheckNonObjectOrLoaded(masm, &slow, ecx, edx);
+  __ mov(eax, Operand(ecx));
+  __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
+  __ ret(0);
+  // Array index string: If short enough use cache in length/hash field (ebx).
+  // We assert that there are enough bits in an int32_t after the hash shift
+  // bits have been subtracted to allow space for the length and the cached
+  // array index.
+  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+         (1 << (String::kShortLengthShift - String::kHashShift)));
+  __ bind(&index_string);
+  const int kLengthFieldLimit =
+      (String::kMaxCachedArrayIndexLength + 1) << String::kShortLengthShift;
+  __ cmp(ebx, kLengthFieldLimit);
+  __ j(above_equal, &slow);
+  __ mov(eax, Operand(ebx));
+  __ and_(eax, (1 << String::kShortLengthShift) - 1);
+  __ shr(eax, String::kLongLengthShift);
+  __ jmp(&index_int);
+}
+
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- esp[0] : return address
+  //  -- esp[4] : key
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  Label slow, fast, array, extra, check_pixel_array;
+
+  // Get the receiver from the stack.
+  __ mov(edx, Operand(esp, 2 * kPointerSize));  // 2 ~ return address, key
+  // Check that the object isn't a smi.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &slow, not_taken);
+  // Get the map from the receiver.
+  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+  // Check that the receiver does not require access checks.  We need
+  // to do this because this generic stub does not perform map checks.
+  __ movzx_b(ebx, FieldOperand(ecx, Map::kBitFieldOffset));
+  __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+  __ j(not_zero, &slow, not_taken);
+  // Get the key from the stack.
+  __ mov(ebx, Operand(esp, 1 * kPointerSize));  // 1 ~ return address
+  // Check that the key is a smi.
+  __ test(ebx, Immediate(kSmiTagMask));
+  __ j(not_zero, &slow, not_taken);
+  // Get the instance type from the map of the receiver.
+  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+  // Check if the object is a JS array or not.
+  __ cmp(ecx, JS_ARRAY_TYPE);
+  __ j(equal, &array);
+  // Check that the object is some kind of JS object.
+  __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+  __ j(less, &slow, not_taken);
+
+  // Object case: Check key against length in the elements array.
+  // eax: value
+  // edx: JSObject
+  // ebx: index (as a smi)
+  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+  // Check that the object is in fast mode (not dictionary).
+  __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+         Immediate(Factory::fixed_array_map()));
+  __ j(not_equal, &check_pixel_array, not_taken);
+  // Untag the key (for checking against untagged length in the fixed array).
+  __ mov(edx, Operand(ebx));
+  __ sar(edx, kSmiTagSize);  // untag the index and use it for the comparison
+  __ cmp(edx, FieldOperand(ecx, Array::kLengthOffset));
+  // eax: value
+  // ecx: FixedArray
+  // ebx: index (as a smi)
+  __ j(below, &fast, taken);
+
+  // Slow case: Push extra copies of the arguments (3).
+  __ bind(&slow);
+  __ pop(ecx);
+  __ push(Operand(esp, 1 * kPointerSize));
+  __ push(Operand(esp, 1 * kPointerSize));
+  __ push(eax);
+  __ push(ecx);
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
+
+  // Check whether the elements is a pixel array.
+  // eax: value
+  // ecx: elements array
+  // ebx: index (as a smi)
+  __ bind(&check_pixel_array);
+  __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+         Immediate(Factory::pixel_array_map()));
+  __ j(not_equal, &slow);
+  // Check that the value is a smi. If a conversion is needed, call into the
+  // runtime to convert and clamp.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(not_zero, &slow);
+  __ sar(ebx, kSmiTagSize);  // Untag the index.
+  __ cmp(ebx, FieldOperand(ecx, PixelArray::kLengthOffset));
+  __ j(above_equal, &slow);
+  __ mov(edx, eax);  // Save the value.
+  __ sar(eax, kSmiTagSize);  // Untag the value.
+  {  // Clamp the value to [0..255].
+    Label done, is_negative;
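+    // Values already in [0, 255] have no bits set above bit 7, so a single
+    // test handles the common in-range case.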
+    __ test(eax, Immediate(0xFFFFFF00));
+    __ j(zero, &done);
+    __ j(negative, &is_negative);
+    __ mov(eax, Immediate(255));
+    __ jmp(&done);
+    __ bind(&is_negative);
+    __ xor_(eax, Operand(eax));  // Clear eax.
+    __ bind(&done);
+  }
+  __ mov(ecx, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
+  __ mov_b(Operand(ecx, ebx, times_1, 0), eax);
+  __ mov(eax, edx);  // Return the original value.
+  __ ret(0);
+
+  // Extra capacity case: Check if there is extra capacity to
+  // perform the store and update the length. Used for adding one
+  // element to the array by writing to array[array.length].
+  __ bind(&extra);
+  // eax: value
+  // edx: JSArray
+  // ecx: FixedArray
+  // ebx: index (as a smi)
+  // flags: compare (ebx, edx.length())
+  __ j(not_equal, &slow, not_taken);  // do not leave holes in the array
+  __ sar(ebx, kSmiTagSize);  // untag
+  __ cmp(ebx, FieldOperand(ecx, Array::kLengthOffset));
+  __ j(above_equal, &slow, not_taken);
+  // Restore tag and increment.
+  __ lea(ebx, Operand(ebx, times_2, 1 << kSmiTagSize));
+  __ mov(FieldOperand(edx, JSArray::kLengthOffset), ebx);
+  __ sub(Operand(ebx), Immediate(1 << kSmiTagSize));  // decrement ebx again
+  __ jmp(&fast);
+
+
+  // Array case: Get the length and the elements array from the JS
+  // array. Check that the array is in fast mode; if it is the
+  // length is always a smi.
+  __ bind(&array);
+  // eax: value
+  // edx: JSArray
+  // ebx: index (as a smi)
+  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+  __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+         Immediate(Factory::fixed_array_map()));
+  __ j(not_equal, &check_pixel_array);
+
+  // Check the key against the length in the array, compute the
+  // address to store into and fall through to fast case.
+  __ cmp(ebx, FieldOperand(edx, JSArray::kLengthOffset));
+  __ j(above_equal, &extra, not_taken);
+
+  // Fast case: Do the store.
+  __ bind(&fast);
+  // eax: value
+  // ecx: FixedArray
+  // ebx: index (as a smi)
+  __ mov(Operand(ecx, ebx, times_2, FixedArray::kHeaderSize - kHeapObjectTag),
+         eax);
+  // Update write barrier for the elements array address.
+  __ mov(edx, Operand(eax));
+  __ RecordWrite(ecx, 0, edx, ebx);
+  __ ret(0);
+}
+
+
+// Defined in ic.cc.
+Object* CallIC_Miss(Arguments args);
+
+void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+  // ----------- S t a t e -------------
+  // -----------------------------------
+  Label number, non_number, non_string, boolean, probe, miss;
+
+  // Get the receiver of the function from the stack; 1 ~ return address.
+  __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+  // Get the name of the function from the stack; 2 ~ return address, receiver.
+  __ mov(ecx, Operand(esp, (argc + 2) * kPointerSize));
+
+  // Probe the stub cache.
+  Code::Flags flags =
+      Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
+  StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, eax);
+
+  // If the stub cache probing failed, the receiver might be a value.
+  // For value objects, we use the map of the prototype objects for
+  // the corresponding JSValue for the cache and that is what we need
+  // to probe.
+  //
+  // Check for number.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &number, not_taken);
+  __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ebx);
+  __ j(not_equal, &non_number, taken);
+  __ bind(&number);
+  StubCompiler::GenerateLoadGlobalFunctionPrototype(
+      masm, Context::NUMBER_FUNCTION_INDEX, edx);
+  __ jmp(&probe);
+
+  // Check for string.
+  __ bind(&non_number);
+  __ cmp(ebx, FIRST_NONSTRING_TYPE);
+  __ j(above_equal, &non_string, taken);
+  StubCompiler::GenerateLoadGlobalFunctionPrototype(
+      masm, Context::STRING_FUNCTION_INDEX, edx);
+  __ jmp(&probe);
+
+  // Check for boolean.
+  __ bind(&non_string);
+  __ cmp(edx, Factory::true_value());
+  __ j(equal, &boolean, not_taken);
+  __ cmp(edx, Factory::false_value());
+  __ j(not_equal, &miss, taken);
+  __ bind(&boolean);
+  StubCompiler::GenerateLoadGlobalFunctionPrototype(
+      masm, Context::BOOLEAN_FUNCTION_INDEX, edx);
+
+  // Probe the stub cache for the value object.
+  __ bind(&probe);
+  StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
+
+  // Cache miss: Jump to runtime.
+  __ bind(&miss);
+  Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+}
+
+
+static void GenerateNormalHelper(MacroAssembler* masm,
+                                 int argc,
+                                 bool is_global_object,
+                                 Label* miss) {
+  // Search dictionary - put result in register edx.
+  GenerateDictionaryLoad(masm, miss, eax, edx, ebx, ecx);
+
+  // Move the result to register edi and check that it isn't a smi.
+  __ mov(edi, Operand(edx));
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, miss, not_taken);
+
+  // Check that the value is a JavaScript function.
+  __ CmpObjectType(edx, JS_FUNCTION_TYPE, edx);
+  __ j(not_equal, miss, not_taken);
+
+  // Check that the function has been loaded.
+  __ mov(edx, FieldOperand(edi, JSFunction::kMapOffset));
+  __ mov(edx, FieldOperand(edx, Map::kBitField2Offset));
+  __ test(edx, Immediate(1 << Map::kNeedsLoading));
+  __ j(not_zero, miss, not_taken);
+
+  // Patch the receiver with the global proxy if necessary.
+  if (is_global_object) {
+    __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+    __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
+    __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
+  }
+
+  // Invoke the function.
+  ParameterCount actual(argc);
+  __ InvokeFunction(edi, actual, JUMP_FUNCTION);
+}
+
+
+void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+  // ----------- S t a t e -------------
+  // -----------------------------------
+
+  Label miss, global_object, non_global_object;
+
+  // Get the receiver of the function from the stack; 1 ~ return address.
+  __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+  // Get the name of the function from the stack; 2 ~ return address, receiver.
+  __ mov(ecx, Operand(esp, (argc + 2) * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+
+  // Check that the receiver is a valid JS object.
+  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+  __ movzx_b(eax, FieldOperand(ebx, Map::kInstanceTypeOffset));
+  __ cmp(eax, FIRST_JS_OBJECT_TYPE);
+  __ j(below, &miss, not_taken);
+
+  // If this assert fails, we have to check upper bound too.
+  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+
+  // Check for access to global object.
+  __ cmp(eax, JS_GLOBAL_OBJECT_TYPE);
+  __ j(equal, &global_object);
+  __ cmp(eax, JS_BUILTINS_OBJECT_TYPE);
+  __ j(not_equal, &non_global_object);
+
+  // Accessing global object: Load and invoke.
+  __ bind(&global_object);
+  // Check that the global object does not require access checks.
+  __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
+  __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+  __ j(not_equal, &miss, not_taken);
+  GenerateNormalHelper(masm, argc, true, &miss);
+
+  // Accessing non-global object: Check for access to global proxy.
+  Label global_proxy, invoke;
+  __ bind(&non_global_object);
+  __ cmp(eax, JS_GLOBAL_PROXY_TYPE);
+  __ j(equal, &global_proxy, not_taken);
+  // Check that the non-global, non-global-proxy object does not
+  // require access checks.
+  __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
+  __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+  __ j(not_equal, &miss, not_taken);
+  __ bind(&invoke);
+  GenerateNormalHelper(masm, argc, false, &miss);
+
+  // Global object proxy access: Check access rights.
+  __ bind(&global_proxy);
+  __ CheckAccessGlobalProxy(edx, eax, &miss);
+  __ jmp(&invoke);
+
+  // Cache miss: Jump to runtime.
+  __ bind(&miss);
+  Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+}
+
+
+void CallIC::Generate(MacroAssembler* masm,
+                      int argc,
+                      const ExternalReference& f) {
+  // ----------- S t a t e -------------
+  // -----------------------------------
+
+  // Get the receiver of the function from the stack; 1 ~ return address.
+  __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+  // Get the name of the function to call from the stack.
+  // 2 ~ receiver, return address.
+  __ mov(ebx, Operand(esp, (argc + 2) * kPointerSize));
+
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Push the receiver and the name of the function.
+  __ push(edx);
+  __ push(ebx);
+
+  // Call the entry.
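+  // The CEntryStub expects the argument count (receiver and name) in eax
+  // and the external reference to call in ebx.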
+  CEntryStub stub(1);
+  __ mov(eax, Immediate(2));
+  __ mov(ebx, Immediate(f));
+  __ CallStub(&stub);
+
+  // Move result to edi and exit the internal frame.
+  __ mov(edi, eax);
+  __ LeaveInternalFrame();
+
+  // Check if the receiver is a global object of some sort.
+  Label invoke, global;
+  __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));  // receiver
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &invoke, not_taken);
+  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+  __ cmp(ecx, JS_GLOBAL_OBJECT_TYPE);
+  __ j(equal, &global);
+  __ cmp(ecx, JS_BUILTINS_OBJECT_TYPE);
+  __ j(not_equal, &invoke);
+
+  // Patch the receiver on the stack.
+  __ bind(&global);
+  __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
+  __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
+
+  // Invoke the function.
+  ParameterCount actual(argc);
+  __ bind(&invoke);
+  __ InvokeFunction(edi, actual, JUMP_FUNCTION);
+}
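+
+
+// Editorial note on the stack layout assumed by the call ICs above (derived
+// from the operand offsets used, not part of the original sources): on entry
+// the stack holds, from the top,
+//   esp[0]                         : return address
+//   esp[1 * kPointerSize] ..
+//   esp[argc * kPointerSize]       : the argc call arguments
+//   esp[(argc + 1) * kPointerSize] : receiver
+//   esp[(argc + 2) * kPointerSize] : name of the function to call
+// which is why the receiver is loaded by skipping one slot ("1 ~ return
+// address") and the name by skipping two ("2 ~ return address, receiver").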
+
+
+// Defined in ic.cc.
+Object* LoadIC_Miss(Arguments args);
+
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+
+  __ mov(eax, Operand(esp, kPointerSize));
+
+  // Probe the stub cache.
+  Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
+                                         NOT_IN_LOOP,
+                                         MONOMORPHIC);
+  StubCache::GenerateProbe(masm, flags, eax, ecx, ebx, edx);
+
+  // Cache miss: Jump to runtime.
+  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+}
+
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+
+  Label miss, probe, global;
+
+  __ mov(eax, Operand(esp, kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+
+  // Check that the receiver is a valid JS object.
+  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ movzx_b(edx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+  __ cmp(edx, FIRST_JS_OBJECT_TYPE);
+  __ j(less, &miss, not_taken);
+
+  // If this assert fails, we have to check the upper bound too.
+  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+
+  // Check for access to global object (unlikely).
+  __ cmp(edx, JS_GLOBAL_PROXY_TYPE);
+  __ j(equal, &global, not_taken);
+
+  // Check for non-global object that requires access check.
+  __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
+  __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+  __ j(not_zero, &miss, not_taken);
+
+  // Search the dictionary placing the result in eax.
+  __ bind(&probe);
+  GenerateDictionaryLoad(masm, &miss, edx, eax, ebx, ecx);
+  GenerateCheckNonObjectOrLoaded(masm, &miss, eax, edx);
+  __ ret(0);
+
+  // Global object access: Check access rights.
+  __ bind(&global);
+  __ CheckAccessGlobalProxy(eax, edx, &miss);
+  __ jmp(&probe);
+
+  // Cache miss: Restore receiver from stack and jump to runtime.
+  __ bind(&miss);
+  __ mov(eax, Operand(esp, 1 * kPointerSize));
+  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+}
+
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+
+  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+}
+
+
+void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ pop(ebx);
+  __ push(eax);  // receiver
+  __ push(ecx);  // name
+  __ push(ebx);  // return address
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(f, 2, 1);
+}
+
+
+// One byte opcode for test eax,0xXXXXXXXX.
+static const byte kTestEaxByte = 0xA9;
+
+
+void LoadIC::ClearInlinedVersion(Address address) {
+  // Reset the map check of the inlined inobject property load (if
+  // present) to guarantee failure by holding an invalid map (the null
+  // value).  The offset can be patched to anything.
+  PatchInlinedLoad(address, Heap::null_value(), kMaxInt);
+}
+
+
+void KeyedLoadIC::ClearInlinedVersion(Address address) {
+  // Insert null as the map to check for, so that the map check fails and
+  // control flows to the IC instead of the inlined version.
+  PatchInlinedLoad(address, Heap::null_value());
+}
+
+
+void KeyedStoreIC::ClearInlinedVersion(Address address) {
+  // Insert null as the elements map to check for.  This will make
+  // sure that the elements fast-case map check fails so that control
+  // flows to the IC instead of the inlined version.
+  PatchInlinedStore(address, Heap::null_value());
+}
+
+
+void KeyedStoreIC::RestoreInlinedVersion(Address address) {
+  // Restore the fast-case elements map check so that the inlined
+  // version can be used again.
+  PatchInlinedStore(address, Heap::fixed_array_map());
+}
+
+
+bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
+  // The address of the instruction following the call.
+  Address test_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+  // If the instruction following the call is not a test eax, nothing
+  // was inlined.
+  if (*test_instruction_address != kTestEaxByte) return false;
+
+  Address delta_address = test_instruction_address + 1;
+  // The delta to the start of the map check instruction.
+  int delta = *reinterpret_cast<int*>(delta_address);
+
+  // The map address is the last 4 bytes of the 7-byte
+  // operand-immediate compare instruction, so we add 3 to get the
+  // offset to the last 4 bytes.
+  Address map_address = test_instruction_address + delta + 3;
+  *(reinterpret_cast<Object**>(map_address)) = map;
+
+  // The offset is in the last 4 bytes of a six-byte
+  // memory-to-register move instruction, so we add 2 to get the
+  // offset to the last 4 bytes.
+  Address offset_address =
+      test_instruction_address + delta + kOffsetToLoadInstruction + 2;
+  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
+  return true;
+}
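+
+
+// Editorial sketch of the inlined load site that the patching above assumes
+// (sizes and opcodes taken from the surrounding comments; the listing itself
+// is illustrative, not generated code):
+//   call <load IC>                ; IC call site
+//   test eax, <delta>             ; kTestEaxByte (0xA9) + 4-byte delta
+//   ... delta bytes after the test instruction ...
+//   cmp <operand>, <map>          ; 7-byte compare, map in the last 4 bytes
+//   ... kOffsetToLoadInstruction bytes after the cmp ...
+//   mov <reg>, [<base> + offset]  ; 6-byte load, offset in the last 4 bytes
+// Patching overwrites the 4-byte map and offset immediates in place;
+// ClearInlinedVersion stores the null value as the map so the check always
+// fails and control falls back to the IC.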
+
+
+static bool PatchInlinedMapCheck(Address address, Object* map) {
+  Address test_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+  // The keyed load has a fast inlined case if the IC call instruction
+  // is immediately followed by a test instruction.
+  if (*test_instruction_address != kTestEaxByte) return false;
+
+  // Fetch the offset from the test instruction to the map cmp
+  // instruction.  This offset is stored in the last 4 bytes of the 5
+  // byte test instruction.
+  Address delta_address = test_instruction_address + 1;
+  int delta = *reinterpret_cast<int*>(delta_address);
+  // Compute the map address.  The map address is in the last 4 bytes
+  // of the 7-byte operand-immediate compare instruction, so we add 3
+  // to the offset to get the map address.
+  Address map_address = test_instruction_address + delta + 3;
+  // Patch the map check.
+  *(reinterpret_cast<Object**>(map_address)) = map;
+  return true;
+}
+
+
+bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
+  return PatchInlinedMapCheck(address, map);
+}
+
+
+bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
+  return PatchInlinedMapCheck(address, map);
+}
+
+
+// Defined in ic.cc.
+Object* KeyedLoadIC_Miss(Arguments args);
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+
+  Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
+}
+
+
+void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ mov(ecx, Operand(esp, 2 * kPointerSize));
+  __ pop(ebx);
+  __ push(ecx);  // receiver
+  __ push(eax);  // name
+  __ push(ebx);  // return address
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(f, 2, 1);
+}
+
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+
+  // Get the receiver from the stack and probe the stub cache.
+  __ mov(edx, Operand(esp, kPointerSize));
+  Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
+                                         NOT_IN_LOOP,
+                                         MONOMORPHIC);
+  StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
+
+  // Cache miss: Jump to runtime.
+  Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
+}
+
+
+void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : transition map
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+
+  __ pop(ebx);
+  __ push(Operand(esp, 0));  // receiver
+  __ push(ecx);  // transition map
+  __ push(eax);  // value
+  __ push(ebx);  // return address
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(
+      ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
+}
+
+
+void StoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+
+  // Move the return address below the arguments.
+  __ pop(ebx);
+  __ push(Operand(esp, 0));
+  __ push(ecx);
+  __ push(eax);
+  __ push(ebx);
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(f, 3, 1);
+}
+
+
+// Defined in ic.cc.
+Object* KeyedStoreIC_Miss(Arguments args);
+
+void KeyedStoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- esp[0] : return address
+  //  -- esp[4] : key
+  //  -- esp[8] : receiver
+  // -----------------------------------
+
+  // Move the return address below the arguments.
+  __ pop(ecx);
+  __ push(Operand(esp, 1 * kPointerSize));
+  __ push(Operand(esp, 1 * kPointerSize));
+  __ push(eax);
+  __ push(ecx);
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(f, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : transition map
+  //  -- esp[0] : return address
+  //  -- esp[4] : key
+  //  -- esp[8] : receiver
+  // -----------------------------------
+
+  // Move the return address below the arguments.
+  __ pop(ebx);
+  __ push(Operand(esp, 1 * kPointerSize));
+  __ push(ecx);
+  __ push(eax);
+  __ push(ebx);
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(
+      ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
+}
+
+#undef __
+
+
+} }  // namespace v8::internal
diff --git a/src/ia32/jump-target-ia32.cc b/src/ia32/jump-target-ia32.cc
new file mode 100644
index 0000000..c3f2bc1
--- /dev/null
+++ b/src/ia32/jump-target-ia32.cc
@@ -0,0 +1,432 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "jump-target-inl.h"
+#include "register-allocator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// JumpTarget implementation.
+
+#define __ ACCESS_MASM(cgen()->masm())
+
+void JumpTarget::DoJump() {
+  ASSERT(cgen()->has_valid_frame());
+  // Live non-frame registers are not allowed at unconditional jumps
+  // because we have no way of invalidating the corresponding results
+  // which are still live in the C++ code.
+  ASSERT(cgen()->HasValidEntryRegisters());
+
+  if (is_bound()) {
+    // Backward jump.  There is an expected frame to merge to.
+    ASSERT(direction_ == BIDIRECTIONAL);
+    cgen()->frame()->PrepareMergeTo(entry_frame_);
+    cgen()->frame()->MergeTo(entry_frame_);
+    cgen()->DeleteFrame();
+    __ jmp(&entry_label_);
+  } else if (entry_frame_ != NULL) {
+    // Forward jump with a preconfigured entry frame.  Assert the
+    // current frame matches the expected one and jump to the block.
+    ASSERT(cgen()->frame()->Equals(entry_frame_));
+    cgen()->DeleteFrame();
+    __ jmp(&entry_label_);
+  } else {
+    // Forward jump.  Remember the current frame and emit a jump to
+    // its merge code.
+    AddReachingFrame(cgen()->frame());
+    RegisterFile empty;
+    cgen()->SetFrame(NULL, &empty);
+    __ jmp(&merge_labels_.last());
+  }
+}
+
+
+void JumpTarget::DoBranch(Condition cc, Hint hint) {
+  ASSERT(cgen() != NULL);
+  ASSERT(cgen()->has_valid_frame());
+
+  if (is_bound()) {
+    ASSERT(direction_ == BIDIRECTIONAL);
+    // Backward branch.  We have an expected frame to merge to on the
+    // backward edge.
+
+    // Swap the current frame for a copy (we do the swapping to get
+    // the off-frame registers off the fall through) to use for the
+    // branch.
+    VirtualFrame* fall_through_frame = cgen()->frame();
+    VirtualFrame* branch_frame = new VirtualFrame(fall_through_frame);
+    RegisterFile non_frame_registers;
+    cgen()->SetFrame(branch_frame, &non_frame_registers);
+
+    // Check if we can avoid merge code.
+    cgen()->frame()->PrepareMergeTo(entry_frame_);
+    if (cgen()->frame()->Equals(entry_frame_)) {
+      // Branch right in to the block.
+      cgen()->DeleteFrame();
+      __ j(cc, &entry_label_, hint);
+      cgen()->SetFrame(fall_through_frame, &non_frame_registers);
+      return;
+    }
+
+    // Check if we can reuse existing merge code.
+    for (int i = 0; i < reaching_frames_.length(); i++) {
+      if (reaching_frames_[i] != NULL &&
+          cgen()->frame()->Equals(reaching_frames_[i])) {
+        // Branch to the merge code.
+        cgen()->DeleteFrame();
+        __ j(cc, &merge_labels_[i], hint);
+        cgen()->SetFrame(fall_through_frame, &non_frame_registers);
+        return;
+      }
+    }
+
+    // To emit the merge code here, we negate the condition and branch
+    // around the merge code on the fall through path.
+    Label original_fall_through;
+    __ j(NegateCondition(cc), &original_fall_through, NegateHint(hint));
+    cgen()->frame()->MergeTo(entry_frame_);
+    cgen()->DeleteFrame();
+    __ jmp(&entry_label_);
+    cgen()->SetFrame(fall_through_frame, &non_frame_registers);
+    __ bind(&original_fall_through);
+
+  } else if (entry_frame_ != NULL) {
+    // Forward branch with a preconfigured entry frame.  Assert the
+    // current frame matches the expected one and branch to the block.
+    ASSERT(cgen()->frame()->Equals(entry_frame_));
+    // Explicitly use the macro assembler instead of __ as forward
+    // branches are expected to be a fixed size (no inserted
+    // coverage-checking instructions please).  This is used in
+    // Reference::GetValue.
+    cgen()->masm()->j(cc, &entry_label_, hint);
+
+  } else {
+    // Forward branch.  A copy of the current frame is remembered and
+    // a branch to the merge code is emitted.  Explicitly use the
+    // macro assembler instead of __ as forward branches are expected
+    // to be a fixed size (no inserted coverage-checking instructions
+    // please).  This is used in Reference::GetValue.
+    AddReachingFrame(new VirtualFrame(cgen()->frame()));
+    cgen()->masm()->j(cc, &merge_labels_.last(), hint);
+  }
+}
+
+
+void JumpTarget::Call() {
+  // Call is used to push the address of the catch block on the stack as
+  // a return address when compiling try/catch and try/finally.  We
+  // fully spill the frame before making the call.  The expected frame
+  // at the label (which should be the only one) is the spilled current
+  // frame plus an in-memory return address.  The "fall-through" frame
+  // at the return site is the spilled current frame.
+  ASSERT(cgen() != NULL);
+  ASSERT(cgen()->has_valid_frame());
+  // There are no non-frame references across the call.
+  ASSERT(cgen()->HasValidEntryRegisters());
+  ASSERT(!is_linked());
+
+  cgen()->frame()->SpillAll();
+  VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
+  target_frame->Adjust(1);
+  // We do not expect a call with a preconfigured entry frame.
+  ASSERT(entry_frame_ == NULL);
+  AddReachingFrame(target_frame);
+  __ call(&merge_labels_.last());
+}
+
+
+void JumpTarget::DoBind() {
+  ASSERT(cgen() != NULL);
+  ASSERT(!is_bound());
+
+  // Live non-frame registers are not allowed at the start of a basic
+  // block.
+  ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
+
+  // Fast case: the jump target was manually configured with an entry
+  // frame to use.
+  if (entry_frame_ != NULL) {
+    // Assert no reaching frames to deal with.
+    ASSERT(reaching_frames_.is_empty());
+    ASSERT(!cgen()->has_valid_frame());
+
+    RegisterFile empty;
+    if (direction_ == BIDIRECTIONAL) {
+      // Copy the entry frame so the original can be used for a
+      // possible backward jump.
+      cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
+    } else {
+      // Take ownership of the entry frame.
+      cgen()->SetFrame(entry_frame_, &empty);
+      entry_frame_ = NULL;
+    }
+    __ bind(&entry_label_);
+    return;
+  }
+
+  if (!is_linked()) {
+    ASSERT(cgen()->has_valid_frame());
+    if (direction_ == FORWARD_ONLY) {
+      // Fast case: no forward jumps and no possible backward jumps.
+      // The stack pointer can be floating above the top of the
+      // virtual frame before the bind.  Afterward, it should not.
+      VirtualFrame* frame = cgen()->frame();
+      int difference = frame->stack_pointer_ - (frame->element_count() - 1);
+      if (difference > 0) {
+        frame->stack_pointer_ -= difference;
+        __ add(Operand(esp), Immediate(difference * kPointerSize));
+      }
+    } else {
+      ASSERT(direction_ == BIDIRECTIONAL);
+      // Fast case: no forward jumps, possible backward ones.  Remove
+      // constants and copies above the watermark on the fall-through
+      // frame and use it as the entry frame.
+      cgen()->frame()->MakeMergable();
+      entry_frame_ = new VirtualFrame(cgen()->frame());
+    }
+    __ bind(&entry_label_);
+    return;
+  }
+
+  if (direction_ == FORWARD_ONLY &&
+      !cgen()->has_valid_frame() &&
+      reaching_frames_.length() == 1) {
+    // Fast case: no fall-through, a single forward jump, and no
+    // possible backward jumps.  Pick up the only reaching frame, take
+    // ownership of it, and use it for the block about to be emitted.
+    VirtualFrame* frame = reaching_frames_[0];
+    RegisterFile empty;
+    cgen()->SetFrame(frame, &empty);
+    reaching_frames_[0] = NULL;
+    __ bind(&merge_labels_[0]);
+
+    // The stack pointer can be floating above the top of the
+    // virtual frame before the bind.  Afterward, it should not.
+    int difference = frame->stack_pointer_ - (frame->element_count() - 1);
+    if (difference > 0) {
+      frame->stack_pointer_ -= difference;
+      __ add(Operand(esp), Immediate(difference * kPointerSize));
+    }
+
+    __ bind(&entry_label_);
+    return;
+  }
+
+  // If there is a current frame, record it as the fall-through.  It
+  // is owned by the reaching frames for now.
+  bool had_fall_through = false;
+  if (cgen()->has_valid_frame()) {
+    had_fall_through = true;
+    AddReachingFrame(cgen()->frame());  // Return value ignored.
+    RegisterFile empty;
+    cgen()->SetFrame(NULL, &empty);
+  }
+
+  // Compute the frame to use for entry to the block.
+  ComputeEntryFrame();
+
+  // Some moves required to merge to an expected frame require purely
+  // frame state changes, and do not require any code generation.
+  // Perform those first to increase the possibility of finding equal
+  // frames below.
+  for (int i = 0; i < reaching_frames_.length(); i++) {
+    if (reaching_frames_[i] != NULL) {
+      reaching_frames_[i]->PrepareMergeTo(entry_frame_);
+    }
+  }
+
+  if (is_linked()) {
+    // There were forward jumps.  Handle merging the reaching frames
+    // to the entry frame.
+
+    // Loop over the (non-null) reaching frames and process any that
+    // need merge code.  Iterate backwards through the list to handle
+    // the fall-through frame first.  Set frames that will be
+    // processed after 'i' to NULL if we want to avoid processing
+    // them.
+    for (int i = reaching_frames_.length() - 1; i >= 0; i--) {
+      VirtualFrame* frame = reaching_frames_[i];
+
+      if (frame != NULL) {
+        // Does the frame (probably) need merge code?
+        if (!frame->Equals(entry_frame_)) {
+          // We could have a valid frame as the fall through to the
+          // binding site or as the fall through from a previous merge
+          // code block.  Jump around the code we are about to
+          // generate.
+          if (cgen()->has_valid_frame()) {
+            cgen()->DeleteFrame();
+            __ jmp(&entry_label_);
+          }
+          // Pick up the frame for this block.  Assume ownership if
+          // there cannot be backward jumps.
+          RegisterFile empty;
+          if (direction_ == BIDIRECTIONAL) {
+            cgen()->SetFrame(new VirtualFrame(frame), &empty);
+          } else {
+            cgen()->SetFrame(frame, &empty);
+            reaching_frames_[i] = NULL;
+          }
+          __ bind(&merge_labels_[i]);
+
+          // Loop over the remaining (non-null) reaching frames,
+          // looking for any that can share merge code with this one.
+          for (int j = 0; j < i; j++) {
+            VirtualFrame* other = reaching_frames_[j];
+            if (other != NULL && other->Equals(cgen()->frame())) {
+              // Set the reaching frame element to null to avoid
+              // processing it later, and then bind its entry label.
+              reaching_frames_[j] = NULL;
+              __ bind(&merge_labels_[j]);
+            }
+          }
+
+          // Emit the merge code.
+          cgen()->frame()->MergeTo(entry_frame_);
+        } else if (i == reaching_frames_.length() - 1 && had_fall_through) {
+          // If this is the fall through frame, and it didn't need
+          // merge code, we need to pick up the frame so we can jump
+          // around subsequent merge blocks if necessary.
+          RegisterFile empty;
+          cgen()->SetFrame(frame, &empty);
+          reaching_frames_[i] = NULL;
+        }
+      }
+    }
+
+    // The code generator may not have a current frame if there was no
+    // fall through and none of the reaching frames needed merging.
+    // In that case, clone the entry frame as the current frame.
+    if (!cgen()->has_valid_frame()) {
+      RegisterFile empty;
+      cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
+    }
+
+    // There may be unprocessed reaching frames that did not need
+    // merge code.  They will have unbound merge labels.  Bind their
+    // merge labels to be the same as the entry label and deallocate
+    // them.
+    for (int i = 0; i < reaching_frames_.length(); i++) {
+      if (!merge_labels_[i].is_bound()) {
+        reaching_frames_[i] = NULL;
+        __ bind(&merge_labels_[i]);
+      }
+    }
+
+    // There are non-NULL reaching frames with bound labels for each
+    // merge block, but only on backward targets.
+  } else {
+    // There were no forward jumps.  There must be a current frame and
+    // this must be a bidirectional target.
+    ASSERT(reaching_frames_.length() == 1);
+    ASSERT(reaching_frames_[0] != NULL);
+    ASSERT(direction_ == BIDIRECTIONAL);
+
+    // Use a copy of the reaching frame so the original can be saved
+    // for possible reuse as a backward merge block.
+    RegisterFile empty;
+    cgen()->SetFrame(new VirtualFrame(reaching_frames_[0]), &empty);
+    __ bind(&merge_labels_[0]);
+    cgen()->frame()->MergeTo(entry_frame_);
+  }
+
+  __ bind(&entry_label_);
+}
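+
+
+// Editorial summary of DoBind (not part of the original sources): the entry
+// frame for the block is chosen in one of three ways.  A preconfigured entry
+// frame is used directly (copied first for bidirectional targets).  With no
+// linked forward jumps, the fall-through frame is used, made mergable for
+// bidirectional targets.  Otherwise the entry frame is computed from the
+// reaching frames (with a fast case when there is exactly one reaching frame
+// and no fall-through) and merge code is emitted for every reaching frame
+// that does not already match it, sharing merge code between equal frames.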
+
+
+void BreakTarget::Jump() {
+  // Drop leftover statement state from the frame before merging, without
+  // emitting code.
+  ASSERT(cgen()->has_valid_frame());
+  int count = cgen()->frame()->height() - expected_height_;
+  cgen()->frame()->ForgetElements(count);
+  DoJump();
+}
+
+
+void BreakTarget::Jump(Result* arg) {
+  // Drop leftover statement state from the frame before merging, without
+  // emitting code.
+  ASSERT(cgen()->has_valid_frame());
+  int count = cgen()->frame()->height() - expected_height_;
+  cgen()->frame()->ForgetElements(count);
+  cgen()->frame()->Push(arg);
+  DoJump();
+}
+
+
+void BreakTarget::Bind() {
+#ifdef DEBUG
+  // All the forward-reaching frames should have been adjusted at the
+  // jumps to this target.
+  for (int i = 0; i < reaching_frames_.length(); i++) {
+    ASSERT(reaching_frames_[i] == NULL ||
+           reaching_frames_[i]->height() == expected_height_);
+  }
+#endif
+  // Drop leftover statement state from the frame before merging, even on
+  // the fall through.  This is so we can bind the return target with state
+  // on the frame.
+  if (cgen()->has_valid_frame()) {
+    int count = cgen()->frame()->height() - expected_height_;
+    cgen()->frame()->ForgetElements(count);
+  }
+  DoBind();
+}
+
+
+void BreakTarget::Bind(Result* arg) {
+#ifdef DEBUG
+  // All the forward-reaching frames should have been adjusted at the
+  // jumps to this target.
+  for (int i = 0; i < reaching_frames_.length(); i++) {
+    ASSERT(reaching_frames_[i] == NULL ||
+           reaching_frames_[i]->height() == expected_height_ + 1);
+  }
+#endif
+  // Drop leftover statement state from the frame before merging, even on
+  // the fall through.  This is so we can bind the return target with state
+  // on the frame.
+  if (cgen()->has_valid_frame()) {
+    int count = cgen()->frame()->height() - expected_height_;
+    cgen()->frame()->ForgetElements(count);
+    cgen()->frame()->Push(arg);
+  }
+  DoBind();
+  *arg = cgen()->frame()->Pop();
+}
+
+
+#undef __
+
+
+} }  // namespace v8::internal
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
new file mode 100644
index 0000000..e83bb92
--- /dev/null
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -0,0 +1,1192 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "debug.h"
+#include "runtime.h"
+#include "serialize.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// MacroAssembler implementation.
+
+MacroAssembler::MacroAssembler(void* buffer, int size)
+    : Assembler(buffer, size),
+      unresolved_(0),
+      generating_stub_(false),
+      allow_stub_calls_(true),
+      code_object_(Heap::undefined_value()) {
+}
+
+
+static void RecordWriteHelper(MacroAssembler* masm,
+                              Register object,
+                              Register addr,
+                              Register scratch) {
+  Label fast;
+
+  // Compute the page start address from the heap object pointer, and reuse
+  // the 'object' register for it.
+  masm->and_(object, ~Page::kPageAlignmentMask);
+  Register page_start = object;
+
+  // Compute the bit addr in the remembered set/index of the pointer in the
+  // page. Reuse 'addr' as pointer_offset.
+  masm->sub(addr, Operand(page_start));
+  masm->shr(addr, kObjectAlignmentBits);
+  Register pointer_offset = addr;
+
+  // If the bit offset lies beyond the normal remembered set range, it is in
+  // the extra remembered set area of a large object.
+  masm->cmp(pointer_offset, Page::kPageSize / kPointerSize);
+  masm->j(less, &fast);
+
+  // Adjust 'page_start' so that addressing using 'pointer_offset' hits the
+  // extra remembered set after the large object.
+
+  // Find the length of the large object (FixedArray).
+  masm->mov(scratch, Operand(page_start, Page::kObjectStartOffset
+                                         + FixedArray::kLengthOffset));
+  Register array_length = scratch;
+
+  // Extra remembered set starts right after the large object (a FixedArray), at
+  //   page_start + kObjectStartOffset + objectSize
+  // where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length.
+  // Add the delta between the end of the normal RSet and the start of the
+  // extra RSet to 'page_start', so that addressing the bit using
+  // 'pointer_offset' hits the extra RSet words.
+  masm->lea(page_start,
+            Operand(page_start, array_length, times_pointer_size,
+                    Page::kObjectStartOffset + FixedArray::kHeaderSize
+                        - Page::kRSetEndOffset));
+
+  // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
+  // to limit code size. We should probably evaluate this decision by
+  // measuring the performance of an equivalent implementation using
+  // "simpler" instructions
+  masm->bind(&fast);
+  masm->bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
+}
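+
+
+// Editorial sketch of the addressing computed above (constants left symbolic;
+// not part of the original sources): for a slot at 'addr' in the page
+// starting at 'page_start', the remembered set bit index is
+//   pointer_offset = (addr - page_start) >> kObjectAlignmentBits
+// and the bit is set with bts at page_start + Page::kRSetOffset.  If the
+// index reaches Page::kPageSize / kPointerSize, the slot belongs to a large
+// object (a FixedArray), and page_start is adjusted so that the same bts
+// instruction addresses the extra remembered set stored after the object.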
+
+
+class RecordWriteStub : public CodeStub {
+ public:
+  RecordWriteStub(Register object, Register addr, Register scratch)
+      : object_(object), addr_(addr), scratch_(scratch) { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Register object_;
+  Register addr_;
+  Register scratch_;
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
+           object_.code(), addr_.code(), scratch_.code());
+  }
+#endif
+
+  // Minor key encoding in 12 bits of three registers (object, address and
+  // scratch) OOOOAAAASSSS.
+  class ScratchBits: public BitField<uint32_t, 0, 4> {};
+  class AddressBits: public BitField<uint32_t, 4, 4> {};
+  class ObjectBits: public BitField<uint32_t, 8, 4> {};
+
+  Major MajorKey() { return RecordWrite; }
+
+  int MinorKey() {
+    // Encode the registers.
+    return ObjectBits::encode(object_.code()) |
+           AddressBits::encode(addr_.code()) |
+           ScratchBits::encode(scratch_.code());
+  }
+};
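+
+
+// Illustrative example of the key encoding (editorial; the ia32 register
+// codes for eax, ecx and edx are assumed to be 0, 1 and 2): with object in
+// eax, addr in ecx and scratch in edx, MinorKey() yields
+//   ObjectBits::encode(0) | AddressBits::encode(1) | ScratchBits::encode(2)
+//   = (0 << 8) | (1 << 4) | 2 = 0x012
+// which matches the OOOOAAAASSSS layout described above.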
+
+
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+  RecordWriteHelper(masm, object_, addr_, scratch_);
+  masm->ret(0);
+}
+
+
+// Set the remembered set bit for [object+offset].
+// object is the object being stored into, value is the object being stored.
+// If offset is zero, then the scratch register contains the array index into
+// the elements array represented as a Smi.
+// All registers are clobbered by the operation.
+void MacroAssembler::RecordWrite(Register object, int offset,
+                                 Register value, Register scratch) {
+  // First, check if a remembered set write is even needed.  The tests below
+  // catch stores of smis and stores into the young generation (which does
+  // not have space for the remembered set bits).
+  Label done;
+
+  // Skip barrier if writing a smi.
+  ASSERT_EQ(0, kSmiTag);
+  test(value, Immediate(kSmiTagMask));
+  j(zero, &done);
+
+  if (Serializer::enabled()) {
+    // Can't do arithmetic on external references if it might get serialized.
+    mov(value, Operand(object));
+    and_(value, Heap::NewSpaceMask());
+    cmp(Operand(value), Immediate(ExternalReference::new_space_start()));
+    j(equal, &done);
+  } else {
+    int32_t new_space_start = reinterpret_cast<int32_t>(
+        ExternalReference::new_space_start().address());
+    lea(value, Operand(object, -new_space_start));
+    and_(value, Heap::NewSpaceMask());
+    j(equal, &done);
+  }
+
+  if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
+    // Compute the bit offset in the remembered set, leave it in 'value'.
+    lea(value, Operand(object, offset));
+    and_(value, Page::kPageAlignmentMask);
+    shr(value, kPointerSizeLog2);
+
+    // Compute the page address from the heap object pointer, leave it in
+    // 'object'.
+    and_(object, ~Page::kPageAlignmentMask);
+
+    // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
+    // to limit code size. We should probably evaluate this decision by
+    // measuring the performance of an equivalent implementation using
+    // "simpler" instructions
+    bts(Operand(object, Page::kRSetOffset), value);
+  } else {
+    Register dst = scratch;
+    if (offset != 0) {
+      lea(dst, Operand(object, offset));
+    } else {
+      // array access: calculate the destination address in the same manner as
+      // KeyedStoreIC::GenerateGeneric.  Multiply a smi by 2 to get an offset
+      // into an array of words.
+      ASSERT_EQ(1, kSmiTagSize);
+      ASSERT_EQ(0, kSmiTag);
+      lea(dst, Operand(object, dst, times_half_pointer_size,
+                       FixedArray::kHeaderSize - kHeapObjectTag));
+    }
+    // If we are already generating a shared stub, not inlining the
+    // record write code isn't going to save us any memory.
+    if (generating_stub()) {
+      RecordWriteHelper(this, object, dst, value);
+    } else {
+      RecordWriteStub stub(object, dst, value);
+      CallStub(&stub);
+    }
+  }
+
+  bind(&done);
+}
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+void MacroAssembler::SaveRegistersToMemory(RegList regs) {
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  // Copy the content of registers to memory location.
+  for (int i = 0; i < kNumJSCallerSaved; i++) {
+    int r = JSCallerSavedCode(i);
+    if ((regs & (1 << r)) != 0) {
+      Register reg = { r };
+      ExternalReference reg_addr =
+          ExternalReference(Debug_Address::Register(i));
+      mov(Operand::StaticVariable(reg_addr), reg);
+    }
+  }
+}
+
+
+void MacroAssembler::RestoreRegistersFromMemory(RegList regs) {
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  // Copy the content of memory location to registers.
+  for (int i = kNumJSCallerSaved; --i >= 0;) {
+    int r = JSCallerSavedCode(i);
+    if ((regs & (1 << r)) != 0) {
+      Register reg = { r };
+      ExternalReference reg_addr =
+          ExternalReference(Debug_Address::Register(i));
+      mov(reg, Operand::StaticVariable(reg_addr));
+    }
+  }
+}
+
+
+void MacroAssembler::PushRegistersFromMemory(RegList regs) {
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  // Push the content of the memory location to the stack.
+  for (int i = 0; i < kNumJSCallerSaved; i++) {
+    int r = JSCallerSavedCode(i);
+    if ((regs & (1 << r)) != 0) {
+      ExternalReference reg_addr =
+          ExternalReference(Debug_Address::Register(i));
+      push(Operand::StaticVariable(reg_addr));
+    }
+  }
+}
+
+
+void MacroAssembler::PopRegistersToMemory(RegList regs) {
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  // Pop the content from the stack to the memory location.
+  for (int i = kNumJSCallerSaved; --i >= 0;) {
+    int r = JSCallerSavedCode(i);
+    if ((regs & (1 << r)) != 0) {
+      ExternalReference reg_addr =
+          ExternalReference(Debug_Address::Register(i));
+      pop(Operand::StaticVariable(reg_addr));
+    }
+  }
+}
+
+
+void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
+                                                    Register scratch,
+                                                    RegList regs) {
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  // Copy the content of the stack to the memory location and adjust base.
+  for (int i = kNumJSCallerSaved; --i >= 0;) {
+    int r = JSCallerSavedCode(i);
+    if ((regs & (1 << r)) != 0) {
+      mov(scratch, Operand(base, 0));
+      ExternalReference reg_addr =
+          ExternalReference(Debug_Address::Register(i));
+      mov(Operand::StaticVariable(reg_addr), scratch);
+      lea(base, Operand(base, kPointerSize));
+    }
+  }
+}
+#endif
+
+void MacroAssembler::Set(Register dst, const Immediate& x) {
+  if (x.is_zero()) {
+    xor_(dst, Operand(dst));  // shorter than mov
+  } else {
+    mov(dst, x);
+  }
+}
+
+
+void MacroAssembler::Set(const Operand& dst, const Immediate& x) {
+  mov(dst, x);
+}
+
+
+void MacroAssembler::CmpObjectType(Register heap_object,
+                                   InstanceType type,
+                                   Register map) {
+  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+  CmpInstanceType(map, type);
+}
+
+
+void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
+  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
+       static_cast<int8_t>(type));
+}
+
+
+void MacroAssembler::FCmp() {
+  fucompp();
+  push(eax);
+  fnstsw_ax();
+  sahf();
+  pop(eax);
+}
+
+
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
+  push(ebp);
+  mov(ebp, Operand(esp));
+  push(esi);
+  push(Immediate(Smi::FromInt(type)));
+  push(Immediate(CodeObject()));
+  if (FLAG_debug_code) {
+    cmp(Operand(esp, 0), Immediate(Factory::undefined_value()));
+    Check(not_equal, "code object not properly patched");
+  }
+}
+
+
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
+  if (FLAG_debug_code) {
+    cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
+        Immediate(Smi::FromInt(type)));
+    Check(equal, "stack frame types must match");
+  }
+  leave();
+}
+
+
+void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
+  ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
+
+  // Setup the frame structure on the stack.
+  ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
+  ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
+  ASSERT(ExitFrameConstants::kCallerFPOffset ==  0 * kPointerSize);
+  push(ebp);
+  mov(ebp, Operand(esp));
+
+  // Reserve room for entry stack pointer and push the debug marker.
+  ASSERT(ExitFrameConstants::kSPOffset  == -1 * kPointerSize);
+  push(Immediate(0));  // saved entry sp, patched before call
+  push(Immediate(type == StackFrame::EXIT_DEBUG ? 1 : 0));
+
+  // Save the frame pointer and the context in top.
+  ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
+  ExternalReference context_address(Top::k_context_address);
+  mov(Operand::StaticVariable(c_entry_fp_address), ebp);
+  mov(Operand::StaticVariable(context_address), esi);
+
+  // Setup argc and argv in callee-saved registers.
+  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
+  mov(edi, Operand(eax));
+  lea(esi, Operand(ebp, eax, times_4, offset));
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Save the state of all registers to the stack from the memory
+  // location. This is needed to allow nested break points.
+  if (type == StackFrame::EXIT_DEBUG) {
+    // TODO(1243899): This should be symmetric to
+    // CopyRegistersFromStackToMemory() but it isn't! esp is assumed
+    // correct here, but computed for the other call. Very error
+    // prone! FIX THIS.  Actually there are deeper problems with
+    // register saving than this asymmetry (see the bug report
+    // associated with this issue).
+    PushRegistersFromMemory(kJSCallerSaved);
+  }
+#endif
+
+  // Reserve space for two arguments: argc and argv.
+  sub(Operand(esp), Immediate(2 * kPointerSize));
+
+  // Get the required frame alignment for the OS.
+  static const int kFrameAlignment = OS::ActivationFrameAlignment();
+  if (kFrameAlignment > 0) {
+    ASSERT(IsPowerOf2(kFrameAlignment));
+    and_(esp, -kFrameAlignment);
+  }
+
+  // Patch the saved entry sp.
+  mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp);
+}
+
+
+void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Restore the memory copy of the registers by digging them out from
+  // the stack. This is needed to allow nested break points.
+  if (type == StackFrame::EXIT_DEBUG) {
+    // It's okay to clobber register ebx below because we don't need
+    // the function pointer after this.
+    const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
+    int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
+    lea(ebx, Operand(ebp, kOffset));
+    CopyRegistersFromStackToMemory(ebx, ecx, kJSCallerSaved);
+  }
+#endif
+
+  // Get the return address from the stack and restore the frame pointer.
+  mov(ecx, Operand(ebp, 1 * kPointerSize));
+  mov(ebp, Operand(ebp, 0 * kPointerSize));
+
+  // Pop the arguments and the receiver from the caller stack.
+  lea(esp, Operand(esi, 1 * kPointerSize));
+
+  // Restore current context from top and clear it in debug mode.
+  ExternalReference context_address(Top::k_context_address);
+  mov(esi, Operand::StaticVariable(context_address));
+#ifdef DEBUG
+  mov(Operand::StaticVariable(context_address), Immediate(0));
+#endif
+
+  // Push the return address to get ready to return.
+  push(ecx);
+
+  // Clear the top frame.
+  ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
+  mov(Operand::StaticVariable(c_entry_fp_address), Immediate(0));
+}
+
+
+void MacroAssembler::PushTryHandler(CodeLocation try_location,
+                                    HandlerType type) {
+  // Adjust this code if not the case.
+  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+  // The pc (return address) is already on TOS.
+  if (try_location == IN_JAVASCRIPT) {
+    if (type == TRY_CATCH_HANDLER) {
+      push(Immediate(StackHandler::TRY_CATCH));
+    } else {
+      push(Immediate(StackHandler::TRY_FINALLY));
+    }
+    push(ebp);
+  } else {
+    ASSERT(try_location == IN_JS_ENTRY);
+    // The frame pointer does not point to a JS frame so we save NULL
+    // for ebp. We expect the code throwing an exception to check ebp
+    // before dereferencing it to restore the context.
+    push(Immediate(StackHandler::ENTRY));
+    push(Immediate(0));  // NULL frame pointer.
+  }
+  // Save the current handler as the next handler.
+  push(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
+  // Link this handler as the new current one.
+  mov(Operand::StaticVariable(ExternalReference(Top::k_handler_address)), esp);
+}
+
+
+Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
+                                   JSObject* holder, Register holder_reg,
+                                   Register scratch,
+                                   Label* miss) {
+  // Make sure there's no overlap between scratch and the other
+  // registers.
+  ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));
+
+  // Keep track of the current object in register reg.
+  Register reg = object_reg;
+  int depth = 1;
+
+  // Traverse the prototype chain from the object, checking the map at each
+  // step.
+  while (object != holder) {
+    depth++;
+
+    // Only global objects and objects that do not require access
+    // checks are allowed in stubs.
+    ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+    JSObject* prototype = JSObject::cast(object->GetPrototype());
+    if (Heap::InNewSpace(prototype)) {
+      // Get the map of the current object.
+      mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+      cmp(Operand(scratch), Immediate(Handle<Map>(object->map())));
+      // Branch on the result of the map check.
+      j(not_equal, miss, not_taken);
+      // Check access rights to the global object.  This has to happen
+      // after the map check so that we know that the object is
+      // actually a global object.
+      if (object->IsJSGlobalProxy()) {
+        CheckAccessGlobalProxy(reg, scratch, miss);
+
+        // Restore scratch register to be the map of the object.
+        // We load the prototype from the map in the scratch register.
+        mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+      }
+      // The prototype is in new space; we cannot store a reference
+      // to it in the code. Load it from the map.
+      reg = holder_reg;  // from now the object is in holder_reg
+      mov(reg, FieldOperand(scratch, Map::kPrototypeOffset));
+
+    } else {
+      // Check the map of the current object.
+      cmp(FieldOperand(reg, HeapObject::kMapOffset),
+          Immediate(Handle<Map>(object->map())));
+      // Branch on the result of the map check.
+      j(not_equal, miss, not_taken);
+      // Check access rights to the global object.  This has to happen
+      // after the map check so that we know that the object is
+      // actually a global object.
+      if (object->IsJSGlobalProxy()) {
+        CheckAccessGlobalProxy(reg, scratch, miss);
+      }
+      // The prototype is in old space; load it directly.
+      reg = holder_reg;  // from now the object is in holder_reg
+      mov(reg, Handle<JSObject>(prototype));
+    }
+
+    // Go to the next object in the prototype chain.
+    object = prototype;
+  }
+
+  // Check the holder map.
+  cmp(FieldOperand(reg, HeapObject::kMapOffset),
+      Immediate(Handle<Map>(holder->map())));
+  j(not_equal, miss, not_taken);
+
+  // Log the check depth.
+  LOG(IntEvent("check-maps-depth", depth));
+
+  // Perform security check for access to the global object and return
+  // the holder register.
+  ASSERT(object == holder);
+  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+  if (object->IsJSGlobalProxy()) {
+    CheckAccessGlobalProxy(reg, scratch, miss);
+  }
+  return reg;
+}
+
+
+void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
+                                            Register scratch,
+                                            Label* miss) {
+  Label same_contexts;
+
+  ASSERT(!holder_reg.is(scratch));
+
+  // Load current lexical context from the stack frame.
+  mov(scratch, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+  // When generating debug code, make sure the lexical context is set.
+  if (FLAG_debug_code) {
+    cmp(Operand(scratch), Immediate(0));
+    Check(not_equal, "we should not have an empty lexical context");
+  }
+  // Load the global context of the current context.
+  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+  mov(scratch, FieldOperand(scratch, offset));
+  mov(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
+
+  // Check the context is a global context.
+  if (FLAG_debug_code) {
+    push(scratch);
+    // Read the first word and compare to global_context_map.
+    mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
+    cmp(scratch, Factory::global_context_map());
+    Check(equal, "JSGlobalObject::global_context should be a global context.");
+    pop(scratch);
+  }
+
+  // Check if both contexts are the same.
+  cmp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
+  j(equal, &same_contexts, taken);
+
+  // Compare security tokens, save holder_reg on the stack so we can use it
+  // as a temporary register.
+  //
+  // TODO(119): avoid push(holder_reg)/pop(holder_reg)
+  push(holder_reg);
+  // Check that the security token in the calling global object is
+  // compatible with the security token in the receiving global
+  // object.
+  mov(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
+
+  // Check the context is a global context.
+  if (FLAG_debug_code) {
+    cmp(holder_reg, Factory::null_value());
+    Check(not_equal, "JSGlobalProxy::context() should not be null.");
+
+    push(holder_reg);
+    // Read the first word and compare to global_context_map().
+    mov(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
+    cmp(holder_reg, Factory::global_context_map());
+    Check(equal, "JSGlobalObject::global_context should be a global context.");
+    pop(holder_reg);
+  }
+
+  int token_offset = Context::kHeaderSize +
+                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
+  mov(scratch, FieldOperand(scratch, token_offset));
+  cmp(scratch, FieldOperand(holder_reg, token_offset));
+  pop(holder_reg);
+  j(not_equal, miss, not_taken);
+
+  bind(&same_contexts);
+}
+
+
+void MacroAssembler::LoadAllocationTopHelper(Register result,
+                                             Register result_end,
+                                             Register scratch,
+                                             AllocationFlags flags) {
+  ExternalReference new_space_allocation_top =
+      ExternalReference::new_space_allocation_top_address();
+
+  // Just return if allocation top is already known.
+  if ((flags & RESULT_CONTAINS_TOP) != 0) {
+    // No use of scratch if allocation top is provided.
+    ASSERT(scratch.is(no_reg));
+#ifdef DEBUG
+    // Assert that result actually contains top on entry.
+    cmp(result, Operand::StaticVariable(new_space_allocation_top));
+    Check(equal, "Unexpected allocation top");
+#endif
+    return;
+  }
+
+  // Move address of new object to result. Use scratch register if available.
+  if (scratch.is(no_reg)) {
+    mov(result, Operand::StaticVariable(new_space_allocation_top));
+  } else {
+    ASSERT(!scratch.is(result_end));
+    mov(Operand(scratch), Immediate(new_space_allocation_top));
+    mov(result, Operand(scratch, 0));
+  }
+}
+
+
+void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
+                                               Register scratch) {
+  ExternalReference new_space_allocation_top =
+      ExternalReference::new_space_allocation_top_address();
+
+  // Update new top. Use scratch if available.
+  if (scratch.is(no_reg)) {
+    mov(Operand::StaticVariable(new_space_allocation_top), result_end);
+  } else {
+    mov(Operand(scratch, 0), result_end);
+  }
+}
+
+
+void MacroAssembler::AllocateInNewSpace(int object_size,
+                                        Register result,
+                                        Register result_end,
+                                        Register scratch,
+                                        Label* gc_required,
+                                        AllocationFlags flags) {
+  ASSERT(!result.is(result_end));
+
+  // Load address of new object into result.
+  LoadAllocationTopHelper(result, result_end, scratch, flags);
+
+  // Calculate new top and bail out if new space is exhausted.
+  ExternalReference new_space_allocation_limit =
+      ExternalReference::new_space_allocation_limit_address();
+  lea(result_end, Operand(result, object_size));
+  cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
+  j(above, gc_required, not_taken);
+
+  // Update allocation top.
+  UpdateAllocationTopHelper(result_end, scratch);
+
+  // Tag result if requested.
+  if ((flags & TAG_OBJECT) != 0) {
+    or_(Operand(result), Immediate(kHeapObjectTag));
+  }
+}
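+
+
+// Editorial note: the three AllocateInNewSpace overloads (one above, two
+// below) all follow the same bump-pointer fast path -- load the current
+// allocation top, compute result_end = top + object size, jump to
+// gc_required if result_end passes the allocation limit, store result_end
+// back as the new top, and (with TAG_OBJECT) or the result with
+// kHeapObjectTag to form a tagged heap object pointer.  They differ only in
+// how the object size is supplied: a constant, a header size plus a scaled
+// element count, or a register.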
+
+
+void MacroAssembler::AllocateInNewSpace(int header_size,
+                                        ScaleFactor element_size,
+                                        Register element_count,
+                                        Register result,
+                                        Register result_end,
+                                        Register scratch,
+                                        Label* gc_required,
+                                        AllocationFlags flags) {
+  ASSERT(!result.is(result_end));
+
+  // Load address of new object into result.
+  LoadAllocationTopHelper(result, result_end, scratch, flags);
+
+  // Calculate new top and bail out if new space is exhausted.
+  ExternalReference new_space_allocation_limit =
+      ExternalReference::new_space_allocation_limit_address();
+  lea(result_end, Operand(result, element_count, element_size, header_size));
+  cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
+  j(above, gc_required);
+
+  // Update allocation top.
+  UpdateAllocationTopHelper(result_end, scratch);
+
+  // Tag result if requested.
+  if ((flags & TAG_OBJECT) != 0) {
+    or_(Operand(result), Immediate(kHeapObjectTag));
+  }
+}
+
+
+void MacroAssembler::AllocateInNewSpace(Register object_size,
+                                        Register result,
+                                        Register result_end,
+                                        Register scratch,
+                                        Label* gc_required,
+                                        AllocationFlags flags) {
+  ASSERT(!result.is(result_end));
+
+  // Load address of new object into result.
+  LoadAllocationTopHelper(result, result_end, scratch, flags);
+
+  // Calculate new top and bail out if new space is exhausted.
+  ExternalReference new_space_allocation_limit =
+      ExternalReference::new_space_allocation_limit_address();
+  if (!object_size.is(result_end)) {
+    mov(result_end, object_size);
+  }
+  add(result_end, Operand(result));
+  cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
+  j(above, gc_required, not_taken);
+
+  // Update allocation top.
+  UpdateAllocationTopHelper(result_end, scratch);
+
+  // Tag result if requested.
+  if ((flags & TAG_OBJECT) != 0) {
+    or_(Operand(result), Immediate(kHeapObjectTag));
+  }
+}
+
+
+void MacroAssembler::UndoAllocationInNewSpace(Register object) {
+  ExternalReference new_space_allocation_top =
+      ExternalReference::new_space_allocation_top_address();
+
+  // Make sure the object has no tag before resetting top.
+  and_(Operand(object), Immediate(~kHeapObjectTagMask));
+#ifdef DEBUG
+  cmp(object, Operand::StaticVariable(new_space_allocation_top));
+  Check(below, "Undo allocation of non allocated memory");
+#endif
+  mov(Operand::StaticVariable(new_space_allocation_top), object);
+}
+
+
+void MacroAssembler::NegativeZeroTest(CodeGenerator* cgen,
+                                      Register result,
+                                      Register op,
+                                      JumpTarget* then_target) {
+  JumpTarget ok;
+  test(result, Operand(result));
+  ok.Branch(not_zero, taken);
+  test(op, Operand(op));
+  then_target->Branch(sign, not_taken);
+  ok.Bind();
+}
+
+
+void MacroAssembler::NegativeZeroTest(Register result,
+                                      Register op,
+                                      Label* then_label) {
+  Label ok;
+  test(result, Operand(result));
+  j(not_zero, &ok, taken);
+  test(op, Operand(op));
+  j(sign, then_label, not_taken);
+  bind(&ok);
+}
+
+
+void MacroAssembler::NegativeZeroTest(Register result,
+                                      Register op1,
+                                      Register op2,
+                                      Register scratch,
+                                      Label* then_label) {
+  Label ok;
+  test(result, Operand(result));
+  j(not_zero, &ok, taken);
+  mov(scratch, Operand(op1));
+  or_(scratch, Operand(op2));
+  j(sign, then_label, not_taken);
+  bind(&ok);
+}
+
+
+void MacroAssembler::TryGetFunctionPrototype(Register function,
+                                             Register result,
+                                             Register scratch,
+                                             Label* miss) {
+  // Check that the receiver isn't a smi.
+  test(function, Immediate(kSmiTagMask));
+  j(zero, miss, not_taken);
+
+  // Check that the function really is a function.
+  CmpObjectType(function, JS_FUNCTION_TYPE, result);
+  j(not_equal, miss, not_taken);
+
+  // Make sure that the function has an instance prototype.
+  Label non_instance;
+  movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
+  test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
+  j(not_zero, &non_instance, not_taken);
+
+  // Get the prototype or initial map from the function.
+  mov(result,
+      FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // If the prototype or initial map is the hole, don't return it and
+  // simply miss the cache instead. This will allow us to allocate a
+  // prototype object on-demand in the runtime system.
+  cmp(Operand(result), Immediate(Factory::the_hole_value()));
+  j(equal, miss, not_taken);
+
+  // If the function does not have an initial map, we're done.
+  Label done;
+  CmpObjectType(result, MAP_TYPE, scratch);
+  j(not_equal, &done);
+
+  // Get the prototype from the initial map.
+  mov(result, FieldOperand(result, Map::kPrototypeOffset));
+  jmp(&done);
+
+  // Non-instance prototype: Fetch prototype from constructor field
+  // in initial map.
+  bind(&non_instance);
+  mov(result, FieldOperand(result, Map::kConstructorOffset));
+
+  // All done.
+  bind(&done);
+}
+
+
+void MacroAssembler::CallStub(CodeStub* stub) {
+  ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
+  call(stub->GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::StubReturn(int argc) {
+  ASSERT(argc >= 1 && generating_stub());
+  ret((argc - 1) * kPointerSize);
+}
+
+
+void MacroAssembler::IllegalOperation(int num_arguments) {
+  if (num_arguments > 0) {
+    add(Operand(esp), Immediate(num_arguments * kPointerSize));
+  }
+  mov(eax, Immediate(Factory::undefined_value()));
+}
+
+
+void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
+  CallRuntime(Runtime::FunctionForId(id), num_arguments);
+}
+
+
+void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
+  // If the expected number of arguments of the runtime function is
+  // constant, we check that the actual number of arguments matches the
+  // expectation.
+  if (f->nargs >= 0 && f->nargs != num_arguments) {
+    IllegalOperation(num_arguments);
+    return;
+  }
+
+  Runtime::FunctionId function_id =
+      static_cast<Runtime::FunctionId>(f->stub_id);
+  RuntimeStub stub(function_id, num_arguments);
+  CallStub(&stub);
+}
+
+
+void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
+                                     int num_arguments,
+                                     int result_size) {
+  // TODO(1236192): Most runtime routines don't need the number of
+  // arguments passed in because it is constant. At some point we
+  // should remove this need and make the runtime routine entry code
+  // smarter.
+  Set(eax, Immediate(num_arguments));
+  JumpToRuntime(ext);
+}
+
+
+void MacroAssembler::JumpToRuntime(const ExternalReference& ext) {
+  // Set the entry point and jump to the C entry runtime stub.
+  mov(ebx, Immediate(ext));
+  CEntryStub ces(1);
+  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+                                    const ParameterCount& actual,
+                                    Handle<Code> code_constant,
+                                    const Operand& code_operand,
+                                    Label* done,
+                                    InvokeFlag flag) {
+  bool definitely_matches = false;
+  Label invoke;
+  if (expected.is_immediate()) {
+    ASSERT(actual.is_immediate());
+    if (expected.immediate() == actual.immediate()) {
+      definitely_matches = true;
+    } else {
+      mov(eax, actual.immediate());
+      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+      if (expected.immediate() == sentinel) {
+        // Don't worry about adapting arguments for builtins that
+        // don't want that done. Skip adaptation code by making it look
+        // like we have a match between expected and actual number of
+        // arguments.
+        definitely_matches = true;
+      } else {
+        mov(ebx, expected.immediate());
+      }
+    }
+  } else {
+    if (actual.is_immediate()) {
+      // Expected is in register, actual is immediate. This is the
+      // case when we invoke function values without going through the
+      // IC mechanism.
+      cmp(expected.reg(), actual.immediate());
+      j(equal, &invoke);
+      ASSERT(expected.reg().is(ebx));
+      mov(eax, actual.immediate());
+    } else if (!expected.reg().is(actual.reg())) {
+      // Both expected and actual are in (different) registers. This
+      // is the case when we invoke functions using call and apply.
+      cmp(expected.reg(), Operand(actual.reg()));
+      j(equal, &invoke);
+      ASSERT(actual.reg().is(eax));
+      ASSERT(expected.reg().is(ebx));
+    }
+  }
+
+  if (!definitely_matches) {
+    Handle<Code> adaptor =
+        Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+    if (!code_constant.is_null()) {
+      mov(edx, Immediate(code_constant));
+      add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
+    } else if (!code_operand.is_reg(edx)) {
+      mov(edx, code_operand);
+    }
+
+    if (flag == CALL_FUNCTION) {
+      call(adaptor, RelocInfo::CODE_TARGET);
+      jmp(done);
+    } else {
+      jmp(adaptor, RelocInfo::CODE_TARGET);
+    }
+    bind(&invoke);
+  }
+}
+
+
+void MacroAssembler::InvokeCode(const Operand& code,
+                                const ParameterCount& expected,
+                                const ParameterCount& actual,
+                                InvokeFlag flag) {
+  Label done;
+  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
+  if (flag == CALL_FUNCTION) {
+    call(code);
+  } else {
+    ASSERT(flag == JUMP_FUNCTION);
+    jmp(code);
+  }
+  bind(&done);
+}
+
+
+void MacroAssembler::InvokeCode(Handle<Code> code,
+                                const ParameterCount& expected,
+                                const ParameterCount& actual,
+                                RelocInfo::Mode rmode,
+                                InvokeFlag flag) {
+  Label done;
+  Operand dummy(eax);
+  InvokePrologue(expected, actual, code, dummy, &done, flag);
+  if (flag == CALL_FUNCTION) {
+    call(code, rmode);
+  } else {
+    ASSERT(flag == JUMP_FUNCTION);
+    jmp(code, rmode);
+  }
+  bind(&done);
+}
+
+
+void MacroAssembler::InvokeFunction(Register fun,
+                                    const ParameterCount& actual,
+                                    InvokeFlag flag) {
+  ASSERT(fun.is(edi));
+  mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+  mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+  mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+  lea(edx, FieldOperand(edx, Code::kHeaderSize));
+
+  ParameterCount expected(ebx);
+  InvokeCode(Operand(edx), expected, actual, flag);
+}
+
+
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
+  bool resolved;
+  Handle<Code> code = ResolveBuiltin(id, &resolved);
+
+  // Calls are not allowed in some stubs.
+  ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
+
+  // Rely on the assertion to check that the number of provided
+  // arguments matches the expected number of arguments. Fake a
+  // parameter count to avoid emitting code to do the check.
+  ParameterCount expected(0);
+  InvokeCode(Handle<Code>(code), expected, expected,
+             RelocInfo::CODE_TARGET, flag);
+
+  const char* name = Builtins::GetName(id);
+  int argc = Builtins::GetArgumentsCount(id);
+
+  if (!resolved) {
+    uint32_t flags =
+        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
+        Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
+        Bootstrapper::FixupFlagsUseCodeObject::encode(false);
+    Unresolved entry = { pc_offset() - sizeof(int32_t), flags, name };
+    unresolved_.Add(entry);
+  }
+}
+
+
+void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+  bool resolved;
+  Handle<Code> code = ResolveBuiltin(id, &resolved);
+
+  const char* name = Builtins::GetName(id);
+  int argc = Builtins::GetArgumentsCount(id);
+
+  mov(Operand(target), Immediate(code));
+  if (!resolved) {
+    uint32_t flags =
+        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
+        Bootstrapper::FixupFlagsIsPCRelative::encode(false) |
+        Bootstrapper::FixupFlagsUseCodeObject::encode(true);
+    Unresolved entry = { pc_offset() - sizeof(int32_t), flags, name };
+    unresolved_.Add(entry);
+  }
+  add(Operand(target), Immediate(Code::kHeaderSize - kHeapObjectTag));
+}
+
+
+Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
+                                            bool* resolved) {
+  // Move the builtin function into the temporary function slot by
+  // reading it from the builtins object. NOTE: We should be able to
+  // reduce this to two instructions by putting the function table in
+  // the global object instead of the "builtins" object and by using a
+  // real register for the function.
+  mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  mov(edx, FieldOperand(edx, GlobalObject::kBuiltinsOffset));
+  int builtins_offset =
+      JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
+  mov(edi, FieldOperand(edx, builtins_offset));
+
+
+  return Builtins::GetCode(id, resolved);
+}
+
+
+void MacroAssembler::Ret() {
+  ret(0);
+}
+
+
+void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
+  }
+}
+
+
+void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
+  ASSERT(value > 0);
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    Operand operand = Operand::StaticVariable(ExternalReference(counter));
+    if (value == 1) {
+      inc(operand);
+    } else {
+      add(operand, Immediate(value));
+    }
+  }
+}
+
+
+void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
+  ASSERT(value > 0);
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    Operand operand = Operand::StaticVariable(ExternalReference(counter));
+    if (value == 1) {
+      dec(operand);
+    } else {
+      sub(operand, Immediate(value));
+    }
+  }
+}
+
+
+void MacroAssembler::Assert(Condition cc, const char* msg) {
+  if (FLAG_debug_code) Check(cc, msg);
+}
+
+
+void MacroAssembler::Check(Condition cc, const char* msg) {
+  Label L;
+  j(cc, &L, taken);
+  Abort(msg);
+  // will not return here
+  bind(&L);
+}
+
+
+void MacroAssembler::Abort(const char* msg) {
+  // We want to pass the msg string like a smi to avoid GC
+  // problems, however msg is not guaranteed to be aligned
+  // properly. Instead, we pass an aligned pointer that is
+  // a proper v8 smi, but also pass the alignment difference
+  // from the real pointer as a smi.
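+  // Illustrative example (an assumption, not part of the original comment):
+  // with kSmiTag == 0 and kSmiTagMask == 1, as on ia32, a msg pointer of
+  // 0x08048a03 gives p0 == 0x08048a02 (low bit clear, hence a valid smi
+  // encoding) and p1 - p0 == 1, which is passed as Smi::FromInt(1).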
+  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
+  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
+  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
+#ifdef DEBUG
+  if (msg != NULL) {
+    RecordComment("Abort message: ");
+    RecordComment(msg);
+  }
+#endif
+  push(eax);
+  push(Immediate(p0));
+  push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0))));
+  CallRuntime(Runtime::kAbort, 2);
+  // will not return here
+}
+
+
+CodePatcher::CodePatcher(byte* address, int size)
+    : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
+  // Create a new macro assembler pointing to the address of the code to patch.
+  // The size is adjusted with kGap in order for the assembler to generate size
+  // bytes of instructions without failing with buffer size constraints.
+  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+CodePatcher::~CodePatcher() {
+  // Indicate that code has changed.
+  CPU::FlushICache(address_, size_);
+
+  // Check that the code was patched as expected.
+  ASSERT(masm_.pc_ == address_ + size_);
+  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
new file mode 100644
index 0000000..ed72c96
--- /dev/null
+++ b/src/ia32/macro-assembler-ia32.h
@@ -0,0 +1,411 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_MACRO_ASSEMBLER_IA32_H_
+#define V8_IA32_MACRO_ASSEMBLER_IA32_H_
+
+#include "assembler.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declaration.
+class JumpTarget;
+
+
+// MacroAssembler implements a collection of frequently used macros.
+class MacroAssembler: public Assembler {
+ public:
+  MacroAssembler(void* buffer, int size);
+
+  // ---------------------------------------------------------------------------
+  // GC Support
+
+  // Set the remembered set bit for [object+offset].
+  // object is the object being stored into, value is the object being stored.
+  // If offset is zero, then the scratch register contains the array index into
+  // the elements array represented as a Smi.
+  // All registers are clobbered by the operation.
+  void RecordWrite(Register object,
+                   int offset,
+                   Register value,
+                   Register scratch);
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // ---------------------------------------------------------------------------
+  // Debugger Support
+
+  void SaveRegistersToMemory(RegList regs);
+  void RestoreRegistersFromMemory(RegList regs);
+  void PushRegistersFromMemory(RegList regs);
+  void PopRegistersToMemory(RegList regs);
+  void CopyRegistersFromStackToMemory(Register base,
+                                      Register scratch,
+                                      RegList regs);
+#endif
+
+  // ---------------------------------------------------------------------------
+  // Activation frames
+
+  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
+  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
+
+  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
+  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
+
+  // Enter specific kind of exit frame; either EXIT or
+  // EXIT_DEBUG. Expects the number of arguments in register eax and
+  // sets up the number of arguments in register edi and the pointer
+  // to the first argument in register esi.
+  void EnterExitFrame(StackFrame::Type type);
+
+  // Leave the current exit frame. Expects the return value in
+  // register eax:edx (untouched) and the pointer to the first
+  // argument in register esi.
+  void LeaveExitFrame(StackFrame::Type type);
+
+
+  // ---------------------------------------------------------------------------
+  // JavaScript invokes
+
+  // Invoke the JavaScript function code by either calling or jumping.
+  void InvokeCode(const Operand& code,
+                  const ParameterCount& expected,
+                  const ParameterCount& actual,
+                  InvokeFlag flag);
+
+  void InvokeCode(Handle<Code> code,
+                  const ParameterCount& expected,
+                  const ParameterCount& actual,
+                  RelocInfo::Mode rmode,
+                  InvokeFlag flag);
+
+  // Invoke the JavaScript function in the given register. Changes the
+  // current context to the context in the function before invoking.
+  void InvokeFunction(Register function,
+                      const ParameterCount& actual,
+                      InvokeFlag flag);
+
+  // Invoke specified builtin JavaScript function. Adds an entry to
+  // the unresolved list if the name does not resolve.
+  void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);
+
+  // Store the code object for the given builtin in the target register.
+  void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+
+  // Expression support
+  void Set(Register dst, const Immediate& x);
+  void Set(const Operand& dst, const Immediate& x);
+
+  // Compare object type for heap object.
+  // Incoming register is heap_object and outgoing register is map.
+  void CmpObjectType(Register heap_object, InstanceType type, Register map);
+
+  // Compare instance type for map.
+  void CmpInstanceType(Register map, InstanceType type);
+
+  // FCmp is similar to integer cmp, but requires unsigned
+  // jcc instructions (je, ja, jae, jb, jbe, and jz).
+  void FCmp();
+
+  // ---------------------------------------------------------------------------
+  // Exception handling
+
+  // Push a new try handler and link into try handler chain.  The return
+  // address must be pushed before calling this helper.
+  void PushTryHandler(CodeLocation try_location, HandlerType type);
+
+
+  // ---------------------------------------------------------------------------
+  // Inline caching support
+
+  // Generates code that verifies that the maps of objects in the
+  // prototype chain of object haven't changed since the code was
+  // generated and branches to the miss label if any map has. If
+  // necessary the function also generates code for a security check
+  // in case of global object holders. The scratch and holder
+  // registers are always clobbered, but the object register is only
+  // clobbered if it is the same as the holder register. The function
+  // returns a register containing the holder - either object_reg or
+  // holder_reg.
+  Register CheckMaps(JSObject* object, Register object_reg,
+                     JSObject* holder, Register holder_reg,
+                     Register scratch, Label* miss);
+
+  // Generate code for checking access rights - used for security checks
+  // on access to global objects across environments. The holder register
+  // is left untouched, but the scratch register is clobbered.
+  void CheckAccessGlobalProxy(Register holder_reg,
+                              Register scratch,
+                              Label* miss);
+
+
+  // ---------------------------------------------------------------------------
+  // Allocation support
+
+  // Allocate an object in new space. If the new space is exhausted control
+  // continues at the gc_required label. The allocated object is returned in
+  // result and end of the new object is returned in result_end. The register
+  // scratch can be passed as no_reg in which case an additional object
+  // reference will be added to the reloc info. The returned pointers in result
+  // and result_end have not yet been tagged as heap objects. If
+  // result_contains_top_on_entry is true the content of result is known to be
+  // the allocation top on entry (could be result_end from a previous call to
+  // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
+  // should be no_reg as it is never used.
+  void AllocateInNewSpace(int object_size,
+                          Register result,
+                          Register result_end,
+                          Register scratch,
+                          Label* gc_required,
+                          AllocationFlags flags);
+
+  void AllocateInNewSpace(int header_size,
+                          ScaleFactor element_size,
+                          Register element_count,
+                          Register result,
+                          Register result_end,
+                          Register scratch,
+                          Label* gc_required,
+                          AllocationFlags flags);
+
+  void AllocateInNewSpace(Register object_size,
+                          Register result,
+                          Register result_end,
+                          Register scratch,
+                          Label* gc_required,
+                          AllocationFlags flags);
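+
+  // Illustrative usage (an assumption, not from the original header):
+  //   AllocateInNewSpace(HeapNumber::kSize, eax, ebx, no_reg,
+  //                      &gc_required, TAG_OBJECT);
+  // would leave a tagged pointer to the new object in eax and the untagged
+  // end of the object in ebx, or jump to gc_required if new space is full.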
+
+  // Undo allocation in new space. The object passed, and any objects allocated
+  // after it, will no longer be considered allocated. Make sure that no
+  // pointers are left to the object(s) no longer allocated, as they would be
+  // invalid when the allocation is undone.
+  void UndoAllocationInNewSpace(Register object);
+
+  // ---------------------------------------------------------------------------
+  // Support functions.
+
+  // Check if result is zero and op is negative.
+  void NegativeZeroTest(Register result, Register op, Label* then_label);
+
+  // Check if result is zero and op is negative in code using jump targets.
+  void NegativeZeroTest(CodeGenerator* cgen,
+                        Register result,
+                        Register op,
+                        JumpTarget* then_target);
+
+  // Check if result is zero and any of op1 and op2 are negative.
+  // Register scratch is destroyed, and it must be different from op2.
+  void NegativeZeroTest(Register result, Register op1, Register op2,
+                        Register scratch, Label* then_label);
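+
+  // Illustrative note (an assumption, not in the original header): these
+  // checks are typically emitted after an integer multiply, where a zero
+  // result combined with a negative operand means the mathematical result
+  // was -0, which cannot be represented as a smi.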
+
+  // Try to get the prototype of a function and put the value in the
+  // result register. Checks that the function really is a
+  // function and jumps to the miss label if the fast checks fail. The
+  // function register will be untouched; the other registers may be
+  // clobbered.
+  void TryGetFunctionPrototype(Register function,
+                               Register result,
+                               Register scratch,
+                               Label* miss);
+
+  // Generates code for reporting that an illegal operation has
+  // occurred.
+  void IllegalOperation(int num_arguments);
+
+  // ---------------------------------------------------------------------------
+  // Runtime calls
+
+  // Call a code stub.
+  void CallStub(CodeStub* stub);
+
+  // Return from a code stub after popping its arguments.
+  void StubReturn(int argc);
+
+  // Call a runtime routine.
+  // Eventually this should be used for all C calls.
+  void CallRuntime(Runtime::Function* f, int num_arguments);
+
+  // Convenience function: Same as above, but takes the fid instead.
+  void CallRuntime(Runtime::FunctionId id, int num_arguments);
+
+  // Tail call of a runtime routine (jump).
+  // Like JumpToRuntime, but also takes care of passing the number
+  // of arguments.
+  void TailCallRuntime(const ExternalReference& ext,
+                       int num_arguments,
+                       int result_size);
+
+  // Jump to a runtime routine.
+  void JumpToRuntime(const ExternalReference& ext);
+
+
+  // ---------------------------------------------------------------------------
+  // Utilities
+
+  void Ret();
+
+  struct Unresolved {
+    int pc;
+    uint32_t flags;  // see Bootstrapper::FixupFlags decoders/encoders.
+    const char* name;
+  };
+  List<Unresolved>* unresolved() { return &unresolved_; }
+
+  Handle<Object> CodeObject() { return code_object_; }
+
+
+  // ---------------------------------------------------------------------------
+  // StatsCounter support
+
+  void SetCounter(StatsCounter* counter, int value);
+  void IncrementCounter(StatsCounter* counter, int value);
+  void DecrementCounter(StatsCounter* counter, int value);
+
+
+  // ---------------------------------------------------------------------------
+  // Debugging
+
+  // Calls Abort(msg) if the condition cc is not satisfied.
+  // Use --debug_code to enable.
+  void Assert(Condition cc, const char* msg);
+
+  // Like Assert(), but always enabled.
+  void Check(Condition cc, const char* msg);
+
+  // Print a message to stdout and abort execution.
+  void Abort(const char* msg);
+
+  // Verify restrictions about code generated in stubs.
+  void set_generating_stub(bool value) { generating_stub_ = value; }
+  bool generating_stub() { return generating_stub_; }
+  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
+  bool allow_stub_calls() { return allow_stub_calls_; }
+
+ private:
+  List<Unresolved> unresolved_;
+  bool generating_stub_;
+  bool allow_stub_calls_;
+  Handle<Object> code_object_;  // This handle will be patched with the
+                                // code object on installation.
+
+  // Helper functions for generating invokes.
+  void InvokePrologue(const ParameterCount& expected,
+                      const ParameterCount& actual,
+                      Handle<Code> code_constant,
+                      const Operand& code_operand,
+                      Label* done,
+                      InvokeFlag flag);
+
+  // Prepares for a call or jump to a builtin by doing two things:
+  // 1. Emits code that fetches the builtin's function object from the context
+  //    at runtime, and puts it in the register edi.
+  // 2. Fetches the builtin's code object, and returns it in a handle, at
+  //    compile time, so that later code can emit instructions to jump or call
+  //    the builtin directly.  If the code object has not yet been created, it
+  //    returns the builtin code object for IllegalFunction, and sets the
+  //    output parameter "resolved" to false.  Code that uses the return value
+  //    should then add the address and the builtin name to the list of fixups
+  //    called unresolved_, which is fixed up by the bootstrapper.
+  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
+
+  // Activation support.
+  void EnterFrame(StackFrame::Type type);
+  void LeaveFrame(StackFrame::Type type);
+
+  // Allocation support helpers.
+  void LoadAllocationTopHelper(Register result,
+                               Register result_end,
+                               Register scratch,
+                               AllocationFlags flags);
+  void UpdateAllocationTopHelper(Register result_end, Register scratch);
+};
+
+
+// The code patcher is used to patch (typically) small parts of code e.g. for
+// debugging and other types of instrumentation. When using the code patcher
+// the exact number of bytes specified must be emitted. It is not legal to emit
+// relocation information. If any of these constraints are violated it causes
+// an assertion.
+class CodePatcher {
+ public:
+  CodePatcher(byte* address, int size);
+  virtual ~CodePatcher();
+
+  // Macro assembler to emit code.
+  MacroAssembler* masm() { return &masm_; }
+
+ private:
+  byte* address_;  // The address of the code being patched.
+  int size_;  // Number of bytes of the expected patch size.
+  MacroAssembler masm_;  // Macro assembler used to generate the code.
+};
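+
+// Illustrative usage (an assumption, not part of the original header):
+//   CodePatcher patcher(address, 2);
+//   patcher.masm()->int3();
+//   patcher.masm()->nop();
+// emits exactly the two requested bytes at address and flushes the
+// instruction cache when the patcher goes out of scope.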
+
+
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+// Generate an Operand for loading a field from an object.
+static inline Operand FieldOperand(Register object, int offset) {
+  return Operand(object, offset - kHeapObjectTag);
+}
+
+
+// Generate an Operand for loading an indexed field from an object.
+static inline Operand FieldOperand(Register object,
+                                   Register index,
+                                   ScaleFactor scale,
+                                   int offset) {
+  return Operand(object, index, scale, offset - kHeapObjectTag);
+}
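+
+// Illustrative usage (an assumption, not from the original header):
+//   mov(eax, FieldOperand(ecx, HeapObject::kMapOffset));
+// loads the map of the object in ecx, with the heap object tag compensated
+// for in the encoded displacement.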
+
+
+#ifdef GENERATED_CODE_COVERAGE
+extern void LogGeneratedCodeCoverage(const char* file_line);
+#define CODE_COVERAGE_STRINGIFY(x) #x
+#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
+#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
+#define ACCESS_MASM(masm) {                                               \
+    byte* ia32_coverage_function =                                        \
+        reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
+    masm->pushfd();                                                       \
+    masm->pushad();                                                       \
+    masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__)));         \
+    masm->call(ia32_coverage_function, RelocInfo::RUNTIME_ENTRY);         \
+    masm->pop(eax);                                                       \
+    masm->popad();                                                        \
+    masm->popfd();                                                        \
+  }                                                                       \
+  masm->
+#else
+#define ACCESS_MASM(masm) masm->
+#endif
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_IA32_MACRO_ASSEMBLER_IA32_H_
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
new file mode 100644
index 0000000..7af4e89
--- /dev/null
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -0,0 +1,1174 @@
+// Copyright 2008-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+#include "unicode.h"
+#include "log.h"
+#include "ast.h"
+#include "regexp-stack.h"
+#include "macro-assembler.h"
+#include "regexp-macro-assembler.h"
+#include "ia32/macro-assembler-ia32.h"
+#include "ia32/regexp-macro-assembler-ia32.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef V8_NATIVE_REGEXP
+/*
+ * This assembler uses the following register assignment convention
+ * - edx : current character. Must be loaded using LoadCurrentCharacter
+ *         before using any of the dispatch methods.
+ * - edi : current position in input, as negative offset from end of string.
+ *         Please notice that this is the byte offset, not the character offset!
+ * - esi : end of input (points to byte after last character in input).
+ * - ebp : frame pointer. Used to access arguments, local variables and
+ *         RegExp registers.
+ * - esp : points to tip of C stack.
+ * - ecx : points to tip of backtrack stack
+ *
+ * The registers eax, ebx and ecx are free to use for computations.
+ *
+ * Each call to a public method should retain this convention.
+ * The stack will have the following structure:
+ *       - stack_area_base     (High end of the memory area to use as
+ *                             backtracking stack)
+ *       - at_start           (if 1, start at start of string, if 0, don't)
+ *       - int* capture_array (int[num_saved_registers_], for output).
+ *       - end of input       (Address of end of string)
+ *       - start of input     (Address of first character in string)
+ *       - void* input_string (location of a handle containing the string)
+ *       --- frame alignment (if applicable) ---
+ *       - return address
+ * ebp-> - old ebp
+ *       - backup of caller esi
+ *       - backup of caller edi
+ *       - backup of caller ebx
+ *       - Offset of location before start of input (effectively character
+ *         position -1). Used to initialize capture registers to a non-position.
+ *       - register 0  ebp[-4]  (Only positions must be stored in the first
+ *       - register 1  ebp[-8]   num_saved_registers_ registers)
+ *       - ...
+ *
+ * The first num_saved_registers_ registers are initialized to point to
+ * "character -1" in the string (i.e., char_size() bytes before the first
+ * character of the string). The remaining registers start out as garbage.
+ *
+ * The data up to the return address must be placed there by the calling
+ * code, by calling the code entry as cast to a function with the signature:
+ * int (*match)(String* input_string,
+ *              Address start,
+ *              Address end,
+ *              int* capture_output_array,
+ *              bool at_start,
+ *              byte* stack_area_base)
+ */
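+// For illustration only (an assumption, not part of the original source): a
+// caller following the convention above would enter the generated code as
+//   typedef int (*RegExpCodeEntry)(String* input_string, Address start,
+//                                  Address end, int* captures, bool at_start,
+//                                  byte* stack_area_base);
+//   int result = reinterpret_cast<RegExpCodeEntry>(entry)(
+//       input_string, start, end, captures, at_start, stack_area_base);
+// where result is one of SUCCESS, FAILURE or EXCEPTION.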
+
+#define __ ACCESS_MASM(masm_)
+
+RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(
+    Mode mode,
+    int registers_to_save)
+    : masm_(new MacroAssembler(NULL, kRegExpCodeSize)),
+      mode_(mode),
+      num_registers_(registers_to_save),
+      num_saved_registers_(registers_to_save),
+      entry_label_(),
+      start_label_(),
+      success_label_(),
+      backtrack_label_(),
+      exit_label_() {
+  ASSERT_EQ(0, registers_to_save % 2);
+  __ jmp(&entry_label_);   // We'll write the entry code later.
+  __ bind(&start_label_);  // And then continue from here.
+}
+
+
+RegExpMacroAssemblerIA32::~RegExpMacroAssemblerIA32() {
+  delete masm_;
+  // Unuse labels in case we throw away the assembler without calling GetCode.
+  entry_label_.Unuse();
+  start_label_.Unuse();
+  success_label_.Unuse();
+  backtrack_label_.Unuse();
+  exit_label_.Unuse();
+  check_preempt_label_.Unuse();
+  stack_overflow_label_.Unuse();
+}
+
+
+int RegExpMacroAssemblerIA32::stack_limit_slack()  {
+  return RegExpStack::kStackLimitSlack;
+}
+
+
+void RegExpMacroAssemblerIA32::AdvanceCurrentPosition(int by) {
+  if (by != 0) {
+    __ add(Operand(edi), Immediate(by * char_size()));
+  }
+}
+
+
+void RegExpMacroAssemblerIA32::AdvanceRegister(int reg, int by) {
+  ASSERT(reg >= 0);
+  ASSERT(reg < num_registers_);
+  if (by != 0) {
+    __ add(register_location(reg), Immediate(by));
+  }
+}
+
+
+void RegExpMacroAssemblerIA32::Backtrack() {
+  CheckPreemption();
+  // Pop Code* offset from backtrack stack, add Code* and jump to location.
+  Pop(ebx);
+  __ add(Operand(ebx), Immediate(masm_->CodeObject()));
+  __ jmp(Operand(ebx));
+}
+
+
+void RegExpMacroAssemblerIA32::Bind(Label* label) {
+  __ bind(label);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckCharacter(uint32_t c, Label* on_equal) {
+  __ cmp(current_character(), c);
+  BranchOrBacktrack(equal, on_equal);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckCharacterGT(uc16 limit, Label* on_greater) {
+  __ cmp(current_character(), limit);
+  BranchOrBacktrack(greater, on_greater);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckAtStart(Label* on_at_start) {
+  Label not_at_start;
+  // Did we start the match at the start of the string at all?
+  __ cmp(Operand(ebp, kAtStart), Immediate(0));
+  BranchOrBacktrack(equal, &not_at_start);
+  // If we did, are we still at the start of the input?
+  __ lea(eax, Operand(esi, edi, times_1, 0));
+  __ cmp(eax, Operand(ebp, kInputStart));
+  BranchOrBacktrack(equal, on_at_start);
+  __ bind(&not_at_start);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckNotAtStart(Label* on_not_at_start) {
+  // Did we start the match at the start of the string at all?
+  __ cmp(Operand(ebp, kAtStart), Immediate(0));
+  BranchOrBacktrack(equal, on_not_at_start);
+  // If we did, are we still at the start of the input?
+  __ lea(eax, Operand(esi, edi, times_1, 0));
+  __ cmp(eax, Operand(ebp, kInputStart));
+  BranchOrBacktrack(not_equal, on_not_at_start);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckCharacterLT(uc16 limit, Label* on_less) {
+  __ cmp(current_character(), limit);
+  BranchOrBacktrack(less, on_less);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckCharacters(Vector<const uc16> str,
+                                               int cp_offset,
+                                               Label* on_failure,
+                                               bool check_end_of_string) {
+  int byte_length = str.length() * char_size();
+  int byte_offset = cp_offset * char_size();
+  if (check_end_of_string) {
+    // Check that there are at least str.length() characters left in the input.
+    __ cmp(Operand(edi), Immediate(-(byte_offset + byte_length)));
+    BranchOrBacktrack(greater, on_failure);
+  }
+
+  if (on_failure == NULL) {
+    // Instead of inlining a backtrack, (re)use the global backtrack target.
+    on_failure = &backtrack_label_;
+  }
+
+  for (int i = 0; i < str.length(); i++) {
+    if (mode_ == ASCII) {
+      __ cmpb(Operand(esi, edi, times_1, byte_offset + i),
+              static_cast<int8_t>(str[i]));
+    } else {
+      ASSERT(mode_ == UC16);
+      __ cmpw(Operand(esi, edi, times_1, byte_offset + i * sizeof(uc16)),
+              Immediate(str[i]));
+    }
+    BranchOrBacktrack(not_equal, on_failure);
+  }
+}
+
+
+void RegExpMacroAssemblerIA32::CheckGreedyLoop(Label* on_equal) {
+  Label fallthrough;
+  __ cmp(edi, Operand(backtrack_stackpointer(), 0));
+  __ j(not_equal, &fallthrough);
+  __ add(Operand(backtrack_stackpointer()), Immediate(kPointerSize));  // Pop.
+  BranchOrBacktrack(no_condition, on_equal);
+  __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
+    int start_reg,
+    Label* on_no_match) {
+  Label fallthrough;
+  __ mov(edx, register_location(start_reg));  // Index of start of capture
+  __ mov(ebx, register_location(start_reg + 1));  // Index of end of capture
+  __ sub(ebx, Operand(edx));  // Length of capture.
+
+  // The length of a capture should not be negative. This can only happen
+  // if the end of the capture is unrecorded, or at a point earlier than
+  // the start of the capture.
+  BranchOrBacktrack(less, on_no_match, not_taken);
+
+  // If length is zero, either the capture is empty or it is completely
+  // uncaptured. In either case succeed immediately.
+  __ j(equal, &fallthrough);
+
+  if (mode_ == ASCII) {
+    Label success;
+    Label fail;
+    Label loop_increment;
+    // Save register contents to make the registers available below.
+    __ push(edi);
+    __ push(backtrack_stackpointer());
+    // After this, the eax, ecx, and edi registers are available.
+
+    __ add(edx, Operand(esi));  // Start of capture
+    __ add(edi, Operand(esi));  // Start of text to match against capture.
+    __ add(ebx, Operand(edi));  // End of text to match against capture.
+
+    Label loop;
+    __ bind(&loop);
+    __ movzx_b(eax, Operand(edi, 0));
+    __ cmpb_al(Operand(edx, 0));
+    __ j(equal, &loop_increment);
+
+    // Mismatch, try case-insensitive match (converting letters to lower-case).
+    __ or_(eax, 0x20);  // Convert match character to lower-case.
+    __ lea(ecx, Operand(eax, -'a'));
+    __ cmp(ecx, static_cast<int32_t>('z' - 'a'));  // Is eax a lowercase letter?
+    __ j(above, &fail);
+    // Also convert capture character.
+    __ movzx_b(ecx, Operand(edx, 0));
+    __ or_(ecx, 0x20);
+
+    __ cmp(eax, Operand(ecx));
+    __ j(not_equal, &fail);
+
+    __ bind(&loop_increment);
+    // Increment pointers into match and capture strings.
+    __ add(Operand(edx), Immediate(1));
+    __ add(Operand(edi), Immediate(1));
+    // Compare to end of match, and loop if not done.
+    __ cmp(edi, Operand(ebx));
+    __ j(below, &loop, taken);
+    __ jmp(&success);
+
+    __ bind(&fail);
+    // Restore original values before failing.
+    __ pop(backtrack_stackpointer());
+    __ pop(edi);
+    BranchOrBacktrack(no_condition, on_no_match);
+
+    __ bind(&success);
+    // Restore original value before continuing.
+    __ pop(backtrack_stackpointer());
+    // Drop original value of character position.
+    __ add(Operand(esp), Immediate(kPointerSize));
+    // Compute new value of character position after the matched part.
+    __ sub(edi, Operand(esi));
+  } else {
+    ASSERT(mode_ == UC16);
+    // Save registers before calling C function.
+    __ push(esi);
+    __ push(edi);
+    __ push(backtrack_stackpointer());
+    __ push(ebx);
+
+    const int argument_count = 3;
+    FrameAlign(argument_count, ecx);
+    // Put arguments into allocated stack area, last argument highest on stack.
+    // Parameters are
+    //   Address byte_offset1 - Address of the captured substring's start.
+    //   Address byte_offset2 - Address of current character position.
+    //   size_t byte_length - length of capture in bytes(!)
+
+    // Set byte_length.
+    __ mov(Operand(esp, 2 * kPointerSize), ebx);
+    // Set byte_offset2.
+    // Found by adding negative string-end offset of current position (edi)
+    // to end of string.
+    __ add(edi, Operand(esi));
+    __ mov(Operand(esp, 1 * kPointerSize), edi);
+    // Set byte_offset1.
+    // Start of capture, where edx already holds string-end negative offset.
+    __ add(edx, Operand(esi));
+    __ mov(Operand(esp, 0 * kPointerSize), edx);
+
+    ExternalReference compare =
+        ExternalReference::re_case_insensitive_compare_uc16();
+    CallCFunction(compare, argument_count);
+    // Pop original values before reacting to the result value.
+    __ pop(ebx);
+    __ pop(backtrack_stackpointer());
+    __ pop(edi);
+    __ pop(esi);
+
+    // Check if function returned non-zero for success or zero for failure.
+    __ or_(eax, Operand(eax));
+    BranchOrBacktrack(zero, on_no_match);
+    // On success, increment position by length of capture.
+    __ add(edi, Operand(ebx));
+  }
+  __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckNotBackReference(
+    int start_reg,
+    Label* on_no_match) {
+  Label fallthrough;
+  Label success;
+  Label fail;
+
+  // Find length of back-referenced capture.
+  __ mov(edx, register_location(start_reg));
+  __ mov(eax, register_location(start_reg + 1));
+  __ sub(eax, Operand(edx));  // Length to check.
+  // Fail on partial or illegal capture (start of capture after end of capture).
+  BranchOrBacktrack(less, on_no_match);
+  // Succeed on empty capture (including no capture)
+  __ j(equal, &fallthrough);
+
+  // Check that there are sufficient characters left in the input.
+  __ mov(ebx, edi);
+  __ add(ebx, Operand(eax));
+  BranchOrBacktrack(greater, on_no_match);
+
+  // Save register to make it available below.
+  __ push(backtrack_stackpointer());
+
+  // Compute pointers to match string and capture string
+  __ lea(ebx, Operand(esi, edi, times_1, 0));  // Start of match.
+  __ add(edx, Operand(esi));  // Start of capture.
+  __ lea(ecx, Operand(eax, ebx, times_1, 0));  // End of match
+
+  Label loop;
+  __ bind(&loop);
+  if (mode_ == ASCII) {
+    __ movzx_b(eax, Operand(edx, 0));
+    __ cmpb_al(Operand(ebx, 0));
+  } else {
+    ASSERT(mode_ == UC16);
+    __ movzx_w(eax, Operand(edx, 0));
+    __ cmpw_ax(Operand(ebx, 0));
+  }
+  __ j(not_equal, &fail);
+  // Increment pointers into capture and match string.
+  __ add(Operand(edx), Immediate(char_size()));
+  __ add(Operand(ebx), Immediate(char_size()));
+  // Check if we have reached end of match area.
+  __ cmp(ebx, Operand(ecx));
+  __ j(below, &loop);
+  __ jmp(&success);
+
+  __ bind(&fail);
+  // Restore backtrack stackpointer.
+  __ pop(backtrack_stackpointer());
+  BranchOrBacktrack(no_condition, on_no_match);
+
+  __ bind(&success);
+  // Move current character position to position after match.
+  __ mov(edi, ecx);
+  __ sub(Operand(edi), esi);
+  // Restore backtrack stackpointer.
+  __ pop(backtrack_stackpointer());
+
+  __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckNotRegistersEqual(int reg1,
+                                                      int reg2,
+                                                      Label* on_not_equal) {
+  __ mov(eax, register_location(reg1));
+  __ cmp(eax, register_location(reg2));
+  BranchOrBacktrack(not_equal, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckNotCharacter(uint32_t c,
+                                                 Label* on_not_equal) {
+  __ cmp(current_character(), c);
+  BranchOrBacktrack(not_equal, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckCharacterAfterAnd(uint32_t c,
+                                                      uint32_t mask,
+                                                      Label* on_equal) {
+  __ mov(eax, current_character());
+  __ and_(eax, mask);
+  __ cmp(eax, c);
+  BranchOrBacktrack(equal, on_equal);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckNotCharacterAfterAnd(uint32_t c,
+                                                         uint32_t mask,
+                                                         Label* on_not_equal) {
+  __ mov(eax, current_character());
+  __ and_(eax, mask);
+  __ cmp(eax, c);
+  BranchOrBacktrack(not_equal, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckNotCharacterAfterMinusAnd(
+    uc16 c,
+    uc16 minus,
+    uc16 mask,
+    Label* on_not_equal) {
+  ASSERT(minus < String::kMaxUC16CharCode);
+  __ lea(eax, Operand(current_character(), -minus));
+  __ and_(eax, mask);
+  __ cmp(eax, c);
+  BranchOrBacktrack(not_equal, on_not_equal);
+}
+
+
+bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
+                                                          int cp_offset,
+                                                          bool check_offset,
+                                                          Label* on_no_match) {
+  // Range checks (c in min..max) are generally implemented by an unsigned
+  // (c - min) <= (max - min) check
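+  // Illustrative instance (not in the original source): for the 'd' class
+  // below, the unsigned comparison (c - '0') <= ('9' - '0') matches exactly
+  // the characters '0'..'9'.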
+  switch (type) {
+  case 's':
+    // Match space-characters
+    if (mode_ == ASCII) {
+      // ASCII space characters are '\t'..'\r' and ' '.
+      if (check_offset) {
+        LoadCurrentCharacter(cp_offset, on_no_match);
+      } else {
+        LoadCurrentCharacterUnchecked(cp_offset, 1);
+      }
+      Label success;
+      __ cmp(current_character(), ' ');
+      __ j(equal, &success);
+      // Check range 0x09..0x0d
+      __ sub(Operand(current_character()), Immediate('\t'));
+      __ cmp(current_character(), '\r' - '\t');
+      BranchOrBacktrack(above, on_no_match);
+      __ bind(&success);
+      return true;
+    }
+    return false;
+  case 'S':
+    // Match non-space characters.
+    if (check_offset) {
+      LoadCurrentCharacter(cp_offset, on_no_match, 1);
+    } else {
+      LoadCurrentCharacterUnchecked(cp_offset, 1);
+    }
+    if (mode_ == ASCII) {
+      // ASCII space characters are '\t'..'\r' and ' '.
+      __ cmp(current_character(), ' ');
+      BranchOrBacktrack(equal, on_no_match);
+      __ sub(Operand(current_character()), Immediate('\t'));
+      __ cmp(current_character(), '\r' - '\t');
+      BranchOrBacktrack(below_equal, on_no_match);
+      return true;
+    }
+    return false;
+  case 'd':
+    // Match ASCII digits ('0'..'9')
+    if (check_offset) {
+      LoadCurrentCharacter(cp_offset, on_no_match, 1);
+    } else {
+      LoadCurrentCharacterUnchecked(cp_offset, 1);
+    }
+    __ sub(Operand(current_character()), Immediate('0'));
+    __ cmp(current_character(), '9' - '0');
+    BranchOrBacktrack(above, on_no_match);
+    return true;
+  case 'D':
+    // Match non-digits (anything but ASCII '0'..'9')
+    if (check_offset) {
+      LoadCurrentCharacter(cp_offset, on_no_match, 1);
+    } else {
+      LoadCurrentCharacterUnchecked(cp_offset, 1);
+    }
+    __ sub(Operand(current_character()), Immediate('0'));
+    __ cmp(current_character(), '9' - '0');
+    BranchOrBacktrack(below_equal, on_no_match);
+    return true;
+  case '.': {
+    // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+    if (check_offset) {
+      LoadCurrentCharacter(cp_offset, on_no_match, 1);
+    } else {
+      LoadCurrentCharacterUnchecked(cp_offset, 1);
+    }
+    __ xor_(Operand(current_character()), Immediate(0x01));
+    // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
+    __ sub(Operand(current_character()), Immediate(0x0b));
+    __ cmp(current_character(), 0x0c - 0x0b);
+    BranchOrBacktrack(below_equal, on_no_match);
+    if (mode_ == UC16) {
+      // Compare original value to 0x2028 and 0x2029, using the already
+      // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+      // 0x201d (0x2028 - 0x0b) or 0x201e.
+      __ sub(Operand(current_character()), Immediate(0x2028 - 0x0b));
+      __ cmp(current_character(), 1);
+      BranchOrBacktrack(below_equal, on_no_match);
+    }
+    return true;
+  }
+  case '*':
+    // Match any character.
+    if (check_offset) {
+      CheckPosition(cp_offset, on_no_match);
+    }
+    return true;
+  // No custom implementation (yet): w, W, s(UC16), S(UC16).
+  default:
+    return false;
+  }
+}
+
+
+void RegExpMacroAssemblerIA32::Fail() {
+  ASSERT(FAILURE == 0);  // Return value for failure is zero.
+  __ xor_(eax, Operand(eax));  // zero eax.
+  __ jmp(&exit_label_);
+}
+
+
+Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
+  // Finalize code - write the entry point code now that we know how many
+  // registers we need.
+
+  // Entry code:
+  __ bind(&entry_label_);
+  // Start new stack frame.
+  __ push(ebp);
+  __ mov(ebp, esp);
+  // Save callee-save registers. Order here should correspond to order of
+  // kBackup_ebx etc.
+  __ push(esi);
+  __ push(edi);
+  __ push(ebx);  // Callee-save on MacOS.
+  __ push(Immediate(0));  // Make room for "input start - 1" constant.
+
+  // Check if we have space on the stack for registers.
+  Label stack_limit_hit;
+  Label stack_ok;
+
+  ExternalReference stack_guard_limit =
+      ExternalReference::address_of_stack_guard_limit();
+  __ mov(ecx, esp);
+  __ sub(ecx, Operand::StaticVariable(stack_guard_limit));
+  // Handle it if the stack pointer is already below the stack limit.
+  __ j(below_equal, &stack_limit_hit, not_taken);
+  // Check if there is room for the variable number of registers above
+  // the stack limit.
+  __ cmp(ecx, num_registers_ * kPointerSize);
+  __ j(above_equal, &stack_ok, taken);
+  // Exit with OutOfMemory exception. There is not enough space on the stack
+  // for our working registers.
+  __ mov(eax, EXCEPTION);
+  __ jmp(&exit_label_);
+
+  __ bind(&stack_limit_hit);
+  CallCheckStackGuardState(ebx);
+  __ or_(eax, Operand(eax));
+  // If returned value is non-zero, we exit with the returned value as result.
+  __ j(not_zero, &exit_label_);
+
+  __ bind(&stack_ok);
+
+  // Allocate space on stack for registers.
+  __ sub(Operand(esp), Immediate(num_registers_ * kPointerSize));
+  // Load string length.
+  __ mov(esi, Operand(ebp, kInputEnd));
+  // Load input position.
+  __ mov(edi, Operand(ebp, kInputStart));
+  // Set up edi to be negative offset from string end.
+  __ sub(edi, Operand(esi));
+  // Set eax to address of char before start of input
+  // (effectively string position -1).
+  __ lea(eax, Operand(edi, -char_size()));
+  // Store this value in a local variable, for use when clearing
+  // position registers.
+  __ mov(Operand(ebp, kInputStartMinusOne), eax);
+  if (num_saved_registers_ > 0) {  // Always is, if generated from a regexp.
+    // Fill saved registers with initial value = start offset - 1
+    // Fill in stack push order, to avoid accessing across an unwritten
+    // page (a problem on Windows).
+    __ mov(ecx, kRegisterZero);
+    Label init_loop;
+    __ bind(&init_loop);
+    __ mov(Operand(ebp, ecx, times_1, +0), eax);
+    __ sub(Operand(ecx), Immediate(kPointerSize));
+    __ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize);
+    __ j(greater, &init_loop);
+  }
+  // Ensure that we have written to each stack page, in order. Skipping a page
+  // on Windows can cause segmentation faults. Assuming page size is 4k.
+  const int kPageSize = 4096;
+  const int kRegistersPerPage = kPageSize / kPointerSize;
+  for (int i = num_saved_registers_ + kRegistersPerPage - 1;
+      i < num_registers_;
+      i += kRegistersPerPage) {
+    __ mov(register_location(i), eax);  // One write every page.
+  }
+
+
+  // Initialize backtrack stack pointer.
+  __ mov(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
+  // Load previous char as initial value of current-character.
+  Label at_start;
+  __ cmp(Operand(ebp, kAtStart), Immediate(0));
+  __ j(not_equal, &at_start);
+  LoadCurrentCharacterUnchecked(-1, 1);  // Load previous char.
+  __ jmp(&start_label_);
+  __ bind(&at_start);
+  __ mov(current_character(), '\n');
+  __ jmp(&start_label_);
+
+
+  // Exit code:
+  if (success_label_.is_linked()) {
+    // Save captures when successful.
+    __ bind(&success_label_);
+    if (num_saved_registers_ > 0) {
+      // copy captures to output
+      __ mov(ebx, Operand(ebp, kRegisterOutput));
+      __ mov(ecx, Operand(ebp, kInputEnd));
+      __ sub(ecx, Operand(ebp, kInputStart));
+      for (int i = 0; i < num_saved_registers_; i++) {
+        __ mov(eax, register_location(i));
+        __ add(eax, Operand(ecx));  // Convert to index from start, not end.
+        if (mode_ == UC16) {
+          __ sar(eax, 1);  // Convert byte index to character index.
+        }
+        __ mov(Operand(ebx, i * kPointerSize), eax);
+      }
+    }
+    __ mov(eax, Immediate(SUCCESS));
+  }
+  // Exit and return eax
+  __ bind(&exit_label_);
+  // Skip esp past regexp registers.
+  __ lea(esp, Operand(ebp, kBackup_ebx));
+  // Restore callee-save registers.
+  __ pop(ebx);
+  __ pop(edi);
+  __ pop(esi);
+  // Exit function frame, restore previous one.
+  __ pop(ebp);
+  __ ret(0);
+
+  // Backtrack code (branch target for conditional backtracks).
+  if (backtrack_label_.is_linked()) {
+    __ bind(&backtrack_label_);
+    Backtrack();
+  }
+
+  Label exit_with_exception;
+
+  // Preemption code.
+  if (check_preempt_label_.is_linked()) {
+    SafeCallTarget(&check_preempt_label_);
+
+    __ push(backtrack_stackpointer());
+    __ push(edi);
+
+    CallCheckStackGuardState(ebx);
+    __ or_(eax, Operand(eax));
+    // If returning non-zero, we should end execution with the given
+    // result as return value.
+    __ j(not_zero, &exit_label_);
+
+    __ pop(edi);
+    __ pop(backtrack_stackpointer());
+    // String might have moved: Reload esi from frame.
+    __ mov(esi, Operand(ebp, kInputEnd));
+    SafeReturn();
+  }
+
+  // Backtrack stack overflow code.
+  if (stack_overflow_label_.is_linked()) {
+    SafeCallTarget(&stack_overflow_label_);
+    // Reached if the backtrack-stack limit has been hit.
+
+    Label grow_failed;
+    // Save registers before calling C function
+    __ push(esi);
+    __ push(edi);
+
+    // Call GrowStack(backtrack_stackpointer())
+    int num_arguments = 2;
+    FrameAlign(num_arguments, ebx);
+    __ lea(eax, Operand(ebp, kStackHighEnd));
+    __ mov(Operand(esp, 1 * kPointerSize), eax);
+    __ mov(Operand(esp, 0 * kPointerSize), backtrack_stackpointer());
+    ExternalReference grow_stack = ExternalReference::re_grow_stack();
+    CallCFunction(grow_stack, num_arguments);
+    // If the call returned NULL, we have failed to grow the stack, and
+    // must exit with a stack-overflow exception.
+    __ or_(eax, Operand(eax));
+    __ j(equal, &exit_with_exception);
+    // Otherwise use return value as new stack pointer.
+    __ mov(backtrack_stackpointer(), eax);
+    // Restore saved registers and continue.
+    __ pop(edi);
+    __ pop(esi);
+    SafeReturn();
+  }
+
+  if (exit_with_exception.is_linked()) {
+    // Reached if any of the code above needed to exit with an exception.
+    __ bind(&exit_with_exception);
+    // Exit with Result EXCEPTION(-1) to signal thrown exception.
+    __ mov(eax, EXCEPTION);
+    __ jmp(&exit_label_);
+  }
+
+  CodeDesc code_desc;
+  masm_->GetCode(&code_desc);
+  Handle<Code> code = Factory::NewCode(code_desc,
+                                       NULL,
+                                       Code::ComputeFlags(Code::REGEXP),
+                                       masm_->CodeObject());
+  LOG(RegExpCodeCreateEvent(*code, *source));
+  return Handle<Object>::cast(code);
+}
+
+
+void RegExpMacroAssemblerIA32::GoTo(Label* to) {
+  BranchOrBacktrack(no_condition, to);
+}
+
+
+void RegExpMacroAssemblerIA32::IfRegisterGE(int reg,
+                                            int comparand,
+                                            Label* if_ge) {
+  __ cmp(register_location(reg), Immediate(comparand));
+  BranchOrBacktrack(greater_equal, if_ge);
+}
+
+
+void RegExpMacroAssemblerIA32::IfRegisterLT(int reg,
+                                            int comparand,
+                                            Label* if_lt) {
+  __ cmp(register_location(reg), Immediate(comparand));
+  BranchOrBacktrack(less, if_lt);
+}
+
+
+void RegExpMacroAssemblerIA32::IfRegisterEqPos(int reg,
+                                               Label* if_eq) {
+  __ cmp(edi, register_location(reg));
+  BranchOrBacktrack(equal, if_eq);
+}
+
+
+RegExpMacroAssembler::IrregexpImplementation
+    RegExpMacroAssemblerIA32::Implementation() {
+  return kIA32Implementation;
+}
+
+
+void RegExpMacroAssemblerIA32::LoadCurrentCharacter(int cp_offset,
+                                                    Label* on_end_of_input,
+                                                    bool check_bounds,
+                                                    int characters) {
+  ASSERT(cp_offset >= -1);      // ^ and \b can look behind one character.
+  ASSERT(cp_offset < (1<<30));  // Be sane! (And ensure negation works)
+  if (check_bounds) {
+    CheckPosition(cp_offset + characters - 1, on_end_of_input);
+  }
+  LoadCurrentCharacterUnchecked(cp_offset, characters);
+}
+
+
+void RegExpMacroAssemblerIA32::PopCurrentPosition() {
+  Pop(edi);
+}
+
+
+void RegExpMacroAssemblerIA32::PopRegister(int register_index) {
+  Pop(eax);
+  __ mov(register_location(register_index), eax);
+}
+
+
+void RegExpMacroAssemblerIA32::PushBacktrack(Label* label) {
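+  // The backtrack address is pushed as a code-relative offset rather than an
+  // absolute address, so the entry stays valid if the code object is moved.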
+  Push(Immediate::CodeRelativeOffset(label));
+  CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerIA32::PushCurrentPosition() {
+  Push(edi);
+}
+
+
+void RegExpMacroAssemblerIA32::PushRegister(int register_index,
+                                            StackCheckFlag check_stack_limit) {
+  __ mov(eax, register_location(register_index));
+  Push(eax);
+  if (check_stack_limit) CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerIA32::ReadCurrentPositionFromRegister(int reg) {
+  __ mov(edi, register_location(reg));
+}
+
+
+void RegExpMacroAssemblerIA32::ReadStackPointerFromRegister(int reg) {
+  __ mov(backtrack_stackpointer(), register_location(reg));
+  __ add(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
+}
+
+
+void RegExpMacroAssemblerIA32::SetRegister(int register_index, int to) {
+  ASSERT(register_index >= num_saved_registers_);  // Reserved for positions!
+  __ mov(register_location(register_index), Immediate(to));
+}
+
+
+void RegExpMacroAssemblerIA32::Succeed() {
+  __ jmp(&success_label_);
+}
+
+
+void RegExpMacroAssemblerIA32::WriteCurrentPositionToRegister(int reg,
+                                                              int cp_offset) {
+  if (cp_offset == 0) {
+    __ mov(register_location(reg), edi);
+  } else {
+    __ lea(eax, Operand(edi, cp_offset * char_size()));
+    __ mov(register_location(reg), eax);
+  }
+}
+
+
+void RegExpMacroAssemblerIA32::ClearRegisters(int reg_from, int reg_to) {
+  ASSERT(reg_from <= reg_to);
+  __ mov(eax, Operand(ebp, kInputStartMinusOne));
+  for (int reg = reg_from; reg <= reg_to; reg++) {
+    __ mov(register_location(reg), eax);
+  }
+}
+
+
+void RegExpMacroAssemblerIA32::WriteStackPointerToRegister(int reg) {
+  __ mov(eax, backtrack_stackpointer());
+  __ sub(eax, Operand(ebp, kStackHighEnd));
+  __ mov(register_location(reg), eax);
+}
+
+
+// Private methods:
+
+void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) {
+  int num_arguments = 3;
+  FrameAlign(num_arguments, scratch);
+  // RegExp code frame pointer.
+  __ mov(Operand(esp, 2 * kPointerSize), ebp);
+  // Code* of self.
+  __ mov(Operand(esp, 1 * kPointerSize), Immediate(masm_->CodeObject()));
+  // Next address on the stack (will be address of return address).
+  __ lea(eax, Operand(esp, -kPointerSize));
+  __ mov(Operand(esp, 0 * kPointerSize), eax);
+  ExternalReference check_stack_guard =
+      ExternalReference::re_check_stack_guard_state();
+  CallCFunction(check_stack_guard, num_arguments);
+}
+
+
+// Helper function for reading a value out of a stack frame.
+template <typename T>
+static T& frame_entry(Address re_frame, int frame_offset) {
+  return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
+}
+
+
+int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
+                                                   Code* re_code,
+                                                   Address re_frame) {
+  if (StackGuard::IsStackOverflow()) {
+    Top::StackOverflow();
+    return EXCEPTION;
+  }
+
+  // If it was not a real stack overflow, the stack guard was used to
+  // interrupt execution for another purpose.
+
+  // Prepare for possible GC.
+  HandleScope handles;
+  Handle<Code> code_handle(re_code);
+
+  Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
+  // Current string.
+  bool is_ascii = subject->IsAsciiRepresentation();
+
+  ASSERT(re_code->instruction_start() <= *return_address);
+  ASSERT(*return_address <=
+      re_code->instruction_start() + re_code->instruction_size());
+
+  Object* result = Execution::HandleStackGuardInterrupt();
+
+  if (*code_handle != re_code) {  // Return address no longer valid
+    int delta = *code_handle - re_code;
+    // Overwrite the return address on the stack.
+    *return_address += delta;
+  }
+
+  if (result->IsException()) {
+    return EXCEPTION;
+  }
+
+  // String might have changed.
+  if (subject->IsAsciiRepresentation() != is_ascii) {
+    // If we changed between an ASCII and a UC16 string, the specialized
+    // code cannot be used, and we need to restart regexp matching from
+    // scratch (including, potentially, compiling a new version of the code).
+    return RETRY;
+  }
+
+  // Otherwise, the content of the string might have moved. It must still
+  // be a sequential or external string with the same content.
+  // Update the start and end pointers in the stack frame to the current
+  // location (whether it has actually moved or not).
+  ASSERT(StringShape(*subject).IsSequential() ||
+      StringShape(*subject).IsExternal());
+
+  // The original start address of the characters to match.
+  const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
+
+  // Find the current start address of the same character at the current string
+  // position.
+  int start_index = frame_entry<int>(re_frame, kStartIndex);
+  const byte* new_address = StringCharacterPosition(*subject, start_index);
+
+  if (start_address != new_address) {
+    // If there is a difference, update the object pointer and start and end
+    // addresses in the RegExp stack frame to match the new value.
+    const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
+    int byte_length = end_address - start_address;
+    frame_entry<const String*>(re_frame, kInputString) = *subject;
+    frame_entry<const byte*>(re_frame, kInputStart) = new_address;
+    frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+  }
+
+  return 0;
+}
+
+
+Operand RegExpMacroAssemblerIA32::register_location(int register_index) {
+  ASSERT(register_index < (1<<30));
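+  // Record the highest register index used so that GetCode can reserve stack
+  // space for every register that is actually referenced.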
+  if (num_registers_ <= register_index) {
+    num_registers_ = register_index + 1;
+  }
+  return Operand(ebp, kRegisterZero - register_index * kPointerSize);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckPosition(int cp_offset,
+                                             Label* on_outside_input) {
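+  // edi holds the current position as a negative offset from the string end,
+  // so the position cp_offset characters ahead is outside the input exactly
+  // when edi >= -cp_offset * char_size().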
+  __ cmp(edi, -cp_offset * char_size());
+  BranchOrBacktrack(greater_equal, on_outside_input);
+}
+
+
+void RegExpMacroAssemblerIA32::BranchOrBacktrack(Condition condition,
+                                                 Label* to,
+                                                 Hint hint) {
+  if (condition < 0) {  // No condition
+    if (to == NULL) {
+      Backtrack();
+      return;
+    }
+    __ jmp(to);
+    return;
+  }
+  if (to == NULL) {
+    __ j(condition, &backtrack_label_, hint);
+    return;
+  }
+  __ j(condition, to, hint);
+}
+
+
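+// SafeCall/SafeReturn implement GC-safe internal calls: SafeCallTarget turns
+// the pushed return address into an offset relative to the code object, and
+// SafeReturn adds the code object back before returning, so no absolute code
+// address is left on the stack across the call.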
+void RegExpMacroAssemblerIA32::SafeCall(Label* to) {
+  __ call(to);
+}
+
+
+void RegExpMacroAssemblerIA32::SafeReturn() {
+  __ add(Operand(esp, 0), Immediate(masm_->CodeObject()));
+  __ ret(0);
+}
+
+
+void RegExpMacroAssemblerIA32::SafeCallTarget(Label* name) {
+  __ bind(name);
+  __ sub(Operand(esp, 0), Immediate(masm_->CodeObject()));
+}
+
+
+void RegExpMacroAssemblerIA32::Push(Register source) {
+  ASSERT(!source.is(backtrack_stackpointer()));
+  // Notice: This updates flags, unlike normal Push.
+  __ sub(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
+  __ mov(Operand(backtrack_stackpointer(), 0), source);
+}
+
+
+void RegExpMacroAssemblerIA32::Push(Immediate value) {
+  // Notice: This updates flags, unlike normal Push.
+  __ sub(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
+  __ mov(Operand(backtrack_stackpointer(), 0), value);
+}
+
+
+void RegExpMacroAssemblerIA32::Pop(Register target) {
+  ASSERT(!target.is(backtrack_stackpointer()));
+  __ mov(target, Operand(backtrack_stackpointer(), 0));
+  // Notice: This updates flags, unlike normal Pop.
+  __ add(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
+}
+
+
+void RegExpMacroAssemblerIA32::CheckPreemption() {
+  // Check for preemption.
+  Label no_preempt;
+  ExternalReference stack_guard_limit =
+      ExternalReference::address_of_stack_guard_limit();
+  __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
+  __ j(above, &no_preempt, taken);
+
+  SafeCall(&check_preempt_label_);
+
+  __ bind(&no_preempt);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckStackLimit() {
+  if (FLAG_check_stack) {
+    Label no_stack_overflow;
+    ExternalReference stack_limit =
+        ExternalReference::address_of_regexp_stack_limit();
+    __ cmp(backtrack_stackpointer(), Operand::StaticVariable(stack_limit));
+    __ j(above, &no_stack_overflow);
+
+    SafeCall(&stack_overflow_label_);
+
+    __ bind(&no_stack_overflow);
+  }
+}
+
+
+void RegExpMacroAssemblerIA32::FrameAlign(int num_arguments, Register scratch) {
+  // TODO(lrn): Since we no longer use the system stack arbitrarily (but we do
+  // use it, e.g., for SafeCall), we know the number of elements on the stack
+  // since the last frame alignment. We might be able to simplify this.
+  int frameAlignment = OS::ActivationFrameAlignment();
+  if (frameAlignment != 0) {
+    // Make stack end at alignment and make room for num_arguments words
+    // and the original value of esp.
+    __ mov(scratch, esp);
+    __ sub(Operand(esp), Immediate((num_arguments + 1) * kPointerSize));
+    ASSERT(IsPowerOf2(frameAlignment));
+    __ and_(esp, -frameAlignment);
+    __ mov(Operand(esp, num_arguments * kPointerSize), scratch);
+  } else {
+    __ sub(Operand(esp), Immediate(num_arguments * kPointerSize));
+  }
+}
+
+
+void RegExpMacroAssemblerIA32::CallCFunction(ExternalReference function,
+                                             int num_arguments) {
+  __ mov(Operand(eax), Immediate(function));
+  __ call(Operand(eax));
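+  // FrameAlign stored the original esp just above the argument slots; restore
+  // it here in the aligned case, or simply pop the argument space otherwise.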
+  if (OS::ActivationFrameAlignment() != 0) {
+    __ mov(esp, Operand(esp, num_arguments * kPointerSize));
+  } else {
+    __ add(Operand(esp), Immediate(num_arguments * sizeof(int32_t)));
+  }
+}
+
+
+void RegExpMacroAssemblerIA32::LoadCurrentCharacterUnchecked(int cp_offset,
+                                                             int characters) {
+  if (mode_ == ASCII) {
+    if (characters == 4) {
+      __ mov(current_character(), Operand(esi, edi, times_1, cp_offset));
+    } else if (characters == 2) {
+      __ movzx_w(current_character(), Operand(esi, edi, times_1, cp_offset));
+    } else {
+      ASSERT(characters == 1);
+      __ movzx_b(current_character(), Operand(esi, edi, times_1, cp_offset));
+    }
+  } else {
+    ASSERT(mode_ == UC16);
+    if (characters == 2) {
+      __ mov(current_character(),
+             Operand(esi, edi, times_1, cp_offset * sizeof(uc16)));
+    } else {
+      ASSERT(characters == 1);
+      __ movzx_w(current_character(),
+                 Operand(esi, edi, times_1, cp_offset * sizeof(uc16)));
+    }
+  }
+}
+
+
+void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
+  __ int3();  // Unused on ia32.
+}
+
+#undef __
+
+#endif  // V8_NATIVE_REGEXP
+
+}}  // namespace v8::internal
diff --git a/src/ia32/regexp-macro-assembler-ia32.h b/src/ia32/regexp-macro-assembler-ia32.h
new file mode 100644
index 0000000..5ffd462
--- /dev/null
+++ b/src/ia32/regexp-macro-assembler-ia32.h
@@ -0,0 +1,232 @@
+// Copyright 2008-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
+#define V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
+
+namespace v8 {
+namespace internal {
+
+#ifndef V8_NATIVE_REGEXP
+class RegExpMacroAssemblerIA32: public RegExpMacroAssembler {
+ public:
+  RegExpMacroAssemblerIA32() { }
+  virtual ~RegExpMacroAssemblerIA32() { }
+};
+
+#else
+class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
+ public:
+  RegExpMacroAssemblerIA32(Mode mode, int registers_to_save);
+  virtual ~RegExpMacroAssemblerIA32();
+  virtual int stack_limit_slack();
+  virtual void AdvanceCurrentPosition(int by);
+  virtual void AdvanceRegister(int reg, int by);
+  virtual void Backtrack();
+  virtual void Bind(Label* label);
+  virtual void CheckAtStart(Label* on_at_start);
+  virtual void CheckCharacter(uint32_t c, Label* on_equal);
+  virtual void CheckCharacterAfterAnd(uint32_t c,
+                                      uint32_t mask,
+                                      Label* on_equal);
+  virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
+  virtual void CheckCharacterLT(uc16 limit, Label* on_less);
+  virtual void CheckCharacters(Vector<const uc16> str,
+                               int cp_offset,
+                               Label* on_failure,
+                               bool check_end_of_string);
+  // A "greedy loop" is a loop that is both greedy and with a simple
+  // body. It has a particularly simple implementation.
+  virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+  virtual void CheckNotAtStart(Label* on_not_at_start);
+  virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+  virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+                                               Label* on_no_match);
+  virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
+  virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
+  virtual void CheckNotCharacterAfterAnd(uint32_t c,
+                                         uint32_t mask,
+                                         Label* on_not_equal);
+  virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
+                                              uc16 minus,
+                                              uc16 mask,
+                                              Label* on_not_equal);
+  // Checks whether the given offset from the current position is before
+  // the end of the string.
+  virtual void CheckPosition(int cp_offset, Label* on_outside_input);
+  virtual bool CheckSpecialCharacterClass(uc16 type,
+                                          int cp_offset,
+                                          bool check_offset,
+                                          Label* on_no_match);
+  virtual void Fail();
+  virtual Handle<Object> GetCode(Handle<String> source);
+  virtual void GoTo(Label* label);
+  virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+  virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+  virtual void IfRegisterEqPos(int reg, Label* if_eq);
+  virtual IrregexpImplementation Implementation();
+  virtual void LoadCurrentCharacter(int cp_offset,
+                                    Label* on_end_of_input,
+                                    bool check_bounds = true,
+                                    int characters = 1);
+  virtual void PopCurrentPosition();
+  virtual void PopRegister(int register_index);
+  virtual void PushBacktrack(Label* label);
+  virtual void PushCurrentPosition();
+  virtual void PushRegister(int register_index,
+                            StackCheckFlag check_stack_limit);
+  virtual void ReadCurrentPositionFromRegister(int reg);
+  virtual void ReadStackPointerFromRegister(int reg);
+  virtual void SetRegister(int register_index, int to);
+  virtual void Succeed();
+  virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+  virtual void ClearRegisters(int reg_from, int reg_to);
+  virtual void WriteStackPointerToRegister(int reg);
+
+  // Called from RegExp if the stack-guard is triggered.
+  // If the code object is relocated, the return address is fixed before
+  // returning.
+  static int CheckStackGuardState(Address* return_address,
+                                  Code* re_code,
+                                  Address re_frame);
+
+ private:
+  // Offsets from ebp of function parameters and stored registers.
+  static const int kFramePointer = 0;
+  // Above the frame pointer - function parameters and return address.
+  static const int kReturn_eip = kFramePointer + kPointerSize;
+  static const int kFrameAlign = kReturn_eip + kPointerSize;
+  // Parameters.
+  static const int kInputString = kFrameAlign;
+  static const int kStartIndex = kInputString + kPointerSize;
+  static const int kInputStart = kStartIndex + kPointerSize;
+  static const int kInputEnd = kInputStart + kPointerSize;
+  static const int kRegisterOutput = kInputEnd + kPointerSize;
+  static const int kAtStart = kRegisterOutput + kPointerSize;
+  static const int kStackHighEnd = kAtStart + kPointerSize;
+  // Below the frame pointer - local stack variables.
+  // When adding local variables remember to push space for them in
+  // the frame in GetCode.
+  static const int kBackup_esi = kFramePointer - kPointerSize;
+  static const int kBackup_edi = kBackup_esi - kPointerSize;
+  static const int kBackup_ebx = kBackup_edi - kPointerSize;
+  static const int kInputStartMinusOne = kBackup_ebx - kPointerSize;
+  // First register address. Following registers are below it on the stack.
+  static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
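+  // With kPointerSize == 4 this places the parameters at ebp+8 .. ebp+32 and
+  // the first regexp register at ebp-20, with further registers below it.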
+
+  // Initial size of code buffer.
+  static const size_t kRegExpCodeSize = 1024;
+
+  // Load a number of characters at the given offset from the
+  // current position, into the current-character register.
+  void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
+
+  // Check whether preemption has been requested.
+  void CheckPreemption();
+
+  // Check whether we are exceeding the stack limit on the backtrack stack.
+  void CheckStackLimit();
+
+  // Generate a call to CheckStackGuardState.
+  void CallCheckStackGuardState(Register scratch);
+
+  // The ebp-relative location of a regexp register.
+  Operand register_location(int register_index);
+
+  // The register containing the current character after LoadCurrentCharacter.
+  inline Register current_character() { return edx; }
+
+  // The register containing the backtrack stack top. Provides a meaningful
+  // name to the register.
+  inline Register backtrack_stackpointer() { return ecx; }
+
+  // Byte size of chars in the string to match (decided by the Mode argument)
+  inline int char_size() { return static_cast<int>(mode_); }
+
+  // Equivalent to a conditional branch to the label, unless the label
+  // is NULL, in which case it is a conditional Backtrack.
+  void BranchOrBacktrack(Condition condition, Label* to, Hint hint = no_hint);
+
+  // Call and return internally in the generated code in a way that
+  // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
+  inline void SafeCall(Label* to);
+  inline void SafeReturn();
+  inline void SafeCallTarget(Label* name);
+
+  // Pushes the value of a register on the backtrack stack. Decrements the
+  // stack pointer (ecx) by a word size and stores the register's value there.
+  inline void Push(Register source);
+
+  // Pushes a value on the backtrack stack. Decrements the stack pointer (ecx)
+  // by a word size and stores the value there.
+  inline void Push(Immediate value);
+
+  // Pops a value from the backtrack stack. Reads the word at the stack pointer
+  // (ecx) and increments it by a word size.
+  inline void Pop(Register target);
+
+  // Before calling a C-function from generated code, align arguments on stack.
+  // After aligning the frame, arguments must be stored in esp[0], esp[4],
+  // etc., not pushed. The argument count assumes all arguments are word sized.
+  // Some compilers/platforms require the stack to be aligned when calling
+  // C++ code.
+  // Needs a scratch register to do some arithmetic. This register will be
+  // trashed.
+  inline void FrameAlign(int num_arguments, Register scratch);
+
+  // Calls a C function and cleans up the space for arguments allocated
+  // by FrameAlign. The called function is not allowed to trigger a garbage
+  // collection, since that might move the code and invalidate the return
+  // address (unless this is somehow accounted for).
+  inline void CallCFunction(ExternalReference function, int num_arguments);
+
+  MacroAssembler* masm_;
+
+  // Which mode to generate code for (ASCII or UC16).
+  Mode mode_;
+
+  // One greater than maximal register index actually used.
+  int num_registers_;
+
+  // Number of registers to output at the end (the saved registers
+  // are always 0..num_saved_registers_-1)
+  int num_saved_registers_;
+
+  // Labels used internally.
+  Label entry_label_;
+  Label start_label_;
+  Label success_label_;
+  Label backtrack_label_;
+  Label exit_label_;
+  Label check_preempt_label_;
+  Label stack_overflow_label_;
+};
+#endif  // V8_NATIVE_REGEXP
+
+}}  // namespace v8::internal
+
+#endif  // V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
diff --git a/src/ia32/register-allocator-ia32-inl.h b/src/ia32/register-allocator-ia32-inl.h
new file mode 100644
index 0000000..99ae6eb
--- /dev/null
+++ b/src/ia32/register-allocator-ia32-inl.h
@@ -0,0 +1,82 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_REGISTER_ALLOCATOR_IA32_INL_H_
+#define V8_IA32_REGISTER_ALLOCATOR_IA32_INL_H_
+
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+bool RegisterAllocator::IsReserved(Register reg) {
+  // The code for this test relies on the order of register codes.
+  return reg.code() >= esp.code() && reg.code() <= esi.code();
+}
+
+
+// The register allocator uses small integers to represent the
+// non-reserved assembler registers.  The mapping is:
+
+// eax <-> 0, ebx <-> 1, ecx <-> 2, edx <-> 3, edi <-> 4.
+
+int RegisterAllocator::ToNumber(Register reg) {
+  ASSERT(reg.is_valid() && !IsReserved(reg));
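+  // kNumbers is indexed by the ia32 register code (eax, ecx, edx, ebx, esp,
+  // ebp, esi, edi); reserved registers map to -1.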
+  const int kNumbers[] = {
+    0,   // eax
+    2,   // ecx
+    3,   // edx
+    1,   // ebx
+    -1,  // esp
+    -1,  // ebp
+    -1,  // esi
+    4    // edi
+  };
+  return kNumbers[reg.code()];
+}
+
+
+Register RegisterAllocator::ToRegister(int num) {
+  ASSERT(num >= 0 && num < kNumRegisters);
+  const Register kRegisters[] = { eax, ebx, ecx, edx, edi };
+  return kRegisters[num];
+}
+
+
+void RegisterAllocator::Initialize() {
+  Reset();
+  // The non-reserved edi register is live on JS function entry.
+  Use(edi);  // JS function.
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_IA32_REGISTER_ALLOCATOR_IA32_INL_H_
diff --git a/src/ia32/register-allocator-ia32.cc b/src/ia32/register-allocator-ia32.cc
new file mode 100644
index 0000000..2914960
--- /dev/null
+++ b/src/ia32/register-allocator-ia32.cc
@@ -0,0 +1,99 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "register-allocator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// Result implementation.
+
+void Result::ToRegister() {
+  ASSERT(is_valid());
+  if (is_constant()) {
+    Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate();
+    ASSERT(fresh.is_valid());
+    if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
+      CodeGeneratorScope::Current()->LoadUnsafeSmi(fresh.reg(), handle());
+    } else {
+      CodeGeneratorScope::Current()->masm()->Set(fresh.reg(),
+                                                 Immediate(handle()));
+    }
+    // This result becomes a copy of the fresh one.
+    *this = fresh;
+  }
+  ASSERT(is_register());
+}
+
+
+void Result::ToRegister(Register target) {
+  ASSERT(is_valid());
+  if (!is_register() || !reg().is(target)) {
+    Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate(target);
+    ASSERT(fresh.is_valid());
+    if (is_register()) {
+      CodeGeneratorScope::Current()->masm()->mov(fresh.reg(), reg());
+    } else {
+      ASSERT(is_constant());
+      if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
+        CodeGeneratorScope::Current()->LoadUnsafeSmi(fresh.reg(), handle());
+      } else {
+        CodeGeneratorScope::Current()->masm()->Set(fresh.reg(),
+                                                   Immediate(handle()));
+      }
+    }
+    *this = fresh;
+  } else if (is_register() && reg().is(target)) {
+    ASSERT(CodeGeneratorScope::Current()->has_valid_frame());
+    CodeGeneratorScope::Current()->frame()->Spill(target);
+    ASSERT(CodeGeneratorScope::Current()->allocator()->count(target) == 1);
+  }
+  ASSERT(is_register());
+  ASSERT(reg().is(target));
+}
+
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
+  Result result = AllocateWithoutSpilling();
+  // Check that the register is a byte register.  If not, unuse the
+  // register if valid and return an invalid result.
+  if (result.is_valid() && !result.reg().is_byte_register()) {
+    result.Unuse();
+    return Result();
+  }
+  return result;
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/ia32/register-allocator-ia32.h b/src/ia32/register-allocator-ia32.h
new file mode 100644
index 0000000..e7ce91f
--- /dev/null
+++ b/src/ia32/register-allocator-ia32.h
@@ -0,0 +1,43 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_REGISTER_ALLOCATOR_IA32_H_
+#define V8_IA32_REGISTER_ALLOCATOR_IA32_H_
+
+namespace v8 {
+namespace internal {
+
+class RegisterAllocatorConstants : public AllStatic {
+ public:
+  static const int kNumRegisters = 5;
+  static const int kInvalidRegister = -1;
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_IA32_REGISTER_ALLOCATOR_IA32_H_
diff --git a/src/ia32/simulator-ia32.cc b/src/ia32/simulator-ia32.cc
new file mode 100644
index 0000000..ab81693
--- /dev/null
+++ b/src/ia32/simulator-ia32.cc
@@ -0,0 +1,30 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Since there is no simulator for the ia32 architecture this file is empty.
+
diff --git a/src/ia32/simulator-ia32.h b/src/ia32/simulator-ia32.h
new file mode 100644
index 0000000..8fa4287
--- /dev/null
+++ b/src/ia32/simulator-ia32.h
@@ -0,0 +1,53 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_SIMULATOR_IA32_H_
+#define V8_IA32_SIMULATOR_IA32_H_
+
+#include "allocation.h"
+
+// Since there is no simulator for the ia32 architecture the only thing we can
+// do is to call the entry directly.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+  entry(p0, p1, p2, p3, p4);
+
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on ia32 uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+    return c_limit;
+  }
+};
+
+// Call the generated regexp code directly. The entry function pointer should
+// expect seven int/pointer-sized arguments and return an int.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+  entry(p0, p1, p2, p3, p4, p5, p6)
+
+#endif  // V8_IA32_SIMULATOR_IA32_H_
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
new file mode 100644
index 0000000..ca4e142
--- /dev/null
+++ b/src/ia32/stub-cache-ia32.cc
@@ -0,0 +1,1876 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "ic-inl.h"
+#include "codegen-inl.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(MacroAssembler* masm,
+                       Code::Flags flags,
+                       StubCache::Table table,
+                       Register name,
+                       Register offset,
+                       Register extra) {
+  ExternalReference key_offset(SCTableReference::keyReference(table));
+  ExternalReference value_offset(SCTableReference::valueReference(table));
+
+  Label miss;
+
+  if (extra.is_valid()) {
+    // Get the code entry from the cache.
+    __ mov(extra, Operand::StaticArray(offset, times_2, value_offset));
+
+    // Check that the key in the entry matches the name.
+    __ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
+    __ j(not_equal, &miss, not_taken);
+
+    // Check that the flags match what we're looking for.
+    __ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
+    __ and_(offset, ~Code::kFlagsNotUsedInLookup);
+    __ cmp(offset, flags);
+    __ j(not_equal, &miss);
+
+    // Jump to the first instruction in the code stub.
+    __ add(Operand(extra), Immediate(Code::kHeaderSize - kHeapObjectTag));
+    __ jmp(Operand(extra));
+
+    __ bind(&miss);
+  } else {
+    // Save the offset on the stack.
+    __ push(offset);
+
+    // Check that the key in the entry matches the name.
+    __ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
+    __ j(not_equal, &miss, not_taken);
+
+    // Get the code entry from the cache.
+    __ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
+
+    // Check that the flags match what we're looking for.
+    __ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
+    __ and_(offset, ~Code::kFlagsNotUsedInLookup);
+    __ cmp(offset, flags);
+    __ j(not_equal, &miss);
+
+    // Restore offset and re-load code entry from cache.
+    __ pop(offset);
+    __ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
+
+    // Jump to the first instruction in the code stub.
+    __ add(Operand(offset), Immediate(Code::kHeaderSize - kHeapObjectTag));
+    __ jmp(Operand(offset));
+
+    // Pop at miss.
+    __ bind(&miss);
+    __ pop(offset);
+  }
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm,
+                              Code::Flags flags,
+                              Register receiver,
+                              Register name,
+                              Register scratch,
+                              Register extra) {
+  Label miss;
+
+  // Make sure that code is valid. The shifting code relies on the
+  // entry size being 8.
+  ASSERT(sizeof(Entry) == 8);
+
+  // Make sure the flags do not name a specific type.
+  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+  // Make sure that there are no register conflicts.
+  ASSERT(!scratch.is(receiver));
+  ASSERT(!scratch.is(name));
+  ASSERT(!extra.is(receiver));
+  ASSERT(!extra.is(name));
+  ASSERT(!extra.is(scratch));
+
+  // Check that the receiver isn't a smi.
+  __ test(receiver, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+
+  // Get the map of the receiver and compute the hash.
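+  // The hash is (length field of the name + map of the receiver) ^ flags,
+  // masked to the primary table size; the secondary probe below rehashes by
+  // subtracting the name and re-adding the flags.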
+  __ mov(scratch, FieldOperand(name, String::kLengthOffset));
+  __ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ xor_(scratch, flags);
+  __ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
+
+  // Probe the primary table.
+  ProbeTable(masm, flags, kPrimary, name, scratch, extra);
+
+  // Primary miss: Compute hash for secondary probe.
+  __ mov(scratch, FieldOperand(name, String::kLengthOffset));
+  __ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ xor_(scratch, flags);
+  __ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
+  __ sub(scratch, Operand(name));
+  __ add(Operand(scratch), Immediate(flags));
+  __ and_(scratch, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
+
+  // Probe the secondary table.
+  ProbeTable(masm, flags, kSecondary, name, scratch, extra);
+
+  // Cache miss: Fall through and let the caller handle the miss by
+  // entering the runtime system.
+  __ bind(&miss);
+}
+
+
+template <typename Pushable>
+static void PushInterceptorArguments(MacroAssembler* masm,
+                                     Register receiver,
+                                     Register holder,
+                                     Pushable name,
+                                     JSObject* holder_obj) {
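+  // Five values are pushed: receiver, holder, name, the interceptor info and
+  // its data field; callers pass 5 as the argument count. Note that the
+  // receiver register is clobbered to hold the interceptor handle.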
+  __ push(receiver);
+  __ push(holder);
+  __ push(name);
+  InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
+  __ mov(receiver, Immediate(Handle<Object>(interceptor)));
+  __ push(receiver);
+  __ push(FieldOperand(receiver, InterceptorInfo::kDataOffset));
+}
+
+
+void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
+                                                       int index,
+                                                       Register prototype) {
+  // Load the global or builtins object from the current context.
+  __ mov(prototype, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  // Load the global context from the global or builtins object.
+  __ mov(prototype,
+         FieldOperand(prototype, GlobalObject::kGlobalContextOffset));
+  // Load the function from the global context.
+  __ mov(prototype, Operand(prototype, Context::SlotOffset(index)));
+  // Load the initial map.  The global functions all have initial maps.
+  __ mov(prototype,
+         FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
+  // Load the prototype from the initial map.
+  __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
+                                           Register receiver,
+                                           Register scratch,
+                                           Label* miss_label) {
+  // Check that the receiver isn't a smi.
+  __ test(receiver, Immediate(kSmiTagMask));
+  __ j(zero, miss_label, not_taken);
+
+  // Check that the object is a JS array.
+  __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
+  __ j(not_equal, miss_label, not_taken);
+
+  // Load length directly from the JS array.
+  __ mov(eax, FieldOperand(receiver, JSArray::kLengthOffset));
+  __ ret(0);
+}
+
+
+// Generate code to check if an object is a string.  If the object is
+// a string, the map's instance type is left in the scratch register.
+static void GenerateStringCheck(MacroAssembler* masm,
+                                Register receiver,
+                                Register scratch,
+                                Label* smi,
+                                Label* non_string_object) {
+  // Check that the object isn't a smi.
+  __ test(receiver, Immediate(kSmiTagMask));
+  __ j(zero, smi, not_taken);
+
+  // Check that the object is a string.
+  __ mov(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+  ASSERT(kNotStringTag != 0);
+  __ test(scratch, Immediate(kNotStringTag));
+  __ j(not_zero, non_string_object, not_taken);
+}
+
+
+void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
+                                            Register receiver,
+                                            Register scratch,
+                                            Label* miss) {
+  Label load_length, check_wrapper;
+
+  // Check if the object is a string, leaving the instance type in the
+  // scratch register.
+  GenerateStringCheck(masm, receiver, scratch, miss, &check_wrapper);
+
+  // Load length directly from the string.
+  __ bind(&load_length);
+  __ and_(scratch, kStringSizeMask);
+  __ mov(eax, FieldOperand(receiver, String::kLengthOffset));
+  // ecx is also the receiver.
+  __ lea(ecx, Operand(scratch, String::kLongLengthShift));
+  __ shr(eax);  // ecx is implicit shift register.
+  __ shl(eax, kSmiTagSize);
+  __ ret(0);
+
+  // Check if the object is a JSValue wrapper.
+  __ bind(&check_wrapper);
+  __ cmp(scratch, JS_VALUE_TYPE);
+  __ j(not_equal, miss, not_taken);
+
+  // Check if the wrapped value is a string and load the length
+  // directly if it is.
+  __ mov(receiver, FieldOperand(receiver, JSValue::kValueOffset));
+  GenerateStringCheck(masm, receiver, scratch, miss, miss);
+  __ jmp(&load_length);
+}
+
+
+void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
+                                                 Register receiver,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Label* miss_label) {
+  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+  __ mov(eax, Operand(scratch1));
+  __ ret(0);
+}
+
+
+// Load a fast property out of a holder object (src). In-object properties
+// are loaded directly; otherwise the property is loaded from the properties
+// fixed array.
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+                                            Register dst, Register src,
+                                            JSObject* holder, int index) {
+  // Adjust for the number of properties stored in the holder.
+  index -= holder->map()->inobject_properties();
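+  // After this adjustment a negative index denotes an in-object property
+  // (counting back from the end of the object); a non-negative index is a
+  // slot in the properties fixed array.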
+  if (index < 0) {
+    // Get the property straight out of the holder.
+    int offset = holder->map()->instance_size() + (index * kPointerSize);
+    __ mov(dst, FieldOperand(src, offset));
+  } else {
+    // Calculate the offset into the properties array.
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    __ mov(dst, FieldOperand(src, JSObject::kPropertiesOffset));
+    __ mov(dst, FieldOperand(dst, offset));
+  }
+}
+
+
+template <class Pushable>
+static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
+                                                   Register receiver,
+                                                   Register holder,
+                                                   Pushable name,
+                                                   JSObject* holder_obj) {
+  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+  ExternalReference ref =
+      ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly));
+  __ mov(eax, Immediate(5));
+  __ mov(ebx, Immediate(ref));
+
+  CEntryStub stub(1);
+  __ CallStub(&stub);
+}
+
+
+template <class Compiler>
+static void CompileLoadInterceptor(Compiler* compiler,
+                                   StubCompiler* stub_compiler,
+                                   MacroAssembler* masm,
+                                   JSObject* object,
+                                   JSObject* holder,
+                                   String* name,
+                                   LookupResult* lookup,
+                                   Register receiver,
+                                   Register scratch1,
+                                   Register scratch2,
+                                   Label* miss) {
+  ASSERT(holder->HasNamedInterceptor());
+  ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+  // Check that the receiver isn't a smi.
+  __ test(receiver, Immediate(kSmiTagMask));
+  __ j(zero, miss, not_taken);
+
+  // Check that the maps haven't changed.
+  Register reg =
+      stub_compiler->CheckPrototypes(object, receiver, holder,
+                                     scratch1, scratch2, name, miss);
+
+  if (lookup->IsValid() && lookup->IsCacheable()) {
+    compiler->CompileCacheable(masm,
+                               stub_compiler,
+                               receiver,
+                               reg,
+                               scratch1,
+                               scratch2,
+                               holder,
+                               lookup,
+                               name,
+                               miss);
+  } else {
+    compiler->CompileRegular(masm,
+                             receiver,
+                             reg,
+                             scratch2,
+                             holder,
+                             miss);
+  }
+}
+
+
+static void LookupPostInterceptor(JSObject* holder,
+                                  String* name,
+                                  LookupResult* lookup) {
+  holder->LocalLookupRealNamedProperty(name, lookup);
+  if (lookup->IsNotFound()) {
+    Object* proto = holder->GetPrototype();
+    if (proto != Heap::null_value()) {
+      proto->Lookup(name, lookup);
+    }
+  }
+}
+
+
+class LoadInterceptorCompiler BASE_EMBEDDED {
+ public:
+  explicit LoadInterceptorCompiler(Register name) : name_(name) {}
+
+  void CompileCacheable(MacroAssembler* masm,
+                        StubCompiler* stub_compiler,
+                        Register receiver,
+                        Register holder,
+                        Register scratch1,
+                        Register scratch2,
+                        JSObject* holder_obj,
+                        LookupResult* lookup,
+                        String* name,
+                        Label* miss_label) {
+    AccessorInfo* callback = 0;
+    bool optimize = false;
+    // So far the most popular follow-ups for interceptor loads are FIELD
+    // and CALLBACKS, so inline only those; other cases may be added
+    // later.
+    if (lookup->type() == FIELD) {
+      optimize = true;
+    } else if (lookup->type() == CALLBACKS) {
+      Object* callback_object = lookup->GetCallbackObject();
+      if (callback_object->IsAccessorInfo()) {
+        callback = AccessorInfo::cast(callback_object);
+        optimize = callback->getter() != NULL;
+      }
+    }
+
+    if (!optimize) {
+      CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+      return;
+    }
+
+    // Note: starting a frame here makes GC aware of pointers pushed below.
+    __ EnterInternalFrame();
+
+    if (lookup->type() == CALLBACKS) {
+      __ push(receiver);
+    }
+    __ push(holder);
+    __ push(name_);
+
+    CompileCallLoadPropertyWithInterceptor(masm,
+                                           receiver,
+                                           holder,
+                                           name_,
+                                           holder_obj);
+
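+    // The interceptor call leaves its result in eax; the sentinel value
+    // means the interceptor did not provide a result, so fall back to the
+    // property found by the post-interceptor lookup.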
+    Label interceptor_failed;
+    __ cmp(eax, Factory::no_interceptor_result_sentinel());
+    __ j(equal, &interceptor_failed);
+    __ LeaveInternalFrame();
+    __ ret(0);
+
+    __ bind(&interceptor_failed);
+    __ pop(name_);
+    __ pop(holder);
+    if (lookup->type() == CALLBACKS) {
+      __ pop(receiver);
+    }
+
+    __ LeaveInternalFrame();
+
+    if (lookup->type() == FIELD) {
+      holder = stub_compiler->CheckPrototypes(holder_obj, holder,
+                                              lookup->holder(), scratch1,
+                                              scratch2,
+                                              name,
+                                              miss_label);
+      stub_compiler->GenerateFastPropertyLoad(masm, eax,
+                                              holder, lookup->holder(),
+                                              lookup->GetFieldIndex());
+      __ ret(0);
+    } else {
+      ASSERT(lookup->type() == CALLBACKS);
+      ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
+      ASSERT(callback != NULL);
+      ASSERT(callback->getter() != NULL);
+
+      Label cleanup;
+      __ pop(scratch2);  // temporarily remove the return address
+      __ push(receiver);  // push the receiver below it for the runtime call
+      __ push(scratch2);  // put the return address back
+
+      holder = stub_compiler->CheckPrototypes(holder_obj, holder,
+                                              lookup->holder(), scratch1,
+                                              scratch2,
+                                              name,
+                                              &cleanup);
+
+      __ pop(scratch2);  // save old return address
+      __ push(holder);
+      __ mov(holder, Immediate(Handle<AccessorInfo>(callback)));
+      __ push(holder);
+      __ push(FieldOperand(holder, AccessorInfo::kDataOffset));
+      __ push(name_);
+      __ push(scratch2);  // restore old return address
+
+      ExternalReference ref =
+          ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+      __ TailCallRuntime(ref, 5, 1);
+
+      __ bind(&cleanup);
+      __ pop(scratch1);  // return address
+      __ pop(scratch2);  // discard the receiver pushed above
+      __ push(scratch1);  // put the return address back
+    }
+  }
+
+
+  void CompileRegular(MacroAssembler* masm,
+                      Register receiver,
+                      Register holder,
+                      Register scratch,
+                      JSObject* holder_obj,
+                      Label* miss_label) {
+    __ pop(scratch);  // save old return address
+    PushInterceptorArguments(masm, receiver, holder, name_, holder_obj);
+    __ push(scratch);  // restore old return address
+
+    ExternalReference ref = ExternalReference(
+        IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
+    __ TailCallRuntime(ref, 5, 1);
+  }
+
+ private:
+  Register name_;
+};
+
+
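+// Compiles the interceptor-specific parts of call stubs; the cacheable path
+// inlines a direct call to a constant function found behind the interceptor.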
+class CallInterceptorCompiler BASE_EMBEDDED {
+ public:
+  explicit CallInterceptorCompiler(const ParameterCount& arguments)
+      : arguments_(arguments), argc_(arguments.immediate()) {}
+
+  void CompileCacheable(MacroAssembler* masm,
+                        StubCompiler* stub_compiler,
+                        Register receiver,
+                        Register holder,
+                        Register scratch1,
+                        Register scratch2,
+                        JSObject* holder_obj,
+                        LookupResult* lookup,
+                        String* name,
+                        Label* miss_label) {
+    JSFunction* function = 0;
+    bool optimize = false;
+    // So far the most popular case when the interceptor fails is a
+    // CONSTANT_FUNCTION sitting below it in the lookup chain.
+    if (lookup->type() == CONSTANT_FUNCTION) {
+      function = lookup->GetConstantFunction();
+      // JSArray holder is a special case for call constant function
+      // (see the corresponding code).
+      if (function->is_compiled() && !holder_obj->IsJSArray()) {
+        optimize = true;
+      }
+    }
+
+    if (!optimize) {
+      CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+      return;
+    }
+
+    __ EnterInternalFrame();
+    __ push(holder);  // save the holder
+
+    CompileCallLoadPropertyWithInterceptor(
+        masm,
+        receiver,
+        holder,
+        // Inside the internal frame (after EnterInternalFrame) this
+        // ebp-relative operand refers to name.
+        Operand(ebp, (argc_ + 3) * kPointerSize),
+        holder_obj);
+
+    __ pop(receiver);  // restore the saved holder into the receiver register
+    __ LeaveInternalFrame();
+
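+    // If the interceptor produced a result (eax is not the sentinel), skip
+    // the inlined constant-function call and let the caller invoke the
+    // result; otherwise fall through and call the constant function directly.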
+    __ cmp(eax, Factory::no_interceptor_result_sentinel());
+    Label invoke;
+    __ j(not_equal, &invoke);
+
+    stub_compiler->CheckPrototypes(holder_obj, receiver,
+                                   lookup->holder(), scratch1,
+                                   scratch2,
+                                   name,
+                                   miss_label);
+    if (lookup->holder()->IsGlobalObject()) {
+      __ mov(edx, Operand(esp, (argc_ + 1) * kPointerSize));
+      __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
+      __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edx);
+    }
+
+    ASSERT(function->is_compiled());
+    // Get the function and setup the context.
+    __ mov(edi, Immediate(Handle<JSFunction>(function)));
+    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+    // Jump to the cached code (tail call).
+    ASSERT(function->is_compiled());
+    Handle<Code> code(function->code());
+    ParameterCount expected(function->shared()->formal_parameter_count());
+    __ InvokeCode(code, expected, arguments_,
+                  RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+
+    __ bind(&invoke);
+  }
+
+  void CompileRegular(MacroAssembler* masm,
+                      Register receiver,
+                      Register holder,
+                      Register scratch,
+                      JSObject* holder_obj,
+                      Label* miss_label) {
+    __ EnterInternalFrame();
+
+    PushInterceptorArguments(masm,
+                             receiver,
+                             holder,
+                             Operand(ebp, (argc_ + 3) * kPointerSize),
+                             holder_obj);
+
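+    // Call the interceptor runtime function through the C entry stub:
+    // eax holds the argument count and ebx the function to call.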
+    ExternalReference ref = ExternalReference(
+        IC_Utility(IC::kLoadPropertyWithInterceptorForCall));
+    __ mov(eax, Immediate(5));
+    __ mov(ebx, Immediate(ref));
+
+    CEntryStub stub(1);
+    __ CallStub(&stub);
+
+    __ LeaveInternalFrame();
+  }
+
+ private:
+  const ParameterCount& arguments_;
+  int argc_;
+};
+
+
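+// Jump to the appropriate miss builtin for load or keyed load ICs.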
+void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
+  ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
+  Code* code = NULL;
+  if (kind == Code::LOAD_IC) {
+    code = Builtins::builtin(Builtins::LoadIC_Miss);
+  } else {
+    code = Builtins::builtin(Builtins::KeyedLoadIC_Miss);
+  }
+
+  Handle<Code> ic(code);
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+}
+
+
+void StubCompiler::GenerateStoreField(MacroAssembler* masm,
+                                      Builtins::Name storage_extend,
+                                      JSObject* object,
+                                      int index,
+                                      Map* transition,
+                                      Register receiver_reg,
+                                      Register name_reg,
+                                      Register scratch,
+                                      Label* miss_label) {
+  // Check that the object isn't a smi.
+  __ test(receiver_reg, Immediate(kSmiTagMask));
+  __ j(zero, miss_label, not_taken);
+
+  // Check that the map of the object hasn't changed.
+  __ cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
+         Immediate(Handle<Map>(object->map())));
+  __ j(not_equal, miss_label, not_taken);
+
+  // Perform global security token check if needed.
+  if (object->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
+  }
+
+  // Stub never generated for non-global objects that require access
+  // checks.
+  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+  // Perform map transition for the receiver if necessary.
+  if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+    // The properties must be extended before we can store the value.
+    // We jump to a runtime call that extends the properties array.
+    __ mov(ecx, Immediate(Handle<Map>(transition)));
+    Handle<Code> ic(Builtins::builtin(storage_extend));
+    __ jmp(ic, RelocInfo::CODE_TARGET);
+    return;
+  }
+
+  if (transition != NULL) {
+    // Update the map of the object; no write barrier updating is
+    // needed because the map is never in new space.
+    __ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset),
+           Immediate(Handle<Map>(transition)));
+  }
+
+  // Adjust for the number of properties stored in the object. Even in the
+  // face of a transition we can use the old map here because the size of the
+  // object and the number of in-object properties is not going to change.
+  index -= object->map()->inobject_properties();
+
+  if (index < 0) {
+    // Set the property straight into the object.
+    int offset = object->map()->instance_size() + (index * kPointerSize);
+    __ mov(FieldOperand(receiver_reg, offset), eax);
+
+    // Update the write barrier for the array address.
+    // Pass the value being stored in the now unused name_reg.
+    __ mov(name_reg, Operand(eax));
+    __ RecordWrite(receiver_reg, offset, name_reg, scratch);
+  } else {
+    // Write to the properties array.
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    // Get the properties array (optimistically).
+    __ mov(scratch, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+    __ mov(FieldOperand(scratch, offset), eax);
+
+    // Update the write barrier for the array address.
+    // Pass the value being stored in the now unused name_reg.
+    __ mov(name_reg, Operand(eax));
+    __ RecordWrite(scratch, offset, name_reg, receiver_reg);
+  }
+
+  // Return the value (register eax).
+  __ ret(0);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
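+// Check that the maps along the prototype chain from the receiver to the
+// holder are unchanged (and that any global objects on the chain still do
+// not have the named property); returns the register holding the holder.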
+Register StubCompiler::CheckPrototypes(JSObject* object,
+                                       Register object_reg,
+                                       JSObject* holder,
+                                       Register holder_reg,
+                                       Register scratch,
+                                       String* name,
+                                       Label* miss) {
+  // Check that the maps haven't changed.
+  Register result =
+      masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch, miss);
+
+  // If we've skipped any global objects, it's not enough to verify
+  // that their maps haven't changed.
+  while (object != holder) {
+    if (object->IsGlobalObject()) {
+      GlobalObject* global = GlobalObject::cast(object);
+      Object* probe = global->EnsurePropertyCell(name);
+      if (probe->IsFailure()) {
+        set_failure(Failure::cast(probe));
+        return result;
+      }
+      JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+      ASSERT(cell->value()->IsTheHole());
+      __ mov(scratch, Immediate(Handle<Object>(cell)));
+      __ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
+             Immediate(Factory::the_hole_value()));
+      __ j(not_equal, miss, not_taken);
+    }
+    object = JSObject::cast(object->GetPrototype());
+  }
+
+  // Return the register containing the holder.
+  return result;
+}
+
+
+void StubCompiler::GenerateLoadField(JSObject* object,
+                                     JSObject* holder,
+                                     Register receiver,
+                                     Register scratch1,
+                                     Register scratch2,
+                                     int index,
+                                     String* name,
+                                     Label* miss) {
+  // Check that the receiver isn't a smi.
+  __ test(receiver, Immediate(kSmiTagMask));
+  __ j(zero, miss, not_taken);
+
+  // Check the prototype chain.
+  Register reg =
+      CheckPrototypes(object, receiver, holder,
+                      scratch1, scratch2, name, miss);
+
+  // Get the value from the properties.
+  GenerateFastPropertyLoad(masm(), eax, reg, holder, index);
+  __ ret(0);
+}
+
+
+void StubCompiler::GenerateLoadCallback(JSObject* object,
+                                        JSObject* holder,
+                                        Register receiver,
+                                        Register name_reg,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        AccessorInfo* callback,
+                                        String* name,
+                                        Label* miss) {
+  // Check that the receiver isn't a smi.
+  __ test(receiver, Immediate(kSmiTagMask));
+  __ j(zero, miss, not_taken);
+
+  // Check that the maps haven't changed.
+  Register reg =
+      CheckPrototypes(object, receiver, holder,
+                      scratch1, scratch2, name, miss);
+
+  // Push the arguments on the JS stack of the caller.
+  __ pop(scratch2);  // remove return address
+  __ push(receiver);  // receiver
+  __ push(reg);  // holder
+  __ mov(reg, Immediate(Handle<AccessorInfo>(callback)));  // callback data
+  __ push(reg);
+  __ push(FieldOperand(reg, AccessorInfo::kDataOffset));
+  __ push(name_reg);  // name
+  __ push(scratch2);  // restore return address
+
+  // Do tail-call to the runtime system.
+  ExternalReference load_callback_property =
+      ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+  __ TailCallRuntime(load_callback_property, 5, 1);
+}
+
+
+void StubCompiler::GenerateLoadConstant(JSObject* object,
+                                        JSObject* holder,
+                                        Register receiver,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Object* value,
+                                        String* name,
+                                        Label* miss) {
+  // Check that the receiver isn't a smi.
+  __ test(receiver, Immediate(kSmiTagMask));
+  __ j(zero, miss, not_taken);
+
+  // Check that the maps haven't changed.
+  Register reg =
+      CheckPrototypes(object, receiver, holder,
+                      scratch1, scratch2, name, miss);
+
+  // Return the constant value.
+  __ mov(eax, Handle<Object>(value));
+  __ ret(0);
+}
+
+
+void StubCompiler::GenerateLoadInterceptor(JSObject* object,
+                                           JSObject* holder,
+                                           LookupResult* lookup,
+                                           Register receiver,
+                                           Register name_reg,
+                                           Register scratch1,
+                                           Register scratch2,
+                                           String* name,
+                                           Label* miss) {
+  LoadInterceptorCompiler compiler(name_reg);
+  CompileLoadInterceptor(&compiler,
+                         this,
+                         masm(),
+                         object,
+                         holder,
+                         name,
+                         lookup,
+                         receiver,
+                         scratch1,
+                         scratch2,
+                         miss);
+}
+
+
+// TODO(1241006): Avoid having lazy compile stubs specialized by the
+// number of arguments. It is not needed anymore.
+Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Push a copy of the function onto the stack.
+  __ push(edi);
+
+  __ push(edi);  // function is also the parameter to the runtime call
+  __ CallRuntime(Runtime::kLazyCompile, 1);
+  __ pop(edi);
+
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
+
+  // Do a tail-call of the compiled function.
+  __ lea(ecx, FieldOperand(eax, Code::kHeaderSize));
+  __ jmp(Operand(ecx));
+
+  return GetCodeWithFlags(flags, "LazyCompileStub");
+}
+
+
+Object* CallStubCompiler::CompileCallField(Object* object,
+                                           JSObject* holder,
+                                           int index,
+                                           String* name) {
+  // ----------- S t a t e -------------
+  // -----------------------------------
+  Label miss;
+
+  // Get the receiver from the stack.
+  const int argc = arguments().immediate();
+  __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+
+  // Do the right check and compute the holder register.
+  Register reg =
+      CheckPrototypes(JSObject::cast(object), edx, holder,
+                      ebx, ecx, name, &miss);
+
+  GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
+
+  // Check that the function really is a function.
+  __ test(edi, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
+  __ j(not_equal, &miss, not_taken);
+
+  // Patch the receiver on the stack with the global proxy if
+  // necessary.
+  if (object->IsGlobalObject()) {
+    __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
+    __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
+  }
+
+  // Invoke the function.
+  __ InvokeFunction(edi, arguments(), JUMP_FUNCTION);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(FIELD, name);
+}
+
+
+Object* CallStubCompiler::CompileCallConstant(Object* object,
+                                              JSObject* holder,
+                                              JSFunction* function,
+                                              String* name,
+                                              CheckType check) {
+  // ----------- S t a t e -------------
+  // -----------------------------------
+  Label miss;
+
+  // Get the receiver from the stack.
+  const int argc = arguments().immediate();
+  __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  if (check != NUMBER_CHECK) {
+    __ test(edx, Immediate(kSmiTagMask));
+    __ j(zero, &miss, not_taken);
+  }
+
+  // Make sure that it's okay not to patch the on-stack receiver
+  // unless we're doing a receiver map check.
+  ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
+
+  switch (check) {
+    case RECEIVER_MAP_CHECK:
+      // Check that the maps haven't changed.
+      CheckPrototypes(JSObject::cast(object), edx, holder,
+                      ebx, ecx, name, &miss);
+
+      // Patch the receiver on the stack with the global proxy if
+      // necessary.
+      if (object->IsGlobalObject()) {
+        __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
+        __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
+      }
+      break;
+
+    case STRING_CHECK:
+      // Check that the object is a two-byte string or a symbol.
+      __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+      __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+      __ cmp(ecx, FIRST_NONSTRING_TYPE);
+      __ j(above_equal, &miss, not_taken);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateLoadGlobalFunctionPrototype(masm(),
+                                          Context::STRING_FUNCTION_INDEX,
+                                          ecx);
+      CheckPrototypes(JSObject::cast(object->GetPrototype()), ecx, holder,
+                      ebx, edx, name, &miss);
+      break;
+
+    case NUMBER_CHECK: {
+      Label fast;
+      // Check that the object is a smi or a heap number.
+      __ test(edx, Immediate(kSmiTagMask));
+      __ j(zero, &fast, taken);
+      __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
+      __ j(not_equal, &miss, not_taken);
+      __ bind(&fast);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateLoadGlobalFunctionPrototype(masm(),
+                                          Context::NUMBER_FUNCTION_INDEX,
+                                          ecx);
+      CheckPrototypes(JSObject::cast(object->GetPrototype()), ecx, holder,
+                      ebx, edx, name, &miss);
+      break;
+    }
+
+    case BOOLEAN_CHECK: {
+      Label fast;
+      // Check that the object is a boolean.
+      __ cmp(edx, Factory::true_value());
+      __ j(equal, &fast, taken);
+      __ cmp(edx, Factory::false_value());
+      __ j(not_equal, &miss, not_taken);
+      __ bind(&fast);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateLoadGlobalFunctionPrototype(masm(),
+                                          Context::BOOLEAN_FUNCTION_INDEX,
+                                          ecx);
+      CheckPrototypes(JSObject::cast(object->GetPrototype()), ecx, holder,
+                      ebx, edx, name, &miss);
+      break;
+    }
+
+    case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
+      CheckPrototypes(JSObject::cast(object), edx, holder,
+                      ebx, ecx, name, &miss);
+      // Make sure object->HasFastElements().
+      // Get the elements array of the object.
+      __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+      // Check that the object is in fast mode (not dictionary).
+      __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+             Immediate(Factory::fixed_array_map()));
+      __ j(not_equal, &miss, not_taken);
+      break;
+
+    default:
+      UNREACHABLE();
+  }
+
+  // Get the function and setup the context.
+  __ mov(edi, Immediate(Handle<JSFunction>(function)));
+  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+  // Jump to the cached code (tail call).
+  ASSERT(function->is_compiled());
+  Handle<Code> code(function->code());
+  ParameterCount expected(function->shared()->formal_parameter_count());
+  __ InvokeCode(code, expected, arguments(),
+                RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  String* function_name = NULL;
+  if (function->shared()->name()->IsString()) {
+    function_name = String::cast(function->shared()->name());
+  }
+  return GetCode(CONSTANT_FUNCTION, function_name);
+}
+
+
+Object* CallStubCompiler::CompileCallInterceptor(Object* object,
+                                                 JSObject* holder,
+                                                 String* name) {
+  // ----------- S t a t e -------------
+  // -----------------------------------
+  Label miss;
+
+  // Get the number of arguments.
+  const int argc = arguments().immediate();
+
+  LookupResult lookup;
+  LookupPostInterceptor(holder, name, &lookup);
+
+  // Get the receiver from the stack.
+  __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+
+  CallInterceptorCompiler compiler(arguments());
+  CompileLoadInterceptor(&compiler,
+                         this,
+                         masm(),
+                         JSObject::cast(object),
+                         holder,
+                         name,
+                         &lookup,
+                         edx,
+                         ebx,
+                         ecx,
+                         &miss);
+
+  // Restore receiver.
+  __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+
+  // Check that the function really is a function.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+  __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
+  __ j(not_equal, &miss, not_taken);
+
+  // Patch the receiver on the stack with the global proxy if
+  // necessary.
+  if (object->IsGlobalObject()) {
+    __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
+    __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
+  }
+
+  // Invoke the function.
+  __ mov(edi, eax);
+  __ InvokeFunction(edi, arguments(), JUMP_FUNCTION);
+
+  // Handle load cache miss.
+  __ bind(&miss);
+  Handle<Code> ic = ComputeCallMiss(argc);
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR, name);
+}
+
+
+Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
+                                            GlobalObject* holder,
+                                            JSGlobalPropertyCell* cell,
+                                            JSFunction* function,
+                                            String* name) {
+  // ----------- S t a t e -------------
+  // -----------------------------------
+  Label miss;
+
+  // Get the number of arguments.
+  const int argc = arguments().immediate();
+
+  // Get the receiver from the stack.
+  __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+
+  // If the object is the holder then we know that it's a global
+  // object which can only happen for contextual calls. In this case,
+  // the receiver cannot be a smi.
+  if (object != holder) {
+    __ test(edx, Immediate(kSmiTagMask));
+    __ j(zero, &miss, not_taken);
+  }
+
+  // Check that the maps haven't changed.
+  CheckPrototypes(object, edx, holder, ebx, ecx, name, &miss);
+
+  // Get the value from the cell.
+  __ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+  __ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset));
+
+  // Check that the cell contains the same function.
+  __ cmp(Operand(edi), Immediate(Handle<JSFunction>(function)));
+  __ j(not_equal, &miss, not_taken);
+
+  // Patch the receiver on the stack with the global proxy.
+  if (object->IsGlobalObject()) {
+    __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
+    __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
+  }
+
+  // Setup the context (function already in edi).
+  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+  // Jump to the cached code (tail call).
+  __ IncrementCounter(&Counters::call_global_inline, 1);
+  ASSERT(function->is_compiled());
+  Handle<Code> code(function->code());
+  ParameterCount expected(function->shared()->formal_parameter_count());
+  __ InvokeCode(code, expected, arguments(),
+                RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  __ IncrementCounter(&Counters::call_global_inline_miss, 1);
+  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, name);
+}
+
+
+Object* StoreStubCompiler::CompileStoreField(JSObject* object,
+                                             int index,
+                                             Map* transition,
+                                             String* name) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Get the object from the stack.
+  __ mov(ebx, Operand(esp, 1 * kPointerSize));
+
+  // Generate store field code.  Trashes the name register.
+  GenerateStoreField(masm(),
+                     Builtins::StoreIC_ExtendStorage,
+                     object,
+                     index,
+                     transition,
+                     ebx, ecx, edx,
+                     &miss);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  __ mov(ecx, Immediate(Handle<String>(name)));  // restore name
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+}
+
+
+Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
+                                                AccessorInfo* callback,
+                                                String* name) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Get the object from the stack.
+  __ mov(ebx, Operand(esp, 1 * kPointerSize));
+
+  // Check that the object isn't a smi.
+  __ test(ebx, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+
+  // Check that the map of the object hasn't changed.
+  __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+         Immediate(Handle<Map>(object->map())));
+  __ j(not_equal, &miss, not_taken);
+
+  // Perform global security token check if needed.
+  if (object->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(ebx, edx, &miss);
+  }
+
+  // Stub never generated for non-global objects that require access
+  // checks.
+  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+  __ pop(ebx);  // remove the return address
+  __ push(Operand(esp, 0));  // receiver
+  __ push(Immediate(Handle<AccessorInfo>(callback)));  // callback info
+  __ push(ecx);  // name
+  __ push(eax);  // value
+  __ push(ebx);  // restore return address
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_callback_property =
+      ExternalReference(IC_Utility(IC::kStoreCallbackProperty));
+  __ TailCallRuntime(store_callback_property, 4, 1);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  __ mov(ecx, Immediate(Handle<String>(name)));  // restore name
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
+}
+
+
+Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
+                                                   String* name) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Get the object from the stack.
+  __ mov(ebx, Operand(esp, 1 * kPointerSize));
+
+  // Check that the object isn't a smi.
+  __ test(ebx, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+
+  // Check that the map of the object hasn't changed.
+  __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+         Immediate(Handle<Map>(receiver->map())));
+  __ j(not_equal, &miss, not_taken);
+
+  // Perform global security token check if needed.
+  if (receiver->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(ebx, edx, &miss);
+  }
+
+  // Stub never generated for non-global objects that require access
+  // checks.
+  ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
+
+  __ pop(ebx);  // remove the return address
+  __ push(Operand(esp, 0));  // receiver
+  __ push(ecx);  // name
+  __ push(eax);  // value
+  __ push(ebx);  // restore return address
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_ic_property =
+      ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
+  __ TailCallRuntime(store_ic_property, 3, 1);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  __ mov(ecx, Immediate(Handle<String>(name)));  // restore name
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR, name);
+}
+
+
+Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
+                                              JSGlobalPropertyCell* cell,
+                                              String* name) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Check that the map of the global has not changed.
+  __ mov(ebx, Operand(esp, kPointerSize));
+  __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+         Immediate(Handle<Map>(object->map())));
+  __ j(not_equal, &miss, not_taken);
+
+  // Store the value in the cell.
+  __ mov(ecx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+  __ mov(FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset), eax);
+
+  // Return the value (register eax).
+  __ IncrementCounter(&Counters::named_store_global_inline, 1);
+  __ ret(0);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  __ IncrementCounter(&Counters::named_store_global_inline_miss, 1);
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, name);
+}
+
+
+Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+                                                  int index,
+                                                  Map* transition,
+                                                  String* name) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- esp[0] : return address
+  //  -- esp[4] : key
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ IncrementCounter(&Counters::keyed_store_field, 1);
+
+  // Get the name from the stack.
+  __ mov(ecx, Operand(esp, 1 * kPointerSize));
+  // Check that the name has not changed.
+  __ cmp(Operand(ecx), Immediate(Handle<String>(name)));
+  __ j(not_equal, &miss, not_taken);
+
+  // Get the object from the stack.
+  __ mov(ebx, Operand(esp, 2 * kPointerSize));
+
+  // Generate store field code.  Trashes the name register.
+  GenerateStoreField(masm(),
+                     Builtins::KeyedStoreIC_ExtendStorage,
+                     object,
+                     index,
+                     transition,
+                     ebx, ecx, edx,
+                     &miss);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_store_field, 1);
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+}
+
+
+
+Object* LoadStubCompiler::CompileLoadField(JSObject* object,
+                                           JSObject* holder,
+                                           int index,
+                                           String* name) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ mov(eax, Operand(esp, kPointerSize));
+  GenerateLoadField(object, holder, eax, ebx, edx, index, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(FIELD, name);
+}
+
+
+Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
+                                              JSObject* holder,
+                                              AccessorInfo* callback,
+                                              String* name) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ mov(eax, Operand(esp, kPointerSize));
+  GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
+                       callback, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
+}
+
+
+Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
+                                              JSObject* holder,
+                                              Object* value,
+                                              String* name) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ mov(eax, Operand(esp, kPointerSize));
+  GenerateLoadConstant(object, holder, eax, ebx, edx, value, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CONSTANT_FUNCTION, name);
+}
+
+
+Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+                                                 JSObject* holder,
+                                                 String* name) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+  Label miss;
+
+  LookupResult lookup;
+  LookupPostInterceptor(holder, name, &lookup);
+
+  __ mov(eax, Operand(esp, kPointerSize));
+  // TODO(368): Compile in the whole chain: all the interceptors in
+  // prototypes and ultimate answer.
+  GenerateLoadInterceptor(receiver,
+                          holder,
+                          &lookup,
+                          eax,
+                          ecx,
+                          edx,
+                          ebx,
+                          name,
+                          &miss);
+
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR, name);
+}
+
+
+Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
+                                            GlobalObject* holder,
+                                            JSGlobalPropertyCell* cell,
+                                            String* name,
+                                            bool is_dont_delete) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Get the receiver from the stack.
+  __ mov(eax, Operand(esp, kPointerSize));
+
+  // If the object is the holder then we know that it's a global
+  // object which can only happen for contextual loads. In this case,
+  // the receiver cannot be a smi.
+  if (object != holder) {
+    __ test(eax, Immediate(kSmiTagMask));
+    __ j(zero, &miss, not_taken);
+  }
+
+  // Check that the maps haven't changed.
+  CheckPrototypes(object, eax, holder, ebx, edx, name, &miss);
+
+  // Get the value from the cell.
+  __ mov(eax, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+  __ mov(eax, FieldOperand(eax, JSGlobalPropertyCell::kValueOffset));
+
+  // Check for deleted property if property can actually be deleted.
+  if (!is_dont_delete) {
+    __ cmp(eax, Factory::the_hole_value());
+    __ j(equal, &miss, not_taken);
+  } else if (FLAG_debug_code) {
+    __ cmp(eax, Factory::the_hole_value());
+    __ Check(not_equal, "DontDelete cells can't contain the hole");
+  }
+
+  __ IncrementCounter(&Counters::named_load_global_inline, 1);
+  __ ret(0);
+
+  __ bind(&miss);
+  __ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(NORMAL, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
+                                                JSObject* receiver,
+                                                JSObject* holder,
+                                                int index) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ mov(ecx, Operand(esp, 2 * kPointerSize));
+  __ IncrementCounter(&Counters::keyed_load_field, 1);
+
+  // Check that the name has not changed.
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ j(not_equal, &miss, not_taken);
+
+  GenerateLoadField(receiver, holder, ecx, ebx, edx, index, name, &miss);
+
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_field, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(FIELD, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
+                                                   JSObject* receiver,
+                                                   JSObject* holder,
+                                                   AccessorInfo* callback) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ mov(ecx, Operand(esp, 2 * kPointerSize));
+  __ IncrementCounter(&Counters::keyed_load_callback, 1);
+
+  // Check that the name has not changed.
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ j(not_equal, &miss, not_taken);
+
+  GenerateLoadCallback(receiver, holder, ecx, eax, ebx, edx,
+                       callback, name, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_callback, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
+                                                   JSObject* receiver,
+                                                   JSObject* holder,
+                                                   Object* value) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ mov(ecx, Operand(esp, 2 * kPointerSize));
+  __ IncrementCounter(&Counters::keyed_load_constant_function, 1);
+
+  // Check that the name has not changed.
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ j(not_equal, &miss, not_taken);
+
+  GenerateLoadConstant(receiver, holder, ecx, ebx, edx,
+                       value, name, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_constant_function, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CONSTANT_FUNCTION, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+                                                      JSObject* holder,
+                                                      String* name) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ mov(ecx, Operand(esp, 2 * kPointerSize));
+  __ IncrementCounter(&Counters::keyed_load_interceptor, 1);
+
+  // Check that the name has not changed.
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ j(not_equal, &miss, not_taken);
+
+  LookupResult lookup;
+  LookupPostInterceptor(holder, name, &lookup);
+  GenerateLoadInterceptor(receiver,
+                          holder,
+                          &lookup,
+                          ecx,
+                          eax,
+                          edx,
+                          ebx,
+                          name,
+                          &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_interceptor, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR, name);
+}
+
+
+
+
+Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ mov(ecx, Operand(esp, 2 * kPointerSize));
+  __ IncrementCounter(&Counters::keyed_load_array_length, 1);
+
+  // Check that the name has not changed.
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ j(not_equal, &miss, not_taken);
+
+  GenerateLoadArrayLength(masm(), ecx, edx, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_array_length, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ mov(ecx, Operand(esp, 2 * kPointerSize));
+  __ IncrementCounter(&Counters::keyed_load_string_length, 1);
+
+  // Check that the name has not changed.
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ j(not_equal, &miss, not_taken);
+
+  GenerateLoadStringLength(masm(), ecx, edx, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_string_length, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : name
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ mov(ecx, Operand(esp, 2 * kPointerSize));
+  __ IncrementCounter(&Counters::keyed_load_function_prototype, 1);
+
+  // Check that the name has not changed.
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ j(not_equal, &miss, not_taken);
+
+  GenerateLoadFunctionPrototype(masm(), ecx, edx, ebx, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_function_prototype, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
+}
+
+
+// Specialized stub for constructing objects from functions which have only
+// simple assignments of the form this.x = ...; in their body.
+Object* ConstructStubCompiler::CompileConstructStub(
+    SharedFunctionInfo* shared) {
+  // ----------- S t a t e -------------
+  //  -- eax : argc
+  //  -- edi : constructor
+  //  -- esp[0] : return address
+  //  -- esp[4] : last argument
+  // -----------------------------------
+  Label generic_stub_call;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Check to see whether there are any break points in the function code. If
+  // there are, jump to the generic constructor stub, which calls the actual
+  // code for the function and thereby hits the break points.
+  __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kDebugInfoOffset));
+  __ cmp(ebx, Factory::undefined_value());
+  __ j(not_equal, &generic_stub_call, not_taken);
+#endif
+
+  // Load the initial map and verify that it is in fact a map.
+  __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+  // The smi check below catches both a NULL value and a Smi.
+  __ test(ebx, Immediate(kSmiTagMask));
+  __ j(zero, &generic_stub_call);
+  __ CmpObjectType(ebx, MAP_TYPE, ecx);
+  __ j(not_equal, &generic_stub_call);
+
+#ifdef DEBUG
+  // Cannot construct functions this way.
+  // edi: constructor
+  // ebx: initial map
+  __ CmpInstanceType(ebx, JS_FUNCTION_TYPE);
+  __ Assert(not_equal, "Function constructed by construct stub.");
+#endif
+
+  // Now allocate the JSObject on the heap by moving the new space allocation
+  // top forward.
+  // edi: constructor
+  // ebx: initial map
+  __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
+  __ shl(ecx, kPointerSizeLog2);
+  __ AllocateInNewSpace(ecx,
+                        edx,
+                        ecx,
+                        no_reg,
+                        &generic_stub_call,
+                        NO_ALLOCATION_FLAGS);
+
+  // Allocated the JSObject, now initialize the fields and add the heap tag.
+  // ebx: initial map
+  // edx: JSObject (untagged)
+  __ mov(Operand(edx, JSObject::kMapOffset), ebx);
+  __ mov(ebx, Factory::empty_fixed_array());
+  __ mov(Operand(edx, JSObject::kPropertiesOffset), ebx);
+  __ mov(Operand(edx, JSObject::kElementsOffset), ebx);
+
+  // Push the allocated object to the stack. This is the object that will be
+  // returned (after it is tagged).
+  __ push(edx);
+
+  // eax: argc
+  // edx: JSObject (untagged)
+  // Load the address of the first in-object property into edx.
+  __ lea(edx, Operand(edx, JSObject::kHeaderSize));
+  // Calculate the location of the first argument. The stack contains the
+  // allocated object and the return address on top of the argc arguments.
+  __ lea(ecx, Operand(esp, eax, times_4, 1 * kPointerSize));
+
+  // Use edi for holding undefined which is used in several places below.
+  __ mov(edi, Factory::undefined_value());
+
+  // eax: argc
+  // ecx: first argument
+  // edx: first in-object property of the JSObject
+  // edi: undefined
+  // Fill the initialized properties with a constant value or a passed argument
+  // depending on the this.x = ...; assignment in the function.
+  for (int i = 0; i < shared->this_property_assignments_count(); i++) {
+    if (shared->IsThisPropertyAssignmentArgument(i)) {
+      Label not_passed;
+      // Set the property to undefined.
+      __ mov(Operand(edx, i * kPointerSize), edi);
+      // Check if the argument assigned to the property is actually passed.
+      int arg_number = shared->GetThisPropertyAssignmentArgument(i);
+      __ cmp(eax, arg_number);
+      __ j(below_equal, &not_passed);
+      // Argument passed - find it on the stack.
+      __ mov(ebx, Operand(ecx, arg_number * -kPointerSize));
+      __ mov(Operand(edx, i * kPointerSize), ebx);
+      __ bind(&not_passed);
+    } else {
+      // Set the property to the constant value.
+      Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
+      __ mov(Operand(edx, i * kPointerSize), Immediate(constant));
+    }
+  }
+
+  // Fill the unused in-object property fields with undefined.
+  for (int i = shared->this_property_assignments_count();
+       i < shared->CalculateInObjectProperties();
+       i++) {
+    __ mov(Operand(edx, i * kPointerSize), edi);
+  }
+
+  // Move argc to ebx and retrieve and tag the JSObject to return.
+  __ mov(ebx, eax);
+  __ pop(eax);
+  __ or_(Operand(eax), Immediate(kHeapObjectTag));
+
+  // Remove caller arguments and receiver from the stack and return.
+  __ pop(ecx);
+  __ lea(esp, Operand(esp, ebx, times_pointer_size, 1 * kPointerSize));
+  __ push(ecx);
+  __ IncrementCounter(&Counters::constructed_objects, 1);
+  __ IncrementCounter(&Counters::constructed_objects_stub, 1);
+  __ ret(0);
+
+  // Jump to the generic stub in case the specialized code cannot handle the
+  // construction.
+  __ bind(&generic_stub_call);
+  Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+  Handle<Code> generic_construct_stub(code);
+  __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode();
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/ia32/virtual-frame-ia32.cc b/src/ia32/virtual-frame-ia32.cc
new file mode 100644
index 0000000..1b8232f
--- /dev/null
+++ b/src/ia32/virtual-frame-ia32.cc
@@ -0,0 +1,1085 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "register-allocator-inl.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm())
+
+// -------------------------------------------------------------------------
+// VirtualFrame implementation.
+
+// On entry to a function, the virtual frame already contains the receiver,
+// the parameters, and a return address.  All frame elements are in memory.
+VirtualFrame::VirtualFrame()
+    : elements_(parameter_count() + local_count() + kPreallocatedElements),
+      stack_pointer_(parameter_count() + 1) {  // 0-based index of TOS.
+  for (int i = 0; i <= stack_pointer_; i++) {
+    elements_.Add(FrameElement::MemoryElement());
+  }
+  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+    register_locations_[i] = kIllegalIndex;
+  }
+}
+
+
+void VirtualFrame::SyncElementBelowStackPointer(int index) {
+  // Emit code to write elements below the stack pointer to their
+  // (already allocated) stack address.
+  ASSERT(index <= stack_pointer_);
+  FrameElement element = elements_[index];
+  ASSERT(!element.is_synced());
+  switch (element.type()) {
+    case FrameElement::INVALID:
+      break;
+
+    case FrameElement::MEMORY:
+      // This function should not be called with synced elements
+      // (memory elements are always synced).
+      UNREACHABLE();
+      break;
+
+    case FrameElement::REGISTER:
+      __ mov(Operand(ebp, fp_relative(index)), element.reg());
+      break;
+
+    case FrameElement::CONSTANT:
+      if (cgen()->IsUnsafeSmi(element.handle())) {
+        Result temp = cgen()->allocator()->Allocate();
+        ASSERT(temp.is_valid());
+        cgen()->LoadUnsafeSmi(temp.reg(), element.handle());
+        __ mov(Operand(ebp, fp_relative(index)), temp.reg());
+      } else {
+        __ Set(Operand(ebp, fp_relative(index)),
+               Immediate(element.handle()));
+      }
+      break;
+
+    case FrameElement::COPY: {
+      int backing_index = element.index();
+      FrameElement backing_element = elements_[backing_index];
+      if (backing_element.is_memory()) {
+        Result temp = cgen()->allocator()->Allocate();
+        ASSERT(temp.is_valid());
+        __ mov(temp.reg(), Operand(ebp, fp_relative(backing_index)));
+        __ mov(Operand(ebp, fp_relative(index)), temp.reg());
+      } else {
+        ASSERT(backing_element.is_register());
+        __ mov(Operand(ebp, fp_relative(index)), backing_element.reg());
+      }
+      break;
+    }
+  }
+  elements_[index].set_sync();
+}
+
+
+void VirtualFrame::SyncElementByPushing(int index) {
+  // Sync an element of the frame that is just above the stack pointer
+  // by pushing it.
+  ASSERT(index == stack_pointer_ + 1);
+  stack_pointer_++;
+  FrameElement element = elements_[index];
+
+  switch (element.type()) {
+    case FrameElement::INVALID:
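+      // Push a smi zero as a placeholder for the invalid element.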
+      __ push(Immediate(Smi::FromInt(0)));
+      break;
+
+    case FrameElement::MEMORY:
+      // No memory elements exist above the stack pointer.
+      UNREACHABLE();
+      break;
+
+    case FrameElement::REGISTER:
+      __ push(element.reg());
+      break;
+
+    case FrameElement::CONSTANT:
+      if (cgen()->IsUnsafeSmi(element.handle())) {
+        Result temp = cgen()->allocator()->Allocate();
+        ASSERT(temp.is_valid());
+        cgen()->LoadUnsafeSmi(temp.reg(), element.handle());
+        __ push(temp.reg());
+      } else {
+        __ push(Immediate(element.handle()));
+      }
+      break;
+
+    case FrameElement::COPY: {
+      int backing_index = element.index();
+      FrameElement backing = elements_[backing_index];
+      ASSERT(backing.is_memory() || backing.is_register());
+      if (backing.is_memory()) {
+        __ push(Operand(ebp, fp_relative(backing_index)));
+      } else {
+        __ push(backing.reg());
+      }
+      break;
+    }
+  }
+  elements_[index].set_sync();
+}
+
+
+// Clear the dirty bits for the range of elements in
+// [min(stack_pointer_ + 1, begin), end].
+void VirtualFrame::SyncRange(int begin, int end) {
+  ASSERT(begin >= 0);
+  ASSERT(end < element_count());
+  // Sync elements below the range if they have not been materialized
+  // on the stack.
+  int start = Min(begin, stack_pointer_ + 1);
+
+  // If positive we have to adjust the stack pointer.
+  int delta = end - stack_pointer_;
+  if (delta > 0) {
+    stack_pointer_ = end;
+    __ sub(Operand(esp), Immediate(delta * kPointerSize));
+  }
+
+  for (int i = start; i <= end; i++) {
+    if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i);
+  }
+}
+
+
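+// Make the frame mergable: after this runs every element is either in
+// memory or in a register, with no constants or copies, so an arbitrary
+// frame of the same height can be merged to this one.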
+void VirtualFrame::MakeMergable() {
+  for (int i = 0; i < element_count(); i++) {
+    FrameElement element = elements_[i];
+
+    if (element.is_constant() || element.is_copy()) {
+      if (element.is_synced()) {
+        // Just spill.
+        elements_[i] = FrameElement::MemoryElement();
+      } else {
+        // Allocate to a register.
+        FrameElement backing_element;  // Invalid if not a copy.
+        if (element.is_copy()) {
+          backing_element = elements_[element.index()];
+        }
+        Result fresh = cgen()->allocator()->Allocate();
+        ASSERT(fresh.is_valid());  // A register was spilled if all were in use.
+        elements_[i] =
+            FrameElement::RegisterElement(fresh.reg(),
+                                          FrameElement::NOT_SYNCED);
+        Use(fresh.reg(), i);
+
+        // Emit a move.
+        if (element.is_constant()) {
+          if (cgen()->IsUnsafeSmi(element.handle())) {
+            cgen()->LoadUnsafeSmi(fresh.reg(), element.handle());
+          } else {
+            __ Set(fresh.reg(), Immediate(element.handle()));
+          }
+        } else {
+          ASSERT(element.is_copy());
+          // Copies are only backed by register or memory locations.
+          if (backing_element.is_register()) {
+            // The backing store may have been spilled by allocating,
+            // but that's OK.  If it was, the value is right where we
+            // want it.
+            if (!fresh.reg().is(backing_element.reg())) {
+              __ mov(fresh.reg(), backing_element.reg());
+            }
+          } else {
+            ASSERT(backing_element.is_memory());
+            __ mov(fresh.reg(), Operand(ebp, fp_relative(element.index())));
+          }
+        }
+      }
+      // No need to set the copied flag --- there are no copies.
+    } else {
+      // Clear the copy flag of non-constant, non-copy elements.
+      // They cannot be copied because copies are not allowed.
+      // The copy flag is not relied on before the end of this loop,
+      // including when registers are spilled.
+      elements_[i].clear_copied();
+    }
+  }
+}
+
+
+void VirtualFrame::MergeTo(VirtualFrame* expected) {
+  Comment cmnt(masm(), "[ Merge frame");
+  // We should always be merging the code generator's current frame to an
+  // expected frame.
+  ASSERT(cgen()->frame() == this);
+
+  // Adjust the stack pointer upward (toward the top of the virtual
+  // frame) if necessary.
+  if (stack_pointer_ < expected->stack_pointer_) {
+    int difference = expected->stack_pointer_ - stack_pointer_;
+    stack_pointer_ = expected->stack_pointer_;
+    __ sub(Operand(esp), Immediate(difference * kPointerSize));
+  }
+
+  MergeMoveRegistersToMemory(expected);
+  MergeMoveRegistersToRegisters(expected);
+  MergeMoveMemoryToRegisters(expected);
+
+  // Adjust the stack pointer downward if necessary.
+  if (stack_pointer_ > expected->stack_pointer_) {
+    int difference = stack_pointer_ - expected->stack_pointer_;
+    stack_pointer_ = expected->stack_pointer_;
+    __ add(Operand(esp), Immediate(difference * kPointerSize));
+  }
+
+  // At this point, the frames should be identical.
+  ASSERT(Equals(expected));
+}
+
+
+void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
+  ASSERT(stack_pointer_ >= expected->stack_pointer_);
+
+  // Move registers, constants, and copies to memory.  Perform moves
+  // from the top downward in the frame in order to leave the backing
+  // stores of copies in registers.
+  //
+  // Moving memory-backed copies to memory requires a spare register
+  // for the memory-to-memory moves.  Since we are performing a merge,
+  // we use esi (which is already saved in the frame).  We keep track
+  // of the index of the frame element esi is caching or kIllegalIndex
+  // if esi has not been disturbed.
+  int esi_caches = kIllegalIndex;
+  for (int i = element_count() - 1; i >= 0; i--) {
+    FrameElement target = expected->elements_[i];
+    if (target.is_register()) continue;  // Handle registers later.
+    if (target.is_memory()) {
+      FrameElement source = elements_[i];
+      switch (source.type()) {
+        case FrameElement::INVALID:
+          // Not a legal merge move.
+          UNREACHABLE();
+          break;
+
+        case FrameElement::MEMORY:
+          // Already in place.
+          break;
+
+        case FrameElement::REGISTER:
+          Unuse(source.reg());
+          if (!source.is_synced()) {
+            __ mov(Operand(ebp, fp_relative(i)), source.reg());
+          }
+          break;
+
+        case FrameElement::CONSTANT:
+          if (!source.is_synced()) {
+            if (cgen()->IsUnsafeSmi(source.handle())) {
+              esi_caches = i;
+              cgen()->LoadUnsafeSmi(esi, source.handle());
+              __ mov(Operand(ebp, fp_relative(i)), esi);
+            } else {
+              __ Set(Operand(ebp, fp_relative(i)), Immediate(source.handle()));
+            }
+          }
+          break;
+
+        case FrameElement::COPY:
+          if (!source.is_synced()) {
+            int backing_index = source.index();
+            FrameElement backing_element = elements_[backing_index];
+            if (backing_element.is_memory()) {
+              // If we have to spill a register, we spill esi.
+              if (esi_caches != backing_index) {
+                esi_caches = backing_index;
+                __ mov(esi, Operand(ebp, fp_relative(backing_index)));
+              }
+              __ mov(Operand(ebp, fp_relative(i)), esi);
+            } else {
+              ASSERT(backing_element.is_register());
+              __ mov(Operand(ebp, fp_relative(i)), backing_element.reg());
+            }
+          }
+          break;
+      }
+    }
+    elements_[i] = target;
+  }
+
+  if (esi_caches != kIllegalIndex) {
+    __ mov(esi, Operand(ebp, fp_relative(context_index())));
+  }
+}
+
+
+void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
+  // We have already done X-to-memory moves.
+  ASSERT(stack_pointer_ >= expected->stack_pointer_);
+
+  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+    // Move the right value into register i if it is currently in a register.
+    int index = expected->register_location(i);
+    int use_index = register_location(i);
+    // Skip if register i is unused in the target, or if the source
+    // element is not in a register (not a register-to-register move).
+    if (index == kIllegalIndex || !elements_[index].is_register()) continue;
+
+    Register target = RegisterAllocator::ToRegister(i);
+    Register source = elements_[index].reg();
+    if (index != use_index) {
+      if (use_index == kIllegalIndex) {  // Target is currently unused.
+        // Copy the contents of the source register to the target register.
+        // Set frame element register to target.
+        Use(target, index);
+        Unuse(source);
+        __ mov(target, source);
+      } else {
+        // Exchange contents of registers source and target.
+        // Nothing except the register backing use_index has changed.
+        elements_[use_index].set_reg(source);
+        set_register_location(target, index);
+        set_register_location(source, use_index);
+        __ xchg(source, target);
+      }
+    }
+
+    if (!elements_[index].is_synced() &&
+        expected->elements_[index].is_synced()) {
+      __ mov(Operand(ebp, fp_relative(index)), target);
+    }
+    elements_[index] = expected->elements_[index];
+  }
+}
+
+
+void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
+  // Move memory, constants, and copies to registers.  This is the
+  // final step.  Since it is not done from the bottom up, but in
+  // register code order, we have special code to ensure that the backing
+  // elements of copies are in their correct locations when we
+  // encounter the copies.
+  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+    int index = expected->register_location(i);
+    if (index != kIllegalIndex) {
+      FrameElement source = elements_[index];
+      FrameElement target = expected->elements_[index];
+      Register target_reg = RegisterAllocator::ToRegister(i);
+      ASSERT(target.reg().is(target_reg));
+      switch (source.type()) {
+        case FrameElement::INVALID:
+          UNREACHABLE();
+          break;
+        case FrameElement::REGISTER:
+          ASSERT(source.Equals(target));
+          // Go to next iteration.  Skips Use(target_reg) and syncing
+          // below.  It is safe to skip syncing because a target
+          // register frame element would only be synced if all source
+          // elements were.
+          continue;
+          break;
+        case FrameElement::MEMORY:
+          ASSERT(index <= stack_pointer_);
+          __ mov(target_reg, Operand(ebp, fp_relative(index)));
+          break;
+
+        case FrameElement::CONSTANT:
+          if (cgen()->IsUnsafeSmi(source.handle())) {
+            cgen()->LoadUnsafeSmi(target_reg, source.handle());
+          } else {
+            __ Set(target_reg, Immediate(source.handle()));
+          }
+          break;
+
+        case FrameElement::COPY: {
+          int backing_index = source.index();
+          FrameElement backing = elements_[backing_index];
+          ASSERT(backing.is_memory() || backing.is_register());
+          if (backing.is_memory()) {
+            ASSERT(backing_index <= stack_pointer_);
+            // Optimization: if the backing store should also move to a
+            // register, move it to its register first.
+            if (expected->elements_[backing_index].is_register()) {
+              FrameElement new_backing = expected->elements_[backing_index];
+              Register new_backing_reg = new_backing.reg();
+              ASSERT(!is_used(new_backing_reg));
+              elements_[backing_index] = new_backing;
+              Use(new_backing_reg, backing_index);
+              __ mov(new_backing_reg,
+                     Operand(ebp, fp_relative(backing_index)));
+              __ mov(target_reg, new_backing_reg);
+            } else {
+              __ mov(target_reg, Operand(ebp, fp_relative(backing_index)));
+            }
+          } else {
+            __ mov(target_reg, backing.reg());
+          }
+        }
+      }
+      // Ensure the proper sync state.
+      if (target.is_synced() && !source.is_synced()) {
+        __ mov(Operand(ebp, fp_relative(index)), target_reg);
+      }
+      Use(target_reg, index);
+      elements_[index] = target;
+    }
+  }
+}
+
+
+void VirtualFrame::Enter() {
+  // Registers live on entry: esp, ebp, esi, edi.
+  Comment cmnt(masm(), "[ Enter JS frame");
+
+#ifdef DEBUG
+  // Verify that edi contains a JS function.  The following code
+  // relies on eax being available for use.
+  __ test(edi, Immediate(kSmiTagMask));
+  __ Check(not_zero,
+           "VirtualFrame::Enter - edi is not a function (smi check).");
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
+  __ Check(equal,
+           "VirtualFrame::Enter - edi is not a function (map check).");
+#endif
+
+  EmitPush(ebp);
+
+  __ mov(ebp, Operand(esp));
+
+  // Store the context in the frame.  The context is kept in esi and a
+  // copy is stored in the frame.  The external reference to esi
+  // remains.
+  EmitPush(esi);
+
+  // Store the function in the frame.  The frame owns the register
+  // reference now (i.e., it can keep it in edi or spill it later).
+  Push(edi);
+  SyncElementAt(element_count() - 1);
+  cgen()->allocator()->Unuse(edi);
+}
+
+
+void VirtualFrame::Exit() {
+  Comment cmnt(masm(), "[ Exit JS frame");
+  // Record the location of the JS exit code for patching when setting
+  // a break point.
+  __ RecordJSReturn();
+
+  // Avoid using the leave instruction here, because it is too
+  // short.  We need the return sequence to be at least the size of a
+  // call instruction to support patching the exit code in the
+  // debugger.  See VisitReturnStatement for the full return sequence.
+  __ mov(esp, Operand(ebp));
+  stack_pointer_ = frame_pointer();
+  for (int i = element_count() - 1; i > stack_pointer_; i--) {
+    FrameElement last = elements_.RemoveLast();
+    if (last.is_register()) {
+      Unuse(last.reg());
+    }
+  }
+
+  EmitPop(ebp);
+}
+
+
+void VirtualFrame::AllocateStackSlots() {
+  int count = local_count();
+  if (count > 0) {
+    Comment cmnt(masm(), "[ Allocate space for locals");
+    // The locals are initialized to a constant (the undefined value), but
+    // we sync them with the actual frame to allocate space for spilling
+    // them later.  First sync everything above the stack pointer so we can
+    // use pushes to allocate and initialize the locals.
+    SyncRange(stack_pointer_ + 1, element_count() - 1);
+    Handle<Object> undefined = Factory::undefined_value();
+    FrameElement initial_value =
+        FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
+    Result temp = cgen()->allocator()->Allocate();
+    ASSERT(temp.is_valid());
+    __ Set(temp.reg(), Immediate(undefined));
+    for (int i = 0; i < count; i++) {
+      elements_.Add(initial_value);
+      stack_pointer_++;
+      __ push(temp.reg());
+    }
+  }
+}
+
+
+void VirtualFrame::SaveContextRegister() {
+  ASSERT(elements_[context_index()].is_memory());
+  __ mov(Operand(ebp, fp_relative(context_index())), esi);
+}
+
+
+void VirtualFrame::RestoreContextRegister() {
+  ASSERT(elements_[context_index()].is_memory());
+  __ mov(esi, Operand(ebp, fp_relative(context_index())));
+}
+
+
+void VirtualFrame::PushReceiverSlotAddress() {
+  Result temp = cgen()->allocator()->Allocate();
+  ASSERT(temp.is_valid());
+  __ lea(temp.reg(), ParameterAt(-1));
+  Push(&temp);
+}
+
+
+int VirtualFrame::InvalidateFrameSlotAt(int index) {
+  FrameElement original = elements_[index];
+
+  // Is this element the backing store of any copies?
+  int new_backing_index = kIllegalIndex;
+  if (original.is_copied()) {
+    // Verify that it is copied, and find the first copy.
+    for (int i = index + 1; i < element_count(); i++) {
+      if (elements_[i].is_copy() && elements_[i].index() == index) {
+        new_backing_index = i;
+        break;
+      }
+    }
+  }
+
+  if (new_backing_index == kIllegalIndex) {
+    // No copies found, return kIllegalIndex.
+    if (original.is_register()) {
+      Unuse(original.reg());
+    }
+    elements_[index] = FrameElement::InvalidElement();
+    return kIllegalIndex;
+  }
+
+  // This is the backing store of copies.
+  Register backing_reg;
+  if (original.is_memory()) {
+    Result fresh = cgen()->allocator()->Allocate();
+    ASSERT(fresh.is_valid());
+    Use(fresh.reg(), new_backing_index);
+    backing_reg = fresh.reg();
+    __ mov(backing_reg, Operand(ebp, fp_relative(index)));
+  } else {
+    // The original was in a register.
+    backing_reg = original.reg();
+    set_register_location(backing_reg, new_backing_index);
+  }
+  // Invalidate the element at index.
+  elements_[index] = FrameElement::InvalidElement();
+  // Set the new backing element.
+  if (elements_[new_backing_index].is_synced()) {
+    elements_[new_backing_index] =
+        FrameElement::RegisterElement(backing_reg, FrameElement::SYNCED);
+  } else {
+    elements_[new_backing_index] =
+        FrameElement::RegisterElement(backing_reg, FrameElement::NOT_SYNCED);
+  }
+  // Update the other copies.
+  for (int i = new_backing_index + 1; i < element_count(); i++) {
+    if (elements_[i].is_copy() && elements_[i].index() == index) {
+      elements_[i].set_index(new_backing_index);
+      elements_[new_backing_index].set_copied();
+    }
+  }
+  return new_backing_index;
+}
+
+
+void VirtualFrame::TakeFrameSlotAt(int index) {
+  ASSERT(index >= 0);
+  ASSERT(index < element_count());
+  FrameElement original = elements_[index];
+  int new_backing_store_index = InvalidateFrameSlotAt(index);
+  if (new_backing_store_index != kIllegalIndex) {
+    elements_.Add(CopyElementAt(new_backing_store_index));
+    return;
+  }
+
+  switch (original.type()) {
+    case FrameElement::MEMORY: {
+      // Emit code to load the original element's data into a register.
+      // Push that register as a FrameElement on top of the frame.
+      Result fresh = cgen()->allocator()->Allocate();
+      ASSERT(fresh.is_valid());
+      FrameElement new_element =
+          FrameElement::RegisterElement(fresh.reg(),
+                                        FrameElement::NOT_SYNCED);
+      Use(fresh.reg(), element_count());
+      elements_.Add(new_element);
+      __ mov(fresh.reg(), Operand(ebp, fp_relative(index)));
+      break;
+    }
+    case FrameElement::REGISTER:
+      Use(original.reg(), element_count());
+      // Fall through.
+    case FrameElement::CONSTANT:
+    case FrameElement::COPY:
+      original.clear_sync();
+      elements_.Add(original);
+      break;
+    case FrameElement::INVALID:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void VirtualFrame::StoreToFrameSlotAt(int index) {
+  // Store the value on top of the frame to the virtual frame slot at
+  // a given index.  The value on top of the frame is left in place.
+  // This is a duplicating operation, so it can create copies.
+  ASSERT(index >= 0);
+  ASSERT(index < element_count());
+
+  int top_index = element_count() - 1;
+  FrameElement top = elements_[top_index];
+  FrameElement original = elements_[index];
+  if (top.is_copy() && top.index() == index) return;
+  ASSERT(top.is_valid());
+
+  InvalidateFrameSlotAt(index);
+
+  // InvalidateFrameSlotAt can potentially change any frame element, due
+  // to spilling registers to allocate temporaries in order to preserve
+  // the copy-on-write semantics of aliased elements.  Reload top from
+  // the frame.
+  top = elements_[top_index];
+
+  if (top.is_copy()) {
+    // There are two cases based on the relative positions of the
+    // stored-to slot and the backing slot of the top element.
+    int backing_index = top.index();
+    ASSERT(backing_index != index);
+    if (backing_index < index) {
+      // 1. The top element is a copy of a slot below the stored-to
+      // slot.  The stored-to slot becomes an unsynced copy of that
+      // same backing slot.
+      elements_[index] = CopyElementAt(backing_index);
+    } else {
+      // 2. The top element is a copy of a slot above the stored-to
+      // slot.  The stored-to slot becomes the new (unsynced) backing
+      // slot and both the top element and the element at the former
+      // backing slot become copies of it.  The sync state of the top
+      // and former backing elements is preserved.
+      FrameElement backing_element = elements_[backing_index];
+      ASSERT(backing_element.is_memory() || backing_element.is_register());
+      if (backing_element.is_memory()) {
+        // Because sets of copies are canonicalized to be backed by
+        // their lowest frame element, and because memory frame
+        // elements are backed by the corresponding stack address, we
+        // have to move the actual value down in the stack.
+        //
+        // TODO(209): consider allocating the stored-to slot to the
+        // temp register.  Alternatively, allow copies to appear in
+        // any order in the frame and lazily move the value down to
+        // the slot.
+        Result temp = cgen()->allocator()->Allocate();
+        ASSERT(temp.is_valid());
+        __ mov(temp.reg(), Operand(ebp, fp_relative(backing_index)));
+        __ mov(Operand(ebp, fp_relative(index)), temp.reg());
+      } else {
+        set_register_location(backing_element.reg(), index);
+        if (backing_element.is_synced()) {
+          // If the element is a register, we will not actually move
+          // anything on the stack but only update the virtual frame
+          // element.
+          backing_element.clear_sync();
+        }
+      }
+      elements_[index] = backing_element;
+
+      // The old backing element becomes a copy of the new backing
+      // element.
+      FrameElement new_element = CopyElementAt(index);
+      elements_[backing_index] = new_element;
+      if (backing_element.is_synced()) {
+        elements_[backing_index].set_sync();
+      }
+
+      // All the copies of the old backing element (including the top
+      // element) become copies of the new backing element.
+      for (int i = backing_index + 1; i < element_count(); i++) {
+        if (elements_[i].is_copy() && elements_[i].index() == backing_index) {
+          elements_[i].set_index(index);
+        }
+      }
+    }
+    return;
+  }
+
+  // Move the top element to the stored-to slot and replace it (the
+  // top element) with a copy.
+  elements_[index] = top;
+  if (top.is_memory()) {
+    // TODO(209): consider allocating the stored-to slot to the temp
+    // register.  Alternatively, allow copies to appear in any order
+    // in the frame and lazily move the value down to the slot.
+    FrameElement new_top = CopyElementAt(index);
+    new_top.set_sync();
+    elements_[top_index] = new_top;
+
+    // The sync state of the former top element is correct (synced).
+    // Emit code to move the value down in the frame.
+    Result temp = cgen()->allocator()->Allocate();
+    ASSERT(temp.is_valid());
+    __ mov(temp.reg(), Operand(esp, 0));
+    __ mov(Operand(ebp, fp_relative(index)), temp.reg());
+  } else if (top.is_register()) {
+    set_register_location(top.reg(), index);
+    // The stored-to slot has the (unsynced) register reference and
+    // the top element becomes a copy.  The sync state of the top is
+    // preserved.
+    FrameElement new_top = CopyElementAt(index);
+    if (top.is_synced()) {
+      new_top.set_sync();
+      elements_[index].clear_sync();
+    }
+    elements_[top_index] = new_top;
+  } else {
+    // The stored-to slot holds the same value as the top but
+    // unsynced.  (We do not have copies of constants yet.)
+    ASSERT(top.is_constant());
+    elements_[index].clear_sync();
+  }
+}
+
+
+void VirtualFrame::PushTryHandler(HandlerType type) {
+  ASSERT(cgen()->HasValidEntryRegisters());
+  // Grow the expression stack by handler size less one (the return
+  // address is already pushed by a call instruction).
+  Adjust(kHandlerSize - 1);
+  __ PushTryHandler(IN_JAVASCRIPT, type);
+}
+
+
+Result VirtualFrame::RawCallStub(CodeStub* stub) {
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ CallStub(stub);
+  Result result = cgen()->allocator()->Allocate(eax);
+  ASSERT(result.is_valid());
+  return result;
+}
+
+
+Result VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
+  PrepareForCall(0, 0);
+  arg->ToRegister(eax);
+  arg->Unuse();
+  return RawCallStub(stub);
+}
+
+
+Result VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
+  PrepareForCall(0, 0);
+
+  if (arg0->is_register() && arg0->reg().is(eax)) {
+    if (arg1->is_register() && arg1->reg().is(edx)) {
+      // Wrong registers.
+      __ xchg(eax, edx);
+    } else {
+      // Register edx is free for arg0, which frees eax for arg1.
+      arg0->ToRegister(edx);
+      arg1->ToRegister(eax);
+    }
+  } else {
+    // Register eax is free for arg1, which guarantees edx is free for
+    // arg0.
+    arg1->ToRegister(eax);
+    arg0->ToRegister(edx);
+  }
+
+  arg0->Unuse();
+  arg1->Unuse();
+  return RawCallStub(stub);
+}
+
+
+Result VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
+  PrepareForCall(arg_count, arg_count);
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ CallRuntime(f, arg_count);
+  Result result = cgen()->allocator()->Allocate(eax);
+  ASSERT(result.is_valid());
+  return result;
+}
+
+
+Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
+  PrepareForCall(arg_count, arg_count);
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ CallRuntime(id, arg_count);
+  Result result = cgen()->allocator()->Allocate(eax);
+  ASSERT(result.is_valid());
+  return result;
+}
+
+
+Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
+                                   InvokeFlag flag,
+                                   int arg_count) {
+  PrepareForCall(arg_count, arg_count);
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ InvokeBuiltin(id, flag);
+  Result result = cgen()->allocator()->Allocate(eax);
+  ASSERT(result.is_valid());
+  return result;
+}
+
+
+Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
+                                       RelocInfo::Mode rmode) {
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ call(code, rmode);
+  Result result = cgen()->allocator()->Allocate(eax);
+  ASSERT(result.is_valid());
+  return result;
+}
+
+
+Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
+  // Name and receiver are on the top of the frame.  The IC expects
+  // name in ecx and receiver on the stack.  It does not drop the
+  // receiver.
+  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+  Result name = Pop();
+  PrepareForCall(1, 0);  // One stack arg, not callee-dropped.
+  name.ToRegister(ecx);
+  name.Unuse();
+  return RawCallCodeObject(ic, mode);
+}
+
+
+Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
+  // Key and receiver are on top of the frame.  The IC expects them on
+  // the stack.  It does not drop them.
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+  PrepareForCall(2, 0);  // Two stack args, neither callee-dropped.
+  return RawCallCodeObject(ic, mode);
+}
+
+
+Result VirtualFrame::CallStoreIC() {
+  // Name, value, and receiver are on top of the frame.  The IC
+  // expects name in ecx, value in eax, and receiver on the stack.  It
+  // does not drop the receiver.
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+  Result name = Pop();
+  Result value = Pop();
+  PrepareForCall(1, 0);  // One stack arg, not callee-dropped.
+
+  if (value.is_register() && value.reg().is(ecx)) {
+    if (name.is_register() && name.reg().is(eax)) {
+      // Wrong registers.
+      __ xchg(eax, ecx);
+    } else {
+      // Register eax is free for value, which frees ecx for name.
+      value.ToRegister(eax);
+      name.ToRegister(ecx);
+    }
+  } else {
+    // Register ecx is free for name, which guarantees eax is free for
+    // value.
+    name.ToRegister(ecx);
+    value.ToRegister(eax);
+  }
+
+  name.Unuse();
+  value.Unuse();
+  return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
+}
+
+
+Result VirtualFrame::CallKeyedStoreIC() {
+  // Value, key, and receiver are on the top of the frame.  The IC
+  // expects value in eax and key and receiver on the stack.  It does
+  // not drop the key and receiver.
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  // TODO(1222589): Make the IC grab the values from the stack.
+  Result value = Pop();
+  PrepareForCall(2, 0);  // Two stack args, neither callee-dropped.
+  value.ToRegister(eax);
+  value.Unuse();
+  return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
+}
+
+
+Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
+                                int arg_count,
+                                int loop_nesting) {
+  // Arguments, receiver, and function name are on top of the frame.
+  // The IC expects them on the stack.  It does not drop the function
+  // name slot (but it does drop the rest).
+  InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
+  Handle<Code> ic = cgen()->ComputeCallInitialize(arg_count, in_loop);
+  // Spill args, receiver, and function.  The call will drop args and
+  // receiver.
+  PrepareForCall(arg_count + 2, arg_count + 1);
+  return RawCallCodeObject(ic, mode);
+}
+
+
+Result VirtualFrame::CallConstructor(int arg_count) {
+  // Arguments, receiver, and function are on top of the frame.  The
+  // IC expects arg count in eax, function in edi, and the arguments
+  // and receiver on the stack.
+  Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
+  // Duplicate the function before preparing the frame.
+  PushElementAt(arg_count + 1);
+  Result function = Pop();
+  PrepareForCall(arg_count + 1, arg_count + 1);  // Spill args and receiver.
+  function.ToRegister(edi);
+
+  // Constructors are called with the number of arguments in register
+  // eax for now.  Another option would be to have separate construct
+  // call trampolines for the different argument counts encountered.
+  Result num_args = cgen()->allocator()->Allocate(eax);
+  ASSERT(num_args.is_valid());
+  __ Set(num_args.reg(), Immediate(arg_count));
+
+  function.Unuse();
+  num_args.Unuse();
+  return RawCallCodeObject(ic, RelocInfo::CONSTRUCT_CALL);
+}
+
+
+void VirtualFrame::Drop(int count) {
+  ASSERT(count >= 0);
+  ASSERT(height() >= count);
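+  // Elements above the stack pointer exist only in the virtual frame and
+  // can be dropped without emitting any code.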
+  int num_virtual_elements = (element_count() - 1) - stack_pointer_;
+
+  // Emit code to lower the stack pointer if necessary.
+  if (num_virtual_elements < count) {
+    int num_dropped = count - num_virtual_elements;
+    stack_pointer_ -= num_dropped;
+    __ add(Operand(esp), Immediate(num_dropped * kPointerSize));
+  }
+
+  // Discard elements from the virtual frame and free any registers.
+  for (int i = 0; i < count; i++) {
+    FrameElement dropped = elements_.RemoveLast();
+    if (dropped.is_register()) {
+      Unuse(dropped.reg());
+    }
+  }
+}
+
+
+Result VirtualFrame::Pop() {
+  FrameElement element = elements_.RemoveLast();
+  int index = element_count();
+  ASSERT(element.is_valid());
+
+  bool pop_needed = (stack_pointer_ == index);
+  if (pop_needed) {
+    stack_pointer_--;
+    if (element.is_memory()) {
+      Result temp = cgen()->allocator()->Allocate();
+      ASSERT(temp.is_valid());
+      __ pop(temp.reg());
+      return temp;
+    }
+
+    __ add(Operand(esp), Immediate(kPointerSize));
+  }
+  ASSERT(!element.is_memory());
+
+  // The top element is a register, constant, or a copy.  Unuse
+  // registers and follow copies to their backing store.
+  if (element.is_register()) {
+    Unuse(element.reg());
+  } else if (element.is_copy()) {
+    ASSERT(element.index() < index);
+    index = element.index();
+    element = elements_[index];
+  }
+  ASSERT(!element.is_copy());
+
+  // The element is memory, a register, or a constant.
+  if (element.is_memory()) {
+    // Memory elements could only be the backing store of a copy.
+    // Allocate the original to a register.
+    ASSERT(index <= stack_pointer_);
+    Result temp = cgen()->allocator()->Allocate();
+    ASSERT(temp.is_valid());
+    Use(temp.reg(), index);
+    FrameElement new_element =
+        FrameElement::RegisterElement(temp.reg(), FrameElement::SYNCED);
+    // Preserve the copy flag on the element.
+    if (element.is_copied()) new_element.set_copied();
+    elements_[index] = new_element;
+    __ mov(temp.reg(), Operand(ebp, fp_relative(index)));
+    return Result(temp.reg());
+  } else if (element.is_register()) {
+    return Result(element.reg());
+  } else {
+    ASSERT(element.is_constant());
+    return Result(element.handle());
+  }
+}
+
+
+void VirtualFrame::EmitPop(Register reg) {
+  ASSERT(stack_pointer_ == element_count() - 1);
+  stack_pointer_--;
+  elements_.RemoveLast();
+  __ pop(reg);
+}
+
+
+void VirtualFrame::EmitPop(Operand operand) {
+  ASSERT(stack_pointer_ == element_count() - 1);
+  stack_pointer_--;
+  elements_.RemoveLast();
+  __ pop(operand);
+}
+
+
+void VirtualFrame::EmitPush(Register reg) {
+  ASSERT(stack_pointer_ == element_count() - 1);
+  elements_.Add(FrameElement::MemoryElement());
+  stack_pointer_++;
+  __ push(reg);
+}
+
+
+void VirtualFrame::EmitPush(Operand operand) {
+  ASSERT(stack_pointer_ == element_count() - 1);
+  elements_.Add(FrameElement::MemoryElement());
+  stack_pointer_++;
+  __ push(operand);
+}
+
+
+void VirtualFrame::EmitPush(Immediate immediate) {
+  ASSERT(stack_pointer_ == element_count() - 1);
+  elements_.Add(FrameElement::MemoryElement());
+  stack_pointer_++;
+  __ push(immediate);
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/ia32/virtual-frame-ia32.h b/src/ia32/virtual-frame-ia32.h
new file mode 100644
index 0000000..314ea73
--- /dev/null
+++ b/src/ia32/virtual-frame-ia32.h
@@ -0,0 +1,575 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_VIRTUAL_FRAME_IA32_H_
+#define V8_IA32_VIRTUAL_FRAME_IA32_H_
+
+#include "register-allocator.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// Virtual frames
+//
+// The virtual frame is an abstraction of the physical stack frame.  It
+// encapsulates the parameters, frame-allocated locals, and the expression
+// stack.  It supports push/pop operations on the expression stack, as well
+// as random access to the expression stack elements, locals, and
+// parameters.
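+//
+// For reference (derived from the index helpers in this class), a fully
+// set-up frame uses the following element indices:
+//
+//   0                          : receiver
+//   1 .. parameter_count()     : parameters
+//   parameter_count() + 1      : return address
+//   parameter_count() + 2      : caller's ebp (the frame_pointer() element)
+//   frame_pointer() + 1        : context
+//   frame_pointer() + 2        : function
+//   frame_pointer() + 3 ..     : locals
+//   expression_base_index() .. : expression stack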
+
+class VirtualFrame: public ZoneObject {
+ public:
+  // A utility class to introduce a scope where the virtual frame is
+  // expected to remain spilled.  The constructor spills the code
+  // generator's current frame, but no attempt is made to require it
+  // to stay spilled.  It is intended as documentation while the code
+  // generator is being transformed.
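+  //
+  // Typical use (illustrative):
+  //   { VirtualFrame::SpilledScope spilled_scope;
+  //     // ... emit code that assumes a spilled frame ...
+  //   }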
+  class SpilledScope BASE_EMBEDDED {
+   public:
+    SpilledScope() : previous_state_(cgen()->in_spilled_code()) {
+      ASSERT(cgen()->has_valid_frame());
+      cgen()->frame()->SpillAll();
+      cgen()->set_in_spilled_code(true);
+    }
+
+    ~SpilledScope() {
+      cgen()->set_in_spilled_code(previous_state_);
+    }
+
+   private:
+    bool previous_state_;
+
+    CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+  };
+
+  // An illegal index into the virtual frame.
+  static const int kIllegalIndex = -1;
+
+  // Construct an initial virtual frame on entry to a JS function.
+  VirtualFrame();
+
+  // Construct a virtual frame as a clone of an existing one.
+  explicit VirtualFrame(VirtualFrame* original);
+
+  CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+
+  MacroAssembler* masm() { return cgen()->masm(); }
+
+  // Create a duplicate of an existing valid frame element.
+  FrameElement CopyElementAt(int index);
+
+  // The number of elements on the virtual frame.
+  int element_count() { return elements_.length(); }
+
+  // The height of the virtual expression stack.
+  int height() { return element_count() - expression_base_index(); }
+
+  int register_location(int num) {
+    ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+    return register_locations_[num];
+  }
+
+  int register_location(Register reg) {
+    return register_locations_[RegisterAllocator::ToNumber(reg)];
+  }
+
+  void set_register_location(Register reg, int index) {
+    register_locations_[RegisterAllocator::ToNumber(reg)] = index;
+  }
+
+  bool is_used(int num) {
+    ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+    return register_locations_[num] != kIllegalIndex;
+  }
+
+  bool is_used(Register reg) {
+    return register_locations_[RegisterAllocator::ToNumber(reg)]
+        != kIllegalIndex;
+  }
+
+  // Add extra in-memory elements to the top of the frame to match an actual
+  // frame (e.g., the frame after an exception handler is pushed).  No code is
+  // emitted.
+  void Adjust(int count);
+
+  // Forget count elements from the top of the frame, all of them in
+  // memory (including synced ones), and adjust the stack pointer
+  // downward to match an external frame effect (examples include a
+  // call removing its arguments, and exiting a try/catch removing an
+  // exception handler).  No code will be emitted.
+  void Forget(int count) {
+    ASSERT(count >= 0);
+    ASSERT(stack_pointer_ == element_count() - 1);
+    stack_pointer_ -= count;
+    ForgetElements(count);
+  }
+
+  // Forget count elements from the top of the frame without adjusting
+  // the stack pointer downward.  This is used, for example, before
+  // merging frames at break, continue, and return targets.
+  void ForgetElements(int count);
+
+  // Spill all values from the frame to memory.
+  void SpillAll();
+
+  // Spill all occurrences of a specific register from the frame.
+  void Spill(Register reg) {
+    if (is_used(reg)) SpillElementAt(register_location(reg));
+  }
+
+  // Spill all occurrences of an arbitrary register if possible.  Return the
+  // register spilled or no_reg if it was not possible to free any register
+  // (i.e., they all have frame-external references).
+  Register SpillAnyRegister();
+
+  // Sync the range of elements in [begin, end] with memory.
+  void SyncRange(int begin, int end);
+
+  // Make this frame mergable, so that an arbitrary frame of the same
+  // height can be merged to it.  Copies and constants are removed from
+  // the frame.
+  void MakeMergable();
+
+  // Prepare this virtual frame for merging to an expected frame by
+  // performing some state changes that do not require generating
+  // code.  It is guaranteed that no code will be generated.
+  void PrepareMergeTo(VirtualFrame* expected);
+
+  // Make this virtual frame have a state identical to an expected virtual
+  // frame.  As a side effect, code may be emitted to make this frame match
+  // the expected one.
+  void MergeTo(VirtualFrame* expected);
+
+  // Detach a frame from its code generator, perhaps temporarily.  This
+  // tells the register allocator that it is free to use frame-internal
+  // registers.  Used when the code generator's frame is switched from this
+  // one to NULL by an unconditional jump.
+  void DetachFromCodeGenerator() {
+    RegisterAllocator* cgen_allocator = cgen()->allocator();
+    for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+      if (is_used(i)) cgen_allocator->Unuse(i);
+    }
+  }
+
+  // (Re)attach a frame to its code generator.  This informs the register
+  // allocator that the frame-internal register references are active again.
+  // Used when a code generator's frame is switched from NULL to this one by
+  // binding a label.
+  void AttachToCodeGenerator() {
+    RegisterAllocator* cgen_allocator = cgen()->allocator();
+    for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+      if (is_used(i)) cgen_allocator->Use(i);
+    }
+  }
+
+  // Emit code for the physical JS entry and exit frame sequences.  After
+  // calling Enter, the virtual frame is ready for use; and after calling
+  // Exit it should not be used.  Note that Enter does not allocate space in
+  // the physical frame for storing frame-allocated locals.
+  void Enter();
+  void Exit();
+
+  // Prepare for returning from the frame by spilling locals.  This
+  // avoids generating unnecessary merge code when jumping to the
+  // shared return site.  Emits code for spills.
+  void PrepareForReturn();
+
+  // Allocate and initialize the frame-allocated locals.
+  void AllocateStackSlots();
+
+  // An element of the expression stack as an assembly operand.
+  Operand ElementAt(int index) const {
+    return Operand(esp, index * kPointerSize);
+  }
+
+  // Random-access store to a frame-top relative frame element.  The result
+  // becomes owned by the frame and is invalidated.
+  void SetElementAt(int index, Result* value);
+
+  // Set a frame element to a constant.  The index is frame-top relative.
+  void SetElementAt(int index, Handle<Object> value) {
+    Result temp(value);
+    SetElementAt(index, &temp);
+  }
+
+  void PushElementAt(int index) {
+    PushFrameSlotAt(element_count() - index - 1);
+  }
+
+  void StoreToElementAt(int index) {
+    StoreToFrameSlotAt(element_count() - index - 1);
+  }
+
+  // A frame-allocated local as an assembly operand.
+  Operand LocalAt(int index) {
+    ASSERT(0 <= index);
+    ASSERT(index < local_count());
+    return Operand(ebp, kLocal0Offset - index * kPointerSize);
+  }
+
+  // Push a copy of the value of a local frame slot on top of the frame.
+  void PushLocalAt(int index) {
+    PushFrameSlotAt(local0_index() + index);
+  }
+
+  // Push the value of a local frame slot on top of the frame and invalidate
+  // the local slot.  The slot should be written to before trying to read
+  // from it again.
+  void TakeLocalAt(int index) {
+    TakeFrameSlotAt(local0_index() + index);
+  }
+
+  // Store the top value on the virtual frame into a local frame slot.  The
+  // value is left in place on top of the frame.
+  void StoreToLocalAt(int index) {
+    StoreToFrameSlotAt(local0_index() + index);
+  }
+
+  // Push the address of the receiver slot on the frame.
+  void PushReceiverSlotAddress();
+
+  // Push the function on top of the frame.
+  void PushFunction() {
+    PushFrameSlotAt(function_index());
+  }
+
+  // Save the value of the esi register to the context frame slot.
+  void SaveContextRegister();
+
+  // Restore the esi register from the value of the context frame
+  // slot.
+  void RestoreContextRegister();
+
+  // A parameter as an assembly operand.
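+  // For example, with n parameters the receiver (index -1) is at
+  // ebp + (n + 2) * kPointerSize and the last parameter (index n - 1) is
+  // at ebp + 2 * kPointerSize, just above the return address.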
+  Operand ParameterAt(int index) {
+    ASSERT(-1 <= index);  // -1 is the receiver.
+    ASSERT(index < parameter_count());
+    return Operand(ebp, (1 + parameter_count() - index) * kPointerSize);
+  }
+
+  // Push a copy of the value of a parameter frame slot on top of the frame.
+  void PushParameterAt(int index) {
+    PushFrameSlotAt(param0_index() + index);
+  }
+
+  // Push the value of a parameter frame slot on top of the frame and
+  // invalidate the parameter slot.  The slot should be written to before
+  // trying to read from it again.
+  void TakeParameterAt(int index) {
+    TakeFrameSlotAt(param0_index() + index);
+  }
+
+  // Store the top value on the virtual frame into a parameter frame slot.
+  // The value is left in place on top of the frame.
+  void StoreToParameterAt(int index) {
+    StoreToFrameSlotAt(param0_index() + index);
+  }
+
+  // The receiver frame slot.
+  Operand Receiver() {
+    return ParameterAt(-1);
+  }
+
+  // Push a try-catch or try-finally handler on top of the virtual frame.
+  void PushTryHandler(HandlerType type);
+
+  // Call stub given the number of arguments it expects on (and
+  // removes from) the stack.
+  Result CallStub(CodeStub* stub, int arg_count) {
+    PrepareForCall(arg_count, arg_count);
+    return RawCallStub(stub);
+  }
+
+  // Call stub that takes a single argument passed in eax.  The
+  // argument is given as a result which does not have to be eax or
+  // even a register.  The argument is consumed by the call.
+  Result CallStub(CodeStub* stub, Result* arg);
+
+  // Call stub that takes a pair of arguments passed in edx (arg0) and
+  // eax (arg1).  The arguments are given as results which do not have
+  // to be in the proper registers or even in registers.  The
+  // arguments are consumed by the call.
+  Result CallStub(CodeStub* stub, Result* arg0, Result* arg1);
+
+  // Call runtime given the number of arguments expected on (and
+  // removed from) the stack.
+  Result CallRuntime(Runtime::Function* f, int arg_count);
+  Result CallRuntime(Runtime::FunctionId id, int arg_count);
+
+  // Invoke builtin given the number of arguments it expects on (and
+  // removes from) the stack.
+  Result InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, int arg_count);
+
+  // Call load IC.  Name and receiver are found on top of the frame.
+  // Receiver is not dropped.
+  Result CallLoadIC(RelocInfo::Mode mode);
+
+  // Call keyed load IC.  Key and receiver are found on top of the
+  // frame.  They are not dropped.
+  Result CallKeyedLoadIC(RelocInfo::Mode mode);
+
+  // Call store IC.  Name, value, and receiver are found on top of the
+  // frame.  Receiver is not dropped.
+  Result CallStoreIC();
+
+  // Call keyed store IC.  Value, key, and receiver are found on top
+  // of the frame.  Key and receiver are not dropped.
+  Result CallKeyedStoreIC();
+
+  // Call call IC.  Arguments, receiver, and function name are found
+  // on top of the frame.  Function name slot is not dropped.  The
+  // argument count does not include the receiver.
+  Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
+
+  // Allocate and call JS function as constructor.  Arguments,
+  // receiver (global object), and function are found on top of the
+  // frame.  Function is not dropped.  The argument count does not
+  // include the receiver.
+  Result CallConstructor(int arg_count);
+
+  // Drop a number of elements from the top of the expression stack.  May
+  // emit code to affect the physical frame.  Does not clobber any registers
+  // except possibly the stack pointer.
+  void Drop(int count);
+
+  // Drop one element.
+  void Drop() {
+    Drop(1);
+  }
+
+  // Duplicate the top element of the frame.
+  void Dup() {
+    PushFrameSlotAt(element_count() - 1);
+  }
+
+  // Pop an element from the top of the expression stack.  Returns a
+  // Result, which may be a constant or a register.
+  Result Pop();
+
+  // Pop and save an element from the top of the expression stack and
+  // emit a corresponding pop instruction.
+  void EmitPop(Register reg);
+  void EmitPop(Operand operand);
+
+  // Push an element on top of the expression stack and emit a
+  // corresponding push instruction.
+  void EmitPush(Register reg);
+  void EmitPush(Operand operand);
+  void EmitPush(Immediate immediate);
+
+  // Push an element on the virtual frame.
+  void Push(Register reg);
+  void Push(Handle<Object> value);
+  void Push(Smi* value) {
+    Push(Handle<Object>(value));
+  }
+
+  // Pushing a result invalidates it (its contents become owned by the
+  // frame).
+  void Push(Result* result) {
+    if (result->is_register()) {
+      Push(result->reg());
+    } else {
+      ASSERT(result->is_constant());
+      Push(result->handle());
+    }
+    result->Unuse();
+  }
+
+  // Nip removes zero or more elements from immediately below the top
+  // of the frame, leaving the previous top-of-frame value on top of
+  // the frame.  Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
+  void Nip(int num_dropped);
+
+ private:
+  static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
+  static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
+  static const int kContextOffset = StandardFrameConstants::kContextOffset;
+
+  static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
+  static const int kPreallocatedElements = 5 + 8;  // 8 expression stack slots.
+
+  ZoneList<FrameElement> elements_;
+
+  // The index of the element that is at the processor's stack pointer
+  // (the esp register).
+  int stack_pointer_;
+
+  // The index of the register frame element using each register, or
+  // kIllegalIndex if a register is not on the frame.
+  int register_locations_[RegisterAllocator::kNumRegisters];
+
+  // The number of frame-allocated locals and parameters respectively.
+  int parameter_count() {
+    return cgen()->scope()->num_parameters();
+  }
+  int local_count() {
+    return cgen()->scope()->num_stack_slots();
+  }
+
+  // The index of the element that is at the processor's frame pointer
+  // (the ebp register).  The parameters, receiver, and return address
+  // are below the frame pointer.
+  int frame_pointer() {
+    return parameter_count() + 2;
+  }
+
+  // The index of the first parameter.  The receiver lies below the first
+  // parameter.
+  int param0_index() {
+    return 1;
+  }
+
+  // The index of the context slot in the frame.  It is immediately
+  // above the frame pointer.
+  int context_index() {
+    return frame_pointer() + 1;
+  }
+
+  // The index of the function slot in the frame.  It is above the frame
+  // pointer and the context slot.
+  int function_index() {
+    return frame_pointer() + 2;
+  }
+
+  // The index of the first local.  Between the frame pointer and the
+  // locals lie the context and the function.
+  int local0_index() {
+    return frame_pointer() + 3;
+  }
+
+  // The index of the base of the expression stack.
+  int expression_base_index() {
+    return local0_index() + local_count();
+  }
+
+  // Convert a frame index into a frame pointer relative offset into the
+  // actual stack.
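+  // Elements below the frame pointer (receiver, parameters, return
+  // address) get positive offsets; elements above it (context, function,
+  // locals, expression stack) get negative offsets.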
+  int fp_relative(int index) {
+    ASSERT(index < element_count());
+    ASSERT(frame_pointer() < element_count());  // FP is on the frame.
+    return (frame_pointer() - index) * kPointerSize;
+  }
+
+  // Record an occurrence of a register in the virtual frame.  This has the
+  // effect of incrementing the register's external reference count and
+  // of updating the index of the register's location in the frame.
+  void Use(Register reg, int index) {
+    ASSERT(!is_used(reg));
+    set_register_location(reg, index);
+    cgen()->allocator()->Use(reg);
+  }
+
+  // Record that a register reference has been dropped from the frame.  This
+  // decrements the register's external reference count and invalidates the
+  // index of the register's location in the frame.
+  void Unuse(Register reg) {
+    ASSERT(is_used(reg));
+    set_register_location(reg, kIllegalIndex);
+    cgen()->allocator()->Unuse(reg);
+  }
+
+  // Spill the element at a particular index---write it to memory if
+  // necessary, free any associated register, and forget its value if
+  // constant.
+  void SpillElementAt(int index);
+
+  // Sync the element at a particular index.  If it is a register or
+  // constant that disagrees with the value on the stack, write it to memory.
+  // Keep the element type as register or constant, and clear the dirty bit.
+  void SyncElementAt(int index);
+
+  // Sync a single unsynced element that lies beneath or at the stack pointer.
+  void SyncElementBelowStackPointer(int index);
+
+  // Sync a single unsynced element that lies just above the stack pointer.
+  void SyncElementByPushing(int index);
+
+  // Push a copy of a frame slot (typically a local or parameter) on top of
+  // the frame.
+  void PushFrameSlotAt(int index);
+
+  // Push the value of a frame slot (typically a local or parameter) on
+  // top of the frame and invalidate the slot.
+  void TakeFrameSlotAt(int index);
+
+  // Store the value on top of the frame to a frame slot (typically a local
+  // or parameter).
+  void StoreToFrameSlotAt(int index);
+
+  // Spill all elements in registers. Spill the top spilled_args elements
+  // on the frame.  Sync all other frame elements.
+  // Then drop dropped_args elements from the virtual frame, to match
+  // the effect of an upcoming call that will drop them from the stack.
+  void PrepareForCall(int spilled_args, int dropped_args);
+
+  // Move frame elements currently in registers or constants, that
+  // should be in memory in the expected frame, to memory.
+  void MergeMoveRegistersToMemory(VirtualFrame* expected);
+
+  // Make the register-to-register moves necessary to
+  // merge this frame with the expected frame.
+  // Register to memory moves must already have been made,
+  // and memory to register moves must follow this call.
+  // This is because some new memory-to-register moves are
+  // created in order to break cycles of register moves.
+  // Used in the implementation of MergeTo().
+  void MergeMoveRegistersToRegisters(VirtualFrame* expected);
+
+  // Make the memory-to-register and constant-to-register moves
+  // needed to make this frame equal the expected frame.
+  // Called after all register-to-memory and register-to-register
+  // moves have been made.  After this function returns, the frames
+  // should be equal.
+  void MergeMoveMemoryToRegisters(VirtualFrame* expected);
+
+  // Invalidates a frame slot (puts an invalid frame element in it).
+  // Copies on the frame are correctly handled, and if this slot was
+  // the backing store of copies, the index of the new backing store
+  // is returned.  Otherwise, returns kIllegalIndex.
+  // Register counts are correctly updated.
+  int InvalidateFrameSlotAt(int index);
+
+  // Call a code stub that has already been prepared for calling (via
+  // PrepareForCall).
+  Result RawCallStub(CodeStub* stub);
+
+  // Calls a code object which has already been prepared for calling
+  // (via PrepareForCall).
+  Result RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
+
+  bool Equals(VirtualFrame* other);
+
+  // Classes that need raw access to the elements_ array.
+  friend class DeferredCode;
+  friend class JumpTarget;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_IA32_VIRTUAL_FRAME_IA32_H_
diff --git a/src/ic-inl.h b/src/ic-inl.h
new file mode 100644
index 0000000..131f77b
--- /dev/null
+++ b/src/ic-inl.h
@@ -0,0 +1,93 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IC_INL_H_
+#define V8_IC_INL_H_
+
+#include "ic.h"
+#include "debug.h"
+#include "macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+
+Address IC::address() {
+  // Get the address of the call.
+  Address result = pc() - Assembler::kCallTargetAddressOffset;
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // First check if any break points are active; if not, just return the
+  // address of the call.
+  if (!Debug::has_break_points()) return result;
+
+  // At least one break point is active; perform an additional test to ensure
+  // that break point locations are updated correctly.
+  if (Debug::IsDebugBreak(Assembler::target_address_at(result))) {
+    // If the call site is a call to debug break, then return the address in
+    // the original code instead of the address in the running code. This
+    // causes the original code to be updated and keeps the breakpoint active
+    // in the running code.
+    return OriginalCodeAddress();
+  } else {
+    // No break point here; just return the address of the call.
+    return result;
+  }
+#else
+  return result;
+#endif
+}
+
+
+Code* IC::GetTargetAtAddress(Address address) {
+  // Get the target address of the IC.
+  Address target = Assembler::target_address_at(address);
+  // Convert target address to the code object. Code::GetCodeFromTargetAddress
+  // is safe for use during GC where the map might be marked.
+  Code* result = Code::GetCodeFromTargetAddress(target);
+  ASSERT(result->is_inline_cache_stub());
+  return result;
+}
+
+
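+// Patch the IC call site at the given address to call the new target code.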
+void IC::SetTargetAtAddress(Address address, Code* target) {
+  ASSERT(target->is_inline_cache_stub());
+  Assembler::set_target_address_at(address, target->instruction_start());
+}
+
+
+Map* IC::GetCodeCacheMapForObject(Object* object) {
+  if (object->IsJSObject()) return JSObject::cast(object)->map();
+  // If the object is a value, we use the prototype map for the cache.
+  ASSERT(object->IsString() || object->IsNumber() || object->IsBoolean());
+  return JSObject::cast(object->GetPrototype())->map();
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_IC_INL_H_
diff --git a/src/ic.cc b/src/ic.cc
new file mode 100644
index 0000000..264b99c
--- /dev/null
+++ b/src/ic.cc
@@ -0,0 +1,1368 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "accessors.h"
+#include "api.h"
+#include "arguments.h"
+#include "execution.h"
+#include "ic-inl.h"
+#include "runtime.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef DEBUG
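+// Maps an IC state to the single-character transition mark printed by TraceIC.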
+static char TransitionMarkFromState(IC::State state) {
+  switch (state) {
+    case UNINITIALIZED: return '0';
+    case PREMONOMORPHIC: return 'P';
+    case MONOMORPHIC: return '1';
+    case MONOMORPHIC_PROTOTYPE_FAILURE: return '^';
+    case MEGAMORPHIC: return 'N';
+
+    // We never see the debugger states here, because the state is
+    // computed from the original code - not the patched code. Let
+    // these cases fall through to the unreachable code below.
+    case DEBUG_BREAK: break;
+    case DEBUG_PREPARE_STEP_IN: break;
+  }
+  UNREACHABLE();
+  return 0;
+}
+
+void IC::TraceIC(const char* type,
+                 Handle<String> name,
+                 State old_state,
+                 Code* new_target,
+                 const char* extra_info) {
+  if (FLAG_trace_ic) {
+    State new_state = StateFrom(new_target, Heap::undefined_value());
+    PrintF("[%s (%c->%c)%s", type,
+           TransitionMarkFromState(old_state),
+           TransitionMarkFromState(new_state),
+           extra_info);
+    name->Print();
+    PrintF("]\n");
+  }
+}
+#endif
+
+
+IC::IC(FrameDepth depth) {
+  // To improve the performance of the (much used) IC code, we unfold
+  // a few levels of the stack frame iteration code. This yields a
+  // ~35% speedup when running DeltaBlue with the '--nouse-ic' flag.
+  const Address entry = Top::c_entry_fp(Top::GetCurrentThread());
+  Address* pc_address =
+      reinterpret_cast<Address*>(entry + ExitFrameConstants::kCallerPCOffset);
+  Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
+  // If there's another JavaScript frame on the stack, we need to look
+  // one frame further down the stack to find the frame pointer and
+  // the return address stack slot.
+  if (depth == EXTRA_CALL_FRAME) {
+    const int kCallerPCOffset = StandardFrameConstants::kCallerPCOffset;
+    pc_address = reinterpret_cast<Address*>(fp + kCallerPCOffset);
+    fp = Memory::Address_at(fp + StandardFrameConstants::kCallerFPOffset);
+  }
+#ifdef DEBUG
+  StackFrameIterator it;
+  for (int i = 0; i < depth + 1; i++) it.Advance();
+  StackFrame* frame = it.frame();
+  ASSERT(fp == frame->fp() && pc_address == frame->pc_address());
+#endif
+  fp_ = fp;
+  pc_address_ = pc_address;
+}
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+Address IC::OriginalCodeAddress() {
+  HandleScope scope;
+  // Compute the JavaScript frame for the frame pointer of this IC
+  // structure. We need this to be able to find the function
+  // corresponding to the frame.
+  StackFrameIterator it;
+  while (it.frame()->fp() != this->fp()) it.Advance();
+  JavaScriptFrame* frame = JavaScriptFrame::cast(it.frame());
+  // Find the function on the stack and both the active code for the
+  // function and the original code.
+  JSFunction* function = JSFunction::cast(frame->function());
+  Handle<SharedFunctionInfo> shared(function->shared());
+  Code* code = shared->code();
+  ASSERT(Debug::HasDebugInfo(shared));
+  Code* original_code = Debug::GetDebugInfo(shared)->original_code();
+  ASSERT(original_code->IsCode());
+  // Get the address of the call site in the active code. This is the
+  // place where the call to DebugBreakXXX is and where the IC
+  // normally would be.
+  Address addr = pc() - Assembler::kCallTargetAddressOffset;
+  // Return the address in the original code. This is the place where
+  // the call which has been overwritten by the DebugBreakXXX resides
+  // and the place where the inline cache system should look.
+  int delta = original_code->instruction_start() - code->instruction_start();
+  return addr + delta;
+}
+#endif
+
+IC::State IC::StateFrom(Code* target, Object* receiver) {
+  IC::State state = target->ic_state();
+
+  if (state != MONOMORPHIC) return state;
+  if (receiver->IsUndefined() || receiver->IsNull()) return state;
+
+  Map* map = GetCodeCacheMapForObject(receiver);
+
+  // Decide whether the inline cache failed because of changes to the
+  // receiver itself or changes to one of its prototypes.
+  //
+  // If there are changes to the receiver itself, the map of the
+  // receiver will have changed and the current target will not be in
+  // the receiver map's code cache.  Therefore, if the current target
+  // is in the receiver map's code cache, the inline cache failed due
+  // to prototype check failure.
+  int index = map->IndexInCodeCache(target);
+  if (index >= 0) {
+    // For keyed load/store, the most likely cause of cache failure is
+    // that the key has changed.  We do not distinguish between
+    // prototype and non-prototype failures for keyed access.
+    Code::Kind kind = target->kind();
+    if (kind == Code::KEYED_LOAD_IC || kind == Code::KEYED_STORE_IC) {
+      return MONOMORPHIC;
+    }
+
+    // Remove the target from the code cache to avoid hitting the same
+    // invalid stub again.
+    map->RemoveFromCodeCache(index);
+
+    return MONOMORPHIC_PROTOTYPE_FAILURE;
+  }
+
+  // The builtins object is special.  It only changes when JavaScript
+  // builtins are loaded lazily.  It is important to keep inline
+  // caches for the builtins object monomorphic.  Therefore, if we get
+  // an inline cache miss for the builtins object after lazily loading
+  // JavaScript builtins, we return uninitialized as the state to
+  // force the inline cache back to monomorphic state.
+  if (receiver->IsJSBuiltinsObject()) {
+    return UNINITIALIZED;
+  }
+
+  return MONOMORPHIC;
+}
+
+
+RelocInfo::Mode IC::ComputeMode() {
+  Address addr = address();
+  Code* code = Code::cast(Heap::FindCodeObject(addr));
+  for (RelocIterator it(code, RelocInfo::kCodeTargetMask);
+       !it.done(); it.next()) {
+    RelocInfo* info = it.rinfo();
+    if (info->pc() == addr) return info->rmode();
+  }
+  UNREACHABLE();
+  return RelocInfo::NONE;
+}
+
+
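+// Constructs and throws a TypeError of the given type for the name and object.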
+Failure* IC::TypeError(const char* type,
+                       Handle<Object> object,
+                       Handle<String> name) {
+  HandleScope scope;
+  Handle<Object> args[2] = { name, object };
+  Handle<Object> error = Factory::NewTypeError(type, HandleVector(args, 2));
+  return Top::Throw(*error);
+}
+
+
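+// Constructs and throws a ReferenceError of the given type for the name.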
+Failure* IC::ReferenceError(const char* type, Handle<String> name) {
+  HandleScope scope;
+  Handle<Object> error =
+      Factory::NewReferenceError(type, HandleVector(&name, 1));
+  return Top::Throw(*error);
+}
+
+
+void IC::Clear(Address address) {
+  Code* target = GetTargetAtAddress(address);
+
+  // Don't clear debug break inline cache as it will remove the break point.
+  if (target->ic_state() == DEBUG_BREAK) return;
+
+  switch (target->kind()) {
+    case Code::LOAD_IC: return LoadIC::Clear(address, target);
+    case Code::KEYED_LOAD_IC: return KeyedLoadIC::Clear(address, target);
+    case Code::STORE_IC: return StoreIC::Clear(address, target);
+    case Code::KEYED_STORE_IC: return KeyedStoreIC::Clear(address, target);
+    case Code::CALL_IC: return CallIC::Clear(address, target);
+    default: UNREACHABLE();
+  }
+}
+
+
+void CallIC::Clear(Address address, Code* target) {
+  State state = target->ic_state();
+  InLoopFlag in_loop = target->ic_in_loop();
+  if (state == UNINITIALIZED) return;
+  Code* code =
+      StubCache::FindCallInitialize(target->arguments_count(), in_loop);
+  SetTargetAtAddress(address, code);
+}
+
+
+void KeyedLoadIC::Clear(Address address, Code* target) {
+  if (target->ic_state() == UNINITIALIZED) return;
+  // Make sure to also clear the map used in inline fast cases.  If we
+  // do not clear these maps, cached code can keep objects alive
+  // through the embedded maps.
+  ClearInlinedVersion(address);
+  SetTargetAtAddress(address, initialize_stub());
+}
+
+
+void LoadIC::Clear(Address address, Code* target) {
+  if (target->ic_state() == UNINITIALIZED) return;
+  ClearInlinedVersion(address);
+  SetTargetAtAddress(address, initialize_stub());
+}
+
+
+void StoreIC::Clear(Address address, Code* target) {
+  if (target->ic_state() == UNINITIALIZED) return;
+  SetTargetAtAddress(address, initialize_stub());
+}
+
+
+void KeyedStoreIC::Clear(Address address, Code* target) {
+  if (target->ic_state() == UNINITIALIZED) return;
+  SetTargetAtAddress(address, initialize_stub());
+}
+
+
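+// Returns true if the named interceptor of the object has an actual getter.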
+static bool HasInterceptorGetter(JSObject* object) {
+  return !object->GetNamedInterceptor()->getter()->IsUndefined();
+}
+
+
+static void LookupForRead(Object* object,
+                          String* name,
+                          LookupResult* lookup) {
+  AssertNoAllocation no_gc;  // pointers must stay valid
+
+  // Skip all the objects with named interceptors but
+  // without an actual getter.
+  while (true) {
+    object->Lookup(name, lookup);
+    // Besides the normal conditions (property not found or it's not
+    // an interceptor), bail out if the lookup is not cacheable: we won't
+    // be able to IC it anyway, and a regular lookup should work fine.
+    if (lookup->IsNotFound() || lookup->type() != INTERCEPTOR ||
+        !lookup->IsCacheable()) {
+      return;
+    }
+
+    JSObject* holder = lookup->holder();
+    if (HasInterceptorGetter(holder)) {
+      return;
+    }
+
+    holder->LocalLookupRealNamedProperty(name, lookup);
+    if (lookup->IsValid()) {
+      ASSERT(lookup->type() != INTERCEPTOR);
+      return;
+    }
+
+    Object* proto = holder->GetPrototype();
+    if (proto->IsNull()) {
+      lookup->NotFound();
+      return;
+    }
+
+    object = proto;
+  }
+}
+
+
+Object* CallIC::TryCallAsFunction(Object* object) {
+  HandleScope scope;
+  Handle<Object> target(object);
+  Handle<Object> delegate = Execution::GetFunctionDelegate(target);
+
+  if (delegate->IsJSFunction()) {
+    // Patch the receiver and use the delegate as the function to
+    // invoke. This is used for invoking objects as if they were
+    // functions.
+    const int argc = this->target()->arguments_count();
+    StackFrameLocator locator;
+    JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
+    int index = frame->ComputeExpressionsCount() - (argc + 1);
+    frame->SetExpression(index, *target);
+  }
+
+  return *delegate;
+}
+
+
+Object* CallIC::LoadFunction(State state,
+                             Handle<Object> object,
+                             Handle<String> name) {
+  // If the object is undefined or null it's illegal to try to get any
+  // of its properties; throw a TypeError in that case.
+  if (object->IsUndefined() || object->IsNull()) {
+    return TypeError("non_object_property_call", object, name);
+  }
+
+  // Check if the name is trivially convertible to an index and get
+  // the element if so.
+  uint32_t index;
+  if (name->AsArrayIndex(&index)) {
+    Object* result = object->GetElement(index);
+    if (result->IsJSFunction()) return result;
+
+    // Try to find a suitable function delegate for the object at hand.
+    result = TryCallAsFunction(result);
+    if (result->IsJSFunction()) return result;
+
+    // Otherwise, it will fail in the lookup step.
+  }
+
+  // Lookup the property in the object.
+  LookupResult lookup;
+  LookupForRead(*object, *name, &lookup);
+
+  if (!lookup.IsValid()) {
+    // If the object does not have the requested property, check which
+    // exception we need to throw.
+    if (is_contextual()) {
+      return ReferenceError("not_defined", name);
+    }
+    return TypeError("undefined_method", object, name);
+  }
+
+  // Lookup is valid: Update inline cache and stub cache.
+  if (FLAG_use_ic && lookup.IsLoaded()) {
+    UpdateCaches(&lookup, state, object, name);
+  }
+
+  // Get the property.
+  PropertyAttributes attr;
+  Object* result = object->GetProperty(*object, &lookup, *name, &attr);
+  if (result->IsFailure()) return result;
+  if (lookup.type() == INTERCEPTOR) {
+    // If the object does not have the requested property, check which
+    // exception we need to throw.
+    if (attr == ABSENT) {
+      if (is_contextual()) {
+        return ReferenceError("not_defined", name);
+      }
+      return TypeError("undefined_method", object, name);
+    }
+  }
+
+  ASSERT(result != Heap::the_hole_value());
+
+  if (result->IsJSFunction()) {
+    // Check if there is an optimized (builtin) version of the function.
+    // If this is ignored, it will degrade performance for
+    // Array.prototype.{push,pop}. Please note that we only return the
+    // optimized function if the JSObject has FastElements.
+    if (object->IsJSObject() && JSObject::cast(*object)->HasFastElements()) {
+      Object* opt = Top::LookupSpecialFunction(JSObject::cast(*object),
+                                               lookup.holder(),
+                                               JSFunction::cast(result));
+      if (opt->IsJSFunction()) return opt;
+    }
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+    // Handle stepping into a function if step into is active.
+    if (Debug::StepInActive()) {
+      // Protect the result in a handle as the debugger can allocate and might
+      // cause GC.
+      HandleScope scope;
+      Handle<JSFunction> function(JSFunction::cast(result));
+      Debug::HandleStepIn(function, object, fp(), false);
+      return *function;
+    }
+#endif
+
+    return result;
+  }
+
+  // Try to find a suitable function delegate for the object at hand.
+  result = TryCallAsFunction(result);
+  return result->IsJSFunction() ?
+      result : TypeError("property_not_function", object, name);
+}
+
+
+void CallIC::UpdateCaches(LookupResult* lookup,
+                          State state,
+                          Handle<Object> object,
+                          Handle<String> name) {
+  ASSERT(lookup->IsLoaded());
+  // Bail out if we didn't find a result.
+  if (!lookup->IsValid() || !lookup->IsCacheable()) return;
+
+  // Compute the number of arguments.
+  int argc = target()->arguments_count();
+  InLoopFlag in_loop = target()->ic_in_loop();
+  Object* code = NULL;
+
+  if (state == UNINITIALIZED) {
+    // This is the first time we execute this inline cache.
+    // Set the target to the pre monomorphic stub to delay
+    // setting the monomorphic state.
+    code = StubCache::ComputeCallPreMonomorphic(argc, in_loop);
+  } else if (state == MONOMORPHIC) {
+    code = StubCache::ComputeCallMegamorphic(argc, in_loop);
+  } else {
+    // Compute monomorphic stub.
+    switch (lookup->type()) {
+      case FIELD: {
+        int index = lookup->GetFieldIndex();
+        code = StubCache::ComputeCallField(argc, in_loop, *name, *object,
+                                           lookup->holder(), index);
+        break;
+      }
+      case CONSTANT_FUNCTION: {
+        // Get the constant function and compute the code stub for this
+        // call; used for rewriting to monomorphic state and making sure
+        // that the code stub is in the stub cache.
+        JSFunction* function = lookup->GetConstantFunction();
+        code = StubCache::ComputeCallConstant(argc, in_loop, *name, *object,
+                                              lookup->holder(), function);
+        break;
+      }
+      case NORMAL: {
+        if (!object->IsJSObject()) return;
+        Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+
+        if (lookup->holder()->IsGlobalObject()) {
+          GlobalObject* global = GlobalObject::cast(lookup->holder());
+          JSGlobalPropertyCell* cell =
+              JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
+          if (!cell->value()->IsJSFunction()) return;
+          JSFunction* function = JSFunction::cast(cell->value());
+          code = StubCache::ComputeCallGlobal(argc,
+                                              in_loop,
+                                              *name,
+                                              *receiver,
+                                              global,
+                                              cell,
+                                              function);
+        } else {
+          // There is only one shared stub for calling normalized
+          // properties. It does not traverse the prototype chain, so the
+          // property must be found in the receiver for the stub to be
+          // applicable.
+          if (lookup->holder() != *receiver) return;
+          code = StubCache::ComputeCallNormal(argc, in_loop, *name, *receiver);
+        }
+        break;
+      }
+      case INTERCEPTOR: {
+        ASSERT(HasInterceptorGetter(lookup->holder()));
+        code = StubCache::ComputeCallInterceptor(argc, *name, *object,
+                                                 lookup->holder());
+        break;
+      }
+      default:
+        return;
+    }
+  }
+
+  // If we're unable to compute the stub (not enough memory left), we
+  // simply avoid updating the caches.
+  if (code == NULL || code->IsFailure()) return;
+
+  // Patch the call site depending on the state of the cache.
+  if (state == UNINITIALIZED ||
+      state == PREMONOMORPHIC ||
+      state == MONOMORPHIC ||
+      state == MONOMORPHIC_PROTOTYPE_FAILURE) {
+    set_target(Code::cast(code));
+  }
+
+#ifdef DEBUG
+  TraceIC("CallIC", name, state, target(), in_loop ? " (in-loop)" : "");
+#endif
+}
+
+
+Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
+  // If the object is undefined or null it's illegal to try to get any
+  // of its properties; throw a TypeError in that case.
+  if (object->IsUndefined() || object->IsNull()) {
+    return TypeError("non_object_property_load", object, name);
+  }
+
+  if (FLAG_use_ic) {
+    // Use specialized code for getting the length of strings and
+    // string wrapper objects.  The length property of string wrapper
+    // objects is read-only and therefore always returns the length of
+    // the underlying string value.  See ECMA-262 15.5.5.1.
+    if ((object->IsString() || object->IsStringWrapper()) &&
+        name->Equals(Heap::length_symbol())) {
+      HandleScope scope;
+      // Get the string if we have a string wrapper object.
+      if (object->IsJSValue()) {
+        object = Handle<Object>(Handle<JSValue>::cast(object)->value());
+      }
+#ifdef DEBUG
+      if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
+#endif
+      Code* target = Builtins::builtin(Builtins::LoadIC_StringLength);
+      set_target(target);
+      StubCache::Set(*name, HeapObject::cast(*object)->map(), target);
+      return Smi::FromInt(String::cast(*object)->length());
+    }
+
+    // Use specialized code for getting the length of arrays.
+    if (object->IsJSArray() && name->Equals(Heap::length_symbol())) {
+#ifdef DEBUG
+      if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n");
+#endif
+      Code* target = Builtins::builtin(Builtins::LoadIC_ArrayLength);
+      set_target(target);
+      StubCache::Set(*name, HeapObject::cast(*object)->map(), target);
+      return JSArray::cast(*object)->length();
+    }
+
+    // Use specialized code for getting prototype of functions.
+    if (object->IsJSFunction() && name->Equals(Heap::prototype_symbol())) {
+#ifdef DEBUG
+      if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
+#endif
+      Code* target = Builtins::builtin(Builtins::LoadIC_FunctionPrototype);
+      set_target(target);
+      StubCache::Set(*name, HeapObject::cast(*object)->map(), target);
+      return Accessors::FunctionGetPrototype(*object, 0);
+    }
+  }
+
+  // Check if the name is trivially convertible to an index and get
+  // the element if so.
+  uint32_t index;
+  if (name->AsArrayIndex(&index)) return object->GetElement(index);
+
+  // Named lookup in the object.
+  LookupResult lookup;
+  LookupForRead(*object, *name, &lookup);
+
+  // If lookup is invalid, check if we need to throw an exception.
+  if (!lookup.IsValid()) {
+    if (FLAG_strict || is_contextual()) {
+      return ReferenceError("not_defined", name);
+    }
+    LOG(SuspectReadEvent(*name, *object));
+  }
+
+  bool can_be_inlined =
+      FLAG_use_ic &&
+      state == PREMONOMORPHIC &&
+      lookup.IsValid() &&
+      lookup.IsLoaded() &&
+      lookup.IsCacheable() &&
+      lookup.holder() == *object &&
+      lookup.type() == FIELD &&
+      !object->IsAccessCheckNeeded();
+
+  if (can_be_inlined) {
+    Map* map = lookup.holder()->map();
+    // Property's index in the properties array.  If negative we have
+    // an inobject property.
+    int index = lookup.GetFieldIndex() - map->inobject_properties();
+    if (index < 0) {
+      // Index is an offset from the end of the object.
+      int offset = map->instance_size() + (index * kPointerSize);
+      if (PatchInlinedLoad(address(), map, offset)) {
+        set_target(megamorphic_stub());
+        return lookup.holder()->FastPropertyAt(lookup.GetFieldIndex());
+      }
+    }
+  }
+
+  // Update inline cache and stub cache.
+  if (FLAG_use_ic && lookup.IsLoaded()) {
+    UpdateCaches(&lookup, state, object, name);
+  }
+
+  PropertyAttributes attr;
+  if (lookup.IsValid() && lookup.type() == INTERCEPTOR) {
+    // Get the property.
+    Object* result = object->GetProperty(*object, &lookup, *name, &attr);
+    if (result->IsFailure()) return result;
+    // If the property is not present, check if we need to throw an
+    // exception.
+    if (attr == ABSENT && is_contextual()) {
+      return ReferenceError("not_defined", name);
+    }
+    return result;
+  }
+
+  // Get the property.
+  return object->GetProperty(*object, &lookup, *name, &attr);
+}
+
+
+void LoadIC::UpdateCaches(LookupResult* lookup,
+                          State state,
+                          Handle<Object> object,
+                          Handle<String> name) {
+  ASSERT(lookup->IsLoaded());
+  // Bail out if we didn't find a result.
+  if (!lookup->IsValid() || !lookup->IsCacheable()) return;
+
+  // Loading properties from values is not common, so don't try to
+  // deal with non-JS objects here.
+  if (!object->IsJSObject()) return;
+  Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+
+  // Compute the code stub for this load.
+  Object* code = NULL;
+  if (state == UNINITIALIZED) {
+    // This is the first time we execute this inline cache.
+    // Set the target to the pre monomorphic stub to delay
+    // setting the monomorphic state.
+    code = pre_monomorphic_stub();
+  } else {
+    // Compute monomorphic stub.
+    switch (lookup->type()) {
+      case FIELD: {
+        code = StubCache::ComputeLoadField(*name, *receiver,
+                                           lookup->holder(),
+                                           lookup->GetFieldIndex());
+        break;
+      }
+      case CONSTANT_FUNCTION: {
+        Object* constant = lookup->GetConstantFunction();
+        code = StubCache::ComputeLoadConstant(*name, *receiver,
+                                              lookup->holder(), constant);
+        break;
+      }
+      case NORMAL: {
+        if (lookup->holder()->IsGlobalObject()) {
+          GlobalObject* global = GlobalObject::cast(lookup->holder());
+          JSGlobalPropertyCell* cell =
+              JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
+          code = StubCache::ComputeLoadGlobal(*name,
+                                              *receiver,
+                                              global,
+                                              cell,
+                                              lookup->IsDontDelete());
+        } else {
+          // There is only one shared stub for loading normalized
+          // properties. It does not traverse the prototype chain, so the
+          // property must be found in the receiver for the stub to be
+          // applicable.
+          if (lookup->holder() != *receiver) return;
+          code = StubCache::ComputeLoadNormal(*name, *receiver);
+        }
+        break;
+      }
+      case CALLBACKS: {
+        if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
+        AccessorInfo* callback =
+            AccessorInfo::cast(lookup->GetCallbackObject());
+        if (v8::ToCData<Address>(callback->getter()) == 0) return;
+        code = StubCache::ComputeLoadCallback(*name, *receiver,
+                                              lookup->holder(), callback);
+        break;
+      }
+      case INTERCEPTOR: {
+        ASSERT(HasInterceptorGetter(lookup->holder()));
+        code = StubCache::ComputeLoadInterceptor(*name, *receiver,
+                                                 lookup->holder());
+        break;
+      }
+      default:
+        return;
+    }
+  }
+
+  // If we're unable to compute the stub (not enough memory left), we
+  // simply avoid updating the caches.
+  if (code == NULL || code->IsFailure()) return;
+
+  // Patch the call site depending on the state of the cache.
+  if (state == UNINITIALIZED || state == PREMONOMORPHIC ||
+      state == MONOMORPHIC_PROTOTYPE_FAILURE) {
+    set_target(Code::cast(code));
+  } else if (state == MONOMORPHIC) {
+    set_target(megamorphic_stub());
+  }
+
+#ifdef DEBUG
+  TraceIC("LoadIC", name, state, target());
+#endif
+}
+
+
+Object* KeyedLoadIC::Load(State state,
+                          Handle<Object> object,
+                          Handle<Object> key) {
+  if (key->IsSymbol()) {
+    Handle<String> name = Handle<String>::cast(key);
+
+    // If the object is undefined or null it's illegal to try to get any
+    // of its properties; throw a TypeError in that case.
+    if (object->IsUndefined() || object->IsNull()) {
+      return TypeError("non_object_property_load", object, name);
+    }
+
+    if (FLAG_use_ic) {
+      // Use specialized code for getting the length of strings.
+      if (object->IsString() && name->Equals(Heap::length_symbol())) {
+        Handle<String> string = Handle<String>::cast(object);
+        Object* code = StubCache::ComputeKeyedLoadStringLength(*name, *string);
+        if (code->IsFailure()) return code;
+        set_target(Code::cast(code));
+#ifdef DEBUG
+        TraceIC("KeyedLoadIC", name, state, target());
+#endif  // DEBUG
+        return Smi::FromInt(string->length());
+      }
+
+      // Use specialized code for getting the length of arrays.
+      if (object->IsJSArray() && name->Equals(Heap::length_symbol())) {
+        Handle<JSArray> array = Handle<JSArray>::cast(object);
+        Object* code = StubCache::ComputeKeyedLoadArrayLength(*name, *array);
+        if (code->IsFailure()) return code;
+        set_target(Code::cast(code));
+#ifdef DEBUG
+        TraceIC("KeyedLoadIC", name, state, target());
+#endif  // DEBUG
+        return JSArray::cast(*object)->length();
+      }
+
+      // Use specialized code for getting prototype of functions.
+      if (object->IsJSFunction() && name->Equals(Heap::prototype_symbol())) {
+        Handle<JSFunction> function = Handle<JSFunction>::cast(object);
+        Object* code =
+            StubCache::ComputeKeyedLoadFunctionPrototype(*name, *function);
+        if (code->IsFailure()) return code;
+        set_target(Code::cast(code));
+#ifdef DEBUG
+        TraceIC("KeyedLoadIC", name, state, target());
+#endif  // DEBUG
+        return Accessors::FunctionGetPrototype(*object, 0);
+      }
+    }
+
+    // Check if the name is trivially convertible to an index and get
+    // the element or char if so.
+    uint32_t index = 0;
+    if (name->AsArrayIndex(&index)) {
+      HandleScope scope;
+      // Rewrite to the generic keyed load stub.
+      if (FLAG_use_ic) set_target(generic_stub());
+      return Runtime::GetElementOrCharAt(object, index);
+    }
+
+    // Named lookup.
+    LookupResult lookup;
+    LookupForRead(*object, *name, &lookup);
+
+    // If lookup is invalid, check if we need to throw an exception.
+    if (!lookup.IsValid()) {
+      if (FLAG_strict || is_contextual()) {
+        return ReferenceError("not_defined", name);
+      }
+    }
+
+    if (FLAG_use_ic && lookup.IsLoaded()) {
+      UpdateCaches(&lookup, state, object, name);
+    }
+
+    PropertyAttributes attr;
+    if (lookup.IsValid() && lookup.type() == INTERCEPTOR) {
+      // Get the property.
+      Object* result = object->GetProperty(*object, &lookup, *name, &attr);
+      if (result->IsFailure()) return result;
+      // If the property is not present, check if we need to throw an
+      // exception.
+      if (attr == ABSENT && is_contextual()) {
+        return ReferenceError("not_defined", name);
+      }
+      return result;
+    }
+
+    return object->GetProperty(*object, &lookup, *name, &attr);
+  }
+
+  // Do not use ICs for objects that require access checks (including
+  // the global object).
+  bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
+
+  if (use_ic) {
+    set_target(generic_stub());
+    // For JSObjects that are not value wrappers and that do not have
+    // indexed interceptors, we initialize the inlined fast case (if
+    // present) by patching the inlined map check.
+    if (object->IsJSObject() &&
+        !object->IsJSValue() &&
+        !JSObject::cast(*object)->HasIndexedInterceptor()) {
+      Map* map = JSObject::cast(*object)->map();
+      PatchInlinedLoad(address(), map);
+    }
+  }
+
+  // Get the property.
+  return Runtime::GetObjectProperty(object, key);
+}
+
+
+void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state,
+                               Handle<Object> object, Handle<String> name) {
+  ASSERT(lookup->IsLoaded());
+  // Bail out if we didn't find a result.
+  if (!lookup->IsValid() || !lookup->IsCacheable()) return;
+
+  if (!object->IsJSObject()) return;
+  Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+
+  // Compute the code stub for this load.
+  Object* code = NULL;
+
+  if (state == UNINITIALIZED) {
+    // This is the first time we execute this inline cache.
+    // Set the target to the pre monomorphic stub to delay
+    // setting the monomorphic state.
+    code = pre_monomorphic_stub();
+  } else {
+    // Compute a monomorphic stub.
+    switch (lookup->type()) {
+      case FIELD: {
+        code = StubCache::ComputeKeyedLoadField(*name, *receiver,
+                                                lookup->holder(),
+                                                lookup->GetFieldIndex());
+        break;
+      }
+      case CONSTANT_FUNCTION: {
+        Object* constant = lookup->GetConstantFunction();
+        code = StubCache::ComputeKeyedLoadConstant(*name, *receiver,
+                                                   lookup->holder(), constant);
+        break;
+      }
+      case CALLBACKS: {
+        if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
+        AccessorInfo* callback =
+            AccessorInfo::cast(lookup->GetCallbackObject());
+        if (v8::ToCData<Address>(callback->getter()) == 0) return;
+        code = StubCache::ComputeKeyedLoadCallback(*name, *receiver,
+                                                   lookup->holder(), callback);
+        break;
+      }
+      case INTERCEPTOR: {
+        ASSERT(HasInterceptorGetter(lookup->holder()));
+        code = StubCache::ComputeKeyedLoadInterceptor(*name, *receiver,
+                                                      lookup->holder());
+        break;
+      }
+      default: {
+        // Always rewrite to the generic case so that we do not
+        // repeatedly try to rewrite.
+        code = generic_stub();
+        break;
+      }
+    }
+  }
+
+  // If we're unable to compute the stub (not enough memory left), we
+  // simply avoid updating the caches.
+  if (code == NULL || code->IsFailure()) return;
+
+  // Patch the call site depending on the state of the cache.  Make
+  // sure to always rewrite from monomorphic to megamorphic.
+  ASSERT(state != MONOMORPHIC_PROTOTYPE_FAILURE);
+  if (state == UNINITIALIZED || state == PREMONOMORPHIC) {
+    set_target(Code::cast(code));
+  } else if (state == MONOMORPHIC) {
+    set_target(megamorphic_stub());
+  }
+
+#ifdef DEBUG
+  TraceIC("KeyedLoadIC", name, state, target());
+#endif
+}
+
+
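+// Returns true if the lookup result can be handled by a store inline cache:
+// it must be valid, cacheable, loaded, and not read-only.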
+static bool StoreICableLookup(LookupResult* lookup) {
+  // Bail out if we didn't find a result.
+  if (!lookup->IsValid() || !lookup->IsCacheable()) return false;
+
+  // If the property is read-only, we leave the IC in its current
+  // state.
+  if (lookup->IsReadOnly()) return false;
+
+  if (!lookup->IsLoaded()) return false;
+
+  return true;
+}
+
+
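+// Looks up the property on the receiver for a store and returns true if the
+// result is store-ICable.  For an interceptor without a setter, the real
+// named property behind it is looked up instead.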
+static bool LookupForWrite(JSObject* object,
+                           String* name,
+                           LookupResult* lookup) {
+  object->LocalLookup(name, lookup);
+  if (!StoreICableLookup(lookup)) {
+    return false;
+  }
+
+  if (lookup->type() == INTERCEPTOR) {
+    if (object->GetNamedInterceptor()->setter()->IsUndefined()) {
+      object->LocalLookupRealNamedProperty(name, lookup);
+      return StoreICableLookup(lookup);
+    }
+  }
+
+  return true;
+}
+
+
+Object* StoreIC::Store(State state,
+                       Handle<Object> object,
+                       Handle<String> name,
+                       Handle<Object> value) {
+  // If the object is undefined or null it's illegal to try to set any
+  // properties on it; throw a TypeError in that case.
+  if (object->IsUndefined() || object->IsNull()) {
+    return TypeError("non_object_property_store", object, name);
+  }
+
+  // Ignore stores where the receiver is not a JSObject.
+  if (!object->IsJSObject()) return *value;
+  Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+
+  // Check if the given name is an array index.
+  uint32_t index;
+  if (name->AsArrayIndex(&index)) {
+    HandleScope scope;
+    Handle<Object> result = SetElement(receiver, index, value);
+    if (result.is_null()) return Failure::Exception();
+    return *value;
+  }
+
+  // Lookup the property locally in the receiver.
+  if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
+    LookupResult lookup;
+    if (LookupForWrite(*receiver, *name, &lookup)) {
+      UpdateCaches(&lookup, state, receiver, name, value);
+    }
+  }
+
+  // Set the property.
+  return receiver->SetProperty(*name, *value, NONE);
+}
+
+
+void StoreIC::UpdateCaches(LookupResult* lookup,
+                           State state,
+                           Handle<JSObject> receiver,
+                           Handle<String> name,
+                           Handle<Object> value) {
+  ASSERT(lookup->IsLoaded());
+  // Skip JSGlobalProxy.
+  ASSERT(!receiver->IsJSGlobalProxy());
+
+  ASSERT(StoreICableLookup(lookup));
+
+  // If the property has a non-field type allowing map transitions
+  // where there is extra room in the object, we leave the IC in its
+  // current state.
+  PropertyType type = lookup->type();
+
+  // Compute the code stub for this store; used for rewriting to
+  // monomorphic state and making sure that the code stub is in the
+  // stub cache.
+  Object* code = NULL;
+  switch (type) {
+    case FIELD: {
+      code = StubCache::ComputeStoreField(*name, *receiver,
+                                          lookup->GetFieldIndex());
+      break;
+    }
+    case MAP_TRANSITION: {
+      if (lookup->GetAttributes() != NONE) return;
+      HandleScope scope;
+      ASSERT(type == MAP_TRANSITION);
+      Handle<Map> transition(lookup->GetTransitionMap());
+      int index = transition->PropertyIndexFor(*name);
+      code = StubCache::ComputeStoreField(*name, *receiver, index, *transition);
+      break;
+    }
+    case NORMAL: {
+      if (!receiver->IsGlobalObject()) {
+        return;
+      }
+      // The stub generated for the global object picks the value directly
+      // from the property cell. So the property must be directly on the
+      // global object.
+      Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
+      JSGlobalPropertyCell* cell =
+          JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
+      code = StubCache::ComputeStoreGlobal(*name, *global, cell);
+      break;
+    }
+    case CALLBACKS: {
+      if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
+      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+      if (v8::ToCData<Address>(callback->setter()) == 0) return;
+      code = StubCache::ComputeStoreCallback(*name, *receiver, callback);
+      break;
+    }
+    case INTERCEPTOR: {
+      ASSERT(!receiver->GetNamedInterceptor()->setter()->IsUndefined());
+      code = StubCache::ComputeStoreInterceptor(*name, *receiver);
+      break;
+    }
+    default:
+      return;
+  }
+
+  // If we're unable to compute the stub (not enough memory left), we
+  // simply avoid updating the caches.
+  if (code == NULL || code->IsFailure()) return;
+
+  // Patch the call site depending on the state of the cache.
+  if (state == UNINITIALIZED || state == MONOMORPHIC_PROTOTYPE_FAILURE) {
+    set_target(Code::cast(code));
+  } else if (state == MONOMORPHIC) {
+    // Only move to megamorphic if the target changes.
+    if (target() != Code::cast(code)) set_target(megamorphic_stub());
+  }
+
+#ifdef DEBUG
+  TraceIC("StoreIC", name, state, target());
+#endif
+}
+
+
+Object* KeyedStoreIC::Store(State state,
+                            Handle<Object> object,
+                            Handle<Object> key,
+                            Handle<Object> value) {
+  if (key->IsSymbol()) {
+    Handle<String> name = Handle<String>::cast(key);
+
+    // If the object is undefined or null it's illegal to try to set any
+    // properties on it; throw a TypeError in that case.
+    if (object->IsUndefined() || object->IsNull()) {
+      return TypeError("non_object_property_store", object, name);
+    }
+
+    // Ignore stores where the receiver is not a JSObject.
+    if (!object->IsJSObject()) return *value;
+    Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+
+    // Check if the given name is an array index.
+    uint32_t index;
+    if (name->AsArrayIndex(&index)) {
+      HandleScope scope;
+      Handle<Object> result = SetElement(receiver, index, value);
+      if (result.is_null()) return Failure::Exception();
+      return *value;
+    }
+
+    // Lookup the property locally in the receiver.
+    LookupResult lookup;
+    receiver->LocalLookup(*name, &lookup);
+
+    // Update inline cache and stub cache.
+    if (FLAG_use_ic && lookup.IsLoaded()) {
+      UpdateCaches(&lookup, state, receiver, name, value);
+    }
+
+    // Set the property.
+    return receiver->SetProperty(*name, *value, NONE);
+  }
+
+  // Do not use ICs for objects that require access checks (including
+  // the global object).
+  bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
+  ASSERT(!(use_ic && object->IsJSGlobalProxy()));
+
+  if (use_ic) set_target(generic_stub());
+
+  // Set the property.
+  return Runtime::SetObjectProperty(object, key, value, NONE);
+}
+
+
+void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
+                                State state,
+                                Handle<JSObject> receiver,
+                                Handle<String> name,
+                                Handle<Object> value) {
+  ASSERT(lookup->IsLoaded());
+
+  // Skip JSGlobalProxy.
+  if (receiver->IsJSGlobalProxy()) return;
+
+  // Bail out if we didn't find a result.
+  if (!lookup->IsValid() || !lookup->IsCacheable()) return;
+
+  // If the property is read-only, we leave the IC in its current
+  // state.
+  if (lookup->IsReadOnly()) return;
+
+  // If the property has a non-field type allowing map transitions
+  // where there is extra room in the object, we leave the IC in its
+  // current state.
+  PropertyType type = lookup->type();
+
+  // Compute the code stub for this store; used for rewriting to
+  // monomorphic state and making sure that the code stub is in the
+  // stub cache.
+  Object* code = NULL;
+
+  switch (type) {
+    case FIELD: {
+      code = StubCache::ComputeKeyedStoreField(*name, *receiver,
+                                               lookup->GetFieldIndex());
+      break;
+    }
+    case MAP_TRANSITION: {
+      if (lookup->GetAttributes() == NONE) {
+        HandleScope scope;
+        ASSERT(type == MAP_TRANSITION);
+        Handle<Map> transition(lookup->GetTransitionMap());
+        int index = transition->PropertyIndexFor(*name);
+        code = StubCache::ComputeKeyedStoreField(*name, *receiver,
+                                                 index, *transition);
+        break;
+      }
+      // fall through.
+    }
+    default: {
+      // Always rewrite to the generic case so that we do not
+      // repeatedly try to rewrite.
+      code = generic_stub();
+      break;
+    }
+  }
+
+  // If we're unable to compute the stub (not enough memory left), we
+  // simply avoid updating the caches.
+  if (code == NULL || code->IsFailure()) return;
+
+  // Patch the call site depending on the state of the cache.  Make
+  // sure to always rewrite from monomorphic to megamorphic.
+  ASSERT(state != MONOMORPHIC_PROTOTYPE_FAILURE);
+  if (state == UNINITIALIZED || state == PREMONOMORPHIC) {
+    set_target(Code::cast(code));
+  } else if (state == MONOMORPHIC) {
+    set_target(megamorphic_stub());
+  }
+
+#ifdef DEBUG
+  TraceIC("KeyedStoreIC", name, state, target());
+#endif
+}
+
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+// Used from ic_<arch>.cc.
+Object* CallIC_Miss(Arguments args) {
+  NoHandleAllocation na;
+  ASSERT(args.length() == 2);
+  CallIC ic;
+  IC::State state = IC::StateFrom(ic.target(), args[0]);
+  Object* result =
+      ic.LoadFunction(state, args.at<Object>(0), args.at<String>(1));
+
+  // The first time the inline cache is updated may be the first time the
+  // function it references gets called.  If the function was lazily compiled
+  // then the first call will trigger a compilation.  We check for this case
+  // and we do the compilation immediately, instead of waiting for the stub
+  // currently attached to the JSFunction object to trigger compilation.  We
+  // do this in the case where we know that the inline cache is inside a loop,
+  // because then we know that we want to optimize the function.
+  if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
+    return result;
+  }
+
+  // Compile now with optimization.
+  HandleScope scope;
+  Handle<JSFunction> function = Handle<JSFunction>(JSFunction::cast(result));
+  InLoopFlag in_loop = ic.target()->ic_in_loop();
+  if (in_loop == IN_LOOP) {
+    CompileLazyInLoop(function, CLEAR_EXCEPTION);
+  } else {
+    CompileLazy(function, CLEAR_EXCEPTION);
+  }
+  return *function;
+}
+
+
+void CallIC::GenerateInitialize(MacroAssembler* masm, int argc) {
+  Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+}
+
+
+void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+  Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+}
+
+
+// Used from ic_<arch>.cc.
+Object* LoadIC_Miss(Arguments args) {
+  NoHandleAllocation na;
+  ASSERT(args.length() == 2);
+  LoadIC ic;
+  IC::State state = IC::StateFrom(ic.target(), args[0]);
+  return ic.Load(state, args.at<Object>(0), args.at<String>(1));
+}
+
+
+void LoadIC::GenerateInitialize(MacroAssembler* masm) {
+  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+}
+
+
+void LoadIC::GeneratePreMonomorphic(MacroAssembler* masm) {
+  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+}
+
+
+// Used from ic_<arch>.cc
+Object* KeyedLoadIC_Miss(Arguments args) {
+  NoHandleAllocation na;
+  ASSERT(args.length() == 2);
+  KeyedLoadIC ic;
+  IC::State state = IC::StateFrom(ic.target(), args[0]);
+  return ic.Load(state, args.at<Object>(0), args.at<Object>(1));
+}
+
+
+void KeyedLoadIC::GenerateInitialize(MacroAssembler* masm) {
+  Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
+}
+
+
+void KeyedLoadIC::GeneratePreMonomorphic(MacroAssembler* masm) {
+  Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
+}
+
+
+// Used from ic_<arch>.cc.
+Object* StoreIC_Miss(Arguments args) {
+  NoHandleAllocation na;
+  ASSERT(args.length() == 3);
+  StoreIC ic;
+  IC::State state = IC::StateFrom(ic.target(), args[0]);
+  return ic.Store(state, args.at<Object>(0), args.at<String>(1),
+                  args.at<Object>(2));
+}
+
+
+// SharedStoreIC_ExtendStorage is called from a store inline cache when it is
+// necessary to extend the properties array of a JSObject.
+Object* SharedStoreIC_ExtendStorage(Arguments args) {
+  NoHandleAllocation na;
+  ASSERT(args.length() == 3);
+
+  // Convert the parameters
+  JSObject* object = JSObject::cast(args[0]);
+  Map* transition = Map::cast(args[1]);
+  Object* value = args[2];
+
+  // Check that the object has run out of property space.
+  ASSERT(object->HasFastProperties());
+  ASSERT(object->map()->unused_property_fields() == 0);
+
+  // Expand the properties array.
+  FixedArray* old_storage = object->properties();
+  int new_unused = transition->unused_property_fields();
+  int new_size = old_storage->length() + new_unused + 1;
+  Object* result = old_storage->CopySize(new_size);
+  if (result->IsFailure()) return result;
+  FixedArray* new_storage = FixedArray::cast(result);
+  new_storage->set(old_storage->length(), value);
+
+  // Set the new property value and do the map transition.
+  object->set_properties(new_storage);
+  object->set_map(transition);
+
+  // Return the stored value.
+  return value;
+}
+
+
+void StoreIC::GenerateInitialize(MacroAssembler* masm) {
+  Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
+}
+
+
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
+  Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
+}
+
+
+// Used from ic_<arch>.cc.
+Object* KeyedStoreIC_Miss(Arguments args) {
+  NoHandleAllocation na;
+  ASSERT(args.length() == 3);
+  KeyedStoreIC ic;
+  IC::State state = IC::StateFrom(ic.target(), args[0]);
+  return ic.Store(state, args.at<Object>(0), args.at<Object>(1),
+                  args.at<Object>(2));
+}
+
+
+void KeyedStoreIC::GenerateInitialize(MacroAssembler* masm) {
+  Generate(masm, ExternalReference(IC_Utility(kKeyedStoreIC_Miss)));
+}
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+  Generate(masm, ExternalReference(IC_Utility(kKeyedStoreIC_Miss)));
+}
+
+
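+// Addresses of the IC utility functions, indexed by IC::UtilityId.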
+static Address IC_utilities[] = {
+#define ADDR(name) FUNCTION_ADDR(name),
+    IC_UTIL_LIST(ADDR)
+    NULL
+#undef ADDR
+};
+
+
+Address IC::AddressFromUtilityId(IC::UtilityId id) {
+  return IC_utilities[id];
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/ic.h b/src/ic.h
new file mode 100644
index 0000000..fcf1ec0
--- /dev/null
+++ b/src/ic.h
@@ -0,0 +1,404 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IC_H_
+#define V8_IC_H_
+
+#include "assembler.h"
+
+namespace v8 {
+namespace internal {
+
+// IC_UTIL_LIST defines all utility functions called from generated
+// inline caching code. The argument for the macro, ICU, is the function name.
+#define IC_UTIL_LIST(ICU)                             \
+  ICU(LoadIC_Miss)                                    \
+  ICU(KeyedLoadIC_Miss)                               \
+  ICU(CallIC_Miss)                                    \
+  ICU(StoreIC_Miss)                                   \
+  ICU(SharedStoreIC_ExtendStorage)                    \
+  ICU(KeyedStoreIC_Miss)                              \
+  /* Utilities for IC stubs. */                       \
+  ICU(LoadCallbackProperty)                           \
+  ICU(StoreCallbackProperty)                          \
+  ICU(LoadPropertyWithInterceptorOnly)                \
+  ICU(LoadPropertyWithInterceptorForLoad)             \
+  ICU(LoadPropertyWithInterceptorForCall)             \
+  ICU(StoreInterceptorProperty)
+
+//
+// IC is the base class for LoadIC, StoreIC, CallIC, KeyedLoadIC,
+// and KeyedStoreIC.
+//
+class IC {
+ public:
+
+  // The ids for the utility functions called from the generated code.
+  enum UtilityId {
+  #define CONST_NAME(name) k##name,
+    IC_UTIL_LIST(CONST_NAME)
+  #undef CONST_NAME
+    kUtilityCount
+  };
+
+  // Looks up the address of the named utility.
+  static Address AddressFromUtilityId(UtilityId id);
+
+  // Alias the inline cache state type to make the IC code more readable.
+  typedef InlineCacheState State;
+
+  // The IC code is either invoked with no extra frames on the stack
+  // or with a single extra frame for supporting calls.
+  enum FrameDepth {
+    NO_EXTRA_FRAME = 0,
+    EXTRA_CALL_FRAME = 1
+  };
+
+  // Construct the IC structure with the given number of extra
+  // JavaScript frames on the stack.
+  explicit IC(FrameDepth depth);
+
+  // Get the call-site target; used for determining the state.
+  Code* target() { return GetTargetAtAddress(address()); }
+  inline Address address();
+
+  // Compute the current IC state based on the target stub and the receiver.
+  static State StateFrom(Code* target, Object* receiver);
+
+  // Clear the inline cache to initial state.
+  static void Clear(Address address);
+
+  // Computes the reloc info for this IC. This is a fairly expensive
+  // operation as it has to search through the heap to find the code
+  // object that contains this IC site.
+  RelocInfo::Mode ComputeMode();
+
+  // Returns whether this IC is for contextual (no explicit receiver)
+  // access to properties.
+  bool is_contextual() {
+    return ComputeMode() == RelocInfo::CODE_TARGET_CONTEXT;
+  }
+
+  // Returns the map to use for caching stubs for a given object.
+  // This method should not be called with undefined or null.
+  static inline Map* GetCodeCacheMapForObject(Object* object);
+
+ protected:
+  Address fp() const { return fp_; }
+  Address pc() const { return *pc_address_; }
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Computes the address in the original code when the running code
+  // contains break points (calls to DebugBreakXXX builtins).
+  Address OriginalCodeAddress();
+#endif
+
+  // Set the call-site target.
+  void set_target(Code* code) { SetTargetAtAddress(address(), code); }
+
+#ifdef DEBUG
+  static void TraceIC(const char* type,
+                      Handle<String> name,
+                      State old_state,
+                      Code* new_target,
+                      const char* extra_info = "");
+#endif
+
+  static Failure* TypeError(const char* type,
+                            Handle<Object> object,
+                            Handle<String> name);
+  static Failure* ReferenceError(const char* type, Handle<String> name);
+
+  // Access the target code for the given IC address.
+  static inline Code* GetTargetAtAddress(Address address);
+  static inline void SetTargetAtAddress(Address address, Code* target);
+
+ private:
+  // Frame pointer for the frame that uses (calls) the IC.
+  Address fp_;
+
+  // All access to the program counter of an IC structure is indirect
+  // to make the code GC safe. This feature is crucial since
+  // GetProperty and SetProperty are called and they in turn might
+  // invoke the garbage collector.
+  Address* pc_address_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(IC);
+};
+
+
+// An IC_Utility encapsulates IC::UtilityId. It exists mainly because you
+// cannot make forward declarations of an enum.
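+// Usage sketch (illustrative): generated stubs resolve the C++ entry point of
+// a utility via IC_Utility(IC::kStoreInterceptorProperty).address().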
+class IC_Utility {
+ public:
+  explicit IC_Utility(IC::UtilityId id)
+    : address_(IC::AddressFromUtilityId(id)), id_(id) {}
+
+  Address address() const { return address_; }
+
+  IC::UtilityId id() const { return id_; }
+ private:
+  Address address_;
+  IC::UtilityId id_;
+};
+
+
+class CallIC: public IC {
+ public:
+  CallIC() : IC(EXTRA_CALL_FRAME) { ASSERT(target()->is_call_stub()); }
+
+  Object* LoadFunction(State state, Handle<Object> object, Handle<String> name);
+
+  // Code generator routines.
+  static void GenerateInitialize(MacroAssembler* masm, int argc);
+  static void GenerateMiss(MacroAssembler* masm, int argc);
+  static void GenerateMegamorphic(MacroAssembler* masm, int argc);
+  static void GenerateNormal(MacroAssembler* masm, int argc);
+
+ private:
+  static void Generate(MacroAssembler* masm,
+                       int argc,
+                       const ExternalReference& f);
+
+  // Update the inline cache and the global stub cache based on the
+  // lookup result.
+  void UpdateCaches(LookupResult* lookup,
+                    State state,
+                    Handle<Object> object,
+                    Handle<String> name);
+
+  // Returns a JSFunction if the object can be called as a function,
+  // and patches the stack to be ready for the call.
+  // Otherwise, it returns the undefined value.
+  Object* TryCallAsFunction(Object* object);
+
+  static void Clear(Address address, Code* target);
+  friend class IC;
+};
+
+
+class LoadIC: public IC {
+ public:
+  LoadIC() : IC(NO_EXTRA_FRAME) { ASSERT(target()->is_load_stub()); }
+
+  Object* Load(State state, Handle<Object> object, Handle<String> name);
+
+  // Code generator routines.
+  static void GenerateInitialize(MacroAssembler* masm);
+  static void GeneratePreMonomorphic(MacroAssembler* masm);
+  static void GenerateMiss(MacroAssembler* masm);
+  static void GenerateMegamorphic(MacroAssembler* masm);
+  static void GenerateNormal(MacroAssembler* masm);
+
+  // Specialized code generator routines.
+  static void GenerateArrayLength(MacroAssembler* masm);
+  static void GenerateStringLength(MacroAssembler* masm);
+  static void GenerateFunctionPrototype(MacroAssembler* masm);
+
+  // The offset from the inlined patch site to the start of the
+  // inlined load instruction.  It is architecture-dependent, and not
+  // used on ARM.
+  static const int kOffsetToLoadInstruction;
+
+ private:
+  static void Generate(MacroAssembler* masm, const ExternalReference& f);
+
+  // Update the inline cache and the global stub cache based on the
+  // lookup result.
+  void UpdateCaches(LookupResult* lookup,
+                    State state,
+                    Handle<Object> object,
+                    Handle<String> name);
+
+  // Stub accessors.
+  static Code* megamorphic_stub() {
+    return Builtins::builtin(Builtins::LoadIC_Megamorphic);
+  }
+  static Code* initialize_stub() {
+    return Builtins::builtin(Builtins::LoadIC_Initialize);
+  }
+  static Code* pre_monomorphic_stub() {
+    return Builtins::builtin(Builtins::LoadIC_PreMonomorphic);
+  }
+
+  static void Clear(Address address, Code* target);
+
+  // Clear the use of the inlined version.
+  static void ClearInlinedVersion(Address address);
+
+  static bool PatchInlinedLoad(Address address, Object* map, int index);
+
+  friend class IC;
+};
+
+
+class KeyedLoadIC: public IC {
+ public:
+  KeyedLoadIC() : IC(NO_EXTRA_FRAME) { ASSERT(target()->is_keyed_load_stub()); }
+
+  Object* Load(State state, Handle<Object> object, Handle<Object> key);
+
+  // Code generator routines.
+  static void GenerateMiss(MacroAssembler* masm);
+  static void GenerateInitialize(MacroAssembler* masm);
+  static void GeneratePreMonomorphic(MacroAssembler* masm);
+  static void GenerateGeneric(MacroAssembler* masm);
+
+  // Clear the use of the inlined version.
+  static void ClearInlinedVersion(Address address);
+
+ private:
+  static void Generate(MacroAssembler* masm, const ExternalReference& f);
+
+  // Update the inline cache.
+  void UpdateCaches(LookupResult* lookup,
+                    State state,
+                    Handle<Object> object,
+                    Handle<String> name);
+
+  // Stub accessors.
+  static Code* initialize_stub() {
+    return Builtins::builtin(Builtins::KeyedLoadIC_Initialize);
+  }
+  static Code* megamorphic_stub() {
+    return Builtins::builtin(Builtins::KeyedLoadIC_Generic);
+  }
+  static Code* generic_stub() {
+    return Builtins::builtin(Builtins::KeyedLoadIC_Generic);
+  }
+  static Code* pre_monomorphic_stub() {
+    return Builtins::builtin(Builtins::KeyedLoadIC_PreMonomorphic);
+  }
+
+  static void Clear(Address address, Code* target);
+
+  // Support for patching the map that is checked in an inlined
+  // version of keyed load.
+  static bool PatchInlinedLoad(Address address, Object* map);
+
+  friend class IC;
+};
+
+
+class StoreIC: public IC {
+ public:
+  StoreIC() : IC(NO_EXTRA_FRAME) { ASSERT(target()->is_store_stub()); }
+
+  Object* Store(State state,
+                Handle<Object> object,
+                Handle<String> name,
+                Handle<Object> value);
+
+  // Code generators for stub routines. Only called once at startup.
+  static void GenerateInitialize(MacroAssembler* masm);
+  static void GenerateMiss(MacroAssembler* masm);
+  static void GenerateMegamorphic(MacroAssembler* masm);
+  static void GenerateExtendStorage(MacroAssembler* masm);
+
+ private:
+  static void Generate(MacroAssembler* masm, const ExternalReference& f);
+
+  // Update the inline cache and the global stub cache based on the
+  // lookup result.
+  void UpdateCaches(LookupResult* lookup,
+                    State state, Handle<JSObject> receiver,
+                    Handle<String> name,
+                    Handle<Object> value);
+
+  // Stub accessors.
+  static Code* megamorphic_stub() {
+    return Builtins::builtin(Builtins::StoreIC_Megamorphic);
+  }
+  static Code* initialize_stub() {
+    return Builtins::builtin(Builtins::StoreIC_Initialize);
+  }
+
+  static void Clear(Address address, Code* target);
+  friend class IC;
+};
+
+
+class KeyedStoreIC: public IC {
+ public:
+  KeyedStoreIC() : IC(NO_EXTRA_FRAME) { }
+
+  Object* Store(State state,
+                Handle<Object> object,
+                Handle<Object> name,
+                Handle<Object> value);
+
+  // Code generators for stub routines.  Only called once at startup.
+  static void GenerateInitialize(MacroAssembler* masm);
+  static void GenerateMiss(MacroAssembler* masm);
+  static void GenerateGeneric(MacroAssembler* masm);
+  static void GenerateExtendStorage(MacroAssembler* masm);
+
+  // Clear the inlined version so the IC is always hit.
+  static void ClearInlinedVersion(Address address);
+
+  // Restore the inlined version so the fast case can get hit.
+  static void RestoreInlinedVersion(Address address);
+
+ private:
+  static void Generate(MacroAssembler* masm, const ExternalReference& f);
+
+  // Update the inline cache.
+  void UpdateCaches(LookupResult* lookup,
+                    State state,
+                    Handle<JSObject> receiver,
+                    Handle<String> name,
+                    Handle<Object> value);
+
+  // Stub accessors.
+  static Code* initialize_stub() {
+    return Builtins::builtin(Builtins::KeyedStoreIC_Initialize);
+  }
+  static Code* megamorphic_stub() {
+    return Builtins::builtin(Builtins::KeyedStoreIC_Generic);
+  }
+  static Code* generic_stub() {
+    return Builtins::builtin(Builtins::KeyedStoreIC_Generic);
+  }
+
+  static void Clear(Address address, Code* target);
+
+  // Support for patching the map that is checked in an inlined
+  // version of keyed store.
+  // The address is the patch point for the IC call
+  // (Assembler::kCallTargetAddressOffset before the end of
+  // the call/return address).
+  // The map is the new map that the inlined code should check against.
+  static bool PatchInlinedStore(Address address, Object* map);
+
+  friend class IC;
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_IC_H_
diff --git a/src/interpreter-irregexp.cc b/src/interpreter-irregexp.cc
new file mode 100644
index 0000000..ae914d3
--- /dev/null
+++ b/src/interpreter-irregexp.cc
@@ -0,0 +1,645 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// A simple interpreter for the Irregexp byte code.
+
+
+#include "v8.h"
+#include "unicode.h"
+#include "utils.h"
+#include "ast.h"
+#include "bytecodes-irregexp.h"
+#include "interpreter-irregexp.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+static unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize;
+
+
+static bool BackRefMatchesNoCase(int from,
+                                 int current,
+                                 int len,
+                                 Vector<const uc16> subject) {
+  for (int i = 0; i < len; i++) {
+    unibrow::uchar old_char = subject[from++];
+    unibrow::uchar new_char = subject[current++];
+    if (old_char == new_char) continue;
+    unibrow::uchar old_string[1] = { old_char };
+    unibrow::uchar new_string[1] = { new_char };
+    interp_canonicalize.get(old_char, '\0', old_string);
+    interp_canonicalize.get(new_char, '\0', new_string);
+    if (old_string[0] != new_string[0]) {
+      return false;
+    }
+  }
+  return true;
+}
+
+
+static bool BackRefMatchesNoCase(int from,
+                                 int current,
+                                 int len,
+                                 Vector<const char> subject) {
+  for (int i = 0; i < len; i++) {
+    unsigned int old_char = subject[from++];
+    unsigned int new_char = subject[current++];
+    if (old_char == new_char) continue;
+    if (old_char - 'A' <= 'Z' - 'A') old_char |= 0x20;
+    if (new_char - 'A' <= 'Z' - 'A') new_char |= 0x20;
+    if (old_char != new_char) return false;
+  }
+  return true;
+}
+
+
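+// In debug builds each BYTECODE case label first traces the instruction via
+// TraceInterpreter (guarded by FLAG_trace_regexp_bytecodes); in release
+// builds the macro expands to a plain case label.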
+#ifdef DEBUG
+static void TraceInterpreter(const byte* code_base,
+                             const byte* pc,
+                             int stack_depth,
+                             int current_position,
+                             uint32_t current_char,
+                             int bytecode_length,
+                             const char* bytecode_name) {
+  if (FLAG_trace_regexp_bytecodes) {
+    bool printable = (current_char < 127 && current_char >= 32);
+    const char* format =
+        printable ?
+        "pc = %02x, sp = %d, curpos = %d, curchar = %08x (%c), bc = %s" :
+        "pc = %02x, sp = %d, curpos = %d, curchar = %08x .%c., bc = %s";
+    PrintF(format,
+           pc - code_base,
+           stack_depth,
+           current_position,
+           current_char,
+           printable ? current_char : '.',
+           bytecode_name);
+    for (int i = 0; i < bytecode_length; i++) {
+      printf(", %02x", pc[i]);
+    }
+    printf(" ");
+    for (int i = 1; i < bytecode_length; i++) {
+      unsigned char b = pc[i];
+      if (b < 127 && b >= 32) {
+        printf("%c", b);
+      } else {
+        printf(".");
+      }
+    }
+    printf("\n");
+  }
+}
+
+
+#define BYTECODE(name)                                    \
+  case BC_##name:                                         \
+    TraceInterpreter(code_base,                           \
+                     pc,                                  \
+                     backtrack_sp - backtrack_stack_base, \
+                     current,                             \
+                     current_char,                        \
+                     BC_##name##_LENGTH,                  \
+                     #name);
+#else
+#define BYTECODE(name)                                    \
+  case BC_##name:
+#endif
+
+
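+// Helpers for reading the 32-bit instruction word and its operands; the
+// ASSERTs check the alignment expected from the bytecode layout.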
+static int32_t Load32Aligned(const byte* pc) {
+  ASSERT((reinterpret_cast<intptr_t>(pc) & 3) == 0);
+  return *reinterpret_cast<const int32_t *>(pc);
+}
+
+
+static int32_t Load16Aligned(const byte* pc) {
+  ASSERT((reinterpret_cast<intptr_t>(pc) & 1) == 0);
+  return *reinterpret_cast<const uint16_t *>(pc);
+}
+
+
+// A simple abstraction over the backtracking stack used by the interpreter.
+// This backtracking stack does not grow automatically, but it ensures that
+// the memory held by the stack is released or remembered in a cache when
+// matching terminates.
+class BacktrackStack {
+ public:
+  explicit BacktrackStack() {
+    if (cache_ != NULL) {
+      // If the cache is not empty reuse the previously allocated stack.
+      data_ = cache_;
+      cache_ = NULL;
+    } else {
+      // Cache was empty. Allocate a new backtrack stack.
+      data_ = NewArray<int>(kBacktrackStackSize);
+    }
+  }
+
+  ~BacktrackStack() {
+    if (cache_ == NULL) {
+      // The cache is empty. Keep this backtrack stack around.
+      cache_ = data_;
+    } else {
+      // A backtrack stack was already cached, just release this one.
+      DeleteArray(data_);
+    }
+  }
+
+  int* data() const { return data_; }
+
+  int max_size() const { return kBacktrackStackSize; }
+
+ private:
+  static const int kBacktrackStackSize = 10000;
+
+  int* data_;
+  static int* cache_;
+
+  DISALLOW_COPY_AND_ASSIGN(BacktrackStack);
+};
+
+int* BacktrackStack::cache_ = NULL;
+
+
+template <typename Char>
+static bool RawMatch(const byte* code_base,
+                     Vector<const Char> subject,
+                     int* registers,
+                     int current,
+                     uint32_t current_char) {
+  const byte* pc = code_base;
+  // BacktrackStack ensures that the memory allocated for the backtracking stack
+  // is returned to the system or cached if there is no stack being cached at
+  // the moment.
+  BacktrackStack backtrack_stack;
+  int* backtrack_stack_base = backtrack_stack.data();
+  int* backtrack_sp = backtrack_stack_base;
+  int backtrack_stack_space = backtrack_stack.max_size();
+#ifdef DEBUG
+  if (FLAG_trace_regexp_bytecodes) {
+    PrintF("\n\nStart bytecode interpreter\n\n");
+  }
+#endif
+  while (true) {
+    int32_t insn = Load32Aligned(pc);
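+    // The low bits of 'insn' select the bytecode (BYTECODE_MASK); the high
+    // bits (insn >> BYTECODE_SHIFT) carry a packed operand such as a register
+    // index, a character offset or an immediate character.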
+    switch (insn & BYTECODE_MASK) {
+      BYTECODE(BREAK)
+        UNREACHABLE();
+        return false;
+      BYTECODE(PUSH_CP)
+        if (--backtrack_stack_space < 0) {
+          return false;  // No match on backtrack stack overflow.
+        }
+        *backtrack_sp++ = current;
+        pc += BC_PUSH_CP_LENGTH;
+        break;
+      BYTECODE(PUSH_BT)
+        if (--backtrack_stack_space < 0) {
+          return false;  // No match on backtrack stack overflow.
+        }
+        *backtrack_sp++ = Load32Aligned(pc + 4);
+        pc += BC_PUSH_BT_LENGTH;
+        break;
+      BYTECODE(PUSH_REGISTER)
+        if (--backtrack_stack_space < 0) {
+          return false;  // No match on backtrack stack overflow.
+        }
+        *backtrack_sp++ = registers[insn >> BYTECODE_SHIFT];
+        pc += BC_PUSH_REGISTER_LENGTH;
+        break;
+      BYTECODE(SET_REGISTER)
+        registers[insn >> BYTECODE_SHIFT] = Load32Aligned(pc + 4);
+        pc += BC_SET_REGISTER_LENGTH;
+        break;
+      BYTECODE(ADVANCE_REGISTER)
+        registers[insn >> BYTECODE_SHIFT] += Load32Aligned(pc + 4);
+        pc += BC_ADVANCE_REGISTER_LENGTH;
+        break;
+      BYTECODE(SET_REGISTER_TO_CP)
+        registers[insn >> BYTECODE_SHIFT] = current + Load32Aligned(pc + 4);
+        pc += BC_SET_REGISTER_TO_CP_LENGTH;
+        break;
+      BYTECODE(SET_CP_TO_REGISTER)
+        current = registers[insn >> BYTECODE_SHIFT];
+        pc += BC_SET_CP_TO_REGISTER_LENGTH;
+        break;
+      BYTECODE(SET_REGISTER_TO_SP)
+        registers[insn >> BYTECODE_SHIFT] = backtrack_sp - backtrack_stack_base;
+        pc += BC_SET_REGISTER_TO_SP_LENGTH;
+        break;
+      BYTECODE(SET_SP_TO_REGISTER)
+        backtrack_sp = backtrack_stack_base + registers[insn >> BYTECODE_SHIFT];
+        backtrack_stack_space = backtrack_stack.max_size() -
+                                (backtrack_sp - backtrack_stack_base);
+        pc += BC_SET_SP_TO_REGISTER_LENGTH;
+        break;
+      BYTECODE(POP_CP)
+        backtrack_stack_space++;
+        --backtrack_sp;
+        current = *backtrack_sp;
+        pc += BC_POP_CP_LENGTH;
+        break;
+      BYTECODE(POP_BT)
+        backtrack_stack_space++;
+        --backtrack_sp;
+        pc = code_base + *backtrack_sp;
+        break;
+      BYTECODE(POP_REGISTER)
+        backtrack_stack_space++;
+        --backtrack_sp;
+        registers[insn >> BYTECODE_SHIFT] = *backtrack_sp;
+        pc += BC_POP_REGISTER_LENGTH;
+        break;
+      BYTECODE(FAIL)
+        return false;
+      BYTECODE(SUCCEED)
+        return true;
+      BYTECODE(ADVANCE_CP)
+        current += insn >> BYTECODE_SHIFT;
+        pc += BC_ADVANCE_CP_LENGTH;
+        break;
+      BYTECODE(GOTO)
+        pc = code_base + Load32Aligned(pc + 4);
+        break;
+      BYTECODE(ADVANCE_CP_AND_GOTO)
+        current += insn >> BYTECODE_SHIFT;
+        pc = code_base + Load32Aligned(pc + 4);
+        break;
+      BYTECODE(CHECK_GREEDY)
+        if (current == backtrack_sp[-1]) {
+          backtrack_sp--;
+          backtrack_stack_space++;
+          pc = code_base + Load32Aligned(pc + 4);
+        } else {
+          pc += BC_CHECK_GREEDY_LENGTH;
+        }
+        break;
+      BYTECODE(LOAD_CURRENT_CHAR) {
+        int pos = current + (insn >> BYTECODE_SHIFT);
+        if (pos >= subject.length()) {
+          pc = code_base + Load32Aligned(pc + 4);
+        } else {
+          current_char = subject[pos];
+          pc += BC_LOAD_CURRENT_CHAR_LENGTH;
+        }
+        break;
+      }
+      BYTECODE(LOAD_CURRENT_CHAR_UNCHECKED) {
+        int pos = current + (insn >> BYTECODE_SHIFT);
+        current_char = subject[pos];
+        pc += BC_LOAD_CURRENT_CHAR_UNCHECKED_LENGTH;
+        break;
+      }
+      BYTECODE(LOAD_2_CURRENT_CHARS) {
+        int pos = current + (insn >> BYTECODE_SHIFT);
+        if (pos + 2 > subject.length()) {
+          pc = code_base + Load32Aligned(pc + 4);
+        } else {
+          Char next = subject[pos + 1];
+          current_char =
+              (subject[pos] | (next << (kBitsPerByte * sizeof(Char))));
+          pc += BC_LOAD_2_CURRENT_CHARS_LENGTH;
+        }
+        break;
+      }
+      BYTECODE(LOAD_2_CURRENT_CHARS_UNCHECKED) {
+        int pos = current + (insn >> BYTECODE_SHIFT);
+        Char next = subject[pos + 1];
+        current_char = (subject[pos] | (next << (kBitsPerByte * sizeof(Char))));
+        pc += BC_LOAD_2_CURRENT_CHARS_UNCHECKED_LENGTH;
+        break;
+      }
+      BYTECODE(LOAD_4_CURRENT_CHARS) {
+        ASSERT(sizeof(Char) == 1);
+        int pos = current + (insn >> BYTECODE_SHIFT);
+        if (pos + 4 > subject.length()) {
+          pc = code_base + Load32Aligned(pc + 4);
+        } else {
+          Char next1 = subject[pos + 1];
+          Char next2 = subject[pos + 2];
+          Char next3 = subject[pos + 3];
+          current_char = (subject[pos] |
+                          (next1 << 8) |
+                          (next2 << 16) |
+                          (next3 << 24));
+          pc += BC_LOAD_4_CURRENT_CHARS_LENGTH;
+        }
+        break;
+      }
+      BYTECODE(LOAD_4_CURRENT_CHARS_UNCHECKED) {
+        ASSERT(sizeof(Char) == 1);
+        int pos = current + (insn >> BYTECODE_SHIFT);
+        Char next1 = subject[pos + 1];
+        Char next2 = subject[pos + 2];
+        Char next3 = subject[pos + 3];
+        current_char = (subject[pos] |
+                        (next1 << 8) |
+                        (next2 << 16) |
+                        (next3 << 24));
+        pc += BC_LOAD_4_CURRENT_CHARS_UNCHECKED_LENGTH;
+        break;
+      }
+      BYTECODE(CHECK_4_CHARS) {
+        uint32_t c = Load32Aligned(pc + 4);
+        if (c == current_char) {
+          pc = code_base + Load32Aligned(pc + 8);
+        } else {
+          pc += BC_CHECK_4_CHARS_LENGTH;
+        }
+        break;
+      }
+      BYTECODE(CHECK_CHAR) {
+        uint32_t c = (insn >> BYTECODE_SHIFT);
+        if (c == current_char) {
+          pc = code_base + Load32Aligned(pc + 4);
+        } else {
+          pc += BC_CHECK_CHAR_LENGTH;
+        }
+        break;
+      }
+      BYTECODE(CHECK_NOT_4_CHARS) {
+        uint32_t c = Load32Aligned(pc + 4);
+        if (c != current_char) {
+          pc = code_base + Load32Aligned(pc + 8);
+        } else {
+          pc += BC_CHECK_NOT_4_CHARS_LENGTH;
+        }
+        break;
+      }
+      BYTECODE(CHECK_NOT_CHAR) {
+        uint32_t c = (insn >> BYTECODE_SHIFT);
+        if (c != current_char) {
+          pc = code_base + Load32Aligned(pc + 4);
+        } else {
+          pc += BC_CHECK_NOT_CHAR_LENGTH;
+        }
+        break;
+      }
+      BYTECODE(AND_CHECK_4_CHARS) {
+        uint32_t c = Load32Aligned(pc + 4);
+        if (c == (current_char & Load32Aligned(pc + 8))) {
+          pc = code_base + Load32Aligned(pc + 12);
+        } else {
+          pc += BC_AND_CHECK_4_CHARS_LENGTH;
+        }
+        break;
+      }
+      BYTECODE(AND_CHECK_CHAR) {
+        uint32_t c = (insn >> BYTECODE_SHIFT);
+        if (c == (current_char & Load32Aligned(pc + 4))) {
+          pc = code_base + Load32Aligned(pc + 8);
+        } else {
+          pc += BC_AND_CHECK_CHAR_LENGTH;
+        }
+        break;
+      }
+      BYTECODE(AND_CHECK_NOT_4_CHARS) {
+        uint32_t c = Load32Aligned(pc + 4);
+        if (c != (current_char & Load32Aligned(pc + 8))) {
+          pc = code_base + Load32Aligned(pc + 12);
+        } else {
+          pc += BC_AND_CHECK_NOT_4_CHARS_LENGTH;
+        }
+        break;
+      }
+      BYTECODE(AND_CHECK_NOT_CHAR) {
+        uint32_t c = (insn >> BYTECODE_SHIFT);
+        if (c != (current_char & Load32Aligned(pc + 4))) {
+          pc = code_base + Load32Aligned(pc + 8);
+        } else {
+          pc += BC_AND_CHECK_NOT_CHAR_LENGTH;
+        }
+        break;
+      }
+      BYTECODE(MINUS_AND_CHECK_NOT_CHAR) {
+        uint32_t c = (insn >> BYTECODE_SHIFT);
+        uint32_t minus = Load16Aligned(pc + 4);
+        uint32_t mask = Load16Aligned(pc + 6);
+        if (c != ((current_char - minus) & mask)) {
+          pc = code_base + Load32Aligned(pc + 8);
+        } else {
+          pc += BC_MINUS_AND_CHECK_NOT_CHAR_LENGTH;
+        }
+        break;
+      }
+      BYTECODE(CHECK_LT) {
+        uint32_t limit = (insn >> BYTECODE_SHIFT);
+        if (current_char < limit) {
+          pc = code_base + Load32Aligned(pc + 4);
+        } else {
+          pc += BC_CHECK_LT_LENGTH;
+        }
+        break;
+      }
+      BYTECODE(CHECK_GT) {
+        uint32_t limit = (insn >> BYTECODE_SHIFT);
+        if (current_char > limit) {
+          pc = code_base + Load32Aligned(pc + 4);
+        } else {
+          pc += BC_CHECK_GT_LENGTH;
+        }
+        break;
+      }
+      BYTECODE(CHECK_REGISTER_LT)
+        if (registers[insn >> BYTECODE_SHIFT] < Load32Aligned(pc + 4)) {
+          pc = code_base + Load32Aligned(pc + 8);
+        } else {
+          pc += BC_CHECK_REGISTER_LT_LENGTH;
+        }
+        break;
+      BYTECODE(CHECK_REGISTER_GE)
+        if (registers[insn >> BYTECODE_SHIFT] >= Load32Aligned(pc + 4)) {
+          pc = code_base + Load32Aligned(pc + 8);
+        } else {
+          pc += BC_CHECK_REGISTER_GE_LENGTH;
+        }
+        break;
+      BYTECODE(CHECK_REGISTER_EQ_POS)
+        if (registers[insn >> BYTECODE_SHIFT] == current) {
+          pc = code_base + Load32Aligned(pc + 4);
+        } else {
+          pc += BC_CHECK_REGISTER_EQ_POS_LENGTH;
+        }
+        break;
+      BYTECODE(LOOKUP_MAP1) {
+        // Look up character in a bitmap.  If we find a 0, then jump to the
+        // location at pc + 8.  Otherwise fall through!
+        int index = current_char - (insn >> BYTECODE_SHIFT);
+        byte map = code_base[Load32Aligned(pc + 4) + (index >> 3)];
+        map = ((map >> (index & 7)) & 1);
+        if (map == 0) {
+          pc = code_base + Load32Aligned(pc + 8);
+        } else {
+          pc += BC_LOOKUP_MAP1_LENGTH;
+        }
+        break;
+      }
+      BYTECODE(LOOKUP_MAP2) {
+        // Look up character in a half-nibble map.  If we find 00, then jump to
+        // the location at pc + 8.  If we find 01, jump to the location at
+        // pc + 12, and so on (pc + 16 for 10, pc + 20 for 11).
+        int index = (current_char - (insn >> BYTECODE_SHIFT)) << 1;
+        byte map = code_base[Load32Aligned(pc + 4) + (index >> 3)];
+        map = ((map >> (index & 7)) & 3);
+        if (map < 2) {
+          if (map == 0) {
+            pc = code_base + Load32Aligned(pc + 8);
+          } else {
+            pc = code_base + Load32Aligned(pc + 12);
+          }
+        } else {
+          if (map == 2) {
+            pc = code_base + Load32Aligned(pc + 16);
+          } else {
+            pc = code_base + Load32Aligned(pc + 20);
+          }
+        }
+        break;
+      }
+      BYTECODE(LOOKUP_MAP8) {
+        // Look up character in a byte map.  Use the byte as an index into a
+        // table that follows this instruction immediately.
+        int index = current_char - (insn >> BYTECODE_SHIFT);
+        byte map = code_base[Load32Aligned(pc + 4) + index];
+        const byte* new_pc = code_base + Load32Aligned(pc + 8) + (map << 2);
+        pc = code_base + Load32Aligned(new_pc);
+        break;
+      }
+      BYTECODE(LOOKUP_HI_MAP8) {
+        // Look up high byte of this character in a byte map.  Use the byte as
+        // an index into a table that follows this instruction immediately.
+        int index = (current_char >> 8) - (insn >> BYTECODE_SHIFT);
+        byte map = code_base[Load32Aligned(pc + 4) + index];
+        const byte* new_pc = code_base + Load32Aligned(pc + 8) + (map << 2);
+        pc = code_base + Load32Aligned(new_pc);
+        break;
+      }
+      BYTECODE(CHECK_NOT_REGS_EQUAL)
+        if (registers[insn >> BYTECODE_SHIFT] ==
+            registers[Load32Aligned(pc + 4)]) {
+          pc += BC_CHECK_NOT_REGS_EQUAL_LENGTH;
+        } else {
+          pc = code_base + Load32Aligned(pc + 8);
+        }
+        break;
+      BYTECODE(CHECK_NOT_BACK_REF) {
+        int from = registers[insn >> BYTECODE_SHIFT];
+        int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
+        if (from < 0 || len <= 0) {
+          pc += BC_CHECK_NOT_BACK_REF_LENGTH;
+          break;
+        }
+        if (current + len > subject.length()) {
+          pc = code_base + Load32Aligned(pc + 4);
+          break;
+        } else {
+          int i;
+          for (i = 0; i < len; i++) {
+            if (subject[from + i] != subject[current + i]) {
+              pc = code_base + Load32Aligned(pc + 4);
+              break;
+            }
+          }
+          if (i < len) break;
+          current += len;
+        }
+        pc += BC_CHECK_NOT_BACK_REF_LENGTH;
+        break;
+      }
+      BYTECODE(CHECK_NOT_BACK_REF_NO_CASE) {
+        int from = registers[insn >> BYTECODE_SHIFT];
+        int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
+        if (from < 0 || len <= 0) {
+          pc += BC_CHECK_NOT_BACK_REF_NO_CASE_LENGTH;
+          break;
+        }
+        if (current + len > subject.length()) {
+          pc = code_base + Load32Aligned(pc + 4);
+          break;
+        } else {
+          if (BackRefMatchesNoCase(from, current, len, subject)) {
+            current += len;
+            pc += BC_CHECK_NOT_BACK_REF_NO_CASE_LENGTH;
+          } else {
+            pc = code_base + Load32Aligned(pc + 4);
+          }
+        }
+        break;
+      }
+      BYTECODE(CHECK_AT_START)
+        if (current == 0) {
+          pc = code_base + Load32Aligned(pc + 4);
+        } else {
+          pc += BC_CHECK_AT_START_LENGTH;
+        }
+        break;
+      BYTECODE(CHECK_NOT_AT_START)
+        if (current == 0) {
+          pc += BC_CHECK_NOT_AT_START_LENGTH;
+        } else {
+          pc = code_base + Load32Aligned(pc + 4);
+        }
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+bool IrregexpInterpreter::Match(Handle<ByteArray> code_array,
+                                Handle<String> subject,
+                                int* registers,
+                                int start_position) {
+  ASSERT(subject->IsFlat());
+
+  AssertNoAllocation a;
+  const byte* code_base = code_array->GetDataStartAddress();
+  uc16 previous_char = '\n';
+  if (subject->IsAsciiRepresentation()) {
+    Vector<const char> subject_vector = subject->ToAsciiVector();
+    if (start_position != 0) previous_char = subject_vector[start_position - 1];
+    return RawMatch(code_base,
+                    subject_vector,
+                    registers,
+                    start_position,
+                    previous_char);
+  } else {
+    Vector<const uc16> subject_vector = subject->ToUC16Vector();
+    if (start_position != 0) previous_char = subject_vector[start_position - 1];
+    return RawMatch(code_base,
+                    subject_vector,
+                    registers,
+                    start_position,
+                    previous_char);
+  }
+}
+
+} }  // namespace v8::internal
diff --git a/src/interpreter-irregexp.h b/src/interpreter-irregexp.h
new file mode 100644
index 0000000..0ad8846
--- /dev/null
+++ b/src/interpreter-irregexp.h
@@ -0,0 +1,48 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// A simple interpreter for the Irregexp byte code.
+
+#ifndef V8_INTERPRETER_IRREGEXP_H_
+#define V8_INTERPRETER_IRREGEXP_H_
+
+namespace v8 {
+namespace internal {
+
+
+class IrregexpInterpreter {
+ public:
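+  // Runs the Irregexp bytecode in 'code' against the flattened 'subject',
+  // starting at 'start_position'.  Returns true on a match; capture
+  // boundaries are written into 'captures' as (start, end + 1) pairs.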
+  static bool Match(Handle<ByteArray> code,
+                    Handle<String> subject,
+                    int* captures,
+                    int start_position);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_INTERPRETER_IRREGEXP_H_
diff --git a/src/json-delay.js b/src/json-delay.js
new file mode 100644
index 0000000..1a6f008
--- /dev/null
+++ b/src/json-delay.js
@@ -0,0 +1,254 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var $JSON = global.JSON;
+
+function ParseJSONUnfiltered(text) {
+  var s = $String(text);
+  var f = %CompileString("(" + text + ")", true);
+  return f();
+}
+
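+// Recursively applies the reviver, bottom-up, to every property of the parsed
+// value; for non-array objects a reviver result of undefined removes the
+// property.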
+function Revive(holder, name, reviver) {
+  var val = holder[name];
+  if (IS_OBJECT(val)) {
+    if (IS_ARRAY(val)) {
+      var length = val.length;
+      for (var i = 0; i < length; i++) {
+        var newElement = Revive(val, $String(i), reviver);
+        val[i] = newElement;
+      }
+    } else {
+      for (var p in val) {
+        if (ObjectHasOwnProperty.call(val, p)) {
+          var newElement = Revive(val, p, reviver);
+          if (IS_UNDEFINED(newElement)) {
+            delete val[p];
+          } else {
+            val[p] = newElement;
+          }
+        }
+      }
+    }
+  }
+  return reviver.call(holder, name, val);
+}
+
+function JSONParse(text, reviver) {
+  var unfiltered = ParseJSONUnfiltered(text);
+  if (IS_FUNCTION(reviver)) {
+    return Revive({'': unfiltered}, '', reviver);
+  } else {
+    return unfiltered;
+  }
+}
+
+var characterQuoteCache = {
+  '\"': '\\"',
+  '\\': '\\\\',
+  '/': '\\/',
+  '\b': '\\b',
+  '\f': '\\f',
+  '\n': '\\n',
+  '\r': '\\r',
+  '\t': '\\t',
+  '\x0B': '\\u000b'
+};
+
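+// Quotes a single character, using a short escape from the cache above when
+// available and otherwise building a zero-padded \uXXXX escape; new escapes
+// are memoized in the cache.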
+function QuoteSingleJSONCharacter(c) {
+  if (c in characterQuoteCache)
+    return characterQuoteCache[c];
+  var charCode = c.charCodeAt(0);
+  var result;
+  if (charCode < 16) result = '\\u000';
+  else if (charCode < 256) result = '\\u00';
+  else if (charCode < 4096) result = '\\u0';
+  else result = '\\u';
+  result += charCode.toString(16);
+  characterQuoteCache[c] = result;
+  return result;
+}
+
+function QuoteJSONString(str) {
+  var quotable = /[\\\"\x00-\x1f\x80-\uffff]/g;
+  return '"' + str.replace(quotable, QuoteSingleJSONCharacter) + '"';
+}
+
+function StackContains(stack, val) {
+  var length = stack.length;
+  for (var i = 0; i < length; i++) {
+    if (stack[i] === val)
+      return true;
+  }
+  return false;
+}
+
+function SerializeArray(value, replacer, stack, indent, gap) {
+  if (StackContains(stack, value))
+    throw MakeTypeError('circular_structure', []);
+  stack.push(value);
+  var stepback = indent;
+  indent += gap;
+  var partial = [];
+  var len = value.length;
+  for (var i = 0; i < len; i++) {
+    var strP = JSONSerialize($String(i), value, replacer, stack,
+        indent, gap);
+    if (IS_UNDEFINED(strP))
+      strP = "null";
+    partial.push(strP);
+  }
+  var final;
+  if (gap == "") {
+    final = "[" + partial.join(",") + "]";
+  } else if (partial.length > 0) {
+    var separator = ",\n" + indent;
+    final = "[\n" + indent + partial.join(separator) + "\n" +
+        stepback + "]";
+  } else {
+    final = "[]";
+  }
+  stack.pop();
+  return final;
+}
+
+function SerializeObject(value, replacer, stack, indent, gap) {
+  if (StackContains(stack, value))
+    throw MakeTypeError('circular_structure', []);
+  stack.push(value);
+  var stepback = indent;
+  indent += gap;
+  var partial = [];
+  if (IS_ARRAY(replacer)) {
+    var length = replacer.length;
+    for (var i = 0; i < length; i++) {
+      if (ObjectHasOwnProperty.call(replacer, i)) {
+        var p = replacer[i];
+        var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
+        if (!IS_UNDEFINED(strP)) {
+          var member = QuoteJSONString(p) + ":";
+          if (gap != "") member += " ";
+          member += strP;
+          partial.push(member);
+        }
+      }
+    }
+  } else {
+    for (var p in value) {
+      if (ObjectHasOwnProperty.call(value, p)) {
+        var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
+        if (!IS_UNDEFINED(strP)) {
+          var member = QuoteJSONString(p) + ":";
+          if (gap != "") member += " ";
+          member += strP;
+          partial.push(member);
+        }
+      }
+    }
+  }
+  var final;
+  if (gap == "") {
+    final = "{" + partial.join(",") + "}";
+  } else if (partial.length > 0) {
+    var separator = ",\n" + indent;
+    final = "{\n" + indent + partial.join(separator) + "\n" +
+        stepback + "}";
+  } else {
+    final = "{}";
+  }
+  stack.pop();
+  return final;
+}
+
+function JSONSerialize(key, holder, replacer, stack, indent, gap) {
+  var value = holder[key];
+  if (IS_OBJECT(value) && value) {
+    var toJSON = value.toJSON;
+    if (IS_FUNCTION(toJSON))
+      value = toJSON.call(value, key);
+  }
+  if (IS_FUNCTION(replacer))
+    value = replacer.call(holder, key, value);
+  // Unwrap value if necessary
+  if (IS_OBJECT(value)) {
+    if (IS_NUMBER_WRAPPER(value)) {
+      value = $Number(value);
+    } else if (IS_STRING_WRAPPER(value)) {
+      value = $String(value);
+    }
+  }
+  switch (typeof value) {
+    case "string":
+      return QuoteJSONString(value);
+    case "object":
+      if (!value) {
+        return "null";
+      } else if (IS_ARRAY(value)) {
+        return SerializeArray(value, replacer, stack, indent, gap);
+      } else {
+        return SerializeObject(value, replacer, stack, indent, gap);
+      }
+    case "number":
+      return $isFinite(value) ? $String(value) : "null";
+    case "boolean":
+      return value ? "true" : "false";
+  }
+}
+
+function JSONStringify(value, replacer, space) {
+  var stack = [];
+  var indent = "";
+  if (IS_OBJECT(space)) {
+    // Unwrap 'space' if it is wrapped
+    if (IS_NUMBER_WRAPPER(space)) {
+      space = $Number(space);
+    } else if (IS_STRING_WRAPPER(space)) {
+      space = $String(space);
+    }
+  }
+  var gap;
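+  // 'gap' holds the per-level indentation: repeated spaces when 'space' is a
+  // number, the string itself when it is a string, and empty otherwise.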
+  if (IS_NUMBER(space)) {
+    space = $Math.min(space, 100);
+    gap = "";
+    for (var i = 0; i < space; i++)
+      gap += " ";
+  } else if (IS_STRING(space)) {
+    gap = space;
+  } else {
+    gap = "";
+  }
+  return JSONSerialize('', {'': value}, replacer, stack, indent, gap);
+}
+
+function SetupJSON() {
+  InstallFunctions($JSON, DONT_ENUM, $Array(
+    "parse", JSONParse,
+    "stringify", JSONStringify
+  ));
+}
+
+SetupJSON();
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
new file mode 100644
index 0000000..e518662
--- /dev/null
+++ b/src/jsregexp.cc
@@ -0,0 +1,4495 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "ast.h"
+#include "compiler.h"
+#include "execution.h"
+#include "factory.h"
+#include "jsregexp.h"
+#include "platform.h"
+#include "runtime.h"
+#include "top.h"
+#include "compilation-cache.h"
+#include "string-stream.h"
+#include "parser.h"
+#include "regexp-macro-assembler.h"
+#include "regexp-macro-assembler-tracer.h"
+#include "regexp-macro-assembler-irregexp.h"
+#include "regexp-stack.h"
+
+#ifdef V8_NATIVE_REGEXP
+#if V8_TARGET_ARCH_IA32
+#include "ia32/macro-assembler-ia32.h"
+#include "ia32/regexp-macro-assembler-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/macro-assembler-x64.h"
+#include "x64/regexp-macro-assembler-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/macro-assembler-arm.h"
+#include "arm/regexp-macro-assembler-arm.h"
+#else
+#error Unsupported target architecture.
+#endif
+#endif
+
+#include "interpreter-irregexp.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+Handle<Object> RegExpImpl::CreateRegExpLiteral(Handle<JSFunction> constructor,
+                                               Handle<String> pattern,
+                                               Handle<String> flags,
+                                               bool* has_pending_exception) {
+  // Ensure that the constructor function has been loaded.
+  if (!constructor->IsLoaded()) {
+    LoadLazy(constructor, has_pending_exception);
+    if (*has_pending_exception) return Handle<Object>();
+  }
+  // Call the construct code with 2 arguments.
+  Object** argv[2] = { Handle<Object>::cast(pattern).location(),
+                       Handle<Object>::cast(flags).location() };
+  return Execution::New(constructor, 2, argv, has_pending_exception);
+}
+
+
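+// Translates a flag string such as "gim" into JSRegExp::Flags; characters
+// other than 'i', 'g' and 'm' are ignored.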
+static JSRegExp::Flags RegExpFlagsFromString(Handle<String> str) {
+  int flags = JSRegExp::NONE;
+  for (int i = 0; i < str->length(); i++) {
+    switch (str->Get(i)) {
+      case 'i':
+        flags |= JSRegExp::IGNORE_CASE;
+        break;
+      case 'g':
+        flags |= JSRegExp::GLOBAL;
+        break;
+      case 'm':
+        flags |= JSRegExp::MULTILINE;
+        break;
+    }
+  }
+  return JSRegExp::Flags(flags);
+}
+
+
+static inline void ThrowRegExpException(Handle<JSRegExp> re,
+                                        Handle<String> pattern,
+                                        Handle<String> error_text,
+                                        const char* message) {
+  Handle<JSArray> array = Factory::NewJSArray(2);
+  SetElement(array, 0, pattern);
+  SetElement(array, 1, error_text);
+  Handle<Object> regexp_err = Factory::NewSyntaxError(message, array);
+  Top::Throw(*regexp_err);
+}
+
+
+// Generic RegExp methods. Dispatches to implementation specific methods.
+
+
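+// Scratch space for the register/capture offsets of a single exec: small
+// requests reuse a static buffer, larger ones allocate on the heap and free
+// the memory in the destructor.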
+class OffsetsVector {
+ public:
+  inline OffsetsVector(int num_registers)
+      : offsets_vector_length_(num_registers) {
+    if (offsets_vector_length_ > kStaticOffsetsVectorSize) {
+      vector_ = NewArray<int>(offsets_vector_length_);
+    } else {
+      vector_ = static_offsets_vector_;
+    }
+  }
+  inline ~OffsetsVector() {
+    if (offsets_vector_length_ > kStaticOffsetsVectorSize) {
+      DeleteArray(vector_);
+      vector_ = NULL;
+    }
+  }
+  inline int* vector() { return vector_; }
+  inline int length() { return offsets_vector_length_; }
+
+ private:
+  int* vector_;
+  int offsets_vector_length_;
+  static const int kStaticOffsetsVectorSize = 50;
+  static int static_offsets_vector_[kStaticOffsetsVectorSize];
+};
+
+
+int OffsetsVector::static_offsets_vector_[
+    OffsetsVector::kStaticOffsetsVectorSize];
+
+
+Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
+                                   Handle<String> pattern,
+                                   Handle<String> flag_str) {
+  JSRegExp::Flags flags = RegExpFlagsFromString(flag_str);
+  Handle<FixedArray> cached = CompilationCache::LookupRegExp(pattern, flags);
+  bool in_cache = !cached.is_null();
+  LOG(RegExpCompileEvent(re, in_cache));
+
+  Handle<Object> result;
+  if (in_cache) {
+    re->set_data(*cached);
+    return re;
+  }
+  FlattenString(pattern);
+  CompilationZoneScope zone_scope(DELETE_ON_EXIT);
+  RegExpCompileData parse_result;
+  FlatStringReader reader(pattern);
+  if (!ParseRegExp(&reader, flags.is_multiline(), &parse_result)) {
+    // Throw an exception if we fail to parse the pattern.
+    ThrowRegExpException(re,
+                         pattern,
+                         parse_result.error,
+                         "malformed_regexp");
+    return Handle<Object>::null();
+  }
+
+  if (parse_result.simple && !flags.is_ignore_case()) {
+    // Parse-tree is a single atom that is equal to the pattern.
+    AtomCompile(re, pattern, flags, pattern);
+  } else if (parse_result.tree->IsAtom() &&
+      !flags.is_ignore_case() &&
+      parse_result.capture_count == 0) {
+    RegExpAtom* atom = parse_result.tree->AsAtom();
+    Vector<const uc16> atom_pattern = atom->data();
+    Handle<String> atom_string = Factory::NewStringFromTwoByte(atom_pattern);
+    AtomCompile(re, pattern, flags, atom_string);
+  } else {
+    IrregexpPrepare(re, pattern, flags, parse_result.capture_count);
+  }
+  ASSERT(re->data()->IsFixedArray());
+  // Compilation succeeded so the data is set on the regexp
+  // and we can store it in the cache.
+  Handle<FixedArray> data(FixedArray::cast(re->data()));
+  CompilationCache::PutRegExp(pattern, flags, data);
+
+  return re;
+}
+
+
+Handle<Object> RegExpImpl::Exec(Handle<JSRegExp> regexp,
+                                Handle<String> subject,
+                                int index,
+                                Handle<JSArray> last_match_info) {
+  switch (regexp->TypeTag()) {
+    case JSRegExp::ATOM:
+      return AtomExec(regexp, subject, index, last_match_info);
+    case JSRegExp::IRREGEXP: {
+      Handle<Object> result =
+          IrregexpExec(regexp, subject, index, last_match_info);
+      ASSERT(!result.is_null() || Top::has_pending_exception());
+      return result;
+    }
+    default:
+      UNREACHABLE();
+      return Handle<Object>::null();
+  }
+}
+
+
+// RegExp Atom implementation: Simple string search using indexOf.
+
+
+void RegExpImpl::AtomCompile(Handle<JSRegExp> re,
+                             Handle<String> pattern,
+                             JSRegExp::Flags flags,
+                             Handle<String> match_pattern) {
+  Factory::SetRegExpAtomData(re,
+                             JSRegExp::ATOM,
+                             pattern,
+                             flags,
+                             match_pattern);
+}
+
+
+static void SetAtomLastCapture(FixedArray* array,
+                               String* subject,
+                               int from,
+                               int to) {
+  NoHandleAllocation no_handles;
+  RegExpImpl::SetLastCaptureCount(array, 2);
+  RegExpImpl::SetLastSubject(array, subject);
+  RegExpImpl::SetLastInput(array, subject);
+  RegExpImpl::SetCapture(array, 0, from);
+  RegExpImpl::SetCapture(array, 1, to);
+}
+
+
+Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
+                                    Handle<String> subject,
+                                    int index,
+                                    Handle<JSArray> last_match_info) {
+  Handle<String> needle(String::cast(re->DataAt(JSRegExp::kAtomPatternIndex)));
+
+  uint32_t start_index = index;
+
+  int value = Runtime::StringMatch(subject, needle, start_index);
+  if (value == -1) return Factory::null_value();
+  ASSERT(last_match_info->HasFastElements());
+
+  {
+    NoHandleAllocation no_handles;
+    FixedArray* array = FixedArray::cast(last_match_info->elements());
+    SetAtomLastCapture(array, *subject, value, value + needle->length());
+  }
+  return last_match_info;
+}
+
+
+// Irregexp implementation.
+
+// Ensures that the regexp object contains a compiled version of the
+// source for either ASCII or non-ASCII strings.
+// If the compiled version doesn't already exist, it is compiled
+// from the source pattern.
+// If compilation fails, an exception is thrown and this function
+// returns false.
+bool RegExpImpl::EnsureCompiledIrregexp(Handle<JSRegExp> re, bool is_ascii) {
+  Object* compiled_code = re->DataAt(JSRegExp::code_index(is_ascii));
+#ifdef V8_NATIVE_REGEXP
+  if (compiled_code->IsCode()) return true;
+#else  // ! V8_NATIVE_REGEXP (RegExp interpreter code)
+  if (compiled_code->IsByteArray()) return true;
+#endif
+  return CompileIrregexp(re, is_ascii);
+}
+
+
+bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re, bool is_ascii) {
+  // Compile the RegExp.
+  CompilationZoneScope zone_scope(DELETE_ON_EXIT);
+  Object* entry = re->DataAt(JSRegExp::code_index(is_ascii));
+  if (entry->IsJSObject()) {
+    // If it's a JSObject, a previous compilation failed and threw this object.
+    // Re-throw the object without trying again.
+    Top::Throw(entry);
+    return false;
+  }
+  ASSERT(entry->IsTheHole());
+
+  JSRegExp::Flags flags = re->GetFlags();
+
+  Handle<String> pattern(re->Pattern());
+  if (!pattern->IsFlat()) {
+    FlattenString(pattern);
+  }
+
+  RegExpCompileData compile_data;
+  FlatStringReader reader(pattern);
+  if (!ParseRegExp(&reader, flags.is_multiline(), &compile_data)) {
+    // Throw an exception if we fail to parse the pattern.
+    // THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once.
+    ThrowRegExpException(re,
+                         pattern,
+                         compile_data.error,
+                         "malformed_regexp");
+    return false;
+  }
+  RegExpEngine::CompilationResult result =
+      RegExpEngine::Compile(&compile_data,
+                            flags.is_ignore_case(),
+                            flags.is_multiline(),
+                            pattern,
+                            is_ascii);
+  if (result.error_message != NULL) {
+    // Unable to compile regexp.
+    Handle<JSArray> array = Factory::NewJSArray(2);
+    SetElement(array, 0, pattern);
+    SetElement(array,
+               1,
+               Factory::NewStringFromUtf8(CStrVector(result.error_message)));
+    Handle<Object> regexp_err =
+        Factory::NewSyntaxError("malformed_regexp", array);
+    Top::Throw(*regexp_err);
+    re->SetDataAt(JSRegExp::code_index(is_ascii), *regexp_err);
+    return false;
+  }
+
+  Handle<FixedArray> data = Handle<FixedArray>(FixedArray::cast(re->data()));
+  data->set(JSRegExp::code_index(is_ascii), result.code);
+  int register_max = IrregexpMaxRegisterCount(*data);
+  if (result.num_registers > register_max) {
+    SetIrregexpMaxRegisterCount(*data, result.num_registers);
+  }
+
+  return true;
+}
+
+
+int RegExpImpl::IrregexpMaxRegisterCount(FixedArray* re) {
+  return Smi::cast(
+      re->get(JSRegExp::kIrregexpMaxRegisterCountIndex))->value();
+}
+
+
+void RegExpImpl::SetIrregexpMaxRegisterCount(FixedArray* re, int value) {
+  re->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::FromInt(value));
+}
+
+
+int RegExpImpl::IrregexpNumberOfCaptures(FixedArray* re) {
+  return Smi::cast(re->get(JSRegExp::kIrregexpCaptureCountIndex))->value();
+}
+
+
+int RegExpImpl::IrregexpNumberOfRegisters(FixedArray* re) {
+  return Smi::cast(re->get(JSRegExp::kIrregexpMaxRegisterCountIndex))->value();
+}
+
+
+ByteArray* RegExpImpl::IrregexpByteCode(FixedArray* re, bool is_ascii) {
+  return ByteArray::cast(re->get(JSRegExp::code_index(is_ascii)));
+}
+
+
+Code* RegExpImpl::IrregexpNativeCode(FixedArray* re, bool is_ascii) {
+  return Code::cast(re->get(JSRegExp::code_index(is_ascii)));
+}
+
+
+void RegExpImpl::IrregexpPrepare(Handle<JSRegExp> re,
+                                 Handle<String> pattern,
+                                 JSRegExp::Flags flags,
+                                 int capture_count) {
+  // Initialize compiled code entries to null.
+  Factory::SetRegExpIrregexpData(re,
+                                 JSRegExp::IRREGEXP,
+                                 pattern,
+                                 flags,
+                                 capture_count);
+}
+
+
+Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
+                                        Handle<String> subject,
+                                        int previous_index,
+                                        Handle<JSArray> last_match_info) {
+  ASSERT_EQ(jsregexp->TypeTag(), JSRegExp::IRREGEXP);
+
+  // Prepare space for the return values.
+  int number_of_capture_registers =
+      (IrregexpNumberOfCaptures(FixedArray::cast(jsregexp->data())) + 1) * 2;
+
+#ifndef V8_NATIVE_REGEXP
+#ifdef DEBUG
+  if (FLAG_trace_regexp_bytecodes) {
+    String* pattern = jsregexp->Pattern();
+    PrintF("\n\nRegexp match:   /%s/\n\n", *(pattern->ToCString()));
+    PrintF("\n\nSubject string: '%s'\n\n", *(subject->ToCString()));
+  }
+#endif
+#endif
+
+  if (!subject->IsFlat()) {
+    FlattenString(subject);
+  }
+
+  last_match_info->EnsureSize(number_of_capture_registers + kLastMatchOverhead);
+
+  Handle<FixedArray> array;
+
+  // Dispatch to the correct RegExp implementation.
+  Handle<FixedArray> regexp(FixedArray::cast(jsregexp->data()));
+
+#ifdef V8_NATIVE_REGEXP
+
+  OffsetsVector captures(number_of_capture_registers);
+  int* captures_vector = captures.vector();
+  NativeRegExpMacroAssembler::Result res;
+  do {
+    bool is_ascii = subject->IsAsciiRepresentation();
+    if (!EnsureCompiledIrregexp(jsregexp, is_ascii)) {
+      return Handle<Object>::null();
+    }
+    Handle<Code> code(RegExpImpl::IrregexpNativeCode(*regexp, is_ascii));
+    res = NativeRegExpMacroAssembler::Match(code,
+                                            subject,
+                                            captures_vector,
+                                            captures.length(),
+                                            previous_index);
+    // If result is RETRY, the string has changed representation, and we
+    // must restart from scratch.
+  } while (res == NativeRegExpMacroAssembler::RETRY);
+  if (res == NativeRegExpMacroAssembler::EXCEPTION) {
+    ASSERT(Top::has_pending_exception());
+    return Handle<Object>::null();
+  }
+  ASSERT(res == NativeRegExpMacroAssembler::SUCCESS
+      || res == NativeRegExpMacroAssembler::FAILURE);
+
+  if (res != NativeRegExpMacroAssembler::SUCCESS) return Factory::null_value();
+
+  array = Handle<FixedArray>(FixedArray::cast(last_match_info->elements()));
+  ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead);
+  // The captures come in (start, end+1) pairs.
+  for (int i = 0; i < number_of_capture_registers; i += 2) {
+    SetCapture(*array, i, captures_vector[i]);
+    SetCapture(*array, i + 1, captures_vector[i + 1]);
+  }
+
+#else  // ! V8_NATIVE_REGEXP
+
+  bool is_ascii = subject->IsAsciiRepresentation();
+  if (!EnsureCompiledIrregexp(jsregexp, is_ascii)) {
+    return Handle<Object>::null();
+  }
+  // Now that we have done EnsureCompiledIrregexp we can get the number of
+  // registers.
+  int number_of_registers =
+      IrregexpNumberOfRegisters(FixedArray::cast(jsregexp->data()));
+  OffsetsVector registers(number_of_registers);
+  int* register_vector = registers.vector();
+  for (int i = number_of_capture_registers - 1; i >= 0; i--) {
+    register_vector[i] = -1;
+  }
+  Handle<ByteArray> byte_codes(IrregexpByteCode(*regexp, is_ascii));
+
+  if (!IrregexpInterpreter::Match(byte_codes,
+                                  subject,
+                                  register_vector,
+                                  previous_index)) {
+    return Factory::null_value();
+  }
+
+  array = Handle<FixedArray>(FixedArray::cast(last_match_info->elements()));
+  ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead);
+  // The captures come in (start, end+1) pairs.
+  for (int i = 0; i < number_of_capture_registers; i += 2) {
+    SetCapture(*array, i, register_vector[i]);
+    SetCapture(*array, i + 1, register_vector[i + 1]);
+  }
+
+#endif  // V8_NATIVE_REGEXP
+
+  SetLastCaptureCount(*array, number_of_capture_registers);
+  SetLastSubject(*array, *subject);
+  SetLastInput(*array, *subject);
+
+  return last_match_info;
+}
+
+
+// -------------------------------------------------------------------
+// Implementation of the Irregexp regular expression engine.
+//
+// The Irregexp regular expression engine is intended to be a complete
+// implementation of ECMAScript regular expressions.  It generates either
+// bytecodes or native code.
+
+//   The Irregexp regexp engine is structured in three steps.
+//   1) The parser generates an abstract syntax tree.  See ast.cc.
+//   2) From the AST a node network is created.  The nodes are all
+//      subclasses of RegExpNode.  The nodes represent states when
+//      executing a regular expression.  Several optimizations are
+//      performed on the node network.
+//   3) From the nodes we generate either byte codes or native code
+//      that can actually execute the regular expression (perform
+//      the search).  The code generation step is described in more
+//      detail below.
+
+// Code generation.
+//
+//   The nodes are divided into four main categories.
+//   * Choice nodes
+//        These represent places where the regular expression can
+//        match in more than one way.  For example on entry to an
+//        alternation (foo|bar) or a repetition (*, +, ? or {}).
+//   * Action nodes
+//        These represent places where some action should be
+//        performed.  Examples include recording the current position
+//        in the input string to a register (in order to implement
+//        captures) or other actions on registers, for example in order
+//        to implement the counters needed for {} repetitions.
+//   * Matching nodes
+//        These attempt to match some element of the input string.
+//        Examples of elements include character classes, plain strings
+//        or back references.
+//   * End nodes
+//        These are used to implement the actions required on finding
+//        a successful match or failing to find a match.
+//
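+//   As an illustration (approximate, not a literal dump of what this code
+//   builds), a pattern like /foo(bar|baz)/ roughly becomes: a TextNode for
+//   "foo", an ActionNode that records the capture start position, a
+//   ChoiceNode whose two alternatives are TextNodes for "bar" and "baz", an
+//   ActionNode that records the capture end position, and an EndNode that
+//   accepts.
+//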
+//   The code generated (whether as byte codes or native code) maintains
+//   some state as it runs.  This consists of the following elements:
+//
+//   * The capture registers.  Used for string captures.
+//   * Other registers.  Used for counters etc.
+//   * The current position.
+//   * The stack of backtracking information.  Used when a matching node
+//     fails to find a match and needs to try an alternative.
+//
+// Conceptual regular expression execution model:
+//
+//   There is a simple conceptual model of regular expression execution
+//   which will be presented first.  The actual code generated is a more
+//   efficient simulation of the simple conceptual model:
+//
+//   * Choice nodes are implemented as follows:
+//     For each choice except the last {
+//       push current position
+//       push backtrack code location
+//       <generate code to test for choice>
+//       backtrack code location:
+//       pop current position
+//     }
+//     <generate code to test for last choice>
+//
+//   * Action nodes are generated as follows:
+//     <push affected registers on backtrack stack>
+//     <generate code to perform action>
+//     push backtrack code location
+//     <generate code to test for following nodes>
+//     backtrack code location:
+//     <pop affected registers to restore their state>
+//     <pop backtrack location from stack and go to it>
+//
+//   * Matching nodes are generated as follows:
+//     if input string matches at current position
+//       update current position
+//       <generate code to test for following nodes>
+//     else
+//       <pop backtrack location from stack and go to it>
+//
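+//   As a small worked illustration of this model, matching /ab|ac/ against
+//   "ac" proceeds roughly as follows: the choice node pushes the current
+//   position and the backtrack location for the second alternative, the
+//   matching node for "ab" fails on the second character and jumps to that
+//   backtrack location, the saved position is popped, and the matching node
+//   for "ac" then succeeds.
+//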
+//   Thus it can be seen that the current position is saved and restored
+//   by the choice nodes, whereas the registers are saved and restored by
+//   the action nodes that manipulate them.
+//
+//   The other interesting aspect of this model is that nodes are generated
+//   at the point where they are needed by a recursive call to Emit().  If
+//   the node has already been code generated then the Emit() call will
+//   generate a jump to the previously generated code instead.  In order to
+//   limit recursion it is possible for the Emit() function to put the node
+//   on a work list for later generation and instead generate a jump.  The
+//   destination of the jump is resolved later when the code is generated.
+//
+// Actual regular expression code generation.
+//
+//   Code generation is actually more complicated than the above.  In order
+//   to improve the efficiency of the generated code some optimizations are
+//   performed:
+//
+//   * Choice nodes have 1-character lookahead.
+//     A choice node looks at the following character and eliminates some of
+//     the choices immediately based on that character.  This is not yet
+//     implemented.
+//   * Simple greedy loops store reduced backtracking information.
+//     A quantifier like /.*foo/m will greedily match the whole input.  It will
+//     then need to backtrack to a point where it can match "foo".  The naive
+//     implementation of this would push each character position onto the
+//     backtracking stack, then pop them off one by one.  This would use space
+//     proportional to the length of the input string.  However since the "."
+//     can only match in one way and always has a constant length (in this case
+//     of 1) it suffices to store the current position on the top of the stack
+//     once.  Matching now becomes merely incrementing the current position and
+//     backtracking becomes decrementing the current position and checking the
+//     result against the stored current position.  This is faster and saves
+//     space.  (A concrete illustration follows this list.)
+//   * The current state is virtualized.
+//     This is used to defer expensive operations until it is clear that they
+//     are needed and to generate code for a node more than once, allowing
+//     specialized and efficient versions of the code to be created. This is
+//     explained in the section below.
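+//
+//   To make the greedy-loop point above concrete (an illustration with
+//   made-up numbers): matching /.*foo/ against a 1000-character subject would
+//   naively push on the order of a thousand positions before backtracking;
+//   with the optimization only the position where the loop started is stored,
+//   and backtracking becomes decrementing the current position and comparing
+//   it against that stored value.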
+//
+// Execution state virtualization.
+//
+//   Instead of emitting code, nodes that manipulate the state can record their
+//   manipulation in an object called the Trace.  The Trace object can record a
+//   current position offset, an optional backtrack code location on the top of
+//   the virtualized backtrack stack and some register changes.  When a node is
+//   to be emitted it can flush the Trace or update it.  Flushing the Trace
+//   will emit code to bring the actual state into line with the virtual state.
+//   Avoiding flushing the state can postpone some work (eg updates of capture
+//   registers).  Postponing work can save time when executing the regular
+//   expression since it may be found that the work never has to be done as a
+//   failure to match can occur.  In addition it is much faster to jump to a
+//   known backtrack code location than it is to pop an unknown backtrack
+//   location from the stack and jump there.
+//
+//   The virtual state found in the Trace affects code generation.  For example
+//   the virtual state contains the difference between the actual current
+//   position and the virtual current position, and matching code needs to use
+//   this offset to attempt a match in the correct location of the input
+//   string.  Therefore code generated for a non-trivial trace is specialized
+//   to that trace.  The code generator therefore has the ability to generate
+//   code for each node several times.  In order to limit the size of the
+//   generated code there is an arbitrary limit on how many specialized sets of
+//   code may be generated for a given node.  If the limit is reached, the
+//   trace is flushed and a generic version of the code for a node is emitted.
+//   This is subsequently used for that node.  The code emitted for a
+//   non-generic trace is not recorded in the node and so it cannot currently
+//   be reused in the event that code generation is requested for an identical
+//   trace.
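+//
+//   For example (an illustration, not a transcript of real generated code):
+//   when a TextNode matches the two-character atom "ab", the check for 'a'
+//   can leave the Trace's cp_offset at 1 instead of emitting an immediate
+//   AdvanceCurrentPosition(1); the check for 'b' is then generated against
+//   that offset, and the pending advance is only flushed when a successor
+//   needs the real, non-virtual state.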
+
+
+void RegExpTree::AppendToText(RegExpText* text) {
+  UNREACHABLE();
+}
+
+
+void RegExpAtom::AppendToText(RegExpText* text) {
+  text->AddElement(TextElement::Atom(this));
+}
+
+
+void RegExpCharacterClass::AppendToText(RegExpText* text) {
+  text->AddElement(TextElement::CharClass(this));
+}
+
+
+void RegExpText::AppendToText(RegExpText* text) {
+  for (int i = 0; i < elements()->length(); i++)
+    text->AddElement(elements()->at(i));
+}
+
+
+TextElement TextElement::Atom(RegExpAtom* atom) {
+  TextElement result = TextElement(ATOM);
+  result.data.u_atom = atom;
+  return result;
+}
+
+
+TextElement TextElement::CharClass(
+      RegExpCharacterClass* char_class) {
+  TextElement result = TextElement(CHAR_CLASS);
+  result.data.u_char_class = char_class;
+  return result;
+}
+
+
+int TextElement::length() {
+  if (type == ATOM) {
+    return data.u_atom->length();
+  } else {
+    ASSERT(type == CHAR_CLASS);
+    return 1;
+  }
+}
+
+
+DispatchTable* ChoiceNode::GetTable(bool ignore_case) {
+  if (table_ == NULL) {
+    table_ = new DispatchTable();
+    DispatchTableConstructor cons(table_, ignore_case);
+    cons.BuildTable(this);
+  }
+  return table_;
+}
+
+
+class RegExpCompiler {
+ public:
+  RegExpCompiler(int capture_count, bool ignore_case, bool is_ascii);
+
+  int AllocateRegister() {
+    if (next_register_ >= RegExpMacroAssembler::kMaxRegister) {
+      reg_exp_too_big_ = true;
+      return next_register_;
+    }
+    return next_register_++;
+  }
+
+  RegExpEngine::CompilationResult Assemble(RegExpMacroAssembler* assembler,
+                                           RegExpNode* start,
+                                           int capture_count,
+                                           Handle<String> pattern);
+
+  inline void AddWork(RegExpNode* node) { work_list_->Add(node); }
+
+  static const int kImplementationOffset = 0;
+  static const int kNumberOfRegistersOffset = 0;
+  static const int kCodeOffset = 1;
+
+  RegExpMacroAssembler* macro_assembler() { return macro_assembler_; }
+  EndNode* accept() { return accept_; }
+
+  static const int kMaxRecursion = 100;
+  inline int recursion_depth() { return recursion_depth_; }
+  inline void IncrementRecursionDepth() { recursion_depth_++; }
+  inline void DecrementRecursionDepth() { recursion_depth_--; }
+
+  void SetRegExpTooBig() { reg_exp_too_big_ = true; }
+
+  inline bool ignore_case() { return ignore_case_; }
+  inline bool ascii() { return ascii_; }
+
+  static const int kNoRegister = -1;
+ private:
+  EndNode* accept_;
+  int next_register_;
+  List<RegExpNode*>* work_list_;
+  int recursion_depth_;
+  RegExpMacroAssembler* macro_assembler_;
+  bool ignore_case_;
+  bool ascii_;
+  bool reg_exp_too_big_;
+};
+
+
+class RecursionCheck {
+ public:
+  explicit RecursionCheck(RegExpCompiler* compiler) : compiler_(compiler) {
+    compiler->IncrementRecursionDepth();
+  }
+  ~RecursionCheck() { compiler_->DecrementRecursionDepth(); }
+ private:
+  RegExpCompiler* compiler_;
+};
+
+
+static RegExpEngine::CompilationResult IrregexpRegExpTooBig() {
+  return RegExpEngine::CompilationResult("RegExp too big");
+}
+
+
+// Attempts to compile the regexp using an Irregexp code generator.  Returns
+// a fixed array or a null handle depending on whether it succeeded.
+RegExpCompiler::RegExpCompiler(int capture_count, bool ignore_case, bool ascii)
+    : next_register_(2 * (capture_count + 1)),
+      work_list_(NULL),
+      recursion_depth_(0),
+      ignore_case_(ignore_case),
+      ascii_(ascii),
+      reg_exp_too_big_(false) {
+  accept_ = new EndNode(EndNode::ACCEPT);
+  ASSERT(next_register_ - 1 <= RegExpMacroAssembler::kMaxRegister);
+}
+
+
+RegExpEngine::CompilationResult RegExpCompiler::Assemble(
+    RegExpMacroAssembler* macro_assembler,
+    RegExpNode* start,
+    int capture_count,
+    Handle<String> pattern) {
+#ifdef DEBUG
+  if (FLAG_trace_regexp_assembler)
+    macro_assembler_ = new RegExpMacroAssemblerTracer(macro_assembler);
+  else
+#endif
+    macro_assembler_ = macro_assembler;
+  List<RegExpNode*> work_list(0);
+  work_list_ = &work_list;
+  Label fail;
+  macro_assembler_->PushBacktrack(&fail);
+  Trace new_trace;
+  start->Emit(this, &new_trace);
+  macro_assembler_->Bind(&fail);
+  macro_assembler_->Fail();
+  while (!work_list.is_empty()) {
+    work_list.RemoveLast()->Emit(this, &new_trace);
+  }
+  if (reg_exp_too_big_) return IrregexpRegExpTooBig();
+
+  Handle<Object> code = macro_assembler_->GetCode(pattern);
+
+  work_list_ = NULL;
+#ifdef DEBUG
+  if (FLAG_trace_regexp_assembler) {
+    delete macro_assembler_;
+  }
+#endif
+  return RegExpEngine::CompilationResult(*code, next_register_);
+}
+
+
+bool Trace::DeferredAction::Mentions(int that) {
+  if (type() == ActionNode::CLEAR_CAPTURES) {
+    Interval range = static_cast<DeferredClearCaptures*>(this)->range();
+    return range.Contains(that);
+  } else {
+    return reg() == that;
+  }
+}
+
+
+bool Trace::mentions_reg(int reg) {
+  for (DeferredAction* action = actions_;
+       action != NULL;
+       action = action->next()) {
+    if (action->Mentions(reg))
+      return true;
+  }
+  return false;
+}
+
+
+bool Trace::GetStoredPosition(int reg, int* cp_offset) {
+  ASSERT_EQ(0, *cp_offset);
+  for (DeferredAction* action = actions_;
+       action != NULL;
+       action = action->next()) {
+    if (action->Mentions(reg)) {
+      if (action->type() == ActionNode::STORE_POSITION) {
+        *cp_offset = static_cast<DeferredCapture*>(action)->cp_offset();
+        return true;
+      } else {
+        return false;
+      }
+    }
+  }
+  return false;
+}
+
+
+int Trace::FindAffectedRegisters(OutSet* affected_registers) {
+  int max_register = RegExpCompiler::kNoRegister;
+  for (DeferredAction* action = actions_;
+       action != NULL;
+       action = action->next()) {
+    if (action->type() == ActionNode::CLEAR_CAPTURES) {
+      Interval range = static_cast<DeferredClearCaptures*>(action)->range();
+      for (int i = range.from(); i <= range.to(); i++)
+        affected_registers->Set(i);
+      if (range.to() > max_register) max_register = range.to();
+    } else {
+      affected_registers->Set(action->reg());
+      if (action->reg() > max_register) max_register = action->reg();
+    }
+  }
+  return max_register;
+}
+
+
+void Trace::RestoreAffectedRegisters(RegExpMacroAssembler* assembler,
+                                     int max_register,
+                                     OutSet& registers_to_pop,
+                                     OutSet& registers_to_clear) {
+  for (int reg = max_register; reg >= 0; reg--) {
+    if (registers_to_pop.Get(reg)) {
+      assembler->PopRegister(reg);
+    } else if (registers_to_clear.Get(reg)) {
+      int clear_to = reg;
+      while (reg > 0 && registers_to_clear.Get(reg - 1)) {
+        reg--;
+      }
+      assembler->ClearRegisters(reg, clear_to);
+    }
+  }
+}
+
+
+void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
+                                   int max_register,
+                                   OutSet& affected_registers,
+                                   OutSet* registers_to_pop,
+                                   OutSet* registers_to_clear) {
+  // The "+1" is to avoid a push_limit of zero if stack_limit_slack() is 1.
+  const int push_limit = (assembler->stack_limit_slack() + 1) / 2;
+
+  // Count pushes performed to force a stack limit check occasionally.
+  int pushes = 0;
+
+  for (int reg = 0; reg <= max_register; reg++) {
+    if (!affected_registers.Get(reg)) {
+      continue;
+    }
+
+    // The chronologically first deferred action in the trace
+    // is used to infer the action needed to restore a register
+    // to its previous state (or not, if it's safe to ignore it).
+    enum DeferredActionUndoType { IGNORE, RESTORE, CLEAR };
+    DeferredActionUndoType undo_action = IGNORE;
+
+    int value = 0;
+    bool absolute = false;
+    bool clear = false;
+    int store_position = -1;
+    // This is a little tricky because we are scanning the actions in reverse
+    // historical order (newest first).
+    for (DeferredAction* action = actions_;
+         action != NULL;
+         action = action->next()) {
+      if (action->Mentions(reg)) {
+        switch (action->type()) {
+          case ActionNode::SET_REGISTER: {
+            Trace::DeferredSetRegister* psr =
+                static_cast<Trace::DeferredSetRegister*>(action);
+            if (!absolute) {
+              value += psr->value();
+              absolute = true;
+            }
+            // SET_REGISTER is currently only used for newly introduced loop
+            // counters. They can have a significant previous value if they
+            // occur in a loop. TODO(lrn): Propagate this information, so
+            // we can set undo_action to IGNORE if we know there is no value to
+            // restore.
+            undo_action = RESTORE;
+            ASSERT_EQ(store_position, -1);
+            ASSERT(!clear);
+            break;
+          }
+          case ActionNode::INCREMENT_REGISTER:
+            if (!absolute) {
+              value++;
+            }
+            ASSERT_EQ(store_position, -1);
+            ASSERT(!clear);
+            undo_action = RESTORE;
+            break;
+          case ActionNode::STORE_POSITION: {
+            Trace::DeferredCapture* pc =
+                static_cast<Trace::DeferredCapture*>(action);
+            if (!clear && store_position == -1) {
+              store_position = pc->cp_offset();
+            }
+
+            // For captures we know that stores and clears alternate.
+            // Other registers are never cleared, and if they occur
+            // inside a loop, they might be assigned more than once.
+            if (reg <= 1) {
+              // Registers zero and one, aka "capture zero", are
+              // always set correctly if we succeed. There is no
+              // need to undo a setting on backtrack, because we
+              // will set it again or fail.
+              undo_action = IGNORE;
+            } else {
+              undo_action = pc->is_capture() ? CLEAR : RESTORE;
+            }
+            ASSERT(!absolute);
+            ASSERT_EQ(value, 0);
+            break;
+          }
+          case ActionNode::CLEAR_CAPTURES: {
+            // Since we're scanning in reverse order, if we've already
+            // set the position we have to ignore historically earlier
+            // clearing operations.
+            if (store_position == -1) {
+              clear = true;
+            }
+            undo_action = RESTORE;
+            ASSERT(!absolute);
+            ASSERT_EQ(value, 0);
+            break;
+          }
+          default:
+            UNREACHABLE();
+            break;
+        }
+      }
+    }
+    // Prepare for the undo-action (e.g., push if it's going to be popped).
+    if (undo_action == RESTORE) {
+      pushes++;
+      RegExpMacroAssembler::StackCheckFlag stack_check =
+          RegExpMacroAssembler::kNoStackLimitCheck;
+      if (pushes == push_limit) {
+        stack_check = RegExpMacroAssembler::kCheckStackLimit;
+        pushes = 0;
+      }
+
+      assembler->PushRegister(reg, stack_check);
+      registers_to_pop->Set(reg);
+    } else if (undo_action == CLEAR) {
+      registers_to_clear->Set(reg);
+    }
+    // Perform the chronologically last action (or accumulated increment)
+    // for the register.
+    if (store_position != -1) {
+      assembler->WriteCurrentPositionToRegister(reg, store_position);
+    } else if (clear) {
+      assembler->ClearRegisters(reg, reg);
+    } else if (absolute) {
+      assembler->SetRegister(reg, value);
+    } else if (value != 0) {
+      assembler->AdvanceRegister(reg, value);
+    }
+  }
+}
+
+
+// This is called as we come into a loop choice node and some other tricky
+// nodes.  It normalizes the state of the code generator to ensure we can
+// generate generic code.
+void Trace::Flush(RegExpCompiler* compiler, RegExpNode* successor) {
+  RegExpMacroAssembler* assembler = compiler->macro_assembler();
+
+  ASSERT(!is_trivial());
+
+  if (actions_ == NULL && backtrack() == NULL) {
+    // Here we just have some deferred cp advances to fix and we are back to
+    // a normal situation.  We may also have to forget some information gained
+    // through a quick check that was already performed.
+    if (cp_offset_ != 0) assembler->AdvanceCurrentPosition(cp_offset_);
+    // Create a new trivial state and generate the node with that.
+    Trace new_state;
+    successor->Emit(compiler, &new_state);
+    return;
+  }
+
+  // Generate deferred actions here along with code to undo them again.
+  OutSet affected_registers;
+
+  if (backtrack() != NULL) {
+    // Here we have a concrete backtrack location.  These are set up by choice
+    // nodes and so they indicate that we have a deferred save of the current
+    // position which we may need to emit here.
+    assembler->PushCurrentPosition();
+  }
+
+  int max_register = FindAffectedRegisters(&affected_registers);
+  OutSet registers_to_pop;
+  OutSet registers_to_clear;
+  PerformDeferredActions(assembler,
+                         max_register,
+                         affected_registers,
+                         &registers_to_pop,
+                         &registers_to_clear);
+  if (cp_offset_ != 0) {
+    assembler->AdvanceCurrentPosition(cp_offset_);
+  }
+
+  // Create a new trivial state and generate the node with that.
+  Label undo;
+  assembler->PushBacktrack(&undo);
+  Trace new_state;
+  successor->Emit(compiler, &new_state);
+
+  // On backtrack we need to restore state.
+  assembler->Bind(&undo);
+  RestoreAffectedRegisters(assembler,
+                           max_register,
+                           registers_to_pop,
+                           registers_to_clear);
+  if (backtrack() == NULL) {
+    assembler->Backtrack();
+  } else {
+    assembler->PopCurrentPosition();
+    assembler->GoTo(backtrack());
+  }
+}
+
+
+void NegativeSubmatchSuccess::Emit(RegExpCompiler* compiler, Trace* trace) {
+  RegExpMacroAssembler* assembler = compiler->macro_assembler();
+
+  // Omit flushing the trace. We discard the entire stack frame anyway.
+
+  if (!label()->is_bound()) {
+    // We are completely independent of the trace, since we ignore it,
+    // so this code can be used as the generic version.
+    assembler->Bind(label());
+  }
+
+  // Throw away everything on the backtrack stack since the start
+  // of the negative submatch and restore the character position.
+  assembler->ReadCurrentPositionFromRegister(current_position_register_);
+  assembler->ReadStackPointerFromRegister(stack_pointer_register_);
+  if (clear_capture_count_ > 0) {
+    // Clear any captures that might have been performed during the success
+    // of the body of the negative look-ahead.
+    int clear_capture_end = clear_capture_start_ + clear_capture_count_ - 1;
+    assembler->ClearRegisters(clear_capture_start_, clear_capture_end);
+  }
+  // Now that we have unwound the stack, the backtrack that was in effect at
+  // the BeginSubmatch node is at the top of the stack.
+  assembler->Backtrack();
+}
+
+
+void EndNode::Emit(RegExpCompiler* compiler, Trace* trace) {
+  if (!trace->is_trivial()) {
+    trace->Flush(compiler, this);
+    return;
+  }
+  RegExpMacroAssembler* assembler = compiler->macro_assembler();
+  if (!label()->is_bound()) {
+    assembler->Bind(label());
+  }
+  switch (action_) {
+    case ACCEPT:
+      assembler->Succeed();
+      return;
+    case BACKTRACK:
+      assembler->GoTo(trace->backtrack());
+      return;
+    case NEGATIVE_SUBMATCH_SUCCESS:
+      // This case is handled in a different virtual method.
+      UNREACHABLE();
+  }
+  UNIMPLEMENTED();
+}
+
+
+void GuardedAlternative::AddGuard(Guard* guard) {
+  if (guards_ == NULL)
+    guards_ = new ZoneList<Guard*>(1);
+  guards_->Add(guard);
+}
+
+
+ActionNode* ActionNode::SetRegister(int reg,
+                                    int val,
+                                    RegExpNode* on_success) {
+  ActionNode* result = new ActionNode(SET_REGISTER, on_success);
+  result->data_.u_store_register.reg = reg;
+  result->data_.u_store_register.value = val;
+  return result;
+}
+
+
+ActionNode* ActionNode::IncrementRegister(int reg, RegExpNode* on_success) {
+  ActionNode* result = new ActionNode(INCREMENT_REGISTER, on_success);
+  result->data_.u_increment_register.reg = reg;
+  return result;
+}
+
+
+ActionNode* ActionNode::StorePosition(int reg,
+                                      bool is_capture,
+                                      RegExpNode* on_success) {
+  ActionNode* result = new ActionNode(STORE_POSITION, on_success);
+  result->data_.u_position_register.reg = reg;
+  result->data_.u_position_register.is_capture = is_capture;
+  return result;
+}
+
+
+ActionNode* ActionNode::ClearCaptures(Interval range,
+                                      RegExpNode* on_success) {
+  ActionNode* result = new ActionNode(CLEAR_CAPTURES, on_success);
+  result->data_.u_clear_captures.range_from = range.from();
+  result->data_.u_clear_captures.range_to = range.to();
+  return result;
+}
+
+
+ActionNode* ActionNode::BeginSubmatch(int stack_reg,
+                                      int position_reg,
+                                      RegExpNode* on_success) {
+  ActionNode* result = new ActionNode(BEGIN_SUBMATCH, on_success);
+  result->data_.u_submatch.stack_pointer_register = stack_reg;
+  result->data_.u_submatch.current_position_register = position_reg;
+  return result;
+}
+
+
+ActionNode* ActionNode::PositiveSubmatchSuccess(int stack_reg,
+                                                int position_reg,
+                                                int clear_register_count,
+                                                int clear_register_from,
+                                                RegExpNode* on_success) {
+  ActionNode* result = new ActionNode(POSITIVE_SUBMATCH_SUCCESS, on_success);
+  result->data_.u_submatch.stack_pointer_register = stack_reg;
+  result->data_.u_submatch.current_position_register = position_reg;
+  result->data_.u_submatch.clear_register_count = clear_register_count;
+  result->data_.u_submatch.clear_register_from = clear_register_from;
+  return result;
+}
+
+
+ActionNode* ActionNode::EmptyMatchCheck(int start_register,
+                                        int repetition_register,
+                                        int repetition_limit,
+                                        RegExpNode* on_success) {
+  ActionNode* result = new ActionNode(EMPTY_MATCH_CHECK, on_success);
+  result->data_.u_empty_match_check.start_register = start_register;
+  result->data_.u_empty_match_check.repetition_register = repetition_register;
+  result->data_.u_empty_match_check.repetition_limit = repetition_limit;
+  return result;
+}
+
+
+#define DEFINE_ACCEPT(Type)                                          \
+  void Type##Node::Accept(NodeVisitor* visitor) {                    \
+    visitor->Visit##Type(this);                                      \
+  }
+FOR_EACH_NODE_TYPE(DEFINE_ACCEPT)
+#undef DEFINE_ACCEPT
+
+
+void LoopChoiceNode::Accept(NodeVisitor* visitor) {
+  visitor->VisitLoopChoice(this);
+}
+
+
+// -------------------------------------------------------------------
+// Emit code.
+
+
+void ChoiceNode::GenerateGuard(RegExpMacroAssembler* macro_assembler,
+                               Guard* guard,
+                               Trace* trace) {
+  switch (guard->op()) {
+    case Guard::LT:
+      ASSERT(!trace->mentions_reg(guard->reg()));
+      macro_assembler->IfRegisterGE(guard->reg(),
+                                    guard->value(),
+                                    trace->backtrack());
+      break;
+    case Guard::GEQ:
+      ASSERT(!trace->mentions_reg(guard->reg()));
+      macro_assembler->IfRegisterLT(guard->reg(),
+                                    guard->value(),
+                                    trace->backtrack());
+      break;
+  }
+}
+
+
+static unibrow::Mapping<unibrow::Ecma262UnCanonicalize> uncanonicalize;
+static unibrow::Mapping<unibrow::CanonicalizationRange> canonrange;
+
+
+// Returns the number of characters in the equivalence class, omitting those
+// that cannot occur in the source string because it is ASCII.
+static int GetCaseIndependentLetters(uc16 character,
+                                     bool ascii_subject,
+                                     unibrow::uchar* letters) {
+  int length = uncanonicalize.get(character, '\0', letters);
+  // Unibrow returns 0 or 1 for characters where case independence is
+  // trivial.
+  if (length == 0) {
+    letters[0] = character;
+    length = 1;
+  }
+  if (!ascii_subject || character <= String::kMaxAsciiCharCode) {
+    return length;
+  }
+  // The standard requires that non-ASCII characters cannot have ASCII
+  // character codes in their equivalence class.
+  return 0;
+}
+
+
+static inline bool EmitSimpleCharacter(RegExpCompiler* compiler,
+                                       uc16 c,
+                                       Label* on_failure,
+                                       int cp_offset,
+                                       bool check,
+                                       bool preloaded) {
+  RegExpMacroAssembler* assembler = compiler->macro_assembler();
+  bool bound_checked = false;
+  if (!preloaded) {
+    assembler->LoadCurrentCharacter(
+        cp_offset,
+        on_failure,
+        check);
+    bound_checked = true;
+  }
+  assembler->CheckNotCharacter(c, on_failure);
+  return bound_checked;
+}
+
+
+// Only emits non-letters (things that don't have case).  Only used for case
+// independent matches.
+static inline bool EmitAtomNonLetter(RegExpCompiler* compiler,
+                                     uc16 c,
+                                     Label* on_failure,
+                                     int cp_offset,
+                                     bool check,
+                                     bool preloaded) {
+  RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
+  bool ascii = compiler->ascii();
+  unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
+  int length = GetCaseIndependentLetters(c, ascii, chars);
+  if (length < 1) {
+    // This can't match.  Must be an ASCII subject and a non-ASCII character.
+    // We do not need to do anything since the ASCII pass already handled this.
+    return false;  // Bounds not checked.
+  }
+  bool checked = false;
+  // We handle the length > 1 case in a later pass.
+  if (length == 1) {
+    if (ascii && c > String::kMaxAsciiCharCodeU) {
+      // Can't match - see above.
+      return false;  // Bounds not checked.
+    }
+    if (!preloaded) {
+      macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check);
+      checked = check;
+    }
+    macro_assembler->CheckNotCharacter(c, on_failure);
+  }
+  return checked;
+}
+
+
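+// A worked illustration of the bit tricks below (descriptive comment only,
+// assuming an ASCII subject so char_mask is 0x7f): for the pair 'A' (0x41)
+// and 'a' (0x61) only bit 0x20 differs, so a single
+// CheckNotCharacterAfterAnd(0x41, 0x5f, on_failure) is emitted, and exactly
+// 'A' and 'a' fall through it.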
+static bool ShortCutEmitCharacterPair(RegExpMacroAssembler* macro_assembler,
+                                      bool ascii,
+                                      uc16 c1,
+                                      uc16 c2,
+                                      Label* on_failure) {
+  uc16 char_mask;
+  if (ascii) {
+    char_mask = String::kMaxAsciiCharCode;
+  } else {
+    char_mask = String::kMaxUC16CharCode;
+  }
+  uc16 exor = c1 ^ c2;
+  // Check whether exor has only one bit set.
+  if (((exor - 1) & exor) == 0) {
+    // If c1 and c2 differ only by one bit.
+    // Ecma262UnCanonicalize always gives the highest number last.
+    ASSERT(c2 > c1);
+    uc16 mask = char_mask ^ exor;
+    macro_assembler->CheckNotCharacterAfterAnd(c1, mask, on_failure);
+    return true;
+  }
+  ASSERT(c2 > c1);
+  uc16 diff = c2 - c1;
+  if (((diff - 1) & diff) == 0 && c1 >= diff) {
+    // If the characters differ by 2^n but don't differ by one bit then
+    // subtract the difference from the found character, then do the or
+    // trick.  We avoid the theoretical case where negative numbers are
+    // involved in order to simplify code generation.
+    uc16 mask = char_mask ^ diff;
+    macro_assembler->CheckNotCharacterAfterMinusAnd(c1 - diff,
+                                                    diff,
+                                                    mask,
+                                                    on_failure);
+    return true;
+  }
+  return false;
+}
+
+
+typedef bool EmitCharacterFunction(RegExpCompiler* compiler,
+                                   uc16 c,
+                                   Label* on_failure,
+                                   int cp_offset,
+                                   bool check,
+                                   bool preloaded);
+
+// Only emits letters (things that have case).  Only used for case independent
+// matches.
+static inline bool EmitAtomLetter(RegExpCompiler* compiler,
+                                  uc16 c,
+                                  Label* on_failure,
+                                  int cp_offset,
+                                  bool check,
+                                  bool preloaded) {
+  RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
+  bool ascii = compiler->ascii();
+  unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
+  int length = GetCaseIndependentLetters(c, ascii, chars);
+  if (length <= 1) return false;
+  // We may not need to check against the end of the input string
+  // if this character lies before a character that matched.
+  if (!preloaded) {
+    macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check);
+  }
+  Label ok;
+  ASSERT(unibrow::Ecma262UnCanonicalize::kMaxWidth == 4);
+  switch (length) {
+    case 2: {
+      if (!ShortCutEmitCharacterPair(macro_assembler,
+                                     ascii,
+                                     chars[0],
+                                     chars[1],
+                                     on_failure)) {
+        macro_assembler->CheckCharacter(chars[0], &ok);
+        macro_assembler->CheckNotCharacter(chars[1], on_failure);
+        macro_assembler->Bind(&ok);
+      }
+      break;
+    }
+    case 4:
+      macro_assembler->CheckCharacter(chars[3], &ok);
+      // Fall through!
+    case 3:
+      macro_assembler->CheckCharacter(chars[0], &ok);
+      macro_assembler->CheckCharacter(chars[1], &ok);
+      macro_assembler->CheckNotCharacter(chars[2], on_failure);
+      macro_assembler->Bind(&ok);
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  return true;
+}
+
+
+static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
+                          RegExpCharacterClass* cc,
+                          bool ascii,
+                          Label* on_failure,
+                          int cp_offset,
+                          bool check_offset,
+                          bool preloaded) {
+  if (cc->is_standard() &&
+      macro_assembler->CheckSpecialCharacterClass(cc->standard_type(),
+                                                  cp_offset,
+                                                  check_offset,
+                                                  on_failure)) {
+    return;
+  }
+
+  ZoneList<CharacterRange>* ranges = cc->ranges();
+  int max_char;
+  if (ascii) {
+    max_char = String::kMaxAsciiCharCode;
+  } else {
+    max_char = String::kMaxUC16CharCode;
+  }
+
+  Label success;
+
+  Label* char_is_in_class =
+      cc->is_negated() ? on_failure : &success;
+
+  int range_count = ranges->length();
+
+  int last_valid_range = range_count - 1;
+  while (last_valid_range >= 0) {
+    CharacterRange& range = ranges->at(last_valid_range);
+    if (range.from() <= max_char) {
+      break;
+    }
+    last_valid_range--;
+  }
+
+  if (last_valid_range < 0) {
+    if (!cc->is_negated()) {
+      // TODO(plesner): We can remove this when the node level does our
+      // ASCII optimizations for us.
+      macro_assembler->GoTo(on_failure);
+    }
+    if (check_offset) {
+      macro_assembler->CheckPosition(cp_offset, on_failure);
+    }
+    return;
+  }
+
+  if (last_valid_range == 0 &&
+      !cc->is_negated() &&
+      ranges->at(0).IsEverything(max_char)) {
+    // This is a common case hit by non-anchored expressions.
+    if (check_offset) {
+      macro_assembler->CheckPosition(cp_offset, on_failure);
+    }
+    return;
+  }
+
+  if (!preloaded) {
+    macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check_offset);
+  }
+
+  for (int i = 0; i < last_valid_range; i++) {
+    CharacterRange& range = ranges->at(i);
+    Label next_range;
+    uc16 from = range.from();
+    uc16 to = range.to();
+    if (from > max_char) {
+      continue;
+    }
+    if (to > max_char) to = max_char;
+    if (to == from) {
+      macro_assembler->CheckCharacter(to, char_is_in_class);
+    } else {
+      if (from != 0) {
+        macro_assembler->CheckCharacterLT(from, &next_range);
+      }
+      if (to != max_char) {
+        macro_assembler->CheckCharacterLT(to + 1, char_is_in_class);
+      } else {
+        macro_assembler->GoTo(char_is_in_class);
+      }
+    }
+    macro_assembler->Bind(&next_range);
+  }
+
+  CharacterRange& range = ranges->at(last_valid_range);
+  uc16 from = range.from();
+  uc16 to = range.to();
+
+  if (to > max_char) to = max_char;
+  ASSERT(to >= from);
+
+  if (to == from) {
+    if (cc->is_negated()) {
+      macro_assembler->CheckCharacter(to, on_failure);
+    } else {
+      macro_assembler->CheckNotCharacter(to, on_failure);
+    }
+  } else {
+    if (from != 0) {
+      if (cc->is_negated()) {
+        macro_assembler->CheckCharacterLT(from, &success);
+      } else {
+        macro_assembler->CheckCharacterLT(from, on_failure);
+      }
+    }
+    if (to != String::kMaxUC16CharCode) {
+      if (cc->is_negated()) {
+        macro_assembler->CheckCharacterLT(to + 1, on_failure);
+      } else {
+        macro_assembler->CheckCharacterGT(to, on_failure);
+      }
+    } else {
+      if (cc->is_negated()) {
+        macro_assembler->GoTo(on_failure);
+      }
+    }
+  }
+  macro_assembler->Bind(&success);
+}
+
+
+RegExpNode::~RegExpNode() {
+}
+
+
+RegExpNode::LimitResult RegExpNode::LimitVersions(RegExpCompiler* compiler,
+                                                  Trace* trace) {
+  // If we are generating a greedy loop then don't stop and don't reuse code.
+  if (trace->stop_node() != NULL) {
+    return CONTINUE;
+  }
+
+  RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
+  if (trace->is_trivial()) {
+    if (label_.is_bound()) {
+      // We are being asked to generate a generic version, but that's already
+      // been done so just go to it.
+      macro_assembler->GoTo(&label_);
+      return DONE;
+    }
+    if (compiler->recursion_depth() >= RegExpCompiler::kMaxRecursion) {
+      // To avoid too deep recursion we push the node to the work queue and just
+      // generate a goto here.
+      compiler->AddWork(this);
+      macro_assembler->GoTo(&label_);
+      return DONE;
+    }
+    // Generate generic version of the node and bind the label for later use.
+    macro_assembler->Bind(&label_);
+    return CONTINUE;
+  }
+
+  // We are being asked to make a non-generic version.  Keep track of how many
+  // non-generic versions we generate so as not to overdo it.
+  trace_count_++;
+  if (FLAG_regexp_optimization &&
+      trace_count_ < kMaxCopiesCodeGenerated &&
+      compiler->recursion_depth() <= RegExpCompiler::kMaxRecursion) {
+    return CONTINUE;
+  }
+
+  // If we get here code has been generated for this node too many times or
+  // recursion is too deep.  Time to switch to a generic version.  The code for
+  // generic versions above can handle deep recursion properly.
+  trace->Flush(compiler, this);
+  return DONE;
+}
+
+
+int ActionNode::EatsAtLeast(int still_to_find, int recursion_depth) {
+  if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0;
+  if (type_ == POSITIVE_SUBMATCH_SUCCESS) return 0;  // Rewinds input!
+  return on_success()->EatsAtLeast(still_to_find, recursion_depth + 1);
+}
+
+
+int AssertionNode::EatsAtLeast(int still_to_find, int recursion_depth) {
+  if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0;
+  return on_success()->EatsAtLeast(still_to_find, recursion_depth + 1);
+}
+
+
+int BackReferenceNode::EatsAtLeast(int still_to_find, int recursion_depth) {
+  if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0;
+  return on_success()->EatsAtLeast(still_to_find, recursion_depth + 1);
+}
+
+
+int TextNode::EatsAtLeast(int still_to_find, int recursion_depth) {
+  int answer = Length();
+  if (answer >= still_to_find) return answer;
+  if (recursion_depth > RegExpCompiler::kMaxRecursion) return answer;
+  return answer + on_success()->EatsAtLeast(still_to_find - answer,
+                                            recursion_depth + 1);
+}
+
+
+int NegativeLookaheadChoiceNode::EatsAtLeast(int still_to_find,
+                                             int recursion_depth) {
+  if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0;
+  // Alternative 0 is the negative lookahead, alternative 1 is what comes
+  // afterwards.
+  RegExpNode* node = alternatives_->at(1).node();
+  return node->EatsAtLeast(still_to_find, recursion_depth + 1);
+}
+
+
+void NegativeLookaheadChoiceNode::GetQuickCheckDetails(
+    QuickCheckDetails* details,
+    RegExpCompiler* compiler,
+    int filled_in,
+    bool not_at_start) {
+  // Alternative 0 is the negative lookahead, alternative 1 is what comes
+  // afterwards.
+  RegExpNode* node = alternatives_->at(1).node();
+  return node->GetQuickCheckDetails(details, compiler, filled_in, not_at_start);
+}
+
+
+int ChoiceNode::EatsAtLeastHelper(int still_to_find,
+                                  int recursion_depth,
+                                  RegExpNode* ignore_this_node) {
+  if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0;
+  int min = 100;
+  int choice_count = alternatives_->length();
+  for (int i = 0; i < choice_count; i++) {
+    RegExpNode* node = alternatives_->at(i).node();
+    if (node == ignore_this_node) continue;
+    int node_eats_at_least = node->EatsAtLeast(still_to_find,
+                                               recursion_depth + 1);
+    if (node_eats_at_least < min) min = node_eats_at_least;
+  }
+  return min;
+}
+
+
+int LoopChoiceNode::EatsAtLeast(int still_to_find, int recursion_depth) {
+  return EatsAtLeastHelper(still_to_find, recursion_depth, loop_node_);
+}
+
+
+int ChoiceNode::EatsAtLeast(int still_to_find, int recursion_depth) {
+  return EatsAtLeastHelper(still_to_find, recursion_depth, NULL);
+}
+
+
+// Takes the left-most 1-bit and smears it out, setting all bits to its right.
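+// For example (illustration): SmearBitsRight(0x0100) == 0x01ff and
+// SmearBitsRight(0x0024) == 0x003f.  Its complement is used below to find the
+// high-order bits shared by every character in a range.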
+static inline uint32_t SmearBitsRight(uint32_t v) {
+  v |= v >> 1;
+  v |= v >> 2;
+  v |= v >> 4;
+  v |= v >> 8;
+  v |= v >> 16;
+  return v;
+}
+
+
+bool QuickCheckDetails::Rationalize(bool asc) {
+  bool found_useful_op = false;
+  uint32_t char_mask;
+  if (asc) {
+    char_mask = String::kMaxAsciiCharCode;
+  } else {
+    char_mask = String::kMaxUC16CharCode;
+  }
+  mask_ = 0;
+  value_ = 0;
+  int char_shift = 0;
+  for (int i = 0; i < characters_; i++) {
+    Position* pos = &positions_[i];
+    if ((pos->mask & String::kMaxAsciiCharCode) != 0) {
+      found_useful_op = true;
+    }
+    mask_ |= (pos->mask & char_mask) << char_shift;
+    value_ |= (pos->value & char_mask) << char_shift;
+    char_shift += asc ? 8 : 16;
+  }
+  return found_useful_op;
+}
+
+
+bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
+                                Trace* trace,
+                                bool preload_has_checked_bounds,
+                                Label* on_possible_success,
+                                QuickCheckDetails* details,
+                                bool fall_through_on_failure) {
+  if (details->characters() == 0) return false;
+  GetQuickCheckDetails(details, compiler, 0, trace->at_start() == Trace::FALSE);
+  if (details->cannot_match()) return false;
+  if (!details->Rationalize(compiler->ascii())) return false;
+  ASSERT(details->characters() == 1 ||
+         compiler->macro_assembler()->CanReadUnaligned());
+  uint32_t mask = details->mask();
+  uint32_t value = details->value();
+
+  RegExpMacroAssembler* assembler = compiler->macro_assembler();
+
+  if (trace->characters_preloaded() != details->characters()) {
+    assembler->LoadCurrentCharacter(trace->cp_offset(),
+                                    trace->backtrack(),
+                                    !preload_has_checked_bounds,
+                                    details->characters());
+  }
+
+
+  bool need_mask = true;
+
+  if (details->characters() == 1) {
+    // If the number of characters preloaded is 1 then we used a byte or 16
+    // bit load, so the value is already masked down.
+    uint32_t char_mask;
+    if (compiler->ascii()) {
+      char_mask = String::kMaxAsciiCharCode;
+    } else {
+      char_mask = String::kMaxUC16CharCode;
+    }
+    if ((mask & char_mask) == char_mask) need_mask = false;
+    mask &= char_mask;
+  } else {
+    // For 2-character preloads in ASCII mode we also use a 16 bit load with
+    // zero extend.
+    if (details->characters() == 2 && compiler->ascii()) {
+      if ((mask & 0xffff) == 0xffff) need_mask = false;
+    } else {
+      if (mask == 0xffffffff) need_mask = false;
+    }
+  }
+
+  if (fall_through_on_failure) {
+    if (need_mask) {
+      assembler->CheckCharacterAfterAnd(value, mask, on_possible_success);
+    } else {
+      assembler->CheckCharacter(value, on_possible_success);
+    }
+  } else {
+    if (need_mask) {
+      assembler->CheckNotCharacterAfterAnd(value, mask, trace->backtrack());
+    } else {
+      assembler->CheckNotCharacter(value, trace->backtrack());
+    }
+  }
+  return true;
+}
+
+
+// Here is the meat of GetQuickCheckDetails (see also the comment on the
+// super-class in the .h file).
+//
+// We iterate along the text object, building up for each character a
+// mask and value that can be used to test for a quick failure to match.
+// The masks and values for the positions will be combined into a single
+// machine word for the current character width in order to be used in
+// generating a quick check.
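+//
+// As an approximate illustration: for the two-character ASCII atom "ab" with
+// case-independence off, position 0 gets mask 0x7f / value 'a' and position 1
+// gets mask 0x7f / value 'b'; Rationalize() then packs these into a single
+// 16-bit compare with mask 0x7f7f and value 0x6261.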
+void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
+                                    RegExpCompiler* compiler,
+                                    int characters_filled_in,
+                                    bool not_at_start) {
+  ASSERT(characters_filled_in < details->characters());
+  int characters = details->characters();
+  int char_mask;
+  int char_shift;
+  if (compiler->ascii()) {
+    char_mask = String::kMaxAsciiCharCode;
+    char_shift = 8;
+  } else {
+    char_mask = String::kMaxUC16CharCode;
+    char_shift = 16;
+  }
+  for (int k = 0; k < elms_->length(); k++) {
+    TextElement elm = elms_->at(k);
+    if (elm.type == TextElement::ATOM) {
+      Vector<const uc16> quarks = elm.data.u_atom->data();
+      for (int i = 0; i < characters && i < quarks.length(); i++) {
+        QuickCheckDetails::Position* pos =
+            details->positions(characters_filled_in);
+        uc16 c = quarks[i];
+        if (c > char_mask) {
+          // If we expect a non-ASCII character from an ASCII string,
+          // there is no way we can match. Not even case independent
+          // matching can turn an ASCII character into non-ASCII or
+          // vice versa.
+          details->set_cannot_match();
+          pos->determines_perfectly = false;
+          return;
+        }
+        if (compiler->ignore_case()) {
+          unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
+          int length = GetCaseIndependentLetters(c, compiler->ascii(), chars);
+          ASSERT(length != 0);  // Can only happen if c > char_mask (see above).
+          if (length == 1) {
+            // This letter has no case equivalents, so it's nice and simple
+            // and the mask-compare will determine definitely whether we have
+            // a match at this character position.
+            pos->mask = char_mask;
+            pos->value = c;
+            pos->determines_perfectly = true;
+          } else {
+            uint32_t common_bits = char_mask;
+            uint32_t bits = chars[0];
+            for (int j = 1; j < length; j++) {
+              uint32_t differing_bits = ((chars[j] & common_bits) ^ bits);
+              common_bits ^= differing_bits;
+              bits &= common_bits;
+            }
+            // If length is 2 and common bits has only one zero in it then
+            // our mask and compare instruction will determine definitely
+            // whether we have a match at this character position.  Otherwise
+            // it can only be an approximate check.
+            uint32_t one_zero = (common_bits | ~char_mask);
+            if (length == 2 && ((~one_zero) & ((~one_zero) - 1)) == 0) {
+              pos->determines_perfectly = true;
+            }
+            pos->mask = common_bits;
+            pos->value = bits;
+          }
+        } else {
+          // Don't ignore case.  Nice simple case where the mask-compare will
+          // determine definitely whether we have a match at this character
+          // position.
+          pos->mask = char_mask;
+          pos->value = c;
+          pos->determines_perfectly = true;
+        }
+        characters_filled_in++;
+        ASSERT(characters_filled_in <= details->characters());
+        if (characters_filled_in == details->characters()) {
+          return;
+        }
+      }
+    } else {
+      QuickCheckDetails::Position* pos =
+          details->positions(characters_filled_in);
+      RegExpCharacterClass* tree = elm.data.u_char_class;
+      ZoneList<CharacterRange>* ranges = tree->ranges();
+      if (tree->is_negated()) {
+        // A quick check uses multi-character mask and compare.  There is no
+        // useful way to incorporate a negative char class into this scheme
+        // so we just conservatively create a mask and value that will always
+        // succeed.
+        pos->mask = 0;
+        pos->value = 0;
+      } else {
+        int first_range = 0;
+        while (ranges->at(first_range).from() > char_mask) {
+          first_range++;
+          if (first_range == ranges->length()) {
+            details->set_cannot_match();
+            pos->determines_perfectly = false;
+            return;
+          }
+        }
+        CharacterRange range = ranges->at(first_range);
+        uc16 from = range.from();
+        uc16 to = range.to();
+        if (to > char_mask) {
+          to = char_mask;
+        }
+        uint32_t differing_bits = (from ^ to);
+        // A mask and compare is only perfect if the differing bits form a
+        // number like 00011111 with one single block of trailing 1s.
+        if ((differing_bits & (differing_bits + 1)) == 0 &&
+             from + differing_bits == to) {
+          pos->determines_perfectly = true;
+        }
+        uint32_t common_bits = ~SmearBitsRight(differing_bits);
+        uint32_t bits = (from & common_bits);
+        for (int i = first_range + 1; i < ranges->length(); i++) {
+          CharacterRange range = ranges->at(i);
+          uc16 from = range.from();
+          uc16 to = range.to();
+          if (from > char_mask) continue;
+          if (to > char_mask) to = char_mask;
+          // Here we are combining more ranges into the mask and compare
+          // value.  With each new range the mask becomes more sparse and
+          // so the chances of a false positive rise.  A character class
+          // with multiple ranges is assumed never to be equivalent to a
+          // mask and compare operation.
+          pos->determines_perfectly = false;
+          uint32_t new_common_bits = (from ^ to);
+          new_common_bits = ~SmearBitsRight(new_common_bits);
+          common_bits &= new_common_bits;
+          bits &= new_common_bits;
+          uint32_t differing_bits = (from & common_bits) ^ bits;
+          common_bits ^= differing_bits;
+          bits &= common_bits;
+        }
+        pos->mask = common_bits;
+        pos->value = bits;
+      }
+      characters_filled_in++;
+      ASSERT(characters_filled_in <= details->characters());
+      if (characters_filled_in == details->characters()) {
+        return;
+      }
+    }
+  }
+  ASSERT(characters_filled_in != details->characters());
+  on_success()->GetQuickCheckDetails(details,
+                                     compiler,
+                                     characters_filled_in,
+                                     true);
+}
+
+
+void QuickCheckDetails::Clear() {
+  for (int i = 0; i < characters_; i++) {
+    positions_[i].mask = 0;
+    positions_[i].value = 0;
+    positions_[i].determines_perfectly = false;
+  }
+  characters_ = 0;
+}
+
+
+void QuickCheckDetails::Advance(int by, bool ascii) {
+  ASSERT(by >= 0);
+  if (by >= characters_) {
+    Clear();
+    return;
+  }
+  for (int i = 0; i < characters_ - by; i++) {
+    positions_[i] = positions_[by + i];
+  }
+  for (int i = characters_ - by; i < characters_; i++) {
+    positions_[i].mask = 0;
+    positions_[i].value = 0;
+    positions_[i].determines_perfectly = false;
+  }
+  characters_ -= by;
+  // We could change mask_ and value_ here but we would never advance unless
+  // they had already been used in a check and they won't be used again because
+  // it would gain us nothing.  So there's no point.
+}
+
+
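+// Merges the quick-check details of two alternatives into one, possibly
+// approximate, mask/value pair.  As an illustration: merging positions that
+// require 'a' (0x61) and 'b' (0x62) clears the differing low bits, leaving
+// mask 0x7c and value 0x60, so the combined quick check also lets '`' and 'c'
+// through and the slower full check still has to run.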
+void QuickCheckDetails::Merge(QuickCheckDetails* other, int from_index) {
+  ASSERT(characters_ == other->characters_);
+  if (other->cannot_match_) {
+    return;
+  }
+  if (cannot_match_) {
+    *this = *other;
+    return;
+  }
+  for (int i = from_index; i < characters_; i++) {
+    QuickCheckDetails::Position* pos = positions(i);
+    QuickCheckDetails::Position* other_pos = other->positions(i);
+    if (pos->mask != other_pos->mask ||
+        pos->value != other_pos->value ||
+        !other_pos->determines_perfectly) {
+      // Our mask-compare operation will be approximate unless we have the
+      // exact same operation on both sides of the alternation.
+      pos->determines_perfectly = false;
+    }
+    pos->mask &= other_pos->mask;
+    pos->value &= pos->mask;
+    other_pos->value &= pos->mask;
+    uc16 differing_bits = (pos->value ^ other_pos->value);
+    pos->mask &= ~differing_bits;
+    pos->value &= pos->mask;
+  }
+}
+
+
+class VisitMarker {
+ public:
+  explicit VisitMarker(NodeInfo* info) : info_(info) {
+    ASSERT(!info->visited);
+    info->visited = true;
+  }
+  ~VisitMarker() {
+    info_->visited = false;
+  }
+ private:
+  NodeInfo* info_;
+};
+
+
+void LoopChoiceNode::GetQuickCheckDetails(QuickCheckDetails* details,
+                                          RegExpCompiler* compiler,
+                                          int characters_filled_in,
+                                          bool not_at_start) {
+  if (body_can_be_zero_length_ || info()->visited) return;
+  VisitMarker marker(info());
+  return ChoiceNode::GetQuickCheckDetails(details,
+                                          compiler,
+                                          characters_filled_in,
+                                          not_at_start);
+}
+
+
+void ChoiceNode::GetQuickCheckDetails(QuickCheckDetails* details,
+                                      RegExpCompiler* compiler,
+                                      int characters_filled_in,
+                                      bool not_at_start) {
+  not_at_start = (not_at_start || not_at_start_);
+  int choice_count = alternatives_->length();
+  ASSERT(choice_count > 0);
+  alternatives_->at(0).node()->GetQuickCheckDetails(details,
+                                                    compiler,
+                                                    characters_filled_in,
+                                                    not_at_start);
+  for (int i = 1; i < choice_count; i++) {
+    QuickCheckDetails new_details(details->characters());
+    RegExpNode* node = alternatives_->at(i).node();
+    node->GetQuickCheckDetails(&new_details, compiler,
+                               characters_filled_in,
+                               not_at_start);
+    // Here we merge the quick match details of the two branches.
+    details->Merge(&new_details, characters_filled_in);
+  }
+}
+
+
+// Check for [0-9A-Z_a-z].
+static void EmitWordCheck(RegExpMacroAssembler* assembler,
+                          Label* word,
+                          Label* non_word,
+                          bool fall_through_on_word) {
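+  // Each comparison narrows the remaining range: everything above 'z' or
+  // below '0' is rejected first, then lowercase letters and digits are
+  // accepted, the punctuation between '9' and 'A' is rejected and uppercase
+  // letters are accepted.  That leaves only '[' to '`', so a single test
+  // for '_' finishes the job.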
+  assembler->CheckCharacterGT('z', non_word);
+  assembler->CheckCharacterLT('0', non_word);
+  assembler->CheckCharacterGT('a' - 1, word);
+  assembler->CheckCharacterLT('9' + 1, word);
+  assembler->CheckCharacterLT('A', non_word);
+  assembler->CheckCharacterLT('Z' + 1, word);
+  if (fall_through_on_word) {
+    assembler->CheckNotCharacter('_', non_word);
+  } else {
+    assembler->CheckCharacter('_', word);
+  }
+}
+
+
+// Emit the code to check for a ^ in multiline mode (1-character lookbehind
+// that matches newline or the start of input).
+static void EmitHat(RegExpCompiler* compiler,
+                    RegExpNode* on_success,
+                    Trace* trace) {
+  RegExpMacroAssembler* assembler = compiler->macro_assembler();
+  // We will be loading the previous character into the current character
+  // register.
+  Trace new_trace(*trace);
+  new_trace.InvalidateCurrentCharacter();
+
+  Label ok;
+  if (new_trace.cp_offset() == 0) {
+    // The start of input counts as a newline in this context, so skip to
+    // ok if we are at the start.
+    assembler->CheckAtStart(&ok);
+  }
+  // We already checked that we are not at the start of input so it must be
+  // OK to load the previous character.
+  assembler->LoadCurrentCharacter(new_trace.cp_offset() - 1,
+                                  new_trace.backtrack(),
+                                  false);
+  // Newline means \n, \r, 0x2028 or 0x2029.
+  if (!compiler->ascii()) {
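+    // 0x2028 and 0x2029 differ only in the least significant bit, so masking
+    // with 0xfffe and comparing against 0x2028 matches both in one check.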
+    assembler->CheckCharacterAfterAnd(0x2028, 0xfffe, &ok);
+  }
+  assembler->CheckCharacter('\n', &ok);
+  assembler->CheckNotCharacter('\r', new_trace.backtrack());
+  assembler->Bind(&ok);
+  on_success->Emit(compiler, &new_trace);
+}
+
+
+// Emit the code to handle \b and \B (word-boundary or non-word-boundary).
+static void EmitBoundaryCheck(AssertionNode::AssertionNodeType type,
+                              RegExpCompiler* compiler,
+                              RegExpNode* on_success,
+                              Trace* trace) {
+  RegExpMacroAssembler* assembler = compiler->macro_assembler();
+  Label before_non_word;
+  Label before_word;
+  if (trace->characters_preloaded() != 1) {
+    assembler->LoadCurrentCharacter(trace->cp_offset(), &before_non_word);
+  }
+  // Fall through on non-word.
+  EmitWordCheck(assembler, &before_word, &before_non_word, false);
+
+  // We will be loading the previous character into the current character
+  // register.
+  Trace new_trace(*trace);
+  new_trace.InvalidateCurrentCharacter();
+
+  Label ok;
+  Label* boundary;
+  Label* not_boundary;
+  if (type == AssertionNode::AT_BOUNDARY) {
+    boundary = &ok;
+    not_boundary = new_trace.backtrack();
+  } else {
+    not_boundary = &ok;
+    boundary = new_trace.backtrack();
+  }
+
+  // Next character is not a word character.
+  assembler->Bind(&before_non_word);
+  if (new_trace.cp_offset() == 0) {
+    // The start of input counts as a non-word character, so the question is
+    // decided if we are at the start.
+    assembler->CheckAtStart(not_boundary);
+  }
+  // We already checked that we are not at the start of input so it must be
+  // OK to load the previous character.
+  assembler->LoadCurrentCharacter(new_trace.cp_offset() - 1,
+                                  &ok,  // Unused dummy label in this call.
+                                  false);
+  // Fall through on non-word.
+  EmitWordCheck(assembler, boundary, not_boundary, false);
+  assembler->GoTo(not_boundary);
+
+  // Next character is a word character.
+  assembler->Bind(&before_word);
+  if (new_trace.cp_offset() == 0) {
+    // The start of input counts as a non-word character, so the question is
+    // decided if we are at the start.
+    assembler->CheckAtStart(boundary);
+  }
+  // We already checked that we are not at the start of input so it must be
+  // OK to load the previous character.
+  assembler->LoadCurrentCharacter(new_trace.cp_offset() - 1,
+                                  &ok,  // Unused dummy label in this call.
+                                  false);
+  bool fall_through_on_word = (type == AssertionNode::AT_NON_BOUNDARY);
+  EmitWordCheck(assembler, not_boundary, boundary, fall_through_on_word);
+
+  assembler->Bind(&ok);
+
+  on_success->Emit(compiler, &new_trace);
+}
+
+
+void AssertionNode::GetQuickCheckDetails(QuickCheckDetails* details,
+                                         RegExpCompiler* compiler,
+                                         int filled_in,
+                                         bool not_at_start) {
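+  // A start-of-input assertion can never succeed when we already know we
+  // are not at the start, so the whole node cannot match.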
+  if (type_ == AT_START && not_at_start) {
+    details->set_cannot_match();
+    return;
+  }
+  return on_success()->GetQuickCheckDetails(details,
+                                            compiler,
+                                            filled_in,
+                                            not_at_start);
+}
+
+
+void AssertionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
+  RegExpMacroAssembler* assembler = compiler->macro_assembler();
+  switch (type_) {
+    case AT_END: {
+      Label ok;
+      assembler->CheckPosition(trace->cp_offset(), &ok);
+      assembler->GoTo(trace->backtrack());
+      assembler->Bind(&ok);
+      break;
+    }
+    case AT_START: {
+      if (trace->at_start() == Trace::FALSE) {
+        assembler->GoTo(trace->backtrack());
+        return;
+      }
+      if (trace->at_start() == Trace::UNKNOWN) {
+        assembler->CheckNotAtStart(trace->backtrack());
+        Trace at_start_trace = *trace;
+        at_start_trace.set_at_start(true);
+        on_success()->Emit(compiler, &at_start_trace);
+        return;
+      }
+    }
+    break;
+    case AFTER_NEWLINE:
+      EmitHat(compiler, on_success(), trace);
+      return;
+    case AT_NON_BOUNDARY:
+    case AT_BOUNDARY:
+      EmitBoundaryCheck(type_, compiler, on_success(), trace);
+      return;
+  }
+  on_success()->Emit(compiler, trace);
+}
+
+
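+// Returns true if the quick check performed so far tests this position
+// exactly rather than approximately, so the slow pass can skip it.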
+static bool DeterminedAlready(QuickCheckDetails* quick_check, int offset) {
+  if (quick_check == NULL) return false;
+  if (offset >= quick_check->characters()) return false;
+  return quick_check->positions(offset)->determines_perfectly;
+}
+
+
+static void UpdateBoundsCheck(int index, int* checked_up_to) {
+  if (index > *checked_up_to) {
+    *checked_up_to = index;
+  }
+}
+
+
+// We call this repeatedly to generate code for each pass over the text node.
+// The passes are in increasing order of difficulty because we hope one
+// of the first passes will fail, in which case we are saved the work of the
+// later passes.  For example, for the case-independent regexp /%[asdfghjkl]a/
+// we will check the '%' in the first pass, the case-independent 'a' in the
+// second pass and the character class in the last pass.
+//
+// The passes are done from right to left, so for example to test for /bar/
+// we will first test for an 'r' with offset 2, then an 'a' with offset 1
+// and then a 'b' with offset 0.  This means we can avoid the end-of-input
+// bounds check most of the time.  In the example we only need to check for
+// end-of-input when loading the putative 'r'.
+//
+// A slight complication involves the fact that the first character may already
+// be fetched into a register by the previous node.  In this case we want to
+// do the test for that character first.  We do this in separate passes.  The
+// 'preloaded' argument indicates that we are doing such a 'pass'.  If such a
+// pass has been performed then subsequent passes will have true in
+// first_element_checked to indicate that that character does not need to be
+// checked again.
+//
+// In addition to all this we are passed a Trace, which can
+// contain an AlternativeGeneration object.  In this AlternativeGeneration
+// object we can see details of any quick check that was already passed in
+// order to get to the code we are now generating.  The quick check can involve
+// loading characters, which means we do not need to recheck the bounds
+// up to the limit the quick check already checked.  In addition the quick
+// check can have involved a mask and compare operation which may simplify
+// or obviate the need for further checks at some character positions.
+void TextNode::TextEmitPass(RegExpCompiler* compiler,
+                            TextEmitPassType pass,
+                            bool preloaded,
+                            Trace* trace,
+                            bool first_element_checked,
+                            int* checked_up_to) {
+  RegExpMacroAssembler* assembler = compiler->macro_assembler();
+  bool ascii = compiler->ascii();
+  Label* backtrack = trace->backtrack();
+  QuickCheckDetails* quick_check = trace->quick_check_performed();
+  int element_count = elms_->length();
+  for (int i = preloaded ? 0 : element_count - 1; i >= 0; i--) {
+    TextElement elm = elms_->at(i);
+    int cp_offset = trace->cp_offset() + elm.cp_offset;
+    if (elm.type == TextElement::ATOM) {
+      Vector<const uc16> quarks = elm.data.u_atom->data();
+      for (int j = preloaded ? 0 : quarks.length() - 1; j >= 0; j--) {
+        if (first_element_checked && i == 0 && j == 0) continue;
+        if (DeterminedAlready(quick_check, elm.cp_offset + j)) continue;
+        EmitCharacterFunction* emit_function = NULL;
+        switch (pass) {
+          case NON_ASCII_MATCH:
+            ASSERT(ascii);
+            if (quarks[j] > String::kMaxAsciiCharCode) {
+              assembler->GoTo(backtrack);
+              return;
+            }
+            break;
+          case NON_LETTER_CHARACTER_MATCH:
+            emit_function = &EmitAtomNonLetter;
+            break;
+          case SIMPLE_CHARACTER_MATCH:
+            emit_function = &EmitSimpleCharacter;
+            break;
+          case CASE_CHARACTER_MATCH:
+            emit_function = &EmitAtomLetter;
+            break;
+          default:
+            break;
+        }
+        if (emit_function != NULL) {
+          bool bound_checked = emit_function(compiler,
+                                             quarks[j],
+                                             backtrack,
+                                             cp_offset + j,
+                                             *checked_up_to < cp_offset + j,
+                                             preloaded);
+          if (bound_checked) UpdateBoundsCheck(cp_offset + j, checked_up_to);
+        }
+      }
+    } else {
+      ASSERT_EQ(elm.type, TextElement::CHAR_CLASS);
+      if (pass == CHARACTER_CLASS_MATCH) {
+        if (first_element_checked && i == 0) continue;
+        if (DeterminedAlready(quick_check, elm.cp_offset)) continue;
+        RegExpCharacterClass* cc = elm.data.u_char_class;
+        EmitCharClass(assembler,
+                      cc,
+                      ascii,
+                      backtrack,
+                      cp_offset,
+                      *checked_up_to < cp_offset,
+                      preloaded);
+        UpdateBoundsCheck(cp_offset, checked_up_to);
+      }
+    }
+  }
+}
+
+
+int TextNode::Length() {
+  TextElement elm = elms_->last();
+  ASSERT(elm.cp_offset >= 0);
+  if (elm.type == TextElement::ATOM) {
+    return elm.cp_offset + elm.data.u_atom->data().length();
+  } else {
+    return elm.cp_offset + 1;
+  }
+}
+
+
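+// For case-independent matching every atom character is covered by the
+// non-letter and case-independent letter passes, so the simple character
+// pass is redundant.  For case-sensitive matching the simple pass covers
+// everything, so the two letter-aware passes are redundant instead.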
+bool TextNode::SkipPass(int int_pass, bool ignore_case) {
+  TextEmitPassType pass = static_cast<TextEmitPassType>(int_pass);
+  if (ignore_case) {
+    return pass == SIMPLE_CHARACTER_MATCH;
+  } else {
+    return pass == NON_LETTER_CHARACTER_MATCH || pass == CASE_CHARACTER_MATCH;
+  }
+}
+
+
+// This generates the code to match a text node.  A text node can contain
+// straight character sequences (possibly to be matched in a case-independent
+// way) and character classes.  For efficiency we do not do this in a single
+// pass from left to right.  Instead we pass over the text node several times,
+// emitting code for some character positions every time.  See the comment on
+// TextEmitPass for details.
+void TextNode::Emit(RegExpCompiler* compiler, Trace* trace) {
+  LimitResult limit_result = LimitVersions(compiler, trace);
+  if (limit_result == DONE) return;
+  ASSERT(limit_result == CONTINUE);
+
+  if (trace->cp_offset() + Length() > RegExpMacroAssembler::kMaxCPOffset) {
+    compiler->SetRegExpTooBig();
+    return;
+  }
+
+  if (compiler->ascii()) {
+    int dummy = 0;
+    TextEmitPass(compiler, NON_ASCII_MATCH, false, trace, false, &dummy);
+  }
+
+  bool first_elt_done = false;
+  int bound_checked_to = trace->cp_offset() - 1;
+  bound_checked_to += trace->bound_checked_up_to();
+
+  // If a character is preloaded into the current character register then
+  // check that now.
+  if (trace->characters_preloaded() == 1) {
+    for (int pass = kFirstRealPass; pass <= kLastPass; pass++) {
+      if (!SkipPass(pass, compiler->ignore_case())) {
+        TextEmitPass(compiler,
+                     static_cast<TextEmitPassType>(pass),
+                     true,
+                     trace,
+                     false,
+                     &bound_checked_to);
+      }
+    }
+    first_elt_done = true;
+  }
+
+  for (int pass = kFirstRealPass; pass <= kLastPass; pass++) {
+    if (!SkipPass(pass, compiler->ignore_case())) {
+      TextEmitPass(compiler,
+                   static_cast<TextEmitPassType>(pass),
+                   false,
+                   trace,
+                   first_elt_done,
+                   &bound_checked_to);
+    }
+  }
+
+  Trace successor_trace(*trace);
+  successor_trace.set_at_start(false);
+  successor_trace.AdvanceCurrentPositionInTrace(Length(), compiler);
+  RecursionCheck rc(compiler);
+  on_success()->Emit(compiler, &successor_trace);
+}
+
+
+void Trace::InvalidateCurrentCharacter() {
+  characters_preloaded_ = 0;
+}
+
+
+void Trace::AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler) {
+  ASSERT(by > 0);
+  // We don't have an instruction for shifting the current character register
+  // down or for using a shifted value for anything so lets just forget that
+  // we preloaded any characters into it.
+  characters_preloaded_ = 0;
+  // Adjust the offsets of the quick check performed information.  This
+  // information is used to find out what we already determined about the
+  // characters by means of mask and compare.
+  quick_check_performed_.Advance(by, compiler->ascii());
+  cp_offset_ += by;
+  if (cp_offset_ > RegExpMacroAssembler::kMaxCPOffset) {
+    compiler->SetRegExpTooBig();
+    cp_offset_ = 0;
+  }
+  bound_checked_up_to_ = Max(0, bound_checked_up_to_ - by);
+}
+
+
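+// For each character class in this node, add the case equivalents of its
+// ranges so that the class matches all case variants.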
+void TextNode::MakeCaseIndependent() {
+  int element_count = elms_->length();
+  for (int i = 0; i < element_count; i++) {
+    TextElement elm = elms_->at(i);
+    if (elm.type == TextElement::CHAR_CLASS) {
+      RegExpCharacterClass* cc = elm.data.u_char_class;
+      ZoneList<CharacterRange>* ranges = cc->ranges();
+      int range_count = ranges->length();
+      for (int j = 0; j < range_count; j++) {
+        ranges->at(j).AddCaseEquivalents(ranges);
+      }
+    }
+  }
+}
+
+
+int TextNode::GreedyLoopTextLength() {
+  TextElement elm = elms_->at(elms_->length() - 1);
+  if (elm.type == TextElement::CHAR_CLASS) {
+    return elm.cp_offset + 1;
+  } else {
+    return elm.cp_offset + elm.data.u_atom->data().length();
+  }
+}
+
+
+// Finds the fixed match length of a sequence of nodes that goes from
+// this alternative and back to this choice node.  If there are variable
+// length nodes or other complications in the way then return a sentinel
+// value indicating that a greedy loop cannot be constructed.
+int ChoiceNode::GreedyLoopTextLength(GuardedAlternative* alternative) {
+  int length = 0;
+  RegExpNode* node = alternative->node();
+  // Later we will generate code for all these text nodes using recursion
+  // so we have to limit the max number.
+  int recursion_depth = 0;
+  while (node != this) {
+    if (recursion_depth++ > RegExpCompiler::kMaxRecursion) {
+      return kNodeIsTooComplexForGreedyLoops;
+    }
+    int node_length = node->GreedyLoopTextLength();
+    if (node_length == kNodeIsTooComplexForGreedyLoops) {
+      return kNodeIsTooComplexForGreedyLoops;
+    }
+    length += node_length;
+    SeqRegExpNode* seq_node = static_cast<SeqRegExpNode*>(node);
+    node = seq_node->on_success();
+  }
+  return length;
+}
+
+
+void LoopChoiceNode::AddLoopAlternative(GuardedAlternative alt) {
+  ASSERT_EQ(loop_node_, NULL);
+  AddAlternative(alt);
+  loop_node_ = alt.node();
+}
+
+
+void LoopChoiceNode::AddContinueAlternative(GuardedAlternative alt) {
+  ASSERT_EQ(continue_node_, NULL);
+  AddAlternative(alt);
+  continue_node_ = alt.node();
+}
+
+
+void LoopChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
+  RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
+  if (trace->stop_node() == this) {
+    int text_length = GreedyLoopTextLength(&(alternatives_->at(0)));
+    ASSERT(text_length != kNodeIsTooComplexForGreedyLoops);
+    // Update the counter-based backtracking info on the stack.  This is an
+    // optimization for greedy loops (see below).
+    ASSERT(trace->cp_offset() == text_length);
+    macro_assembler->AdvanceCurrentPosition(text_length);
+    macro_assembler->GoTo(trace->loop_label());
+    return;
+  }
+  ASSERT(trace->stop_node() == NULL);
+  if (!trace->is_trivial()) {
+    trace->Flush(compiler, this);
+    return;
+  }
+  ChoiceNode::Emit(compiler, trace);
+}
+
+
+int ChoiceNode::CalculatePreloadCharacters(RegExpCompiler* compiler) {
+  int preload_characters = EatsAtLeast(4, 0);
+  if (compiler->macro_assembler()->CanReadUnaligned()) {
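+    // An unaligned 32-bit load covers at most 4 one-byte characters or 2
+    // two-byte characters, so cap the preload count accordingly.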
+    bool ascii = compiler->ascii();
+    if (ascii) {
+      if (preload_characters > 4) preload_characters = 4;
+      // We can't preload 3 characters because there is no machine instruction
+      // to do that.  We can't just load 4 because we could be reading
+      // beyond the end of the string, which could cause a memory fault.
+      if (preload_characters == 3) preload_characters = 2;
+    } else {
+      if (preload_characters > 2) preload_characters = 2;
+    }
+  } else {
+    if (preload_characters > 1) preload_characters = 1;
+  }
+  return preload_characters;
+}
+
+
+// This class is used when generating the alternatives in a choice node.  It
+// records the way the alternative is being code generated.
+class AlternativeGeneration: public Malloced {
+ public:
+  AlternativeGeneration()
+      : possible_success(),
+        expects_preload(false),
+        after(),
+        quick_check_details() { }
+  Label possible_success;
+  bool expects_preload;
+  Label after;
+  QuickCheckDetails quick_check_details;
+};
+
+
+// Creates a list of AlternativeGenerations.  If the list has a reasonable
+// size then it is on the stack, otherwise the excess is on the heap.
+class AlternativeGenerationList {
+ public:
+  explicit AlternativeGenerationList(int count)
+      : alt_gens_(count) {
+    for (int i = 0; i < count && i < kAFew; i++) {
+      alt_gens_.Add(a_few_alt_gens_ + i);
+    }
+    for (int i = kAFew; i < count; i++) {
+      alt_gens_.Add(new AlternativeGeneration());
+    }
+  }
+  ~AlternativeGenerationList() {
+    for (int i = kAFew; i < alt_gens_.length(); i++) {
+      delete alt_gens_[i];
+      alt_gens_[i] = NULL;
+    }
+  }
+
+  AlternativeGeneration* at(int i) {
+    return alt_gens_[i];
+  }
+ private:
+  static const int kAFew = 10;
+  ZoneList<AlternativeGeneration*> alt_gens_;
+  AlternativeGeneration a_few_alt_gens_[kAFew];
+};
+
+
+/* Code generation for choice nodes.
+ *
+ * We generate quick checks that do a mask and compare to eliminate a
+ * choice.  If the quick check succeeds then it jumps to the continuation to
+ * do slow checks and check subsequent nodes.  If it fails (the common case)
+ * it falls through to the next choice.
+ *
+ * Here is the desired flow graph.  Nodes directly below each other imply
+ * fallthrough.  Alternatives 1 and 2 have quick checks.  Alternative
+ * 3 doesn't have a quick check so we have to call the slow check.
+ * Nodes are marked Qn for quick checks and Sn for slow checks.  The entire
+ * regexp continuation is generated directly after the Sn node, up to the
+ * next GoTo if we decide to reuse some already generated code.  Some
+ * nodes expect preload_characters to be preloaded into the current
+ * character register.  R nodes do this preloading.  Vertices are marked
+ * F for failures and S for success (possible success in the case of quick
+ * nodes).  L, V, < and > are used as arrow heads.
+ *
+ * ----------> R
+ *             |
+ *             V
+ *            Q1 -----> S1
+ *             |   S   /
+ *            F|      /
+ *             |    F/
+ *             |    /
+ *             |   R
+ *             |  /
+ *             V L
+ *            Q2 -----> S2
+ *             |   S   /
+ *            F|      /
+ *             |    F/
+ *             |    /
+ *             |   R
+ *             |  /
+ *             V L
+ *            S3
+ *             |
+ *            F|
+ *             |
+ *             R
+ *             |
+ * backtrack   V
+ * <----------Q4
+ *   \    F    |
+ *    \        |S
+ *     \   F   V
+ *      \-----S4
+ *
+ * For greedy loops we reverse our expectation and expect to match rather
+ * than fail. Therefore we want the loop code to look like this (U is the
+ * unwind code that steps back in the greedy loop).  The following alternatives
+ * look the same as above.
+ *              _____
+ *             /     \
+ *             V     |
+ * ----------> S1    |
+ *            /|     |
+ *           / |S    |
+ *         F/  \_____/
+ *         /
+ *        |<-----------
+ *        |            \
+ *        V             \
+ *        Q2 ---> S2     \
+ *        |  S   /       |
+ *       F|     /        |
+ *        |   F/         |
+ *        |   /          |
+ *        |  R           |
+ *        | /            |
+ *   F    VL             |
+ * <------U              |
+ * back   |S             |
+ *        \______________/
+ */
+
+
+void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
+  RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
+  int choice_count = alternatives_->length();
+#ifdef DEBUG
+  for (int i = 0; i < choice_count - 1; i++) {
+    GuardedAlternative alternative = alternatives_->at(i);
+    ZoneList<Guard*>* guards = alternative.guards();
+    int guard_count = (guards == NULL) ? 0 : guards->length();
+    for (int j = 0; j < guard_count; j++) {
+      ASSERT(!trace->mentions_reg(guards->at(j)->reg()));
+    }
+  }
+#endif
+
+  LimitResult limit_result = LimitVersions(compiler, trace);
+  if (limit_result == DONE) return;
+  ASSERT(limit_result == CONTINUE);
+
+  int new_flush_budget = trace->flush_budget() / choice_count;
+  if (trace->flush_budget() == 0 && trace->actions() != NULL) {
+    trace->Flush(compiler, this);
+    return;
+  }
+
+  RecursionCheck rc(compiler);
+
+  Trace* current_trace = trace;
+
+  int text_length = GreedyLoopTextLength(&(alternatives_->at(0)));
+  bool greedy_loop = false;
+  Label greedy_loop_label;
+  Trace counter_backtrack_trace;
+  counter_backtrack_trace.set_backtrack(&greedy_loop_label);
+  if (not_at_start()) counter_backtrack_trace.set_at_start(false);
+
+  if (choice_count > 1 && text_length != kNodeIsTooComplexForGreedyLoops) {
+    // Here we have special handling for greedy loops containing only text nodes
+    // and other simple nodes.  These are handled by pushing the current
+    // position on the stack and then incrementing the current position each
+    // time around the loop.  On backtrack we decrement the current position
+    // and check it against the pushed value.  This avoids pushing backtrack
+    // information for each iteration of the loop, which could take up a lot of
+    // space.
+    greedy_loop = true;
+    ASSERT(trace->stop_node() == NULL);
+    macro_assembler->PushCurrentPosition();
+    current_trace = &counter_backtrack_trace;
+    Label greedy_match_failed;
+    Trace greedy_match_trace;
+    if (not_at_start()) greedy_match_trace.set_at_start(false);
+    greedy_match_trace.set_backtrack(&greedy_match_failed);
+    Label loop_label;
+    macro_assembler->Bind(&loop_label);
+    greedy_match_trace.set_stop_node(this);
+    greedy_match_trace.set_loop_label(&loop_label);
+    alternatives_->at(0).node()->Emit(compiler, &greedy_match_trace);
+    macro_assembler->Bind(&greedy_match_failed);
+  }
+
+  Label second_choice;  // For use in greedy matches.
+  macro_assembler->Bind(&second_choice);
+
+  int first_normal_choice = greedy_loop ? 1 : 0;
+
+  int preload_characters = CalculatePreloadCharacters(compiler);
+  bool preload_is_current =
+      (current_trace->characters_preloaded() == preload_characters);
+  bool preload_has_checked_bounds = preload_is_current;
+
+  AlternativeGenerationList alt_gens(choice_count);
+
+  // For now we just call all choices one after the other.  The idea ultimately
+  // is to use the Dispatch table to try only the relevant ones.
+  for (int i = first_normal_choice; i < choice_count; i++) {
+    GuardedAlternative alternative = alternatives_->at(i);
+    AlternativeGeneration* alt_gen = alt_gens.at(i);
+    alt_gen->quick_check_details.set_characters(preload_characters);
+    ZoneList<Guard*>* guards = alternative.guards();
+    int guard_count = (guards == NULL) ? 0 : guards->length();
+    Trace new_trace(*current_trace);
+    new_trace.set_characters_preloaded(preload_is_current ?
+                                         preload_characters :
+                                         0);
+    if (preload_has_checked_bounds) {
+      new_trace.set_bound_checked_up_to(preload_characters);
+    }
+    new_trace.quick_check_performed()->Clear();
+    if (not_at_start_) new_trace.set_at_start(Trace::FALSE);
+    alt_gen->expects_preload = preload_is_current;
+    bool generate_full_check_inline = false;
+    if (FLAG_regexp_optimization &&
+        try_to_emit_quick_check_for_alternative(i) &&
+        alternative.node()->EmitQuickCheck(compiler,
+                                           &new_trace,
+                                           preload_has_checked_bounds,
+                                           &alt_gen->possible_success,
+                                           &alt_gen->quick_check_details,
+                                           i < choice_count - 1)) {
+      // Quick check was generated for this choice.
+      preload_is_current = true;
+      preload_has_checked_bounds = true;
+      // On the last choice in the ChoiceNode we generated the quick
+      // check to fall through on possible success.  So now we need to
+      // generate the full check inline.
+      if (i == choice_count - 1) {
+        macro_assembler->Bind(&alt_gen->possible_success);
+        new_trace.set_quick_check_performed(&alt_gen->quick_check_details);
+        new_trace.set_characters_preloaded(preload_characters);
+        new_trace.set_bound_checked_up_to(preload_characters);
+        generate_full_check_inline = true;
+      }
+    } else if (alt_gen->quick_check_details.cannot_match()) {
+      if (i == choice_count - 1 && !greedy_loop) {
+        macro_assembler->GoTo(trace->backtrack());
+      }
+      continue;
+    } else {
+      // No quick check was generated.  Put the full code here.
+      // If this is not the first choice then there could be slow checks from
+      // previous cases that go here when they fail.  There's no reason to
+      // insist that they preload characters since the slow check we are about
+      // to generate probably can't use it.
+      if (i != first_normal_choice) {
+        alt_gen->expects_preload = false;
+        new_trace.set_characters_preloaded(0);
+      }
+      if (i < choice_count - 1) {
+        new_trace.set_backtrack(&alt_gen->after);
+      }
+      generate_full_check_inline = true;
+    }
+    if (generate_full_check_inline) {
+      if (new_trace.actions() != NULL) {
+        new_trace.set_flush_budget(new_flush_budget);
+      }
+      for (int j = 0; j < guard_count; j++) {
+        GenerateGuard(macro_assembler, guards->at(j), &new_trace);
+      }
+      alternative.node()->Emit(compiler, &new_trace);
+      preload_is_current = false;
+    }
+    macro_assembler->Bind(&alt_gen->after);
+  }
+  if (greedy_loop) {
+    macro_assembler->Bind(&greedy_loop_label);
+    // If we have unwound to the bottom then backtrack.
+    macro_assembler->CheckGreedyLoop(trace->backtrack());
+    // Otherwise try the second priority at an earlier position.
+    macro_assembler->AdvanceCurrentPosition(-text_length);
+    macro_assembler->GoTo(&second_choice);
+  }
+
+  // At this point we need to generate slow checks for the alternatives where
+  // the quick check was inlined.  We can recognize these because the associated
+  // label was bound.
+  for (int i = first_normal_choice; i < choice_count - 1; i++) {
+    AlternativeGeneration* alt_gen = alt_gens.at(i);
+    Trace new_trace(*current_trace);
+    // If there are actions to be flushed we have to limit how many times
+    // they are flushed.  Take the budget of the parent trace and distribute
+    // it fairly amongst the children.
+    if (new_trace.actions() != NULL) {
+      new_trace.set_flush_budget(new_flush_budget);
+    }
+    EmitOutOfLineContinuation(compiler,
+                              &new_trace,
+                              alternatives_->at(i),
+                              alt_gen,
+                              preload_characters,
+                              alt_gens.at(i + 1)->expects_preload);
+  }
+}
+
+
+void ChoiceNode::EmitOutOfLineContinuation(RegExpCompiler* compiler,
+                                           Trace* trace,
+                                           GuardedAlternative alternative,
+                                           AlternativeGeneration* alt_gen,
+                                           int preload_characters,
+                                           bool next_expects_preload) {
+  if (!alt_gen->possible_success.is_linked()) return;
+
+  RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
+  macro_assembler->Bind(&alt_gen->possible_success);
+  Trace out_of_line_trace(*trace);
+  out_of_line_trace.set_characters_preloaded(preload_characters);
+  out_of_line_trace.set_quick_check_performed(&alt_gen->quick_check_details);
+  if (not_at_start_) out_of_line_trace.set_at_start(Trace::FALSE);
+  ZoneList<Guard*>* guards = alternative.guards();
+  int guard_count = (guards == NULL) ? 0 : guards->length();
+  if (next_expects_preload) {
+    Label reload_current_char;
+    out_of_line_trace.set_backtrack(&reload_current_char);
+    for (int j = 0; j < guard_count; j++) {
+      GenerateGuard(macro_assembler, guards->at(j), &out_of_line_trace);
+    }
+    alternative.node()->Emit(compiler, &out_of_line_trace);
+    macro_assembler->Bind(&reload_current_char);
+    // Reload the current character, since the next quick check expects that.
+    // We don't need to check bounds here because we only get into this
+    // code through a quick check which already did the checked load.
+    macro_assembler->LoadCurrentCharacter(trace->cp_offset(),
+                                          NULL,
+                                          false,
+                                          preload_characters);
+    macro_assembler->GoTo(&(alt_gen->after));
+  } else {
+    out_of_line_trace.set_backtrack(&(alt_gen->after));
+    for (int j = 0; j < guard_count; j++) {
+      GenerateGuard(macro_assembler, guards->at(j), &out_of_line_trace);
+    }
+    alternative.node()->Emit(compiler, &out_of_line_trace);
+  }
+}
+
+
+void ActionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
+  RegExpMacroAssembler* assembler = compiler->macro_assembler();
+  LimitResult limit_result = LimitVersions(compiler, trace);
+  if (limit_result == DONE) return;
+  ASSERT(limit_result == CONTINUE);
+
+  RecursionCheck rc(compiler);
+
+  switch (type_) {
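+    // The position, register and capture-clearing cases do not emit code
+    // immediately; they record a deferred action on the trace, which is only
+    // materialized when the trace is flushed.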
+    case STORE_POSITION: {
+      Trace::DeferredCapture
+          new_capture(data_.u_position_register.reg,
+                      data_.u_position_register.is_capture,
+                      trace);
+      Trace new_trace = *trace;
+      new_trace.add_action(&new_capture);
+      on_success()->Emit(compiler, &new_trace);
+      break;
+    }
+    case INCREMENT_REGISTER: {
+      Trace::DeferredIncrementRegister
+          new_increment(data_.u_increment_register.reg);
+      Trace new_trace = *trace;
+      new_trace.add_action(&new_increment);
+      on_success()->Emit(compiler, &new_trace);
+      break;
+    }
+    case SET_REGISTER: {
+      Trace::DeferredSetRegister
+          new_set(data_.u_store_register.reg, data_.u_store_register.value);
+      Trace new_trace = *trace;
+      new_trace.add_action(&new_set);
+      on_success()->Emit(compiler, &new_trace);
+      break;
+    }
+    case CLEAR_CAPTURES: {
+      Trace::DeferredClearCaptures
+        new_capture(Interval(data_.u_clear_captures.range_from,
+                             data_.u_clear_captures.range_to));
+      Trace new_trace = *trace;
+      new_trace.add_action(&new_capture);
+      on_success()->Emit(compiler, &new_trace);
+      break;
+    }
+    case BEGIN_SUBMATCH:
+      if (!trace->is_trivial()) {
+        trace->Flush(compiler, this);
+      } else {
+        assembler->WriteCurrentPositionToRegister(
+            data_.u_submatch.current_position_register, 0);
+        assembler->WriteStackPointerToRegister(
+            data_.u_submatch.stack_pointer_register);
+        on_success()->Emit(compiler, trace);
+      }
+      break;
+    case EMPTY_MATCH_CHECK: {
+      int start_pos_reg = data_.u_empty_match_check.start_register;
+      int stored_pos = 0;
+      int rep_reg = data_.u_empty_match_check.repetition_register;
+      bool has_minimum = (rep_reg != RegExpCompiler::kNoRegister);
+      bool know_dist = trace->GetStoredPosition(start_pos_reg, &stored_pos);
+      if (know_dist && !has_minimum && stored_pos == trace->cp_offset()) {
+        // If we know we haven't advanced and there is no minimum we
+        // can just backtrack immediately.
+        assembler->GoTo(trace->backtrack());
+      } else if (know_dist && stored_pos < trace->cp_offset()) {
+        // If we know we've advanced we can generate the continuation
+        // immediately.
+        on_success()->Emit(compiler, trace);
+      } else if (!trace->is_trivial()) {
+        trace->Flush(compiler, this);
+      } else {
+        Label skip_empty_check;
+        // If we have a minimum number of repetitions we check the current
+        // number first and skip the empty check if it's not enough.
+        if (has_minimum) {
+          int limit = data_.u_empty_match_check.repetition_limit;
+          assembler->IfRegisterLT(rep_reg, limit, &skip_empty_check);
+        }
+        // If the match is empty we bail out, otherwise we fall through
+        // to the on-success continuation.
+        assembler->IfRegisterEqPos(data_.u_empty_match_check.start_register,
+                                   trace->backtrack());
+        assembler->Bind(&skip_empty_check);
+        on_success()->Emit(compiler, trace);
+      }
+      break;
+    }
+    case POSITIVE_SUBMATCH_SUCCESS: {
+      if (!trace->is_trivial()) {
+        trace->Flush(compiler, this);
+        return;
+      }
+      assembler->ReadCurrentPositionFromRegister(
+          data_.u_submatch.current_position_register);
+      assembler->ReadStackPointerFromRegister(
+          data_.u_submatch.stack_pointer_register);
+      int clear_register_count = data_.u_submatch.clear_register_count;
+      if (clear_register_count == 0) {
+        on_success()->Emit(compiler, trace);
+        return;
+      }
+      int clear_registers_from = data_.u_submatch.clear_register_from;
+      Label clear_registers_backtrack;
+      Trace new_trace = *trace;
+      new_trace.set_backtrack(&clear_registers_backtrack);
+      on_success()->Emit(compiler, &new_trace);
+
+      assembler->Bind(&clear_registers_backtrack);
+      int clear_registers_to = clear_registers_from + clear_register_count - 1;
+      assembler->ClearRegisters(clear_registers_from, clear_registers_to);
+
+      ASSERT(trace->backtrack() == NULL);
+      assembler->Backtrack();
+      return;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void BackReferenceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
+  RegExpMacroAssembler* assembler = compiler->macro_assembler();
+  if (!trace->is_trivial()) {
+    trace->Flush(compiler, this);
+    return;
+  }
+
+  LimitResult limit_result = LimitVersions(compiler, trace);
+  if (limit_result == DONE) return;
+  ASSERT(limit_result == CONTINUE);
+
+  RecursionCheck rc(compiler);
+
+  ASSERT_EQ(start_reg_ + 1, end_reg_);
+  if (compiler->ignore_case()) {
+    assembler->CheckNotBackReferenceIgnoreCase(start_reg_,
+                                               trace->backtrack());
+  } else {
+    assembler->CheckNotBackReference(start_reg_, trace->backtrack());
+  }
+  on_success()->Emit(compiler, trace);
+}
+
+
+// -------------------------------------------------------------------
+// Dot/dotty output
+
+
+#ifdef DEBUG
+
+
+class DotPrinter: public NodeVisitor {
+ public:
+  explicit DotPrinter(bool ignore_case)
+      : ignore_case_(ignore_case),
+        stream_(&alloc_) { }
+  void PrintNode(const char* label, RegExpNode* node);
+  void Visit(RegExpNode* node);
+  void PrintAttributes(RegExpNode* from);
+  StringStream* stream() { return &stream_; }
+  void PrintOnFailure(RegExpNode* from, RegExpNode* to);
+#define DECLARE_VISIT(Type)                                          \
+  virtual void Visit##Type(Type##Node* that);
+FOR_EACH_NODE_TYPE(DECLARE_VISIT)
+#undef DECLARE_VISIT
+ private:
+  bool ignore_case_;
+  HeapStringAllocator alloc_;
+  StringStream stream_;
+};
+
+
+void DotPrinter::PrintNode(const char* label, RegExpNode* node) {
+  stream()->Add("digraph G {\n  graph [label=\"");
+  for (int i = 0; label[i]; i++) {
+    switch (label[i]) {
+      case '\\':
+        stream()->Add("\\\\");
+        break;
+      case '"':
+        stream()->Add("\\\"");
+        break;
+      default:
+        stream()->Put(label[i]);
+        break;
+    }
+  }
+  stream()->Add("\"];\n");
+  Visit(node);
+  stream()->Add("}\n");
+  printf("%s", *(stream()->ToCString()));
+}
+
+
+void DotPrinter::Visit(RegExpNode* node) {
+  if (node->info()->visited) return;
+  node->info()->visited = true;
+  node->Accept(this);
+}
+
+
+void DotPrinter::PrintOnFailure(RegExpNode* from, RegExpNode* on_failure) {
+  stream()->Add("  n%p -> n%p [style=dotted];\n", from, on_failure);
+  Visit(on_failure);
+}
+
+
+class TableEntryBodyPrinter {
+ public:
+  TableEntryBodyPrinter(StringStream* stream, ChoiceNode* choice)
+      : stream_(stream), choice_(choice) { }
+  void Call(uc16 from, DispatchTable::Entry entry) {
+    OutSet* out_set = entry.out_set();
+    for (unsigned i = 0; i < OutSet::kFirstLimit; i++) {
+      if (out_set->Get(i)) {
+        stream()->Add("    n%p:s%io%i -> n%p;\n",
+                      choice(),
+                      from,
+                      i,
+                      choice()->alternatives()->at(i).node());
+      }
+    }
+  }
+ private:
+  StringStream* stream() { return stream_; }
+  ChoiceNode* choice() { return choice_; }
+  StringStream* stream_;
+  ChoiceNode* choice_;
+};
+
+
+class TableEntryHeaderPrinter {
+ public:
+  explicit TableEntryHeaderPrinter(StringStream* stream)
+      : first_(true), stream_(stream) { }
+  void Call(uc16 from, DispatchTable::Entry entry) {
+    if (first_) {
+      first_ = false;
+    } else {
+      stream()->Add("|");
+    }
+    stream()->Add("{\\%k-\\%k|{", from, entry.to());
+    OutSet* out_set = entry.out_set();
+    int priority = 0;
+    for (unsigned i = 0; i < OutSet::kFirstLimit; i++) {
+      if (out_set->Get(i)) {
+        if (priority > 0) stream()->Add("|");
+        stream()->Add("<s%io%i> %i", from, i, priority);
+        priority++;
+      }
+    }
+    stream()->Add("}}");
+  }
+ private:
+  bool first_;
+  StringStream* stream() { return stream_; }
+  StringStream* stream_;
+};
+
+
+class AttributePrinter {
+ public:
+  explicit AttributePrinter(DotPrinter* out)
+      : out_(out), first_(true) { }
+  void PrintSeparator() {
+    if (first_) {
+      first_ = false;
+    } else {
+      out_->stream()->Add("|");
+    }
+  }
+  void PrintBit(const char* name, bool value) {
+    if (!value) return;
+    PrintSeparator();
+    out_->stream()->Add("{%s}", name);
+  }
+  void PrintPositive(const char* name, int value) {
+    if (value < 0) return;
+    PrintSeparator();
+    out_->stream()->Add("{%s|%x}", name, value);
+  }
+ private:
+  DotPrinter* out_;
+  bool first_;
+};
+
+
+void DotPrinter::PrintAttributes(RegExpNode* that) {
+  stream()->Add("  a%p [shape=Mrecord, color=grey, fontcolor=grey, "
+                "margin=0.1, fontsize=10, label=\"{",
+                that);
+  AttributePrinter printer(this);
+  NodeInfo* info = that->info();
+  printer.PrintBit("NI", info->follows_newline_interest);
+  printer.PrintBit("WI", info->follows_word_interest);
+  printer.PrintBit("SI", info->follows_start_interest);
+  Label* label = that->label();
+  if (label->is_bound())
+    printer.PrintPositive("@", label->pos());
+  stream()->Add("}\"];\n");
+  stream()->Add("  a%p -> n%p [style=dashed, color=grey, "
+                "arrowhead=none];\n", that, that);
+}
+
+
+static const bool kPrintDispatchTable = false;
+void DotPrinter::VisitChoice(ChoiceNode* that) {
+  if (kPrintDispatchTable) {
+    stream()->Add("  n%p [shape=Mrecord, label=\"", that);
+    TableEntryHeaderPrinter header_printer(stream());
+    that->GetTable(ignore_case_)->ForEach(&header_printer);
+    stream()->Add("\"]\n");
+    PrintAttributes(that);
+    TableEntryBodyPrinter body_printer(stream(), that);
+    that->GetTable(ignore_case_)->ForEach(&body_printer);
+  } else {
+    stream()->Add("  n%p [shape=Mrecord, label=\"?\"];\n", that);
+    for (int i = 0; i < that->alternatives()->length(); i++) {
+      GuardedAlternative alt = that->alternatives()->at(i);
+      stream()->Add("  n%p -> n%p;\n", that, alt.node());
+    }
+  }
+  for (int i = 0; i < that->alternatives()->length(); i++) {
+    GuardedAlternative alt = that->alternatives()->at(i);
+    alt.node()->Accept(this);
+  }
+}
+
+
+void DotPrinter::VisitText(TextNode* that) {
+  stream()->Add("  n%p [label=\"", that);
+  for (int i = 0; i < that->elements()->length(); i++) {
+    if (i > 0) stream()->Add(" ");
+    TextElement elm = that->elements()->at(i);
+    switch (elm.type) {
+      case TextElement::ATOM: {
+        stream()->Add("'%w'", elm.data.u_atom->data());
+        break;
+      }
+      case TextElement::CHAR_CLASS: {
+        RegExpCharacterClass* node = elm.data.u_char_class;
+        stream()->Add("[");
+        if (node->is_negated())
+          stream()->Add("^");
+        for (int j = 0; j < node->ranges()->length(); j++) {
+          CharacterRange range = node->ranges()->at(j);
+          stream()->Add("%k-%k", range.from(), range.to());
+        }
+        stream()->Add("]");
+        break;
+      }
+      default:
+        UNREACHABLE();
+    }
+  }
+  stream()->Add("\", shape=box, peripheries=2];\n");
+  PrintAttributes(that);
+  stream()->Add("  n%p -> n%p;\n", that, that->on_success());
+  Visit(that->on_success());
+}
+
+
+void DotPrinter::VisitBackReference(BackReferenceNode* that) {
+  stream()->Add("  n%p [label=\"$%i..$%i\", shape=doubleoctagon];\n",
+                that,
+                that->start_register(),
+                that->end_register());
+  PrintAttributes(that);
+  stream()->Add("  n%p -> n%p;\n", that, that->on_success());
+  Visit(that->on_success());
+}
+
+
+void DotPrinter::VisitEnd(EndNode* that) {
+  stream()->Add("  n%p [style=bold, shape=point];\n", that);
+  PrintAttributes(that);
+}
+
+
+void DotPrinter::VisitAssertion(AssertionNode* that) {
+  stream()->Add("  n%p [", that);
+  switch (that->type()) {
+    case AssertionNode::AT_END:
+      stream()->Add("label=\"$\", shape=septagon");
+      break;
+    case AssertionNode::AT_START:
+      stream()->Add("label=\"^\", shape=septagon");
+      break;
+    case AssertionNode::AT_BOUNDARY:
+      stream()->Add("label=\"\\b\", shape=septagon");
+      break;
+    case AssertionNode::AT_NON_BOUNDARY:
+      stream()->Add("label=\"\\B\", shape=septagon");
+      break;
+    case AssertionNode::AFTER_NEWLINE:
+      stream()->Add("label=\"(?<=\\n)\", shape=septagon");
+      break;
+  }
+  stream()->Add("];\n");
+  PrintAttributes(that);
+  RegExpNode* successor = that->on_success();
+  stream()->Add("  n%p -> n%p;\n", that, successor);
+  Visit(successor);
+}
+
+
+void DotPrinter::VisitAction(ActionNode* that) {
+  stream()->Add("  n%p [", that);
+  switch (that->type_) {
+    case ActionNode::SET_REGISTER:
+      stream()->Add("label=\"$%i:=%i\", shape=octagon",
+                    that->data_.u_store_register.reg,
+                    that->data_.u_store_register.value);
+      break;
+    case ActionNode::INCREMENT_REGISTER:
+      stream()->Add("label=\"$%i++\", shape=octagon",
+                    that->data_.u_increment_register.reg);
+      break;
+    case ActionNode::STORE_POSITION:
+      stream()->Add("label=\"$%i:=$pos\", shape=octagon",
+                    that->data_.u_position_register.reg);
+      break;
+    case ActionNode::BEGIN_SUBMATCH:
+      stream()->Add("label=\"$%i:=$pos,begin\", shape=septagon",
+                    that->data_.u_submatch.current_position_register);
+      break;
+    case ActionNode::POSITIVE_SUBMATCH_SUCCESS:
+      stream()->Add("label=\"escape\", shape=septagon");
+      break;
+    case ActionNode::EMPTY_MATCH_CHECK:
+      stream()->Add("label=\"$%i=$pos?,$%i<%i?\", shape=septagon",
+                    that->data_.u_empty_match_check.start_register,
+                    that->data_.u_empty_match_check.repetition_register,
+                    that->data_.u_empty_match_check.repetition_limit);
+      break;
+    case ActionNode::CLEAR_CAPTURES: {
+      stream()->Add("label=\"clear $%i to $%i\", shape=septagon",
+                    that->data_.u_clear_captures.range_from,
+                    that->data_.u_clear_captures.range_to);
+      break;
+    }
+  }
+  stream()->Add("];\n");
+  PrintAttributes(that);
+  RegExpNode* successor = that->on_success();
+  stream()->Add("  n%p -> n%p;\n", that, successor);
+  Visit(successor);
+}
+
+
+class DispatchTableDumper {
+ public:
+  explicit DispatchTableDumper(StringStream* stream) : stream_(stream) { }
+  void Call(uc16 key, DispatchTable::Entry entry);
+  StringStream* stream() { return stream_; }
+ private:
+  StringStream* stream_;
+};
+
+
+void DispatchTableDumper::Call(uc16 key, DispatchTable::Entry entry) {
+  stream()->Add("[%k-%k]: {", key, entry.to());
+  OutSet* set = entry.out_set();
+  bool first = true;
+  for (unsigned i = 0; i < OutSet::kFirstLimit; i++) {
+    if (set->Get(i)) {
+      if (first) {
+        first = false;
+      } else {
+        stream()->Add(", ");
+      }
+      stream()->Add("%i", i);
+    }
+  }
+  stream()->Add("}\n");
+}
+
+
+void DispatchTable::Dump() {
+  HeapStringAllocator alloc;
+  StringStream stream(&alloc);
+  DispatchTableDumper dumper(&stream);
+  tree()->ForEach(&dumper);
+  OS::PrintError("%s", *stream.ToCString());
+}
+
+
+void RegExpEngine::DotPrint(const char* label,
+                            RegExpNode* node,
+                            bool ignore_case) {
+  DotPrinter printer(ignore_case);
+  printer.PrintNode(label, node);
+}
+
+
+#endif  // DEBUG
+
+
+// -------------------------------------------------------------------
+// Tree to graph conversion
+
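+// The standard character classes below are stored as sorted lists of
+// inclusive ranges, written as alternating from/to code unit values.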
+static const int kSpaceRangeCount = 20;
+static const int kSpaceRangeAsciiCount = 4;
+static const uc16 kSpaceRanges[kSpaceRangeCount] = { 0x0009, 0x000D, 0x0020,
+    0x0020, 0x00A0, 0x00A0, 0x1680, 0x1680, 0x180E, 0x180E, 0x2000, 0x200A,
+    0x2028, 0x2029, 0x202F, 0x202F, 0x205F, 0x205F, 0x3000, 0x3000 };
+
+static const int kWordRangeCount = 8;
+static const uc16 kWordRanges[kWordRangeCount] = { '0', '9', 'A', 'Z', '_',
+    '_', 'a', 'z' };
+
+static const int kDigitRangeCount = 2;
+static const uc16 kDigitRanges[kDigitRangeCount] = { '0', '9' };
+
+static const int kLineTerminatorRangeCount = 6;
+static const uc16 kLineTerminatorRanges[kLineTerminatorRangeCount] = { 0x000A,
+    0x000A, 0x000D, 0x000D, 0x2028, 0x2029 };
+
+RegExpNode* RegExpAtom::ToNode(RegExpCompiler* compiler,
+                               RegExpNode* on_success) {
+  ZoneList<TextElement>* elms = new ZoneList<TextElement>(1);
+  elms->Add(TextElement::Atom(this));
+  return new TextNode(elms, on_success);
+}
+
+
+RegExpNode* RegExpText::ToNode(RegExpCompiler* compiler,
+                               RegExpNode* on_success) {
+  return new TextNode(elements(), on_success);
+}
+
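+
+// Returns whether the given list of ranges is exactly the complement of the
+// special character class, i.e. it covers every code unit from 0 to 0xffff
+// except the intervals listed in special_class.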
+static bool CompareInverseRanges(ZoneList<CharacterRange>* ranges,
+                                 const uc16* special_class,
+                                 int length) {
+  ASSERT(ranges->length() != 0);
+  ASSERT(length != 0);
+  ASSERT(special_class[0] != 0);
+  if (ranges->length() != (length >> 1) + 1) {
+    return false;
+  }
+  CharacterRange range = ranges->at(0);
+  if (range.from() != 0) {
+    return false;
+  }
+  for (int i = 0; i < length; i += 2) {
+    if (special_class[i] != (range.to() + 1)) {
+      return false;
+    }
+    range = ranges->at((i >> 1) + 1);
+    if (special_class[i+1] != range.from() - 1) {
+      return false;
+    }
+  }
+  if (range.to() != 0xffff) {
+    return false;
+  }
+  return true;
+}
+
+
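+// Returns whether the given list of ranges is exactly the set of intervals
+// listed in special_class.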
+static bool CompareRanges(ZoneList<CharacterRange>* ranges,
+                          const uc16* special_class,
+                          int length) {
+  if (ranges->length() * 2 != length) {
+    return false;
+  }
+  for (int i = 0; i < length; i += 2) {
+    CharacterRange range = ranges->at(i >> 1);
+    if (range.from() != special_class[i] || range.to() != special_class[i+1]) {
+      return false;
+    }
+  }
+  return true;
+}
+
+
+bool RegExpCharacterClass::is_standard() {
+  // TODO(lrn): Remove need for this function, by not throwing away information
+  // along the way.
+  if (is_negated_) {
+    return false;
+  }
+  if (set_.is_standard()) {
+    return true;
+  }
+  if (CompareRanges(set_.ranges(), kSpaceRanges, kSpaceRangeCount)) {
+    set_.set_standard_set_type('s');
+    return true;
+  }
+  if (CompareInverseRanges(set_.ranges(), kSpaceRanges, kSpaceRangeCount)) {
+    set_.set_standard_set_type('S');
+    return true;
+  }
+  if (CompareInverseRanges(set_.ranges(),
+                           kLineTerminatorRanges,
+                           kLineTerminatorRangeCount)) {
+    set_.set_standard_set_type('.');
+    return true;
+  }
+  return false;
+}
+
+
+RegExpNode* RegExpCharacterClass::ToNode(RegExpCompiler* compiler,
+                                         RegExpNode* on_success) {
+  return new TextNode(this, on_success);
+}
+
+
+RegExpNode* RegExpDisjunction::ToNode(RegExpCompiler* compiler,
+                                      RegExpNode* on_success) {
+  ZoneList<RegExpTree*>* alternatives = this->alternatives();
+  int length = alternatives->length();
+  ChoiceNode* result = new ChoiceNode(length);
+  for (int i = 0; i < length; i++) {
+    GuardedAlternative alternative(alternatives->at(i)->ToNode(compiler,
+                                                               on_success));
+    result->AddAlternative(alternative);
+  }
+  return result;
+}
+
+
+RegExpNode* RegExpQuantifier::ToNode(RegExpCompiler* compiler,
+                                     RegExpNode* on_success) {
+  return ToNode(min(),
+                max(),
+                is_greedy(),
+                body(),
+                compiler,
+                on_success);
+}
+
+
+RegExpNode* RegExpQuantifier::ToNode(int min,
+                                     int max,
+                                     bool is_greedy,
+                                     RegExpTree* body,
+                                     RegExpCompiler* compiler,
+                                     RegExpNode* on_success,
+                                     bool not_at_start) {
+  // x{f, t} becomes this:
+  //
+  //             (r++)<-.
+  //               |     `
+  //               |     (x)
+  //               v     ^
+  //      (r=0)-->(?)---/ [if r < t]
+  //               |
+  //   [if r >= f] \----> ...
+  //
+
+  // 15.10.2.5 RepeatMatcher algorithm.
+  // The parser has already eliminated the case where max is 0.  In the case
+  // where max_match is zero the parser has removed the quantifier if min was
+  // > 0 and removed the atom if min was 0.  See AddQuantifierToAtom.
+
+  // If we know that we cannot match zero length then things are a little
+  // simpler since we don't need to make the special zero length match check
+  // from step 2.1.  If the min and max are small we can unroll a little in
+  // this case.
+  static const int kMaxUnrolledMinMatches = 3;  // Unroll (foo)+ and (foo){3,}
+  static const int kMaxUnrolledMaxMatches = 3;  // Unroll (foo)? and (foo){x,3}
+  if (max == 0) return on_success;  // This can happen due to recursion.
+  bool body_can_be_empty = (body->min_match() == 0);
+  int body_start_reg = RegExpCompiler::kNoRegister;
+  Interval capture_registers = body->CaptureRegisters();
+  bool needs_capture_clearing = !capture_registers.is_empty();
+  if (body_can_be_empty) {
+    body_start_reg = compiler->AllocateRegister();
+  } else if (FLAG_regexp_optimization && !needs_capture_clearing) {
+    // Only unroll if there are no captures and the body can't be
+    // empty.
+    if (min > 0 && min <= kMaxUnrolledMinMatches) {
+      int new_max = (max == kInfinity) ? max : max - min;
+      // Recurse once to get the loop or optional matches after the fixed ones.
+      RegExpNode* answer = ToNode(
+          0, new_max, is_greedy, body, compiler, on_success, true);
+      // Unroll the forced matches from 0 to min.  This can cause chains of
+      // TextNodes (which the parser does not generate).  These should be
+      // combined if it turns out they hinder good code generation.
+      for (int i = 0; i < min; i++) {
+        answer = body->ToNode(compiler, answer);
+      }
+      return answer;
+    }
+    if (max <= kMaxUnrolledMaxMatches) {
+      ASSERT(min == 0);
+      // Unroll the optional matches up to max.
+      RegExpNode* answer = on_success;
+      for (int i = 0; i < max; i++) {
+        ChoiceNode* alternation = new ChoiceNode(2);
+        if (is_greedy) {
+          alternation->AddAlternative(GuardedAlternative(body->ToNode(compiler,
+                                                                      answer)));
+          alternation->AddAlternative(GuardedAlternative(on_success));
+        } else {
+          alternation->AddAlternative(GuardedAlternative(on_success));
+          alternation->AddAlternative(GuardedAlternative(body->ToNode(compiler,
+                                                                      answer)));
+        }
+        answer = alternation;
+        if (not_at_start) alternation->set_not_at_start();
+      }
+      return answer;
+    }
+  }
+  bool has_min = min > 0;
+  bool has_max = max < RegExpTree::kInfinity;
+  bool needs_counter = has_min || has_max;
+  int reg_ctr = needs_counter
+      ? compiler->AllocateRegister()
+      : RegExpCompiler::kNoRegister;
+  LoopChoiceNode* center = new LoopChoiceNode(body->min_match() == 0);
+  if (not_at_start) center->set_not_at_start();
+  RegExpNode* loop_return = needs_counter
+      ? static_cast<RegExpNode*>(ActionNode::IncrementRegister(reg_ctr, center))
+      : static_cast<RegExpNode*>(center);
+  if (body_can_be_empty) {
+    // If the body can be empty we need to check if it was and then
+    // backtrack.
+    loop_return = ActionNode::EmptyMatchCheck(body_start_reg,
+                                              reg_ctr,
+                                              min,
+                                              loop_return);
+  }
+  RegExpNode* body_node = body->ToNode(compiler, loop_return);
+  if (body_can_be_empty) {
+    // If the body can be empty we need to store the start position
+    // so we can bail out if it was empty.
+    body_node = ActionNode::StorePosition(body_start_reg, false, body_node);
+  }
+  if (needs_capture_clearing) {
+    // Before entering the body of this loop we need to clear captures.
+    body_node = ActionNode::ClearCaptures(capture_registers, body_node);
+  }
+  GuardedAlternative body_alt(body_node);
+  if (has_max) {
+    Guard* body_guard = new Guard(reg_ctr, Guard::LT, max);
+    body_alt.AddGuard(body_guard);
+  }
+  GuardedAlternative rest_alt(on_success);
+  if (has_min) {
+    Guard* rest_guard = new Guard(reg_ctr, Guard::GEQ, min);
+    rest_alt.AddGuard(rest_guard);
+  }
+  if (is_greedy) {
+    center->AddLoopAlternative(body_alt);
+    center->AddContinueAlternative(rest_alt);
+  } else {
+    center->AddContinueAlternative(rest_alt);
+    center->AddLoopAlternative(body_alt);
+  }
+  if (needs_counter) {
+    return ActionNode::SetRegister(reg_ctr, 0, center);
+  } else {
+    return center;
+  }
+}
+
+
+RegExpNode* RegExpAssertion::ToNode(RegExpCompiler* compiler,
+                                    RegExpNode* on_success) {
+  NodeInfo info;
+  switch (type()) {
+    case START_OF_LINE:
+      return AssertionNode::AfterNewline(on_success);
+    case START_OF_INPUT:
+      return AssertionNode::AtStart(on_success);
+    case BOUNDARY:
+      return AssertionNode::AtBoundary(on_success);
+    case NON_BOUNDARY:
+      return AssertionNode::AtNonBoundary(on_success);
+    case END_OF_INPUT:
+      return AssertionNode::AtEnd(on_success);
+    case END_OF_LINE: {
+      // Compile $ in multiline regexps as an alternation with a positive
+      // lookahead for a newline on one side and an end-of-input check on the
+      // other side.
+      // We need two registers for the lookahead.
+      int stack_pointer_register = compiler->AllocateRegister();
+      int position_register = compiler->AllocateRegister();
+      // The ChoiceNode to distinguish between a newline and end-of-input.
+      ChoiceNode* result = new ChoiceNode(2);
+      // Create a newline atom.
+      ZoneList<CharacterRange>* newline_ranges =
+          new ZoneList<CharacterRange>(3);
+      CharacterRange::AddClassEscape('n', newline_ranges);
+      RegExpCharacterClass* newline_atom = new RegExpCharacterClass('n');
+      TextNode* newline_matcher = new TextNode(
+         newline_atom,
+         ActionNode::PositiveSubmatchSuccess(stack_pointer_register,
+                                             position_register,
+                                             0,  // No captures inside.
+                                             -1,  // Ignored if no captures.
+                                             on_success));
+      // Create an end-of-input matcher.
+      RegExpNode* end_of_line = ActionNode::BeginSubmatch(
+          stack_pointer_register,
+          position_register,
+          newline_matcher);
+      // Add the two alternatives to the ChoiceNode.
+      GuardedAlternative eol_alternative(end_of_line);
+      result->AddAlternative(eol_alternative);
+      GuardedAlternative end_alternative(AssertionNode::AtEnd(on_success));
+      result->AddAlternative(end_alternative);
+      return result;
+    }
+    default:
+      UNREACHABLE();
+  }
+  return on_success;
+}
+
+
+RegExpNode* RegExpBackReference::ToNode(RegExpCompiler* compiler,
+                                        RegExpNode* on_success) {
+  return new BackReferenceNode(RegExpCapture::StartRegister(index()),
+                               RegExpCapture::EndRegister(index()),
+                               on_success);
+}
+
+
+RegExpNode* RegExpEmpty::ToNode(RegExpCompiler* compiler,
+                                RegExpNode* on_success) {
+  return on_success;
+}
+
+
+RegExpNode* RegExpLookahead::ToNode(RegExpCompiler* compiler,
+                                    RegExpNode* on_success) {
+  int stack_pointer_register = compiler->AllocateRegister();
+  int position_register = compiler->AllocateRegister();
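+  // BeginSubmatch saves the current position and the backtrack stack pointer
+  // in these two registers so they can be restored when the submatch is done,
+  // making the lookahead itself consume no input.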
+
+  const int registers_per_capture = 2;
+  const int register_of_first_capture = 2;
+  int register_count = capture_count_ * registers_per_capture;
+  int register_start =
+    register_of_first_capture + capture_from_ * registers_per_capture;
+
+  RegExpNode* success;
+  if (is_positive()) {
+    RegExpNode* node = ActionNode::BeginSubmatch(
+        stack_pointer_register,
+        position_register,
+        body()->ToNode(
+            compiler,
+            ActionNode::PositiveSubmatchSuccess(stack_pointer_register,
+                                                position_register,
+                                                register_count,
+                                                register_start,
+                                                on_success)));
+    return node;
+  } else {
+    // We use a ChoiceNode for a negative lookahead because it has most of
+    // the characteristics we need.  It has the body of the lookahead as its
+    // first alternative and the expression after the lookahead as its second
+    // alternative.  If the first alternative succeeds then the
+    // NegativeSubmatchSuccess will unwind the stack including everything the
+    // choice node set up and backtrack.  If the first alternative fails then
+    // the second alternative is tried, which is exactly the desired result
+    // for a negative lookahead.  The NegativeLookaheadChoiceNode is a special
+    // ChoiceNode that knows to ignore the first exit when calculating quick
+    // checks.
+    GuardedAlternative body_alt(
+        body()->ToNode(
+            compiler,
+            success = new NegativeSubmatchSuccess(stack_pointer_register,
+                                                  position_register,
+                                                  register_count,
+                                                  register_start)));
+    ChoiceNode* choice_node =
+        new NegativeLookaheadChoiceNode(body_alt,
+                                        GuardedAlternative(on_success));
+    return ActionNode::BeginSubmatch(stack_pointer_register,
+                                     position_register,
+                                     choice_node);
+  }
+}
+
+
+RegExpNode* RegExpCapture::ToNode(RegExpCompiler* compiler,
+                                  RegExpNode* on_success) {
+  return ToNode(body(), index(), compiler, on_success);
+}
+
+
+RegExpNode* RegExpCapture::ToNode(RegExpTree* body,
+                                  int index,
+                                  RegExpCompiler* compiler,
+                                  RegExpNode* on_success) {
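+  // Capture |index| records its bounds in the registers given by
+  // StartRegister(index) and EndRegister(index) (2 * index and 2 * index + 1);
+  // capture 0 is the implicit capture around the whole pattern added in
+  // RegExpEngine::Compile.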
+  int start_reg = RegExpCapture::StartRegister(index);
+  int end_reg = RegExpCapture::EndRegister(index);
+  RegExpNode* store_end = ActionNode::StorePosition(end_reg, true, on_success);
+  RegExpNode* body_node = body->ToNode(compiler, store_end);
+  return ActionNode::StorePosition(start_reg, true, body_node);
+}
+
+
+RegExpNode* RegExpAlternative::ToNode(RegExpCompiler* compiler,
+                                      RegExpNode* on_success) {
+  ZoneList<RegExpTree*>* children = nodes();
+  RegExpNode* current = on_success;
+  for (int i = children->length() - 1; i >= 0; i--) {
+    current = children->at(i)->ToNode(compiler, current);
+  }
+  return current;
+}
+
+
+static void AddClass(const uc16* elmv,
+                     int elmc,
+                     ZoneList<CharacterRange>* ranges) {
+  for (int i = 0; i < elmc; i += 2) {
+    ASSERT(elmv[i] <= elmv[i + 1]);
+    ranges->Add(CharacterRange(elmv[i], elmv[i + 1]));
+  }
+}
+
+
+static void AddClassNegated(const uc16 *elmv,
+                            int elmc,
+                            ZoneList<CharacterRange>* ranges) {
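+  // Add the complement by emitting the gaps between the given ranges, e.g.
+  // negating \d ([0x30-0x39]) yields [0x0000-0x002F] and [0x003A-0xFFFF].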
+  ASSERT(elmv[0] != 0x0000);
+  ASSERT(elmv[elmc-1] != String::kMaxUC16CharCode);
+  uc16 last = 0x0000;
+  for (int i = 0; i < elmc; i += 2) {
+    ASSERT(last <= elmv[i] - 1);
+    ASSERT(elmv[i] <= elmv[i + 1]);
+    ranges->Add(CharacterRange(last, elmv[i] - 1));
+    last = elmv[i + 1] + 1;
+  }
+  ranges->Add(CharacterRange(last, String::kMaxUC16CharCode));
+}
+
+
+void CharacterRange::AddClassEscape(uc16 type,
+                                    ZoneList<CharacterRange>* ranges) {
+  switch (type) {
+    case 's':
+      AddClass(kSpaceRanges, kSpaceRangeCount, ranges);
+      break;
+    case 'S':
+      AddClassNegated(kSpaceRanges, kSpaceRangeCount, ranges);
+      break;
+    case 'w':
+      AddClass(kWordRanges, kWordRangeCount, ranges);
+      break;
+    case 'W':
+      AddClassNegated(kWordRanges, kWordRangeCount, ranges);
+      break;
+    case 'd':
+      AddClass(kDigitRanges, kDigitRangeCount, ranges);
+      break;
+    case 'D':
+      AddClassNegated(kDigitRanges, kDigitRangeCount, ranges);
+      break;
+    case '.':
+      AddClassNegated(kLineTerminatorRanges,
+                      kLineTerminatorRangeCount,
+                      ranges);
+      break;
+    // This is not a character range as defined by the spec but a
+    // convenient shorthand for a character class that matches any
+    // character.
+    case '*':
+      ranges->Add(CharacterRange::Everything());
+      break;
+    // This is the set of characters matched by the $ and ^ symbols
+    // in multiline mode.
+    case 'n':
+      AddClass(kLineTerminatorRanges,
+               kLineTerminatorRangeCount,
+               ranges);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+Vector<const uc16> CharacterRange::GetWordBounds() {
+  return Vector<const uc16>(kWordRanges, kWordRangeCount);
+}
+
+
+class CharacterRangeSplitter {
+ public:
+  CharacterRangeSplitter(ZoneList<CharacterRange>** included,
+                          ZoneList<CharacterRange>** excluded)
+      : included_(included),
+        excluded_(excluded) { }
+  void Call(uc16 from, DispatchTable::Entry entry);
+
+  static const int kInBase = 0;
+  static const int kInOverlay = 1;
+
+ private:
+  ZoneList<CharacterRange>** included_;
+  ZoneList<CharacterRange>** excluded_;
+};
+
+
+void CharacterRangeSplitter::Call(uc16 from, DispatchTable::Entry entry) {
+  if (!entry.out_set()->Get(kInBase)) return;
+  ZoneList<CharacterRange>** target = entry.out_set()->Get(kInOverlay)
+    ? included_
+    : excluded_;
+  if (*target == NULL) *target = new ZoneList<CharacterRange>(2);
+  (*target)->Add(CharacterRange(entry.from(), entry.to()));
+}
+
+
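+// Splits the ranges in |base| around |overlay|: the parts of |base| that fall
+// inside |overlay| end up in |included|, the remaining parts in |excluded|.
+// Characters that are only in |overlay| are dropped.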
+void CharacterRange::Split(ZoneList<CharacterRange>* base,
+                           Vector<const uc16> overlay,
+                           ZoneList<CharacterRange>** included,
+                           ZoneList<CharacterRange>** excluded) {
+  ASSERT_EQ(NULL, *included);
+  ASSERT_EQ(NULL, *excluded);
+  DispatchTable table;
+  for (int i = 0; i < base->length(); i++)
+    table.AddRange(base->at(i), CharacterRangeSplitter::kInBase);
+  for (int i = 0; i < overlay.length(); i += 2) {
+    table.AddRange(CharacterRange(overlay[i], overlay[i+1]),
+                   CharacterRangeSplitter::kInOverlay);
+  }
+  CharacterRangeSplitter callback(included, excluded);
+  table.ForEach(&callback);
+}
+
+
+void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges) {
+  unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
+  if (IsSingleton()) {
+    // If this is a singleton we just expand the one character.
+    int length = uncanonicalize.get(from(), '\0', chars);
+    for (int i = 0; i < length; i++) {
+      uc32 chr = chars[i];
+      if (chr != from()) {
+        ranges->Add(CharacterRange::Singleton(chars[i]));
+      }
+    }
+  } else if (from() <= kRangeCanonicalizeMax &&
+             to() <= kRangeCanonicalizeMax) {
+    // If this is a range we expand the characters block by block,
+    // expanding contiguous subranges (blocks) one at a time.
+    // The approach is as follows.  For a given start character we
+    // look up the block that contains it, for instance 'a' if the
+    // start character is 'c'.  A block is characterized by the property
+    // that all characters uncanonicalize in the same way as the first
+    // element, except that each entry in the result is incremented
+    // by the distance from the first element.  So a-z is a block
+    // because 'a' uncanonicalizes to ['a', 'A'] and the k'th letter
+    // uncanonicalizes to ['a' + k, 'A' + k].
+    // Once we've found the start point we look up its uncanonicalization
+    // and produce a range for each element.  For instance for [c-f]
+    // we look up ['a', 'A'] and produce [c-f] and [C-F].  We then only
+    // add a range if it is not already contained in the input, so [c-f]
+    // will be skipped but [C-F] will be added.  If this range is not
+    // completely contained in a block we do this for all the blocks
+    // covered by the range.
+    unibrow::uchar range[unibrow::Ecma262UnCanonicalize::kMaxWidth];
+    // First, look up the block that contains the 'from' character.
+    int length = canonrange.get(from(), '\0', range);
+    if (length == 0) {
+      range[0] = from();
+    } else {
+      ASSERT_EQ(1, length);
+    }
+    int pos = from();
+    // The start of the current block.  Note that except for the first
+    // iteration 'start' is always equal to 'pos'.
+    int start;
+    // If it is not the start point of a block the entry contains the
+    // offset of the character from the start point.
+    if ((range[0] & kStartMarker) == 0) {
+      start = pos - range[0];
+    } else {
+      start = pos;
+    }
+    // Then we add the ranges one at a time, incrementing the current
+    // position to be after the last block each time.  The position
+    // always points to the start of a block.
+    while (pos < to()) {
+      length = canonrange.get(start, '\0', range);
+      if (length == 0) {
+        range[0] = start;
+      } else {
+        ASSERT_EQ(1, length);
+      }
+      ASSERT((range[0] & kStartMarker) != 0);
+      // The start point of a block contains the distance to the end
+      // of the range.
+      int block_end = start + (range[0] & kPayloadMask) - 1;
+      int end = (block_end > to()) ? to() : block_end;
+      length = uncanonicalize.get(start, '\0', range);
+      for (int i = 0; i < length; i++) {
+        uc32 c = range[i];
+        uc16 range_from = c + (pos - start);
+        uc16 range_to = c + (end - start);
+        if (!(from() <= range_from && range_to <= to())) {
+          ranges->Add(CharacterRange(range_from, range_to));
+        }
+      }
+      start = pos = block_end + 1;
+    }
+  } else {
+    // TODO(plesner) when we've fixed the 2^11 bug in unibrow.
+  }
+}
+
+
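+// Returns the ranges, lazily materializing them from the standard class
+// escape (e.g. \d or \w) if this set was created from one.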
+ZoneList<CharacterRange>* CharacterSet::ranges() {
+  if (ranges_ == NULL) {
+    ranges_ = new ZoneList<CharacterRange>(2);
+    CharacterRange::AddClassEscape(standard_set_type_, ranges_);
+  }
+  return ranges_;
+}
+
+
+
+// -------------------------------------------------------------------
+// Interest propagation
+
+
+RegExpNode* RegExpNode::TryGetSibling(NodeInfo* info) {
+  for (int i = 0; i < siblings_.length(); i++) {
+    RegExpNode* sibling = siblings_.Get(i);
+    if (sibling->info()->Matches(info))
+      return sibling;
+  }
+  return NULL;
+}
+
+
+RegExpNode* RegExpNode::EnsureSibling(NodeInfo* info, bool* cloned) {
+  ASSERT_EQ(false, *cloned);
+  siblings_.Ensure(this);
+  RegExpNode* result = TryGetSibling(info);
+  if (result != NULL) return result;
+  result = this->Clone();
+  NodeInfo* new_info = result->info();
+  new_info->ResetCompilationState();
+  new_info->AddFromPreceding(info);
+  AddSibling(result);
+  *cloned = true;
+  return result;
+}
+
+
+template <class C>
+static RegExpNode* PropagateToEndpoint(C* node, NodeInfo* info) {
+  NodeInfo full_info(*node->info());
+  full_info.AddFromPreceding(info);
+  bool cloned = false;
+  return RegExpNode::EnsureSibling(node, &full_info, &cloned);
+}
+
+
+// -------------------------------------------------------------------
+// Splay tree
+
+
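+// Extend returns a set holding this set's values plus |value|.  The result is
+// remembered in successors() so that extending the same set with the same
+// value always yields the same OutSet instance.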
+OutSet* OutSet::Extend(unsigned value) {
+  if (Get(value))
+    return this;
+  if (successors() != NULL) {
+    for (int i = 0; i < successors()->length(); i++) {
+      OutSet* successor = successors()->at(i);
+      if (successor->Get(value))
+        return successor;
+    }
+  } else {
+    successors_ = new ZoneList<OutSet*>(2);
+  }
+  OutSet* result = new OutSet(first_, remaining_);
+  result->Set(value);
+  successors()->Add(result);
+  return result;
+}
+
+
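+// Values below kFirstLimit are stored as bits in first_; larger values go
+// into the zone-allocated remaining_ list.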
+void OutSet::Set(unsigned value) {
+  if (value < kFirstLimit) {
+    first_ |= (1 << value);
+  } else {
+    if (remaining_ == NULL)
+      remaining_ = new ZoneList<unsigned>(1);
+    if (remaining_->is_empty() || !remaining_->Contains(value))
+      remaining_->Add(value);
+  }
+}
+
+
+bool OutSet::Get(unsigned value) {
+  if (value < kFirstLimit) {
+    return (first_ & (1 << value)) != 0;
+  } else if (remaining_ == NULL) {
+    return false;
+  } else {
+    return remaining_->Contains(value);
+  }
+}
+
+
+const uc16 DispatchTable::Config::kNoKey = unibrow::Utf8::kBadChar;
+const DispatchTable::Entry DispatchTable::Config::kNoValue;
+
+
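+// Adds |value| to the out-set of every character in |full_range|, splitting
+// existing entries where they only partially overlap the new range.  For
+// example, adding [a-z] -> 1 to a table holding [m-p] -> {0} leaves
+// [a-l] -> {1}, [m-p] -> {0, 1} and [q-z] -> {1}.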
+void DispatchTable::AddRange(CharacterRange full_range, int value) {
+  CharacterRange current = full_range;
+  if (tree()->is_empty()) {
+    // If this is the first range we just insert into the table.
+    ZoneSplayTree<Config>::Locator loc;
+    ASSERT_RESULT(tree()->Insert(current.from(), &loc));
+    loc.set_value(Entry(current.from(), current.to(), empty()->Extend(value)));
+    return;
+  }
+  // First see if there is a range to the left of this one that
+  // overlaps.
+  ZoneSplayTree<Config>::Locator loc;
+  if (tree()->FindGreatestLessThan(current.from(), &loc)) {
+    Entry* entry = &loc.value();
+    // If we've found a range that overlaps with this one, and it
+    // starts strictly to the left of this one, we have to fix it
+    // because the following code only handles ranges that start on
+    // or after the start point of the range we're adding.
+    if (entry->from() < current.from() && entry->to() >= current.from()) {
+      // Snap the overlapping range in half around the start point of
+      // the range we're adding.
+      CharacterRange left(entry->from(), current.from() - 1);
+      CharacterRange right(current.from(), entry->to());
+      // The left part of the overlapping range doesn't overlap.
+      // Truncate the whole entry to be just the left part.
+      entry->set_to(left.to());
+      // The right part is the one that overlaps.  We add this part
+      // to the map and let the next step deal with merging it with
+      // the range we're adding.
+      ZoneSplayTree<Config>::Locator loc;
+      ASSERT_RESULT(tree()->Insert(right.from(), &loc));
+      loc.set_value(Entry(right.from(),
+                          right.to(),
+                          entry->out_set()));
+    }
+  }
+  while (current.is_valid()) {
+    if (tree()->FindLeastGreaterThan(current.from(), &loc) &&
+        (loc.value().from() <= current.to()) &&
+        (loc.value().to() >= current.from())) {
+      Entry* entry = &loc.value();
+      // We have overlap.  If there is space between the start point of
+      // the range we're adding and where the overlapping range starts
+      // then we have to add a range covering just that space.
+      if (current.from() < entry->from()) {
+        ZoneSplayTree<Config>::Locator ins;
+        ASSERT_RESULT(tree()->Insert(current.from(), &ins));
+        ins.set_value(Entry(current.from(),
+                            entry->from() - 1,
+                            empty()->Extend(value)));
+        current.set_from(entry->from());
+      }
+      ASSERT_EQ(current.from(), entry->from());
+      // If the overlapping range extends beyond the one we want to add
+      // we have to snap the right part off and add it separately.
+      if (entry->to() > current.to()) {
+        ZoneSplayTree<Config>::Locator ins;
+        ASSERT_RESULT(tree()->Insert(current.to() + 1, &ins));
+        ins.set_value(Entry(current.to() + 1,
+                            entry->to(),
+                            entry->out_set()));
+        entry->set_to(current.to());
+      }
+      ASSERT(entry->to() <= current.to());
+      // The overlapping range is now completely contained by the range
+      // we're adding so we can just update it and move the start point
+      // of the range we're adding just past it.
+      entry->AddValue(value);
+      // Bail out if the last interval ended at 0xFFFF since otherwise
+      // adding 1 will wrap around to 0.
+      if (entry->to() == String::kMaxUC16CharCode)
+        break;
+      ASSERT(entry->to() + 1 > current.from());
+      current.set_from(entry->to() + 1);
+    } else {
+      // There is no overlap so we can just add the range
+      ZoneSplayTree<Config>::Locator ins;
+      ASSERT_RESULT(tree()->Insert(current.from(), &ins));
+      ins.set_value(Entry(current.from(),
+                          current.to(),
+                          empty()->Extend(value)));
+      break;
+    }
+  }
+}
+
+
+OutSet* DispatchTable::Get(uc16 value) {
+  ZoneSplayTree<Config>::Locator loc;
+  if (!tree()->FindGreatestLessThan(value, &loc))
+    return empty();
+  Entry* entry = &loc.value();
+  if (value <= entry->to())
+    return entry->out_set();
+  else
+    return empty();
+}
+
+
+// -------------------------------------------------------------------
+// Analysis
+
+
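+// Analyzes a node and everything reachable from it exactly once.  The
+// being_analyzed flag keeps the recursion from following the back edge of a
+// LoopChoiceNode forever.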
+void Analysis::EnsureAnalyzed(RegExpNode* that) {
+  StackLimitCheck check;
+  if (check.HasOverflowed()) {
+    fail("Stack overflow");
+    return;
+  }
+  if (that->info()->been_analyzed || that->info()->being_analyzed)
+    return;
+  that->info()->being_analyzed = true;
+  that->Accept(this);
+  that->info()->being_analyzed = false;
+  that->info()->been_analyzed = true;
+}
+
+
+void Analysis::VisitEnd(EndNode* that) {
+  // nothing to do
+}
+
+
+void TextNode::CalculateOffsets() {
+  int element_count = elements()->length();
+  // Set up the offsets of the elements relative to the start.  This is a fixed
+  // quantity since a TextNode can only contain fixed-width things.
+  int cp_offset = 0;
+  for (int i = 0; i < element_count; i++) {
+    TextElement& elm = elements()->at(i);
+    elm.cp_offset = cp_offset;
+    if (elm.type == TextElement::ATOM) {
+      cp_offset += elm.data.u_atom->data().length();
+    } else {
+      cp_offset++;
+    }
+  }
+}
+
+
+void Analysis::VisitText(TextNode* that) {
+  if (ignore_case_) {
+    that->MakeCaseIndependent();
+  }
+  EnsureAnalyzed(that->on_success());
+  if (!has_failed()) {
+    that->CalculateOffsets();
+  }
+}
+
+
+void Analysis::VisitAction(ActionNode* that) {
+  RegExpNode* target = that->on_success();
+  EnsureAnalyzed(target);
+  if (!has_failed()) {
+    // If the next node is interested in what it follows then this node
+    // has to be interested too so it can pass the information on.
+    that->info()->AddFromFollowing(target->info());
+  }
+}
+
+
+void Analysis::VisitChoice(ChoiceNode* that) {
+  NodeInfo* info = that->info();
+  for (int i = 0; i < that->alternatives()->length(); i++) {
+    RegExpNode* node = that->alternatives()->at(i).node();
+    EnsureAnalyzed(node);
+    if (has_failed()) return;
+    // Anything the following nodes need to know has to be known by
+    // this node also, so it can pass it on.
+    info->AddFromFollowing(node->info());
+  }
+}
+
+
+void Analysis::VisitLoopChoice(LoopChoiceNode* that) {
+  NodeInfo* info = that->info();
+  for (int i = 0; i < that->alternatives()->length(); i++) {
+    RegExpNode* node = that->alternatives()->at(i).node();
+    if (node != that->loop_node()) {
+      EnsureAnalyzed(node);
+      if (has_failed()) return;
+      info->AddFromFollowing(node->info());
+    }
+  }
+  // Check the loop last since it may need the value of this node
+  // to get a correct result.
+  EnsureAnalyzed(that->loop_node());
+  if (!has_failed()) {
+    info->AddFromFollowing(that->loop_node()->info());
+  }
+}
+
+
+void Analysis::VisitBackReference(BackReferenceNode* that) {
+  EnsureAnalyzed(that->on_success());
+}
+
+
+void Analysis::VisitAssertion(AssertionNode* that) {
+  EnsureAnalyzed(that->on_success());
+}
+
+
+// -------------------------------------------------------------------
+// Dispatch table construction
+
+
+void DispatchTableConstructor::VisitEnd(EndNode* that) {
+  AddRange(CharacterRange::Everything());
+}
+
+
+void DispatchTableConstructor::BuildTable(ChoiceNode* node) {
+  node->set_being_calculated(true);
+  ZoneList<GuardedAlternative>* alternatives = node->alternatives();
+  for (int i = 0; i < alternatives->length(); i++) {
+    set_choice_index(i);
+    alternatives->at(i).node()->Accept(this);
+  }
+  node->set_being_calculated(false);
+}
+
+
+class AddDispatchRange {
+ public:
+  explicit AddDispatchRange(DispatchTableConstructor* constructor)
+    : constructor_(constructor) { }
+  void Call(uc32 from, DispatchTable::Entry entry);
+ private:
+  DispatchTableConstructor* constructor_;
+};
+
+
+void AddDispatchRange::Call(uc32 from, DispatchTable::Entry entry) {
+  CharacterRange range(from, entry.to());
+  constructor_->AddRange(range);
+}
+
+
+void DispatchTableConstructor::VisitChoice(ChoiceNode* node) {
+  if (node->being_calculated())
+    return;
+  DispatchTable* table = node->GetTable(ignore_case_);
+  AddDispatchRange adder(this);
+  table->ForEach(&adder);
+}
+
+
+void DispatchTableConstructor::VisitBackReference(BackReferenceNode* that) {
+  // TODO(160): Find the node that we refer back to and propagate its start
+  // set back to here.  For now we just accept anything.
+  AddRange(CharacterRange::Everything());
+}
+
+
+void DispatchTableConstructor::VisitAssertion(AssertionNode* that) {
+  RegExpNode* target = that->on_success();
+  target->Accept(this);
+}
+
+
+
+static int CompareRangeByFrom(const CharacterRange* a,
+                              const CharacterRange* b) {
+  return Compare<uc16>(a->from(), b->from());
+}
+
+
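+// Adds the complement of |ranges| to the table: the ranges are sorted and
+// every character not covered by them is added.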
+void DispatchTableConstructor::AddInverse(ZoneList<CharacterRange>* ranges) {
+  ranges->Sort(CompareRangeByFrom);
+  uc16 last = 0;
+  for (int i = 0; i < ranges->length(); i++) {
+    CharacterRange range = ranges->at(i);
+    if (last < range.from())
+      AddRange(CharacterRange(last, range.from() - 1));
+    if (range.to() >= last) {
+      if (range.to() == String::kMaxUC16CharCode) {
+        return;
+      } else {
+        last = range.to() + 1;
+      }
+    }
+  }
+  AddRange(CharacterRange(last, String::kMaxUC16CharCode));
+}
+
+
+void DispatchTableConstructor::VisitText(TextNode* that) {
+  TextElement elm = that->elements()->at(0);
+  switch (elm.type) {
+    case TextElement::ATOM: {
+      uc16 c = elm.data.u_atom->data()[0];
+      AddRange(CharacterRange(c, c));
+      break;
+    }
+    case TextElement::CHAR_CLASS: {
+      RegExpCharacterClass* tree = elm.data.u_char_class;
+      ZoneList<CharacterRange>* ranges = tree->ranges();
+      if (tree->is_negated()) {
+        AddInverse(ranges);
+      } else {
+        for (int i = 0; i < ranges->length(); i++)
+          AddRange(ranges->at(i));
+      }
+      break;
+    }
+    default: {
+      UNIMPLEMENTED();
+    }
+  }
+}
+
+
+void DispatchTableConstructor::VisitAction(ActionNode* that) {
+  RegExpNode* target = that->on_success();
+  target->Accept(this);
+}
+
+
+RegExpEngine::CompilationResult RegExpEngine::Compile(RegExpCompileData* data,
+                                                      bool ignore_case,
+                                                      bool is_multiline,
+                                                      Handle<String> pattern,
+                                                      bool is_ascii) {
+  if ((data->capture_count + 1) * 2 - 1 > RegExpMacroAssembler::kMaxRegister) {
+    return IrregexpRegExpTooBig();
+  }
+  RegExpCompiler compiler(data->capture_count, ignore_case, is_ascii);
+  // Wrap the body of the regexp in capture #0.
+  RegExpNode* captured_body = RegExpCapture::ToNode(data->tree,
+                                                    0,
+                                                    &compiler,
+                                                    compiler.accept());
+  RegExpNode* node = captured_body;
+  if (!data->tree->IsAnchored()) {
+    // Add a .*? at the beginning, outside the body capture, unless
+    // this expression is anchored at the beginning.
+    RegExpNode* loop_node =
+        RegExpQuantifier::ToNode(0,
+                                 RegExpTree::kInfinity,
+                                 false,
+                                 new RegExpCharacterClass('*'),
+                                 &compiler,
+                                 captured_body,
+                                 data->contains_anchor);
+
+    if (data->contains_anchor) {
+      // Unroll loop once, to take care of the case that might start
+      // at the start of input.
+      ChoiceNode* first_step_node = new ChoiceNode(2);
+      first_step_node->AddAlternative(GuardedAlternative(captured_body));
+      first_step_node->AddAlternative(GuardedAlternative(
+          new TextNode(new RegExpCharacterClass('*'), loop_node)));
+      node = first_step_node;
+    } else {
+      node = loop_node;
+    }
+  }
+  data->node = node;
+  Analysis analysis(ignore_case);
+  analysis.EnsureAnalyzed(node);
+  if (analysis.has_failed()) {
+    const char* error_message = analysis.error_message();
+    return CompilationResult(error_message);
+  }
+
+  NodeInfo info = *node->info();
+
+  // Create the correct assembler for the architecture.
+#ifdef V8_NATIVE_REGEXP
+  // Native regexp implementation.
+
+  NativeRegExpMacroAssembler::Mode mode =
+      is_ascii ? NativeRegExpMacroAssembler::ASCII
+               : NativeRegExpMacroAssembler::UC16;
+
+#if V8_TARGET_ARCH_IA32
+  RegExpMacroAssemblerIA32 macro_assembler(mode, (data->capture_count + 1) * 2);
+#elif V8_TARGET_ARCH_X64
+  RegExpMacroAssemblerX64 macro_assembler(mode, (data->capture_count + 1) * 2);
+#elif V8_TARGET_ARCH_ARM
+  RegExpMacroAssemblerARM macro_assembler(mode, (data->capture_count + 1) * 2);
+#endif
+
+#else  // ! V8_NATIVE_REGEXP
+  // Interpreted regexp implementation.
+  EmbeddedVector<byte, 1024> codes;
+  RegExpMacroAssemblerIrregexp macro_assembler(codes);
+#endif
+
+  return compiler.Assemble(&macro_assembler,
+                           node,
+                           data->capture_count,
+                           pattern);
+}
+
+}}  // namespace v8::internal
diff --git a/src/jsregexp.h b/src/jsregexp.h
new file mode 100644
index 0000000..3bc30b6
--- /dev/null
+++ b/src/jsregexp.h
@@ -0,0 +1,1283 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_JSREGEXP_H_
+#define V8_JSREGEXP_H_
+
+namespace v8 {
+namespace internal {
+
+
+class RegExpMacroAssembler;
+
+
+class RegExpImpl {
+ public:
+  // Whether V8 is compiled with native regexp support or not.
+  static bool UsesNativeRegExp() {
+#ifdef V8_NATIVE_REGEXP
+    return true;
+#else
+    return false;
+#endif
+  }
+
+  // Creates a regular expression literal in the old space.
+  // This function calls the garbage collector if necessary.
+  static Handle<Object> CreateRegExpLiteral(Handle<JSFunction> constructor,
+                                            Handle<String> pattern,
+                                            Handle<String> flags,
+                                            bool* has_pending_exception);
+
+  // Returns a string representation of a regular expression.
+  // Implements RegExp.prototype.toString, see ECMA-262 section 15.10.6.4.
+  // This function calls the garbage collector if necessary.
+  static Handle<String> ToString(Handle<Object> value);
+
+  // Parses the RegExp pattern and prepares the JSRegExp object with
+  // generic data and choice of implementation - as well as what
+  // the implementation wants to store in the data field.
+  // Returns false if compilation fails.
+  static Handle<Object> Compile(Handle<JSRegExp> re,
+                                Handle<String> pattern,
+                                Handle<String> flags);
+
+  // See ECMA-262 section 15.10.6.2.
+  // This function calls the garbage collector if necessary.
+  static Handle<Object> Exec(Handle<JSRegExp> regexp,
+                             Handle<String> subject,
+                             int index,
+                             Handle<JSArray> lastMatchInfo);
+
+  // Call RegExp.prototype.exec(string) in a loop.
+  // Used by String.prototype.match and String.prototype.replace.
+  // This function calls the garbage collector if necessary.
+  static Handle<Object> ExecGlobal(Handle<JSRegExp> regexp,
+                                   Handle<String> subject,
+                                   Handle<JSArray> lastMatchInfo);
+
+  // Prepares a JSRegExp object with Irregexp-specific data.
+  static void IrregexpPrepare(Handle<JSRegExp> re,
+                              Handle<String> pattern,
+                              JSRegExp::Flags flags,
+                              int capture_register_count);
+
+
+  static void AtomCompile(Handle<JSRegExp> re,
+                          Handle<String> pattern,
+                          JSRegExp::Flags flags,
+                          Handle<String> match_pattern);
+
+  static Handle<Object> AtomExec(Handle<JSRegExp> regexp,
+                                 Handle<String> subject,
+                                 int index,
+                                 Handle<JSArray> lastMatchInfo);
+
+  // Execute an Irregexp bytecode pattern.
+  // On a successful match, the result is a JSArray containing
+  // captured positions. On a failure, the result is the null value.
+  // Returns an empty handle in case of an exception.
+  static Handle<Object> IrregexpExec(Handle<JSRegExp> regexp,
+                                     Handle<String> subject,
+                                     int index,
+                                     Handle<JSArray> lastMatchInfo);
+
+  // Offsets in the lastMatchInfo array.
+  static const int kLastCaptureCount = 0;
+  static const int kLastSubject = 1;
+  static const int kLastInput = 2;
+  static const int kFirstCapture = 3;
+  static const int kLastMatchOverhead = 3;
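+  // The array therefore holds the capture register count, the last subject,
+  // the last input, and then the capture registers as (start, end) pairs,
+  // with pair 0 covering the whole match.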
+
+  // Used to access the lastMatchInfo array.
+  static int GetCapture(FixedArray* array, int index) {
+    return Smi::cast(array->get(index + kFirstCapture))->value();
+  }
+
+  static void SetLastCaptureCount(FixedArray* array, int to) {
+    array->set(kLastCaptureCount, Smi::FromInt(to));
+  }
+
+  static void SetLastSubject(FixedArray* array, String* to) {
+    array->set(kLastSubject, to);
+  }
+
+  static void SetLastInput(FixedArray* array, String* to) {
+    array->set(kLastInput, to);
+  }
+
+  static void SetCapture(FixedArray* array, int index, int to) {
+    array->set(index + kFirstCapture, Smi::FromInt(to));
+  }
+
+  static int GetLastCaptureCount(FixedArray* array) {
+    return Smi::cast(array->get(kLastCaptureCount))->value();
+  }
+
+  // For acting on the JSRegExp data FixedArray.
+  static int IrregexpMaxRegisterCount(FixedArray* re);
+  static void SetIrregexpMaxRegisterCount(FixedArray* re, int value);
+  static int IrregexpNumberOfCaptures(FixedArray* re);
+  static int IrregexpNumberOfRegisters(FixedArray* re);
+  static ByteArray* IrregexpByteCode(FixedArray* re, bool is_ascii);
+  static Code* IrregexpNativeCode(FixedArray* re, bool is_ascii);
+
+ private:
+  static String* last_ascii_string_;
+  static String* two_byte_cached_string_;
+
+  static bool CompileIrregexp(Handle<JSRegExp> re, bool is_ascii);
+  static inline bool EnsureCompiledIrregexp(Handle<JSRegExp> re, bool is_ascii);
+
+
+  // Set the subject cache.  The previous string buffer is not deleted, so the
+  // caller should ensure that it doesn't leak.
+  static void SetSubjectCache(String* subject,
+                              char* utf8_subject,
+                              int utf8_length,
+                              int character_position,
+                              int utf8_position);
+
+  // A one element cache of the last utf8_subject string and its length.  The
+  // subject JS String object is cached in the heap.  We also cache a
+  // translation between position and utf8 position.
+  static char* utf8_subject_cache_;
+  static int utf8_length_cache_;
+  static int utf8_position_;
+  static int character_position_;
+};
+
+
+class CharacterRange {
+ public:
+  CharacterRange() : from_(0), to_(0) { }
+  // For compatibility with the CHECK_OK macro
+  CharacterRange(void* null) { ASSERT_EQ(NULL, null); }  //NOLINT
+  CharacterRange(uc16 from, uc16 to) : from_(from), to_(to) { }
+  static void AddClassEscape(uc16 type, ZoneList<CharacterRange>* ranges);
+  static Vector<const uc16> GetWordBounds();
+  static inline CharacterRange Singleton(uc16 value) {
+    return CharacterRange(value, value);
+  }
+  static inline CharacterRange Range(uc16 from, uc16 to) {
+    ASSERT(from <= to);
+    return CharacterRange(from, to);
+  }
+  static inline CharacterRange Everything() {
+    return CharacterRange(0, 0xFFFF);
+  }
+  bool Contains(uc16 i) { return from_ <= i && i <= to_; }
+  uc16 from() const { return from_; }
+  void set_from(uc16 value) { from_ = value; }
+  uc16 to() const { return to_; }
+  void set_to(uc16 value) { to_ = value; }
+  bool is_valid() { return from_ <= to_; }
+  bool IsEverything(uc16 max) { return from_ == 0 && to_ >= max; }
+  bool IsSingleton() { return (from_ == to_); }
+  void AddCaseEquivalents(ZoneList<CharacterRange>* ranges);
+  static void Split(ZoneList<CharacterRange>* base,
+                    Vector<const uc16> overlay,
+                    ZoneList<CharacterRange>** included,
+                    ZoneList<CharacterRange>** excluded);
+
+  static const int kRangeCanonicalizeMax = 0x346;
+  static const int kStartMarker = (1 << 24);
+  static const int kPayloadMask = (1 << 24) - 1;
+
+ private:
+  uc16 from_;
+  uc16 to_;
+};
+
+
+// A set of unsigned integers that behaves especially well on small
+// integers (< 32).  May do zone-allocation.
+class OutSet: public ZoneObject {
+ public:
+  OutSet() : first_(0), remaining_(NULL), successors_(NULL) { }
+  OutSet* Extend(unsigned value);
+  bool Get(unsigned value);
+  static const unsigned kFirstLimit = 32;
+
+ private:
+  // Destructively set a value in this set.  In most cases you want
+  // to use Extend instead to ensure that only one instance exists
+  // that contains the same values.
+  void Set(unsigned value);
+
+  // The successors are a list of sets that contain the same values
+  // as this set plus exactly one additional value that is not
+  // present in this set.
+  ZoneList<OutSet*>* successors() { return successors_; }
+
+  OutSet(uint32_t first, ZoneList<unsigned>* remaining)
+      : first_(first), remaining_(remaining), successors_(NULL) { }
+  uint32_t first_;
+  ZoneList<unsigned>* remaining_;
+  ZoneList<OutSet*>* successors_;
+  friend class Trace;
+};
+
+
+// A mapping from integers, specified as ranges, to a set of integers.
+// Used for mapping character ranges to choices.
+class DispatchTable : public ZoneObject {
+ public:
+  class Entry {
+   public:
+    Entry() : from_(0), to_(0), out_set_(NULL) { }
+    Entry(uc16 from, uc16 to, OutSet* out_set)
+        : from_(from), to_(to), out_set_(out_set) { }
+    uc16 from() { return from_; }
+    uc16 to() { return to_; }
+    void set_to(uc16 value) { to_ = value; }
+    void AddValue(int value) { out_set_ = out_set_->Extend(value); }
+    OutSet* out_set() { return out_set_; }
+   private:
+    uc16 from_;
+    uc16 to_;
+    OutSet* out_set_;
+  };
+
+  class Config {
+   public:
+    typedef uc16 Key;
+    typedef Entry Value;
+    static const uc16 kNoKey;
+    static const Entry kNoValue;
+    static inline int Compare(uc16 a, uc16 b) {
+      if (a == b)
+        return 0;
+      else if (a < b)
+        return -1;
+      else
+        return 1;
+    }
+  };
+
+  void AddRange(CharacterRange range, int value);
+  OutSet* Get(uc16 value);
+  void Dump();
+
+  template <typename Callback>
+  void ForEach(Callback* callback) { return tree()->ForEach(callback); }
+ private:
+  // There can't be a static empty set since it allocates its
+  // successors in a zone and caches them.
+  OutSet* empty() { return &empty_; }
+  OutSet empty_;
+  ZoneSplayTree<Config>* tree() { return &tree_; }
+  ZoneSplayTree<Config> tree_;
+};
+
+
+#define FOR_EACH_NODE_TYPE(VISIT)                                    \
+  VISIT(End)                                                         \
+  VISIT(Action)                                                      \
+  VISIT(Choice)                                                      \
+  VISIT(BackReference)                                               \
+  VISIT(Assertion)                                                   \
+  VISIT(Text)
+
+
+#define FOR_EACH_REG_EXP_TREE_TYPE(VISIT)                            \
+  VISIT(Disjunction)                                                 \
+  VISIT(Alternative)                                                 \
+  VISIT(Assertion)                                                   \
+  VISIT(CharacterClass)                                              \
+  VISIT(Atom)                                                        \
+  VISIT(Quantifier)                                                  \
+  VISIT(Capture)                                                     \
+  VISIT(Lookahead)                                                   \
+  VISIT(BackReference)                                               \
+  VISIT(Empty)                                                       \
+  VISIT(Text)
+
+
+#define FORWARD_DECLARE(Name) class RegExp##Name;
+FOR_EACH_REG_EXP_TREE_TYPE(FORWARD_DECLARE)
+#undef FORWARD_DECLARE
+
+
+class TextElement {
+ public:
+  enum Type {UNINITIALIZED, ATOM, CHAR_CLASS};
+  TextElement() : type(UNINITIALIZED) { }
+  explicit TextElement(Type t) : type(t), cp_offset(-1) { }
+  static TextElement Atom(RegExpAtom* atom);
+  static TextElement CharClass(RegExpCharacterClass* char_class);
+  int length();
+  Type type;
+  union {
+    RegExpAtom* u_atom;
+    RegExpCharacterClass* u_char_class;
+  } data;
+  int cp_offset;
+};
+
+
+class Trace;
+
+
+struct NodeInfo {
+  NodeInfo()
+      : being_analyzed(false),
+        been_analyzed(false),
+        follows_word_interest(false),
+        follows_newline_interest(false),
+        follows_start_interest(false),
+        at_end(false),
+        visited(false) { }
+
+  // Returns true if the interests and assumptions of this node
+  // match those of the given one.
+  bool Matches(NodeInfo* that) {
+    return (at_end == that->at_end) &&
+           (follows_word_interest == that->follows_word_interest) &&
+           (follows_newline_interest == that->follows_newline_interest) &&
+           (follows_start_interest == that->follows_start_interest);
+  }
+
+  // Updates the interests of this node given the interests of the
+  // node preceding it.
+  void AddFromPreceding(NodeInfo* that) {
+    at_end |= that->at_end;
+    follows_word_interest |= that->follows_word_interest;
+    follows_newline_interest |= that->follows_newline_interest;
+    follows_start_interest |= that->follows_start_interest;
+  }
+
+  bool HasLookbehind() {
+    return follows_word_interest ||
+           follows_newline_interest ||
+           follows_start_interest;
+  }
+
+  // Sets the interests of this node to include the interests of the
+  // following node.
+  void AddFromFollowing(NodeInfo* that) {
+    follows_word_interest |= that->follows_word_interest;
+    follows_newline_interest |= that->follows_newline_interest;
+    follows_start_interest |= that->follows_start_interest;
+  }
+
+  void ResetCompilationState() {
+    being_analyzed = false;
+    been_analyzed = false;
+  }
+
+  bool being_analyzed: 1;
+  bool been_analyzed: 1;
+
+  // These bits are set if this node has to know what the preceding
+  // character was.
+  bool follows_word_interest: 1;
+  bool follows_newline_interest: 1;
+  bool follows_start_interest: 1;
+
+  bool at_end: 1;
+  bool visited: 1;
+};
+
+
+class SiblingList {
+ public:
+  SiblingList() : list_(NULL) { }
+  int length() {
+    return list_ == NULL ? 0 : list_->length();
+  }
+  void Ensure(RegExpNode* parent) {
+    if (list_ == NULL) {
+      list_ = new ZoneList<RegExpNode*>(2);
+      list_->Add(parent);
+    }
+  }
+  void Add(RegExpNode* node) { list_->Add(node); }
+  RegExpNode* Get(int index) { return list_->at(index); }
+ private:
+  ZoneList<RegExpNode*>* list_;
+};
+
+
+// Details of a quick mask-compare check that can look ahead in the
+// input stream.
+class QuickCheckDetails {
+ public:
+  QuickCheckDetails()
+      : characters_(0),
+        mask_(0),
+        value_(0),
+        cannot_match_(false) { }
+  explicit QuickCheckDetails(int characters)
+      : characters_(characters),
+        mask_(0),
+        value_(0),
+        cannot_match_(false) { }
+  bool Rationalize(bool ascii);
+  // Merge in the information from another branch of an alternation.
+  void Merge(QuickCheckDetails* other, int from_index);
+  // Advance the current position by some amount.
+  void Advance(int by, bool ascii);
+  void Clear();
+  bool cannot_match() { return cannot_match_; }
+  void set_cannot_match() { cannot_match_ = true; }
+  struct Position {
+    Position() : mask(0), value(0), determines_perfectly(false) { }
+    uc16 mask;
+    uc16 value;
+    bool determines_perfectly;
+  };
+  int characters() { return characters_; }
+  void set_characters(int characters) { characters_ = characters; }
+  Position* positions(int index) {
+    ASSERT(index >= 0);
+    ASSERT(index < characters_);
+    return positions_ + index;
+  }
+  uint32_t mask() { return mask_; }
+  uint32_t value() { return value_; }
+
+ private:
+  // How many characters do we have quick check information from.  This is
+  // the same for all branches of a choice node.
+  int characters_;
+  Position positions_[4];
+  // These values are the condensate of the above array after Rationalize().
+  uint32_t mask_;
+  uint32_t value_;
+  // If set to true, there is no way this quick check can match at all.
+  // E.g., if it requires to be at the start of the input, and isn't.
+  bool cannot_match_;
+};
+
+
+class RegExpNode: public ZoneObject {
+ public:
+  RegExpNode() : trace_count_(0) { }
+  virtual ~RegExpNode();
+  virtual void Accept(NodeVisitor* visitor) = 0;
+  // Generates a goto to this node or actually generates the code at this point.
+  virtual void Emit(RegExpCompiler* compiler, Trace* trace) = 0;
+  // How many characters must this node consume at a minimum in order to
+  // succeed.  If we have found at least 'still_to_find' characters that
+  // must be consumed there is no need to ask any following nodes whether
+  // they are sure to eat any more characters.
+  virtual int EatsAtLeast(int still_to_find, int recursion_depth) = 0;
+  // Emits some quick code that checks whether the preloaded characters match.
+  // Falls through on certain failure, jumps to the label on possible success.
+  // If the node cannot make a quick check it does nothing and returns false.
+  bool EmitQuickCheck(RegExpCompiler* compiler,
+                      Trace* trace,
+                      bool preload_has_checked_bounds,
+                      Label* on_possible_success,
+                      QuickCheckDetails* details_return,
+                      bool fall_through_on_failure);
+  // For a given number of characters this returns a mask and a value.  The
+  // next n characters are anded with the mask and compared with the value.
+  // A comparison failure indicates the node cannot match the next n characters.
+  // A comparison success indicates the node may match.
+  virtual void GetQuickCheckDetails(QuickCheckDetails* details,
+                                    RegExpCompiler* compiler,
+                                    int characters_filled_in,
+                                    bool not_at_start) = 0;
+  static const int kNodeIsTooComplexForGreedyLoops = -1;
+  virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
+  Label* label() { return &label_; }
+  // If non-generic code is generated for a node (ie the node is not at the
+  // start of the trace) then it cannot be reused.  This variable sets a limit
+  // on how often we allow that to happen before we insist on starting a new
+  // trace and generating generic code for a node that can be reused by flushing
+  // the deferred actions in the current trace and generating a goto.
+  static const int kMaxCopiesCodeGenerated = 10;
+
+  NodeInfo* info() { return &info_; }
+
+  void AddSibling(RegExpNode* node) { siblings_.Add(node); }
+
+  // Static version of EnsureSibling that expresses the fact that the
+  // result has the same type as the input.
+  template <class C>
+  static C* EnsureSibling(C* node, NodeInfo* info, bool* cloned) {
+    return static_cast<C*>(node->EnsureSibling(info, cloned));
+  }
+
+  SiblingList* siblings() { return &siblings_; }
+  void set_siblings(SiblingList* other) { siblings_ = *other; }
+
+ protected:
+  enum LimitResult { DONE, CONTINUE };
+  LimitResult LimitVersions(RegExpCompiler* compiler, Trace* trace);
+
+  // Returns a sibling of this node whose interests and assumptions
+  // match the ones in the given node info.  If no sibling exists NULL
+  // is returned.
+  RegExpNode* TryGetSibling(NodeInfo* info);
+
+  // Returns a sibling of this node whose interests match the ones in
+  // the given node info.  The info must not contain any assertions.
+  // If no node exists a new one will be created by cloning the current
+  // node.  The result will always be an instance of the same concrete
+  // class as this node.
+  RegExpNode* EnsureSibling(NodeInfo* info, bool* cloned);
+
+  // Returns a clone of this node initialized using the copy constructor
+  // of its concrete class.  Note that the node may have to be pre-
+  // processed before it is in a usable state.
+  virtual RegExpNode* Clone() = 0;
+
+ private:
+  Label label_;
+  NodeInfo info_;
+  SiblingList siblings_;
+  // This variable keeps track of how many times code has been generated for
+  // this node (in different traces).  We don't keep track of where the
+  // generated code is located unless the code is generated at the start of
+  // a trace, in which case it is generic and can be reused by flushing the
+  // deferred operations in the current trace and generating a goto.
+  int trace_count_;
+};
+
+
+// A simple closed interval.
+class Interval {
+ public:
+  Interval() : from_(kNone), to_(kNone) { }
+  Interval(int from, int to) : from_(from), to_(to) { }
+  Interval Union(Interval that) {
+    if (that.from_ == kNone)
+      return *this;
+    else if (from_ == kNone)
+      return that;
+    else
+      return Interval(Min(from_, that.from_), Max(to_, that.to_));
+  }
+  bool Contains(int value) {
+    return (from_ <= value) && (value <= to_);
+  }
+  bool is_empty() { return from_ == kNone; }
+  int from() { return from_; }
+  int to() { return to_; }
+  static Interval Empty() { return Interval(); }
+  static const int kNone = -1;
+ private:
+  int from_;
+  int to_;
+};
+
+
+class SeqRegExpNode: public RegExpNode {
+ public:
+  explicit SeqRegExpNode(RegExpNode* on_success)
+      : on_success_(on_success) { }
+  RegExpNode* on_success() { return on_success_; }
+  void set_on_success(RegExpNode* node) { on_success_ = node; }
+ private:
+  RegExpNode* on_success_;
+};
+
+
+class ActionNode: public SeqRegExpNode {
+ public:
+  enum Type {
+    SET_REGISTER,
+    INCREMENT_REGISTER,
+    STORE_POSITION,
+    BEGIN_SUBMATCH,
+    POSITIVE_SUBMATCH_SUCCESS,
+    EMPTY_MATCH_CHECK,
+    CLEAR_CAPTURES
+  };
+  static ActionNode* SetRegister(int reg, int val, RegExpNode* on_success);
+  static ActionNode* IncrementRegister(int reg, RegExpNode* on_success);
+  static ActionNode* StorePosition(int reg,
+                                   bool is_capture,
+                                   RegExpNode* on_success);
+  static ActionNode* ClearCaptures(Interval range, RegExpNode* on_success);
+  static ActionNode* BeginSubmatch(int stack_pointer_reg,
+                                   int position_reg,
+                                   RegExpNode* on_success);
+  static ActionNode* PositiveSubmatchSuccess(int stack_pointer_reg,
+                                             int restore_reg,
+                                             int clear_capture_count,
+                                             int clear_capture_from,
+                                             RegExpNode* on_success);
+  static ActionNode* EmptyMatchCheck(int start_register,
+                                     int repetition_register,
+                                     int repetition_limit,
+                                     RegExpNode* on_success);
+  virtual void Accept(NodeVisitor* visitor);
+  virtual void Emit(RegExpCompiler* compiler, Trace* trace);
+  virtual int EatsAtLeast(int still_to_find, int recursion_depth);
+  virtual void GetQuickCheckDetails(QuickCheckDetails* details,
+                                    RegExpCompiler* compiler,
+                                    int filled_in,
+                                    bool not_at_start) {
+    return on_success()->GetQuickCheckDetails(
+        details, compiler, filled_in, not_at_start);
+  }
+  Type type() { return type_; }
+  // TODO(erikcorry): We should allow some action nodes in greedy loops.
+  virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
+  virtual ActionNode* Clone() { return new ActionNode(*this); }
+
+ private:
+  union {
+    struct {
+      int reg;
+      int value;
+    } u_store_register;
+    struct {
+      int reg;
+    } u_increment_register;
+    struct {
+      int reg;
+      bool is_capture;
+    } u_position_register;
+    struct {
+      int stack_pointer_register;
+      int current_position_register;
+      int clear_register_count;
+      int clear_register_from;
+    } u_submatch;
+    struct {
+      int start_register;
+      int repetition_register;
+      int repetition_limit;
+    } u_empty_match_check;
+    struct {
+      int range_from;
+      int range_to;
+    } u_clear_captures;
+  } data_;
+  ActionNode(Type type, RegExpNode* on_success)
+      : SeqRegExpNode(on_success),
+        type_(type) { }
+  Type type_;
+  friend class DotPrinter;
+};
+
+
+class TextNode: public SeqRegExpNode {
+ public:
+  TextNode(ZoneList<TextElement>* elms,
+           RegExpNode* on_success)
+      : SeqRegExpNode(on_success),
+        elms_(elms) { }
+  TextNode(RegExpCharacterClass* that,
+           RegExpNode* on_success)
+      : SeqRegExpNode(on_success),
+        elms_(new ZoneList<TextElement>(1)) {
+    elms_->Add(TextElement::CharClass(that));
+  }
+  virtual void Accept(NodeVisitor* visitor);
+  virtual void Emit(RegExpCompiler* compiler, Trace* trace);
+  virtual int EatsAtLeast(int still_to_find, int recursion_depth);
+  virtual void GetQuickCheckDetails(QuickCheckDetails* details,
+                                    RegExpCompiler* compiler,
+                                    int characters_filled_in,
+                                    bool not_at_start);
+  ZoneList<TextElement>* elements() { return elms_; }
+  void MakeCaseIndependent();
+  virtual int GreedyLoopTextLength();
+  virtual TextNode* Clone() {
+    TextNode* result = new TextNode(*this);
+    result->CalculateOffsets();
+    return result;
+  }
+  void CalculateOffsets();
+
+ private:
+  enum TextEmitPassType {
+    NON_ASCII_MATCH,             // Check for characters that can't match.
+    SIMPLE_CHARACTER_MATCH,      // Case-dependent single character check.
+    NON_LETTER_CHARACTER_MATCH,  // Check characters that have no case equivs.
+    CASE_CHARACTER_MATCH,        // Case-independent single character check.
+    CHARACTER_CLASS_MATCH        // Character class.
+  };
+  static bool SkipPass(int pass, bool ignore_case);
+  static const int kFirstRealPass = SIMPLE_CHARACTER_MATCH;
+  static const int kLastPass = CHARACTER_CLASS_MATCH;
+  void TextEmitPass(RegExpCompiler* compiler,
+                    TextEmitPassType pass,
+                    bool preloaded,
+                    Trace* trace,
+                    bool first_element_checked,
+                    int* checked_up_to);
+  int Length();
+  ZoneList<TextElement>* elms_;
+};
+
+
+class AssertionNode: public SeqRegExpNode {
+ public:
+  enum AssertionNodeType {
+    AT_END,
+    AT_START,
+    AT_BOUNDARY,
+    AT_NON_BOUNDARY,
+    AFTER_NEWLINE
+  };
+  static AssertionNode* AtEnd(RegExpNode* on_success) {
+    return new AssertionNode(AT_END, on_success);
+  }
+  static AssertionNode* AtStart(RegExpNode* on_success) {
+    return new AssertionNode(AT_START, on_success);
+  }
+  static AssertionNode* AtBoundary(RegExpNode* on_success) {
+    return new AssertionNode(AT_BOUNDARY, on_success);
+  }
+  static AssertionNode* AtNonBoundary(RegExpNode* on_success) {
+    return new AssertionNode(AT_NON_BOUNDARY, on_success);
+  }
+  static AssertionNode* AfterNewline(RegExpNode* on_success) {
+    return new AssertionNode(AFTER_NEWLINE, on_success);
+  }
+  virtual void Accept(NodeVisitor* visitor);
+  virtual void Emit(RegExpCompiler* compiler, Trace* trace);
+  virtual int EatsAtLeast(int still_to_find, int recursion_depth);
+  virtual void GetQuickCheckDetails(QuickCheckDetails* details,
+                                    RegExpCompiler* compiler,
+                                    int filled_in,
+                                    bool not_at_start);
+  virtual AssertionNode* Clone() { return new AssertionNode(*this); }
+  AssertionNodeType type() { return type_; }
+ private:
+  AssertionNode(AssertionNodeType t, RegExpNode* on_success)
+      : SeqRegExpNode(on_success), type_(t) { }
+  AssertionNodeType type_;
+};
+
+
+class BackReferenceNode: public SeqRegExpNode {
+ public:
+  BackReferenceNode(int start_reg,
+                    int end_reg,
+                    RegExpNode* on_success)
+      : SeqRegExpNode(on_success),
+        start_reg_(start_reg),
+        end_reg_(end_reg) { }
+  virtual void Accept(NodeVisitor* visitor);
+  int start_register() { return start_reg_; }
+  int end_register() { return end_reg_; }
+  virtual void Emit(RegExpCompiler* compiler, Trace* trace);
+  virtual int EatsAtLeast(int still_to_find, int recursion_depth);
+  virtual void GetQuickCheckDetails(QuickCheckDetails* details,
+                                    RegExpCompiler* compiler,
+                                    int characters_filled_in,
+                                    bool not_at_start) {
+    return;
+  }
+  virtual BackReferenceNode* Clone() { return new BackReferenceNode(*this); }
+
+ private:
+  int start_reg_;
+  int end_reg_;
+};
+
+
+class EndNode: public RegExpNode {
+ public:
+  enum Action { ACCEPT, BACKTRACK, NEGATIVE_SUBMATCH_SUCCESS };
+  explicit EndNode(Action action) : action_(action) { }
+  virtual void Accept(NodeVisitor* visitor);
+  virtual void Emit(RegExpCompiler* compiler, Trace* trace);
+  virtual int EatsAtLeast(int still_to_find, int recursion_depth) { return 0; }
+  virtual void GetQuickCheckDetails(QuickCheckDetails* details,
+                                    RegExpCompiler* compiler,
+                                    int characters_filled_in,
+                                    bool not_at_start) {
+    // Returning 0 from EatsAtLeast should ensure we never get here.
+    UNREACHABLE();
+  }
+  virtual EndNode* Clone() { return new EndNode(*this); }
+
+ private:
+  Action action_;
+};
+
+
+class NegativeSubmatchSuccess: public EndNode {
+ public:
+  NegativeSubmatchSuccess(int stack_pointer_reg,
+                          int position_reg,
+                          int clear_capture_count,
+                          int clear_capture_start)
+      : EndNode(NEGATIVE_SUBMATCH_SUCCESS),
+        stack_pointer_register_(stack_pointer_reg),
+        current_position_register_(position_reg),
+        clear_capture_count_(clear_capture_count),
+        clear_capture_start_(clear_capture_start) { }
+  virtual void Emit(RegExpCompiler* compiler, Trace* trace);
+
+ private:
+  int stack_pointer_register_;
+  int current_position_register_;
+  int clear_capture_count_;
+  int clear_capture_start_;
+};
+
+
+class Guard: public ZoneObject {
+ public:
+  enum Relation { LT, GEQ };
+  Guard(int reg, Relation op, int value)
+      : reg_(reg),
+        op_(op),
+        value_(value) { }
+  int reg() { return reg_; }
+  Relation op() { return op_; }
+  int value() { return value_; }
+
+ private:
+  int reg_;
+  Relation op_;
+  int value_;
+};
+
+
+class GuardedAlternative {
+ public:
+  explicit GuardedAlternative(RegExpNode* node) : node_(node), guards_(NULL) { }
+  void AddGuard(Guard* guard);
+  RegExpNode* node() { return node_; }
+  void set_node(RegExpNode* node) { node_ = node; }
+  ZoneList<Guard*>* guards() { return guards_; }
+
+ private:
+  RegExpNode* node_;
+  ZoneList<Guard*>* guards_;
+};
+
+
+class AlternativeGeneration;
+
+
+class ChoiceNode: public RegExpNode {
+ public:
+  explicit ChoiceNode(int expected_size)
+      : alternatives_(new ZoneList<GuardedAlternative>(expected_size)),
+        table_(NULL),
+        not_at_start_(false),
+        being_calculated_(false) { }
+  virtual void Accept(NodeVisitor* visitor);
+  void AddAlternative(GuardedAlternative node) { alternatives()->Add(node); }
+  ZoneList<GuardedAlternative>* alternatives() { return alternatives_; }
+  DispatchTable* GetTable(bool ignore_case);
+  virtual void Emit(RegExpCompiler* compiler, Trace* trace);
+  virtual int EatsAtLeast(int still_to_find, int recursion_depth);
+  int EatsAtLeastHelper(int still_to_find,
+                        int recursion_depth,
+                        RegExpNode* ignore_this_node);
+  virtual void GetQuickCheckDetails(QuickCheckDetails* details,
+                                    RegExpCompiler* compiler,
+                                    int characters_filled_in,
+                                    bool not_at_start);
+  virtual ChoiceNode* Clone() { return new ChoiceNode(*this); }
+
+  bool being_calculated() { return being_calculated_; }
+  bool not_at_start() { return not_at_start_; }
+  void set_not_at_start() { not_at_start_ = true; }
+  void set_being_calculated(bool b) { being_calculated_ = b; }
+  virtual bool try_to_emit_quick_check_for_alternative(int i) { return true; }
+
+ protected:
+  int GreedyLoopTextLength(GuardedAlternative* alternative);
+  ZoneList<GuardedAlternative>* alternatives_;
+
+ private:
+  friend class DispatchTableConstructor;
+  friend class Analysis;
+  void GenerateGuard(RegExpMacroAssembler* macro_assembler,
+                     Guard* guard,
+                     Trace* trace);
+  int CalculatePreloadCharacters(RegExpCompiler* compiler);
+  void EmitOutOfLineContinuation(RegExpCompiler* compiler,
+                                 Trace* trace,
+                                 GuardedAlternative alternative,
+                                 AlternativeGeneration* alt_gen,
+                                 int preload_characters,
+                                 bool next_expects_preload);
+  DispatchTable* table_;
+  // If true, this node is never checked at the start of the input.
+  // Allows a new trace to start with at_start() set to false.
+  bool not_at_start_;
+  bool being_calculated_;
+};
+
+
+class NegativeLookaheadChoiceNode: public ChoiceNode {
+ public:
+  explicit NegativeLookaheadChoiceNode(GuardedAlternative this_must_fail,
+                                       GuardedAlternative then_do_this)
+      : ChoiceNode(2) {
+    AddAlternative(this_must_fail);
+    AddAlternative(then_do_this);
+  }
+  virtual int EatsAtLeast(int still_to_find, int recursion_depth);
+  virtual void GetQuickCheckDetails(QuickCheckDetails* details,
+                                    RegExpCompiler* compiler,
+                                    int characters_filled_in,
+                                    bool not_at_start);
+  // For a negative lookahead we don't emit the quick check for the
+  // alternative that is expected to fail.  This is because quick check code
+  // starts by loading enough characters for the alternative that takes fewest
+  // characters, but on a negative lookahead the negative branch did not take
+  // part in that calculation (EatsAtLeast) so the assumptions don't hold.
+  virtual bool try_to_emit_quick_check_for_alternative(int i) { return i != 0; }
+};
+
+
+class LoopChoiceNode: public ChoiceNode {
+ public:
+  explicit LoopChoiceNode(bool body_can_be_zero_length)
+      : ChoiceNode(2),
+        loop_node_(NULL),
+        continue_node_(NULL),
+        body_can_be_zero_length_(body_can_be_zero_length) { }
+  void AddLoopAlternative(GuardedAlternative alt);
+  void AddContinueAlternative(GuardedAlternative alt);
+  virtual void Emit(RegExpCompiler* compiler, Trace* trace);
+  virtual int EatsAtLeast(int still_to_find, int recursion_depth);
+  virtual void GetQuickCheckDetails(QuickCheckDetails* details,
+                                    RegExpCompiler* compiler,
+                                    int characters_filled_in,
+                                    bool not_at_start);
+  virtual LoopChoiceNode* Clone() { return new LoopChoiceNode(*this); }
+  RegExpNode* loop_node() { return loop_node_; }
+  RegExpNode* continue_node() { return continue_node_; }
+  bool body_can_be_zero_length() { return body_can_be_zero_length_; }
+  virtual void Accept(NodeVisitor* visitor);
+
+ private:
+  // AddAlternative is made private for loop nodes because alternatives
+  // should not be added freely; we need to keep track of which alternative
+  // loops back to the node itself (see the construction sketch after this
+  // class).
+  void AddAlternative(GuardedAlternative node) {
+    ChoiceNode::AddAlternative(node);
+  }
+
+  RegExpNode* loop_node_;
+  RegExpNode* continue_node_;
+  bool body_can_be_zero_length_;
+};
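+
+
+// A construction sketch for a loop node (illustrative only; 'body_node',
+// 'rest_node' and the flag value are placeholders, not names from this
+// header):
+//
+//   LoopChoiceNode* loop = new LoopChoiceNode(body_can_be_zero_length);
+//   loop->AddLoopAlternative(GuardedAlternative(body_node));
+//   loop->AddContinueAlternative(GuardedAlternative(rest_node));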
+
+
+// There are many ways to generate code for a node.  This class encapsulates
+// the current way we should be generating.  In other words it encapsulates
+// the current state of the code generator.  The effect of this is that we
+// generate code for paths that the matcher can take through the regular
+// expression.  A given node in the regexp can be code-generated several times
+// as it can be part of several traces.  For example for the regexp:
+// /foo(bar|ip)baz/ the code to match baz will be generated twice, once as part
+// of the foo-bar-baz trace and once as part of the foo-ip-baz trace.  The code
+// to match foo is generated only once (the traces have a common prefix).  The
+// code to store the capture is deferred and generated (twice) after the places
+// where baz has been matched.
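+//
+// As an illustrative sketch (not a complete Emit implementation), a node
+// that consumes a fixed number of characters might extend the incoming
+// trace and then delegate to its successor roughly like this:
+//
+//   Trace new_trace(*trace);
+//   new_trace.AdvanceCurrentPositionInTrace(3, compiler);
+//   on_success()->Emit(compiler, &new_trace);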
+class Trace {
+ public:
+  // A value for a property that is either known to be true, known to be
+  // false, or not known.
+  enum TriBool {
+    UNKNOWN = -1, FALSE = 0, TRUE = 1
+  };
+
+  class DeferredAction {
+   public:
+    DeferredAction(ActionNode::Type type, int reg)
+        : type_(type), reg_(reg), next_(NULL) { }
+    DeferredAction* next() { return next_; }
+    bool Mentions(int reg);
+    int reg() { return reg_; }
+    ActionNode::Type type() { return type_; }
+   private:
+    ActionNode::Type type_;
+    int reg_;
+    DeferredAction* next_;
+    friend class Trace;
+  };
+
+  class DeferredCapture : public DeferredAction {
+   public:
+    DeferredCapture(int reg, bool is_capture, Trace* trace)
+        : DeferredAction(ActionNode::STORE_POSITION, reg),
+          cp_offset_(trace->cp_offset()),
+          is_capture_(is_capture) { }
+    int cp_offset() { return cp_offset_; }
+    bool is_capture() { return is_capture_; }
+   private:
+    int cp_offset_;
+    bool is_capture_;
+    void set_cp_offset(int cp_offset) { cp_offset_ = cp_offset; }
+  };
+
+  class DeferredSetRegister : public DeferredAction {
+   public:
+    DeferredSetRegister(int reg, int value)
+        : DeferredAction(ActionNode::SET_REGISTER, reg),
+          value_(value) { }
+    int value() { return value_; }
+   private:
+    int value_;
+  };
+
+  class DeferredClearCaptures : public DeferredAction {
+   public:
+    explicit DeferredClearCaptures(Interval range)
+        : DeferredAction(ActionNode::CLEAR_CAPTURES, -1),
+          range_(range) { }
+    Interval range() { return range_; }
+   private:
+    Interval range_;
+  };
+
+  class DeferredIncrementRegister : public DeferredAction {
+   public:
+    explicit DeferredIncrementRegister(int reg)
+        : DeferredAction(ActionNode::INCREMENT_REGISTER, reg) { }
+  };
+
+  Trace()
+      : cp_offset_(0),
+        actions_(NULL),
+        backtrack_(NULL),
+        stop_node_(NULL),
+        loop_label_(NULL),
+        characters_preloaded_(0),
+        bound_checked_up_to_(0),
+        flush_budget_(100),
+        at_start_(UNKNOWN) { }
+
+  // End the trace.  This involves flushing the deferred actions in the trace
+  // and pushing a backtrack location onto the backtrack stack.  Once this is
+  // done we can start a new trace or go to one that has already been
+  // generated.
+  void Flush(RegExpCompiler* compiler, RegExpNode* successor);
+  int cp_offset() { return cp_offset_; }
+  DeferredAction* actions() { return actions_; }
+  // A trivial trace is one that has no deferred actions or other state that
+  // affects the assumptions used when generating code.  There is no recorded
+  // backtrack location in a trivial trace, so with a trivial trace we will
+  // generate code that, on a failure to match, gets the backtrack location
+  // from the backtrack stack rather than using a direct jump instruction.  We
+  // always start code generation with a trivial trace and non-trivial traces
+  // are created as we emit code for nodes or add to the list of deferred
+  // actions in the trace.  The location of the code generated for a node using
+  // a trivial trace is recorded in a label in the node so that gotos can be
+  // generated to that code.
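+  //
+  // As a sketch, an Emit implementation can fall back to the trivial case
+  // by flushing a non-trivial trace first:
+  //
+  //   if (!trace->is_trivial()) {
+  //     trace->Flush(compiler, this);
+  //     return;
+  //   }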
+  bool is_trivial() {
+    return backtrack_ == NULL &&
+           actions_ == NULL &&
+           cp_offset_ == 0 &&
+           characters_preloaded_ == 0 &&
+           bound_checked_up_to_ == 0 &&
+           quick_check_performed_.characters() == 0 &&
+           at_start_ == UNKNOWN;
+  }
+  TriBool at_start() { return at_start_; }
+  void set_at_start(bool at_start) { at_start_ = at_start ? TRUE : FALSE; }
+  Label* backtrack() { return backtrack_; }
+  Label* loop_label() { return loop_label_; }
+  RegExpNode* stop_node() { return stop_node_; }
+  int characters_preloaded() { return characters_preloaded_; }
+  int bound_checked_up_to() { return bound_checked_up_to_; }
+  int flush_budget() { return flush_budget_; }
+  QuickCheckDetails* quick_check_performed() { return &quick_check_performed_; }
+  bool mentions_reg(int reg);
+  // Returns true if a deferred position store exists to the specified
+  // register and stores the offset in the out-parameter.  Otherwise
+  // returns false.
+  bool GetStoredPosition(int reg, int* cp_offset);
+  // These set methods and AdvanceCurrentPositionInTrace should be used only on
+  // new traces - the intention is that traces are immutable after creation.
+  void add_action(DeferredAction* new_action) {
+    ASSERT(new_action->next_ == NULL);
+    new_action->next_ = actions_;
+    actions_ = new_action;
+  }
+  void set_backtrack(Label* backtrack) { backtrack_ = backtrack; }
+  void set_stop_node(RegExpNode* node) { stop_node_ = node; }
+  void set_loop_label(Label* label) { loop_label_ = label; }
+  void set_characters_preloaded(int cpre) { characters_preloaded_ = cpre; }
+  void set_bound_checked_up_to(int to) { bound_checked_up_to_ = to; }
+  void set_flush_budget(int to) { flush_budget_ = to; }
+  void set_quick_check_performed(QuickCheckDetails* d) {
+    quick_check_performed_ = *d;
+  }
+  void InvalidateCurrentCharacter();
+  void AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler);
+ private:
+  int FindAffectedRegisters(OutSet* affected_registers);
+  void PerformDeferredActions(RegExpMacroAssembler* macro,
+                               int max_register,
+                               OutSet& affected_registers,
+                               OutSet* registers_to_pop,
+                               OutSet* registers_to_clear);
+  void RestoreAffectedRegisters(RegExpMacroAssembler* macro,
+                                int max_register,
+                                OutSet& registers_to_pop,
+                                OutSet& registers_to_clear);
+  int cp_offset_;
+  DeferredAction* actions_;
+  Label* backtrack_;
+  RegExpNode* stop_node_;
+  Label* loop_label_;
+  int characters_preloaded_;
+  int bound_checked_up_to_;
+  QuickCheckDetails quick_check_performed_;
+  int flush_budget_;
+  TriBool at_start_;
+};
+
+
+class NodeVisitor {
+ public:
+  virtual ~NodeVisitor() { }
+#define DECLARE_VISIT(Type)                                          \
+  virtual void Visit##Type(Type##Node* that) = 0;
+FOR_EACH_NODE_TYPE(DECLARE_VISIT)
+#undef DECLARE_VISIT
+  virtual void VisitLoopChoice(LoopChoiceNode* that) { VisitChoice(that); }
+};
+
+
+// Node visitor used to add the start set of the alternatives to the
+// dispatch table of a choice node.
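+//
+// Usage sketch ('table' is assumed to point at a DispatchTable declared
+// earlier in this header, and 'choice' at the ChoiceNode being analyzed):
+//
+//   DispatchTableConstructor cons(table, ignore_case);
+//   cons.BuildTable(choice);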
+class DispatchTableConstructor: public NodeVisitor {
+ public:
+  DispatchTableConstructor(DispatchTable* table, bool ignore_case)
+      : table_(table),
+        choice_index_(-1),
+        ignore_case_(ignore_case) { }
+
+  void BuildTable(ChoiceNode* node);
+
+  void AddRange(CharacterRange range) {
+    table()->AddRange(range, choice_index_);
+  }
+
+  void AddInverse(ZoneList<CharacterRange>* ranges);
+
+#define DECLARE_VISIT(Type)                                          \
+  virtual void Visit##Type(Type##Node* that);
+FOR_EACH_NODE_TYPE(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+  DispatchTable* table() { return table_; }
+  void set_choice_index(int value) { choice_index_ = value; }
+
+ protected:
+  DispatchTable* table_;
+  int choice_index_;
+  bool ignore_case_;
+};
+
+
+// Assertion propagation moves information about assertions such as
+// \b to the affected nodes.  For instance, in /.\b./ information must
+// be propagated to the first '.' so that whatever follows knows whether it
+// matched a word or a non-word character, and to the second '.' so that it
+// checks whether it comes after a word or a non-word character.  In this
+// case the result will be something like:
+//
+//   +-------+        +------------+
+//   |   .   |        |      .     |
+//   +-------+  --->  +------------+
+//   | word? |        | check word |
+//   +-------+        +------------+
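+//
+// A driver sketch for running this pass (error reporting abbreviated;
+// 'start_node' stands for the entry node of the compiled graph):
+//
+//   Analysis analysis(ignore_case);
+//   analysis.EnsureAnalyzed(start_node);
+//   if (analysis.has_failed()) {
+//     // ... report analysis.error_message() ...
+//   }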
+class Analysis: public NodeVisitor {
+ public:
+  explicit Analysis(bool ignore_case)
+      : ignore_case_(ignore_case), error_message_(NULL) { }
+  void EnsureAnalyzed(RegExpNode* node);
+
+#define DECLARE_VISIT(Type)                                          \
+  virtual void Visit##Type(Type##Node* that);
+FOR_EACH_NODE_TYPE(DECLARE_VISIT)
+#undef DECLARE_VISIT
+  virtual void VisitLoopChoice(LoopChoiceNode* that);
+
+  bool has_failed() { return error_message_ != NULL; }
+  const char* error_message() {
+    ASSERT(error_message_ != NULL);
+    return error_message_;
+  }
+  void fail(const char* error_message) {
+    error_message_ = error_message;
+  }
+ private:
+  bool ignore_case_;
+  const char* error_message_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Analysis);
+};
+
+
+struct RegExpCompileData {
+  RegExpCompileData()
+    : tree(NULL),
+      node(NULL),
+      simple(true),
+      contains_anchor(false),
+      capture_count(0) { }
+  RegExpTree* tree;
+  RegExpNode* node;
+  bool simple;
+  bool contains_anchor;
+  Handle<String> error;
+  int capture_count;
+};
+
+
+class RegExpEngine: public AllStatic {
+ public:
+  struct CompilationResult {
+    explicit CompilationResult(const char* error_message)
+        : error_message(error_message),
+          code(Heap::the_hole_value()),
+          num_registers(0) {}
+    CompilationResult(Object* code, int registers)
+      : error_message(NULL),
+        code(code),
+        num_registers(registers) {}
+    const char* error_message;
+    Object* code;
+    int num_registers;
+  };
+
+  static CompilationResult Compile(RegExpCompileData* input,
+                                   bool ignore_case,
+                                   bool multiline,
+                                   Handle<String> pattern,
+                                   bool is_ascii);
+
+  static void DotPrint(const char* label, RegExpNode* node, bool ignore_case);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_JSREGEXP_H_
diff --git a/src/jump-target-inl.h b/src/jump-target-inl.h
new file mode 100644
index 0000000..1f0676d
--- /dev/null
+++ b/src/jump-target-inl.h
@@ -0,0 +1,49 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_JUMP_TARGET_INL_H_
+#define V8_JUMP_TARGET_INL_H_
+
+namespace v8 {
+namespace internal {
+
+CodeGenerator* JumpTarget::cgen() {
+  return CodeGeneratorScope::Current();
+}
+
+void JumpTarget::InitializeEntryElement(int index, FrameElement* target) {
+  entry_frame_->elements_[index].clear_copied();
+  if (target->is_register()) {
+    entry_frame_->set_register_location(target->reg(), index);
+  } else if (target->is_copy()) {
+    entry_frame_->elements_[target->index()].set_copied();
+  }
+}
+
+} }  // namespace v8::internal
+
+#endif  // V8_JUMP_TARGET_INL_H_
diff --git a/src/jump-target.cc b/src/jump-target.cc
new file mode 100644
index 0000000..3782f92
--- /dev/null
+++ b/src/jump-target.cc
@@ -0,0 +1,383 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "jump-target-inl.h"
+#include "register-allocator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// JumpTarget implementation.
+
+bool JumpTarget::compiling_deferred_code_ = false;
+
+
+void JumpTarget::Unuse() {
+  reaching_frames_.Clear();
+  merge_labels_.Clear();
+  entry_frame_ = NULL;
+  entry_label_.Unuse();
+}
+
+
+void JumpTarget::ComputeEntryFrame() {
+  // Given: a collection of frames reaching by forward CFG edges and
+  // the directionality of the block.  Compute: an entry frame for the
+  // block.
+
+  Counters::compute_entry_frame.Increment();
+#ifdef DEBUG
+  if (compiling_deferred_code_) {
+    ASSERT(reaching_frames_.length() > 1);
+    VirtualFrame* frame = reaching_frames_[0];
+    bool all_identical = true;
+    for (int i = 1; i < reaching_frames_.length(); i++) {
+      if (!frame->Equals(reaching_frames_[i])) {
+        all_identical = false;
+        break;
+      }
+    }
+    ASSERT(!all_identical || all_identical);
+  }
+#endif
+
+  // Choose an initial frame.
+  VirtualFrame* initial_frame = reaching_frames_[0];
+
+  // A list of pointers to frame elements in the entry frame.  NULL
+  // indicates that the element has not yet been determined.
+  int length = initial_frame->element_count();
+  ZoneList<FrameElement*> elements(length);
+
+  // Initially populate the list of elements based on the initial
+  // frame.
+  for (int i = 0; i < length; i++) {
+    FrameElement element = initial_frame->elements_[i];
+    // We do not allow copies or constants in bidirectional frames.
+    if (direction_ == BIDIRECTIONAL) {
+      if (element.is_constant() || element.is_copy()) {
+        elements.Add(NULL);
+        continue;
+      }
+    }
+    elements.Add(&initial_frame->elements_[i]);
+  }
+
+  // Compute elements based on the other reaching frames.
+  if (reaching_frames_.length() > 1) {
+    for (int i = 0; i < length; i++) {
+      FrameElement* element = elements[i];
+      for (int j = 1; j < reaching_frames_.length(); j++) {
+        // Element computation is monotonic: new information will not
+        // change our decision about undetermined or invalid elements.
+        if (element == NULL || !element->is_valid()) break;
+
+        element = element->Combine(&reaching_frames_[j]->elements_[i]);
+      }
+      elements[i] = element;
+    }
+  }
+
+  // Build the new frame.  A freshly allocated frame has memory elements
+  // for the parameters and some platform-dependent elements (e.g.,
+  // return address).  Replace those first.
+  entry_frame_ = new VirtualFrame();
+  int index = 0;
+  for (; index < entry_frame_->element_count(); index++) {
+    FrameElement* target = elements[index];
+    // If the element is determined, set it now.  Count registers.  Mark
+    // elements as copied exactly when they have a copy.  Undetermined
+    // elements are initially recorded as if in memory.
+    if (target != NULL) {
+      entry_frame_->elements_[index] = *target;
+      InitializeEntryElement(index, target);
+    }
+  }
+  // Then fill in the rest of the frame with new elements.
+  for (; index < length; index++) {
+    FrameElement* target = elements[index];
+    if (target == NULL) {
+      entry_frame_->elements_.Add(FrameElement::MemoryElement());
+    } else {
+      entry_frame_->elements_.Add(*target);
+      InitializeEntryElement(index, target);
+    }
+  }
+
+  // Allocate any still-undetermined frame elements to registers or
+  // memory, from the top down.
+  for (int i = length - 1; i >= 0; i--) {
+    if (elements[i] == NULL) {
+      // Loop over all the reaching frames to check whether the element
+      // is synced on all frames and to count the registers it occupies.
+      bool is_synced = true;
+      RegisterFile candidate_registers;
+      int best_count = kMinInt;
+      int best_reg_num = RegisterAllocator::kInvalidRegister;
+
+      for (int j = 0; j < reaching_frames_.length(); j++) {
+        FrameElement element = reaching_frames_[j]->elements_[i];
+        is_synced = is_synced && element.is_synced();
+        if (element.is_register() && !entry_frame_->is_used(element.reg())) {
+          // Count the register occurrence and remember it if better
+          // than the previous best.
+          int num = RegisterAllocator::ToNumber(element.reg());
+          candidate_registers.Use(num);
+          if (candidate_registers.count(num) > best_count) {
+            best_count = candidate_registers.count(num);
+            best_reg_num = num;
+          }
+        }
+      }
+
+      // If the value is synced on all frames, put it in memory.  This
+      // costs nothing at the merge code but will incur a
+      // memory-to-register move when the value is needed later.
+      if (is_synced) {
+        // Already recorded as a memory element.
+        continue;
+      }
+
+      // Try to put it in a register.  If there was no best choice
+      // consider any free register.
+      if (best_reg_num == RegisterAllocator::kInvalidRegister) {
+        for (int j = 0; j < RegisterAllocator::kNumRegisters; j++) {
+          if (!entry_frame_->is_used(j)) {
+            best_reg_num = j;
+            break;
+          }
+        }
+      }
+
+      if (best_reg_num != RegisterAllocator::kInvalidRegister) {
+        // If there was a register choice, use it.  Preserve the copied
+        // flag on the element.
+        bool is_copied = entry_frame_->elements_[i].is_copied();
+        Register reg = RegisterAllocator::ToRegister(best_reg_num);
+        entry_frame_->elements_[i] =
+            FrameElement::RegisterElement(reg,
+                                          FrameElement::NOT_SYNCED);
+        if (is_copied) entry_frame_->elements_[i].set_copied();
+        entry_frame_->set_register_location(reg, i);
+      }
+    }
+  }
+
+  // The stack pointer is at the highest synced element or the base of
+  // the expression stack.
+  int stack_pointer = length - 1;
+  while (stack_pointer >= entry_frame_->expression_base_index() &&
+         !entry_frame_->elements_[stack_pointer].is_synced()) {
+    stack_pointer--;
+  }
+  entry_frame_->stack_pointer_ = stack_pointer;
+}
+
+
+void JumpTarget::Jump() {
+  DoJump();
+}
+
+
+void JumpTarget::Jump(Result* arg) {
+  ASSERT(cgen()->has_valid_frame());
+
+  cgen()->frame()->Push(arg);
+  DoJump();
+}
+
+
+void JumpTarget::Branch(Condition cc, Hint hint) {
+  DoBranch(cc, hint);
+}
+
+
+#ifdef DEBUG
+#define DECLARE_ARGCHECK_VARS(name)                                \
+  Result::Type name##_type = name->type();                         \
+  Register name##_reg = name->is_register() ? name->reg() : no_reg
+
+#define ASSERT_ARGCHECK(name)                                \
+  ASSERT(name->type() == name##_type);                       \
+  ASSERT(!name->is_register() || name->reg().is(name##_reg))
+
+#else
+#define DECLARE_ARGCHECK_VARS(name) do {} while (false)
+
+#define ASSERT_ARGCHECK(name) do {} while (false)
+#endif
+
+void JumpTarget::Branch(Condition cc, Result* arg, Hint hint) {
+  ASSERT(cgen()->has_valid_frame());
+
+  // We want to check that non-frame registers at the call site stay in
+  // the same registers on the fall-through branch.
+  DECLARE_ARGCHECK_VARS(arg);
+
+  cgen()->frame()->Push(arg);
+  DoBranch(cc, hint);
+  *arg = cgen()->frame()->Pop();
+
+  ASSERT_ARGCHECK(arg);
+}
+
+
+void BreakTarget::Branch(Condition cc, Result* arg, Hint hint) {
+  ASSERT(cgen()->has_valid_frame());
+
+  int count = cgen()->frame()->height() - expected_height_;
+  if (count > 0) {
+    // We negate and branch here rather than using DoBranch's negate
+    // and branch.  This gives us a hook to remove statement state
+    // from the frame.
+    JumpTarget fall_through;
+    // Branch to fall through will not negate, because it is a
+    // forward-only target.
+    fall_through.Branch(NegateCondition(cc), NegateHint(hint));
+    Jump(arg);  // May emit merge code here.
+    fall_through.Bind();
+  } else {
+    DECLARE_ARGCHECK_VARS(arg);
+    cgen()->frame()->Push(arg);
+    DoBranch(cc, hint);
+    *arg = cgen()->frame()->Pop();
+    ASSERT_ARGCHECK(arg);
+  }
+}
+
+#undef DECLARE_ARGCHECK_VARS
+#undef ASSERT_ARGCHECK
+
+
+void JumpTarget::Bind() {
+  DoBind();
+}
+
+
+void JumpTarget::Bind(Result* arg) {
+  if (cgen()->has_valid_frame()) {
+    cgen()->frame()->Push(arg);
+  }
+  DoBind();
+  *arg = cgen()->frame()->Pop();
+}
+
+
+void JumpTarget::AddReachingFrame(VirtualFrame* frame) {
+  ASSERT(reaching_frames_.length() == merge_labels_.length());
+  ASSERT(entry_frame_ == NULL);
+  Label fresh;
+  merge_labels_.Add(fresh);
+  reaching_frames_.Add(frame);
+}
+
+
+// -------------------------------------------------------------------------
+// BreakTarget implementation.
+
+void BreakTarget::set_direction(Directionality direction) {
+  JumpTarget::set_direction(direction);
+  ASSERT(cgen()->has_valid_frame());
+  expected_height_ = cgen()->frame()->height();
+}
+
+
+void BreakTarget::CopyTo(BreakTarget* destination) {
+  ASSERT(destination != NULL);
+  destination->direction_ = direction_;
+  destination->reaching_frames_.Rewind(0);
+  destination->reaching_frames_.AddAll(reaching_frames_);
+  destination->merge_labels_.Rewind(0);
+  destination->merge_labels_.AddAll(merge_labels_);
+  destination->entry_frame_ = entry_frame_;
+  destination->entry_label_ = entry_label_;
+  destination->expected_height_ = expected_height_;
+}
+
+
+void BreakTarget::Branch(Condition cc, Hint hint) {
+  ASSERT(cgen()->has_valid_frame());
+
+  int count = cgen()->frame()->height() - expected_height_;
+  if (count > 0) {
+    // We negate and branch here rather than using DoBranch's negate
+    // and branch.  This gives us a hook to remove statement state
+    // from the frame.
+    JumpTarget fall_through;
+    // Branch to fall through will not negate, because it is a
+    // forward-only target.
+    fall_through.Branch(NegateCondition(cc), NegateHint(hint));
+    Jump();  // May emit merge code here.
+    fall_through.Bind();
+  } else {
+    DoBranch(cc, hint);
+  }
+}
+
+
+// -------------------------------------------------------------------------
+// ShadowTarget implementation.
+
+ShadowTarget::ShadowTarget(BreakTarget* shadowed) {
+  ASSERT(shadowed != NULL);
+  other_target_ = shadowed;
+
+#ifdef DEBUG
+  is_shadowing_ = true;
+#endif
+  // While shadowing, this shadow target saves the state of the original.
+  shadowed->CopyTo(this);
+
+  // The original's state is reset.
+  shadowed->Unuse();
+  ASSERT(cgen()->has_valid_frame());
+  shadowed->set_expected_height(cgen()->frame()->height());
+}
+
+
+void ShadowTarget::StopShadowing() {
+  ASSERT(is_shadowing_);
+
+  // The states of this target, which was shadowed, and the original
+  // target, which was shadowing, are swapped.
+  BreakTarget temp;
+  other_target_->CopyTo(&temp);
+  CopyTo(other_target_);
+  temp.CopyTo(this);
+  temp.Unuse();
+
+#ifdef DEBUG
+  is_shadowing_ = false;
+#endif
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/jump-target.h b/src/jump-target.h
new file mode 100644
index 0000000..0c42f1b
--- /dev/null
+++ b/src/jump-target.h
@@ -0,0 +1,277 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_JUMP_TARGET_H_
+#define V8_JUMP_TARGET_H_
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class FrameElement;
+class Result;
+class VirtualFrame;
+
+// -------------------------------------------------------------------------
+// Jump targets
+//
+// A jump target is an abstraction of a basic-block entry in generated
+// code.  It collects all the virtual frames reaching the block by
+// forward jumps and pairs them with labels for the merge code along
+// all forward-reaching paths.  When bound, an expected frame for the
+// block is determined and code is generated to merge to the expected
+// frame.  For backward jumps, the merge code is generated at the edge
+// leaving the predecessor block.
+//
+// A jump target must have been reached via control flow (either by
+// jumping, branching, or falling through) at the time it is bound.
+// In particular, this means that at least one of the control-flow
+// graph edges reaching the target must be a forward edge.
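+//
+// A usage sketch (illustrative; 'equal' stands for a platform-specific
+// condition code and a valid code-generator frame is assumed throughout):
+//
+//   JumpTarget done;
+//   // ... emit a comparison ...
+//   done.Branch(equal);   // forward edge; the current frame is recorded
+//   // ... code for the fall-through path ...
+//   done.Bind();          // reaching frames are merged and the label bound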
+
+class JumpTarget : public ZoneObject {  // Shadows are dynamically allocated.
+ public:
+  // Forward-only jump targets can only be reached by forward CFG edges.
+  enum Directionality { FORWARD_ONLY, BIDIRECTIONAL };
+
+  // Construct a jump target used to generate code and to provide
+  // access to a current frame.
+  explicit JumpTarget(Directionality direction)
+      : direction_(direction),
+        reaching_frames_(0),
+        merge_labels_(0),
+        entry_frame_(NULL) {
+  }
+
+  // Construct a jump target.
+  JumpTarget()
+      : direction_(FORWARD_ONLY),
+        reaching_frames_(0),
+        merge_labels_(0),
+        entry_frame_(NULL) {
+  }
+
+  virtual ~JumpTarget() {}
+
+  // Set the direction of the jump target.
+  virtual void set_direction(Directionality direction) {
+    direction_ = direction;
+  }
+
+  // Treat the jump target as a fresh one.  The state is reset.
+  void Unuse();
+
+  inline CodeGenerator* cgen();
+
+  Label* entry_label() { return &entry_label_; }
+
+  VirtualFrame* entry_frame() const { return entry_frame_; }
+  void set_entry_frame(VirtualFrame* frame) {
+    entry_frame_ = frame;
+  }
+
+  // Predicates testing the state of the encapsulated label.
+  bool is_bound() const { return entry_label_.is_bound(); }
+  bool is_linked() const {
+    return !is_bound() && !reaching_frames_.is_empty();
+  }
+  bool is_unused() const {
+    // This is !is_bound() && !is_linked().
+    return !is_bound() && reaching_frames_.is_empty();
+  }
+
+  // Emit a jump to the target.  There must be a current frame at the
+  // jump and there will be no current frame after the jump.
+  virtual void Jump();
+  virtual void Jump(Result* arg);
+
+  // Emit a conditional branch to the target.  There must be a current
+  // frame at the branch.  The current frame will fall through to the
+  // code after the branch.
+  virtual void Branch(Condition cc, Hint hint = no_hint);
+  virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
+
+  // Bind a jump target.  If there is no current frame at the binding
+  // site, there must be at least one frame reaching via a forward
+  // jump.
+  virtual void Bind();
+  virtual void Bind(Result* arg);
+
+  // Emit a call to a jump target.  There must be a current frame at
+  // the call.  The frame at the target is the same as the current
+  // frame except for an extra return address on top of it.  The frame
+  // after the call is the same as the frame before the call.
+  void Call();
+
+  static void set_compiling_deferred_code(bool flag) {
+    compiling_deferred_code_ = flag;
+  }
+
+ protected:
+  // Directionality flag set at initialization time.
+  Directionality direction_;
+
+  // A list of frames reaching this block via forward jumps.
+  ZoneList<VirtualFrame*> reaching_frames_;
+
+  // A parallel list of labels for merge code.
+  ZoneList<Label> merge_labels_;
+
+  // The frame used on entry to the block and expected at backward
+  // jumps to the block.  Set when the jump target is bound, but may
+  // or may not be set for forward-only blocks.
+  VirtualFrame* entry_frame_;
+
+  // The actual entry label of the block.
+  Label entry_label_;
+
+  // Implementations of Jump, Branch, and Bind with all arguments and
+  // return values using the virtual frame.
+  void DoJump();
+  void DoBranch(Condition cc, Hint hint);
+  void DoBind();
+
+ private:
+  static bool compiling_deferred_code_;
+
+  // Add a virtual frame reaching this labeled block via a forward jump,
+  // and a corresponding merge code label.
+  void AddReachingFrame(VirtualFrame* frame);
+
+  // Perform initialization required during entry frame computation
+  // after setting the virtual frame element at index in frame to be
+  // target.
+  inline void InitializeEntryElement(int index, FrameElement* target);
+
+  // Compute a frame to use for entry to this block.
+  void ComputeEntryFrame();
+
+  DISALLOW_COPY_AND_ASSIGN(JumpTarget);
+};
+
+
+// -------------------------------------------------------------------------
+// Break targets
+//
+// A break target is a jump target that can be used to break out of a
+// statement that keeps extra state on the stack (e.g., for/in or
+// try/finally).  They know the expected stack height at the target
+// and will drop state from nested statements as part of merging.
+//
+// Break targets are used for return, break, and continue targets.
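+//
+// A usage sketch for a breakable statement (simplified):
+//
+//   BreakTarget exit;
+//   exit.set_direction(JumpTarget::FORWARD_ONLY);  // records the height
+//   // ... generate the statement body; a 'break' becomes exit.Jump() ...
+//   exit.Bind();   // state pushed by the statement is dropped when merging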
+
+class BreakTarget : public JumpTarget {
+ public:
+  // Construct a break target.
+  BreakTarget() {}
+
+  virtual ~BreakTarget() {}
+
+  // Set the direction of the break target.
+  virtual void set_direction(Directionality direction);
+
+  // Copy the state of this break target to the destination.  The
+  // lists of forward-reaching frames and merge-point labels are
+  // copied.  All virtual frame pointers are copied, not the
+  // pointed-to frames.  The previous state of the destination is
+  // overwritten, without deallocating pointed-to virtual frames.
+  void CopyTo(BreakTarget* destination);
+
+  // Emit a jump to the target.  There must be a current frame at the
+  // jump and there will be no current frame after the jump.
+  virtual void Jump();
+  virtual void Jump(Result* arg);
+
+  // Emit a conditional branch to the target.  There must be a current
+  // frame at the branch.  The current frame will fall through to the
+  // code after the branch.
+  virtual void Branch(Condition cc, Hint hint = no_hint);
+  virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
+
+  // Bind a break target.  If there is no current frame at the binding
+  // site, there must be at least one frame reaching via a forward
+  // jump.
+  virtual void Bind();
+  virtual void Bind(Result* arg);
+
+  // Setter for expected height.
+  void set_expected_height(int expected) { expected_height_ = expected; }
+
+ private:
+  // The expected height of the expression stack where the target will
+  // be bound, statically known at initialization time.
+  int expected_height_;
+
+  DISALLOW_COPY_AND_ASSIGN(BreakTarget);
+};
+
+
+// -------------------------------------------------------------------------
+// Shadow break targets
+//
+// A shadow break target represents a break target that is temporarily
+// shadowed by another one (represented by the original during
+// shadowing).  They are used to catch jumps to labels in certain
+// contexts, e.g. try blocks.  After shadowing ends, the formerly
+// shadowed target is again represented by the original and the
+// ShadowTarget can be used as a jump target in its own right,
+// representing the formerly shadowing target.
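+//
+// A usage sketch for shadowing a function's return target across a try
+// block (simplified; 'return_target' is a placeholder BreakTarget*):
+//
+//   ShadowTarget shadow(return_target);  // return_target now intercepts
+//   // ... generate the try block; returns reach the fresh return_target ...
+//   shadow.StopShadowing();              // swap the two states back
+//   if (shadow.is_linked()) {
+//     // ... a return was intercepted while shadowing was active ...
+//   }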
+
+class ShadowTarget : public BreakTarget {
+ public:
+  // Construct a shadow jump target.  After construction the shadow
+  // target object holds the state of the original target, and the
+  // original target is actually a fresh one that intercepts control
+  // flow intended for the shadowed one.
+  explicit ShadowTarget(BreakTarget* shadowed);
+
+  virtual ~ShadowTarget() {}
+
+  // End shadowing.  After shadowing ends, the original jump target
+  // again gives access to the formerly shadowed target and the shadow
+  // target object gives access to the formerly shadowing target.
+  void StopShadowing();
+
+  // During shadowing, the currently shadowing target.  After
+  // shadowing, the target that was shadowed.
+  BreakTarget* other_target() const { return other_target_; }
+
+ private:
+  // During shadowing, the currently shadowing target.  After
+  // shadowing, the target that was shadowed.
+  BreakTarget* other_target_;
+
+#ifdef DEBUG
+  bool is_shadowing_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(ShadowTarget);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_JUMP_TARGET_H_
diff --git a/src/list-inl.h b/src/list-inl.h
new file mode 100644
index 0000000..e41db11
--- /dev/null
+++ b/src/list-inl.h
@@ -0,0 +1,166 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LIST_INL_H_
+#define V8_LIST_INL_H_
+
+#include "list.h"
+
+namespace v8 {
+namespace internal {
+
+
+template<typename T, class P>
+void List<T, P>::Add(const T& element) {
+  if (length_ < capacity_) {
+    data_[length_++] = element;
+  } else {
+    List<T, P>::ResizeAdd(element);
+  }
+}
+
+
+template<typename T, class P>
+void List<T, P>::AddAll(const List<T, P>& other) {
+  int result_length = length_ + other.length_;
+  if (capacity_ < result_length) Resize(result_length);
+  for (int i = 0; i < other.length_; i++) {
+    data_[length_ + i] = other.data_[i];
+  }
+  length_ = result_length;
+}
+
+
+// Use two layers of inlining so that the non-inlined function can
+// use the same implementation as the inlined version.
+template<typename T, class P>
+void List<T, P>::ResizeAdd(const T& element) {
+  ResizeAddInternal(element);
+}
+
+
+template<typename T, class P>
+void List<T, P>::ResizeAddInternal(const T& element) {
+  ASSERT(length_ >= capacity_);
+  // Grow the list capacity by 50%, but make sure to let it grow
+  // even when the capacity is zero (possible initial case).
+  int new_capacity = 1 + capacity_ + (capacity_ >> 1);
+  // Since the element reference could be an element of the list, copy
+  // it out of the old backing storage before resizing.
+  T temp = element;
+  Resize(new_capacity);
+  data_[length_++] = temp;
+}
+
+
+template<typename T, class P>
+void List<T, P>::Resize(int new_capacity) {
+  T* new_data = List<T, P>::NewData(new_capacity);
+  memcpy(new_data, data_, capacity_ * sizeof(T));
+  List<T, P>::DeleteData(data_);
+  data_ = new_data;
+  capacity_ = new_capacity;
+}
+
+
+template<typename T, class P>
+Vector<T> List<T, P>::AddBlock(T value, int count) {
+  int start = length_;
+  for (int i = 0; i < count; i++) Add(value);
+  return Vector<T>(&data_[start], count);
+}
+
+
+template<typename T, class P>
+T List<T, P>::Remove(int i) {
+  T element = at(i);
+  length_--;
+  while (i < length_) {
+    data_[i] = data_[i + 1];
+    i++;
+  }
+  return element;
+}
+
+
+template<typename T, class P>
+void List<T, P>::Clear() {
+  DeleteData(data_);
+  Initialize(0);
+}
+
+
+template<typename T, class P>
+void List<T, P>::Rewind(int pos) {
+  length_ = pos;
+}
+
+
+template<typename T, class P>
+void List<T, P>::Iterate(void (*callback)(T* x)) {
+  for (int i = 0; i < length_; i++) callback(&data_[i]);
+}
+
+
+template<typename T, class P>
+bool List<T, P>::Contains(const T& elm) {
+  for (int i = 0; i < length_; i++) {
+    if (data_[i] == elm)
+      return true;
+  }
+  return false;
+}
+
+
+template<typename T, class P>
+void List<T, P>::Sort(int (*cmp)(const T* x, const T* y)) {
+  ToVector().Sort(cmp);
+#ifdef DEBUG
+  for (int i = 1; i < length_; i++)
+    ASSERT(cmp(&data_[i - 1], &data_[i]) <= 0);
+#endif
+}
+
+
+template<typename T, class P>
+void List<T, P>::Sort() {
+  Sort(PointerValueCompare<T>);
+}
+
+
+template<typename T, class P>
+void List<T, P>::Initialize(int capacity) {
+  ASSERT(capacity >= 0);
+  data_ = (capacity > 0) ? NewData(capacity) : NULL;
+  capacity_ = capacity;
+  length_ = 0;
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_LIST_INL_H_
diff --git a/src/list.h b/src/list.h
new file mode 100644
index 0000000..25211d9
--- /dev/null
+++ b/src/list.h
@@ -0,0 +1,155 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LIST_H_
+#define V8_LIST_H_
+
+namespace v8 {
+namespace internal {
+
+
+// ----------------------------------------------------------------------------
+// The list is a template for very light-weight lists. We are not
+// using the STL because we want full control over space and speed of
+// the code. This implementation is based on code by Robert Griesemer
+// and Rob Pike.
+//
+// The list is parameterized by the type of its elements (T) and by an
+// allocation policy (P). The policy is used for allocating lists in
+// the C free store or the zone; see zone.h.
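+//
+// A usage sketch (assuming the default FreeStoreAllocationPolicy from the
+// forward declaration mentioned below):
+//
+//   List<int> ints(4);
+//   ints.Add(1);
+//   ints.Add(2);
+//   int last = ints.RemoveLast();  // yields 2; the element is not destroyed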
+
+// Forward defined as
+// template <typename T, class P = FreeStoreAllocationPolicy> class List;
+template <typename T, class P>
+class List {
+ public:
+
+  INLINE(explicit List(int capacity)) { Initialize(capacity); }
+  INLINE(~List()) { DeleteData(data_); }
+
+  // Deallocates memory used by the list and leaves the list in a consistent
+  // empty state.
+  void Free() {
+    DeleteData(data_);
+    Initialize(0);
+  }
+
+  INLINE(void* operator new(size_t size)) { return P::New(size); }
+  INLINE(void operator delete(void* p, size_t)) { return P::Delete(p); }
+
+  // Returns a reference to the element at index i.  This reference is
+  // not safe to use after operations that can change the list's
+  // backing store (e.g., Add).
+  inline T& operator[](int i) const  {
+    ASSERT(0 <= i && i < length_);
+    return data_[i];
+  }
+  inline T& at(int i) const  { return operator[](i); }
+  inline T& last() const { return at(length_ - 1); }
+  inline T& first() const { return at(0); }
+
+  INLINE(bool is_empty() const) { return length_ == 0; }
+  INLINE(int length() const) { return length_; }
+  INLINE(int capacity() const) { return capacity_; }
+
+  Vector<T> ToVector() { return Vector<T>(data_, length_); }
+
+  Vector<const T> ToConstVector() { return Vector<const T>(data_, length_); }
+
+  // Adds a copy of the given 'element' to the end of the list,
+  // expanding the list if necessary.
+  void Add(const T& element);
+
+  // Add all the elements from the argument list to this list.
+  void AddAll(const List<T, P>& other);
+
+  // Adds 'count' elements with the value 'value' and returns a
+  // vector that allows access to the elements.  The vector is valid
+  // until the next change is made to this list.
+  Vector<T> AddBlock(T value, int count);
+
+  // Removes the i'th element without deleting it even if T is a
+  // pointer type; moves all elements above i "down". Returns the
+  // removed element.  This function's complexity is linear in the
+  // size of the list.
+  T Remove(int i);
+
+  // Removes the last element without deleting it even if T is a
+  // pointer type. Returns the removed element.
+  INLINE(T RemoveLast()) { return Remove(length_ - 1); }
+
+  // Clears the list by setting the length to zero. Even if T is a
+  // pointer type, clearing the list doesn't delete the entries.
+  INLINE(void Clear());
+
+  // Drops all but the first 'pos' elements from the list.
+  INLINE(void Rewind(int pos));
+
+  bool Contains(const T& elm);
+
+  // Iterate through all list entries, starting at index 0.
+  void Iterate(void (*callback)(T* x));
+
+  // Sort all list entries (using QuickSort)
+  void Sort(int (*cmp)(const T* x, const T* y));
+  void Sort();
+
+  INLINE(void Initialize(int capacity));
+
+ private:
+  T* data_;
+  int capacity_;
+  int length_;
+
+  INLINE(T* NewData(int n))  { return static_cast<T*>(P::New(n * sizeof(T))); }
+  INLINE(void DeleteData(T* data))  { P::Delete(data); }
+
+  // Increase the capacity of a full list, and add an element.
+  // List must be full already.
+  void ResizeAdd(const T& element);
+
+  // Inlined implementation of ResizeAdd, shared by inlined and
+  // non-inlined versions of ResizeAdd.
+  void ResizeAddInternal(const T& element);
+
+  // Resize the list.
+  void Resize(int new_capacity);
+
+  DISALLOW_COPY_AND_ASSIGN(List);
+};
+
+class FrameElement;
+
+// Add() is inlined, ResizeAdd() called by Add() is inlined except for
+// Lists of FrameElements, and ResizeAddInternal() is inlined in ResizeAdd().
+template <>
+void List<FrameElement,
+          FreeStoreAllocationPolicy>::ResizeAdd(const FrameElement& element);
+
+} }  // namespace v8::internal
+
+#endif  // V8_LIST_H_
diff --git a/src/log-inl.h b/src/log-inl.h
new file mode 100644
index 0000000..1844d2b
--- /dev/null
+++ b/src/log-inl.h
@@ -0,0 +1,126 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LOG_INL_H_
+#define V8_LOG_INL_H_
+
+#include "log.h"
+
+namespace v8 {
+namespace internal {
+
+//
+// VMState class implementation.  A simple stack of VM states held by the
+// logger and partially threaded through the call stack.  States are pushed by
+// VMState construction and popped by destruction.
+//
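+//
+// Usage sketch (illustrative only): VMState is scoped, so a state
+// transition is expressed by constructing one on the stack, e.g.
+//
+//   {
+//     VMState state(GC);  // pushes GC onto the logger's state stack
+//     // ... collect garbage ...
+//   }                     // destructor pops back to the previous state
+//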
+#ifdef ENABLE_LOGGING_AND_PROFILING
+inline const char* StateToString(StateTag state) {
+  switch (state) {
+    case JS:
+      return "JS";
+    case GC:
+      return "GC";
+    case COMPILER:
+      return "COMPILER";
+    case OTHER:
+      return "OTHER";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+VMState::VMState(StateTag state) : disabled_(true) {
+  if (!Logger::is_logging()) {
+    return;
+  }
+
+  disabled_ = false;
+#if !defined(ENABLE_HEAP_PROTECTION)
+  // When not protecting the heap, there is no difference between
+  // EXTERNAL and OTHER.  As an optimization in that case, we will not
+  // perform EXTERNAL->OTHER transitions through the API.  We thus
+  // compress the two states into one.
+  if (state == EXTERNAL) state = OTHER;
+#endif
+  state_ = state;
+  previous_ = Logger::current_state_;
+  Logger::current_state_ = this;
+
+  if (FLAG_log_state_changes) {
+    LOG(UncheckedStringEvent("Entering", StateToString(state_)));
+    if (previous_ != NULL) {
+      LOG(UncheckedStringEvent("From", StateToString(previous_->state_)));
+    }
+  }
+
+#ifdef ENABLE_HEAP_PROTECTION
+  if (FLAG_protect_heap && previous_ != NULL) {
+    if (state_ == EXTERNAL) {
+      // We are leaving V8.
+      ASSERT(previous_->state_ != EXTERNAL);
+      Heap::Protect();
+    } else if (previous_->state_ == EXTERNAL) {
+      // We are entering V8.
+      Heap::Unprotect();
+    }
+  }
+#endif
+}
+
+
+VMState::~VMState() {
+  if (disabled_) return;
+  Logger::current_state_ = previous_;
+
+  if (FLAG_log_state_changes) {
+    LOG(UncheckedStringEvent("Leaving", StateToString(state_)));
+    if (previous_ != NULL) {
+      LOG(UncheckedStringEvent("To", StateToString(previous_->state_)));
+    }
+  }
+
+#ifdef ENABLE_HEAP_PROTECTION
+  if (FLAG_protect_heap && previous_ != NULL) {
+    if (state_ == EXTERNAL) {
+      // We are reentering V8.
+      ASSERT(previous_->state_ != EXTERNAL);
+      Heap::Unprotect();
+    } else if (previous_->state_ == EXTERNAL) {
+      // We are leaving V8.
+      Heap::Protect();
+    }
+  }
+#endif
+}
+#endif
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_LOG_INL_H_
diff --git a/src/log-utils.cc b/src/log-utils.cc
new file mode 100644
index 0000000..f327a0a
--- /dev/null
+++ b/src/log-utils.cc
@@ -0,0 +1,503 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "log-utils.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+LogDynamicBuffer::LogDynamicBuffer(
+    int block_size, int max_size, const char* seal, int seal_size)
+    : block_size_(block_size),
+      max_size_(max_size - (max_size % block_size_)),
+      seal_(seal),
+      seal_size_(seal_size),
+      blocks_(max_size_ / block_size_ + 1),
+      write_pos_(0), block_index_(0), block_write_pos_(0), is_sealed_(false) {
+  ASSERT(BlocksCount() > 0);
+  AllocateBlock(0);
+  for (int i = 1; i < BlocksCount(); ++i) {
+    blocks_[i] = NULL;
+  }
+}
+
+
+LogDynamicBuffer::~LogDynamicBuffer() {
+  for (int i = 0; i < BlocksCount(); ++i) {
+    DeleteArray(blocks_[i]);
+  }
+}
+
+
+int LogDynamicBuffer::Read(int from_pos, char* dest_buf, int buf_size) {
+  if (buf_size == 0) return 0;
+  int read_pos = from_pos;
+  int block_read_index = BlockIndex(from_pos);
+  int block_read_pos = PosInBlock(from_pos);
+  int dest_buf_pos = 0;
+  // Read until dest_buf is filled, or write_pos_ encountered.
+  while (read_pos < write_pos_ && dest_buf_pos < buf_size) {
+    const int read_size = Min(write_pos_ - read_pos,
+        Min(buf_size - dest_buf_pos, block_size_ - block_read_pos));
+    memcpy(dest_buf + dest_buf_pos,
+           blocks_[block_read_index] + block_read_pos, read_size);
+    block_read_pos += read_size;
+    dest_buf_pos += read_size;
+    read_pos += read_size;
+    if (block_read_pos == block_size_) {
+      block_read_pos = 0;
+      ++block_read_index;
+    }
+  }
+  return dest_buf_pos;
+}
+
+
+int LogDynamicBuffer::Seal() {
+  WriteInternal(seal_, seal_size_);
+  is_sealed_ = true;
+  return 0;
+}
+
+
+int LogDynamicBuffer::Write(const char* data, int data_size) {
+  if (is_sealed_) {
+    return 0;
+  }
+  if ((write_pos_ + data_size) <= (max_size_ - seal_size_)) {
+    return WriteInternal(data, data_size);
+  } else {
+    return Seal();
+  }
+}
+
+
+int LogDynamicBuffer::WriteInternal(const char* data, int data_size) {
+  int data_pos = 0;
+  while (data_pos < data_size) {
+    const int write_size =
+        Min(data_size - data_pos, block_size_ - block_write_pos_);
+    memcpy(blocks_[block_index_] + block_write_pos_, data + data_pos,
+           write_size);
+    block_write_pos_ += write_size;
+    data_pos += write_size;
+    if (block_write_pos_ == block_size_) {
+      block_write_pos_ = 0;
+      AllocateBlock(++block_index_);
+    }
+  }
+  write_pos_ += data_size;
+  return data_size;
+}
+
+
+bool Log::is_stopped_ = false;
+Log::WritePtr Log::Write = NULL;
+FILE* Log::output_handle_ = NULL;
+LogDynamicBuffer* Log::output_buffer_ = NULL;
+// Must be the same message as in Logger::PauseProfiler.
+const char* Log::kDynamicBufferSeal = "profiler,\"pause\"\n";
+Mutex* Log::mutex_ = NULL;
+char* Log::message_buffer_ = NULL;
+
+
+void Log::Init() {
+  mutex_ = OS::CreateMutex();
+  message_buffer_ = NewArray<char>(kMessageBufferSize);
+}
+
+
+void Log::OpenStdout() {
+  ASSERT(!IsEnabled());
+  output_handle_ = stdout;
+  Write = WriteToFile;
+  Init();
+}
+
+
+void Log::OpenFile(const char* name) {
+  ASSERT(!IsEnabled());
+  output_handle_ = OS::FOpen(name, OS::LogFileOpenMode);
+  Write = WriteToFile;
+  Init();
+}
+
+
+void Log::OpenMemoryBuffer() {
+  ASSERT(!IsEnabled());
+  output_buffer_ = new LogDynamicBuffer(
+      kDynamicBufferBlockSize, kMaxDynamicBufferSize,
+      kDynamicBufferSeal, strlen(kDynamicBufferSeal));
+  Write = WriteToMemory;
+  Init();
+}
+
+
+void Log::Close() {
+  if (Write == WriteToFile) {
+    if (output_handle_ != NULL) fclose(output_handle_);
+    output_handle_ = NULL;
+  } else if (Write == WriteToMemory) {
+    delete output_buffer_;
+    output_buffer_ = NULL;
+  } else {
+    ASSERT(Write == NULL);
+  }
+  Write = NULL;
+
+  DeleteArray(message_buffer_);
+  message_buffer_ = NULL;
+
+  delete mutex_;
+  mutex_ = NULL;
+
+  is_stopped_ = false;
+}
+
+
+int Log::GetLogLines(int from_pos, char* dest_buf, int max_size) {
+  if (Write != WriteToMemory) return 0;
+  ASSERT(output_buffer_ != NULL);
+  ASSERT(from_pos >= 0);
+  ASSERT(max_size >= 0);
+  int actual_size = output_buffer_->Read(from_pos, dest_buf, max_size);
+  ASSERT(actual_size <= max_size);
+  if (actual_size == 0) return 0;
+
+  // Find previous log line boundary.
+  char* end_pos = dest_buf + actual_size - 1;
+  while (end_pos >= dest_buf && *end_pos != '\n') --end_pos;
+  actual_size = end_pos - dest_buf + 1;
+  ASSERT(actual_size <= max_size);
+  return actual_size;
+}
+
+
+LogMessageBuilder::WriteFailureHandler
+    LogMessageBuilder::write_failure_handler = NULL;
+
+
+LogMessageBuilder::LogMessageBuilder(): sl(Log::mutex_), pos_(0) {
+  ASSERT(Log::message_buffer_ != NULL);
+}
+
+
+void LogMessageBuilder::Append(const char* format, ...) {
+  Vector<char> buf(Log::message_buffer_ + pos_,
+                   Log::kMessageBufferSize - pos_);
+  va_list args;
+  va_start(args, format);
+  AppendVA(format, args);
+  va_end(args);
+  ASSERT(pos_ <= Log::kMessageBufferSize);
+}
+
+
+void LogMessageBuilder::AppendVA(const char* format, va_list args) {
+  Vector<char> buf(Log::message_buffer_ + pos_,
+                   Log::kMessageBufferSize - pos_);
+  int result = v8::internal::OS::VSNPrintF(buf, format, args);
+
+  // Result is -1 if output was truncated.
+  if (result >= 0) {
+    pos_ += result;
+  } else {
+    pos_ = Log::kMessageBufferSize;
+  }
+  ASSERT(pos_ <= Log::kMessageBufferSize);
+}
+
+
+void LogMessageBuilder::Append(const char c) {
+  if (pos_ < Log::kMessageBufferSize) {
+    Log::message_buffer_[pos_++] = c;
+  }
+  ASSERT(pos_ <= Log::kMessageBufferSize);
+}
+
+
+void LogMessageBuilder::Append(String* str) {
+  AssertNoAllocation no_heap_allocation;  // Ensure the string stays valid.
+  int length = str->length();
+  for (int i = 0; i < length; i++) {
+    Append(static_cast<char>(str->Get(i)));
+  }
+}
+
+
+void LogMessageBuilder::AppendAddress(Address addr) {
+  static Address last_address_ = NULL;
+  AppendAddress(addr, last_address_);
+  last_address_ = addr;
+}
+
+
+void LogMessageBuilder::AppendAddress(Address addr, Address bias) {
+  if (!FLAG_compress_log) {
+    Append("0x%" V8PRIxPTR, addr);
+  } else if (bias == NULL) {
+    Append("%" V8PRIxPTR, addr);
+  } else {
+    uintptr_t delta;
+    char sign;
+    if (addr >= bias) {
+      delta = addr - bias;
+      sign = '+';
+    } else {
+      delta = bias - addr;
+      sign = '-';
+    }
+    Append("%c%" V8PRIxPTR, sign, delta);
+  }
+}
+
+
+void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
+  AssertNoAllocation no_heap_allocation;  // Ensure the string stays valid.
+  int len = str->length();
+  if (len > 0x1000)
+    len = 0x1000;
+  if (show_impl_info) {
+    Append(str->IsAsciiRepresentation() ? 'a' : '2');
+    if (StringShape(str).IsExternal())
+      Append('e');
+    if (StringShape(str).IsSymbol())
+      Append('#');
+    Append(":%i:", str->length());
+  }
+  for (int i = 0; i < len; i++) {
+    uc32 c = str->Get(i);
+    if (c > 0xff) {
+      Append("\\u%04x", c);
+    } else if (c < 32 || c > 126) {
+      Append("\\x%02x", c);
+    } else if (c == ',') {
+      Append("\\,");
+    } else if (c == '\\') {
+      Append("\\\\");
+    } else {
+      Append("%lc", c);
+    }
+  }
+}
+
+
+void LogMessageBuilder::AppendStringPart(const char* str, int len) {
+  if (pos_ + len > Log::kMessageBufferSize) {
+    len = Log::kMessageBufferSize - pos_;
+    ASSERT(len >= 0);
+    if (len == 0) return;
+  }
+  Vector<char> buf(Log::message_buffer_ + pos_,
+                   Log::kMessageBufferSize - pos_);
+  OS::StrNCpy(buf, str, len);
+  pos_ += len;
+  ASSERT(pos_ <= Log::kMessageBufferSize);
+}
+
+
+bool LogMessageBuilder::StoreInCompressor(LogRecordCompressor* compressor) {
+  return compressor->Store(Vector<const char>(Log::message_buffer_, pos_));
+}
+
+
+bool LogMessageBuilder::RetrieveCompressedPrevious(
+    LogRecordCompressor* compressor, const char* prefix) {
+  pos_ = 0;
+  if (prefix[0] != '\0') Append(prefix);
+  Vector<char> prev_record(Log::message_buffer_ + pos_,
+                           Log::kMessageBufferSize - pos_);
+  const bool has_prev = compressor->RetrievePreviousCompressed(&prev_record);
+  if (!has_prev) return false;
+  pos_ += prev_record.length();
+  return true;
+}
+
+
+void LogMessageBuilder::WriteToLogFile() {
+  ASSERT(pos_ <= Log::kMessageBufferSize);
+  const int written = Log::Write(Log::message_buffer_, pos_);
+  if (written != pos_ && write_failure_handler != NULL) {
+    write_failure_handler();
+  }
+}
+
+
+void LogMessageBuilder::WriteCStringToLogFile(const char* str) {
+  const int len = strlen(str);
+  const int written = Log::Write(str, len);
+  if (written != len && write_failure_handler != NULL) {
+    write_failure_handler();
+  }
+}
+
+
+// Formatting string for back references to the whole line. E.g. "#2" means
+// "the second line above".
+const char* LogRecordCompressor::kLineBackwardReferenceFormat = "#%d";
+
+// Formatting string for back references. E.g. "#2:10" means
+// "the second line above, start from char 10 (0-based)".
+const char* LogRecordCompressor::kBackwardReferenceFormat = "#%d:%d";
+
+
+LogRecordCompressor::~LogRecordCompressor() {
+  for (int i = 0; i < buffer_.length(); ++i) {
+    buffer_[i].Dispose();
+  }
+}
+
+
+static int GetNumberLength(int number) {
+  ASSERT(number >= 0);
+  ASSERT(number < 10000);
+  if (number < 10) return 1;
+  if (number < 100) return 2;
+  if (number < 1000) return 3;
+  return 4;
+}
+
+
+int LogRecordCompressor::GetBackwardReferenceSize(int distance, int pos) {
+  // See kLineBackwardReferenceFormat and kBackwardReferenceFormat.
+  return pos == 0 ? GetNumberLength(distance) + 1
+      : GetNumberLength(distance) + GetNumberLength(pos) + 2;
+}
+
+
+void LogRecordCompressor::PrintBackwardReference(Vector<char> dest,
+                                                 int distance,
+                                                 int pos) {
+  if (pos == 0) {
+    OS::SNPrintF(dest, kLineBackwardReferenceFormat, distance);
+  } else {
+    OS::SNPrintF(dest, kBackwardReferenceFormat, distance, pos);
+  }
+}
+
+
+bool LogRecordCompressor::Store(const Vector<const char>& record) {
+  // Check if the record is the same as the last stored one.
+  if (curr_ != -1) {
+    Vector<const char>& curr = buffer_[curr_];
+    if (record.length() == curr.length()
+        && strncmp(record.start(), curr.start(), record.length()) == 0) {
+      return false;
+    }
+  }
+  // buffer_ is circular.
+  prev_ = curr_++;
+  curr_ %= buffer_.length();
+  Vector<char> record_copy = Vector<char>::New(record.length());
+  memcpy(record_copy.start(), record.start(), record.length());
+  buffer_[curr_].Dispose();
+  buffer_[curr_] =
+      Vector<const char>(record_copy.start(), record_copy.length());
+  return true;
+}
+
+
+bool LogRecordCompressor::RetrievePreviousCompressed(
+    Vector<char>* prev_record) {
+  if (prev_ == -1) return false;
+
+  int index = prev_;
+  // Distance from prev_.
+  int distance = 0;
+  // Best compression result among records in the buffer.
+  struct {
+    intptr_t truncated_len;
+    int distance;
+    int copy_from_pos;
+    int backref_size;
+  } best = {-1, 0, 0, 0};
+  Vector<const char>& prev = buffer_[prev_];
+  const char* const prev_start = prev.start();
+  const char* const prev_end = prev.start() + prev.length();
+  do {
+    // We're moving backwards until we reach the current record.
+    // Remember that buffer_ is circular.
+    if (--index == -1) index = buffer_.length() - 1;
+    ++distance;
+    if (index == curr_) break;
+
+    Vector<const char>& data = buffer_[index];
+    if (data.start() == NULL) break;
+    const char* const data_end = data.start() + data.length();
+    const char* prev_ptr = prev_end;
+    const char* data_ptr = data_end;
+    // Compare strings backwards, stop on the last matching character.
+    while (prev_ptr != prev_start && data_ptr != data.start()
+          && *(prev_ptr - 1) == *(data_ptr - 1)) {
+      --prev_ptr;
+      --data_ptr;
+    }
+    const intptr_t truncated_len = prev_end - prev_ptr;
+    const int copy_from_pos = data_ptr - data.start();
+    // Check if the length of compressed tail is enough.
+    if (truncated_len <= kMaxBackwardReferenceSize
+        && truncated_len <= GetBackwardReferenceSize(distance, copy_from_pos)) {
+      continue;
+    }
+
+    // Record compression results.
+    if (truncated_len > best.truncated_len) {
+      best.truncated_len = truncated_len;
+      best.distance = distance;
+      best.copy_from_pos = copy_from_pos;
+      best.backref_size = GetBackwardReferenceSize(distance, copy_from_pos);
+    }
+  } while (true);
+
+  if (best.distance == 0) {
+    // Can't compress the previous record. Return as is.
+    ASSERT(prev_record->length() >= prev.length());
+    memcpy(prev_record->start(), prev.start(), prev.length());
+    prev_record->Truncate(prev.length());
+  } else {
+    // Copy the uncompressible part unchanged.
+    const intptr_t unchanged_len = prev.length() - best.truncated_len;
+    // + 1 for '\0'.
+    ASSERT(prev_record->length() >= unchanged_len + best.backref_size + 1);
+    memcpy(prev_record->start(), prev.start(), unchanged_len);
+    // Append the backward reference.
+    Vector<char> backref(
+        prev_record->start() + unchanged_len, best.backref_size + 1);
+    PrintBackwardReference(backref, best.distance, best.copy_from_pos);
+    ASSERT(strlen(backref.start()) - best.backref_size == 0);
+    prev_record->Truncate(unchanged_len + best.backref_size);
+  }
+  return true;
+}
+
+#endif  // ENABLE_LOGGING_AND_PROFILING
+
+} }  // namespace v8::internal
diff --git a/src/log-utils.h b/src/log-utils.h
new file mode 100644
index 0000000..117f098
--- /dev/null
+++ b/src/log-utils.h
@@ -0,0 +1,291 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LOG_UTILS_H_
+#define V8_LOG_UTILS_H_
+
+namespace v8 {
+namespace internal {
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+// A memory buffer that grows as you write to it.  Its size is increased
+// in 'block_size' steps, never exceeding 'max_size'.  During growth,
+// memory contents are never copied.  At the end of the buffer an amount
+// of memory specified by 'seal_size' is reserved.  When the write
+// position reaches max_size - seal_size, the buffer seals itself with
+// 'seal' and allows no further writes.  The data pointed to by 'seal'
+// must remain available for the entire lifetime of the LogDynamicBuffer.
+//
+// An instance of this class is created dynamically by Log.
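+//
+// Illustrative sketch (not from the original source) of how Log uses it,
+// with the constants defined in Log below:
+//
+//   LogDynamicBuffer buf(kDynamicBufferBlockSize, kMaxDynamicBufferSize,
+//                        kDynamicBufferSeal, strlen(kDynamicBufferSeal));
+//   buf.Write(msg, msg_len);       // returns msg_len, or 0 once sealed
+//   buf.Read(0, dest, dest_size);  // copies back at most dest_size bytes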
+class LogDynamicBuffer {
+ public:
+  LogDynamicBuffer(
+      int block_size, int max_size, const char* seal, int seal_size);
+
+  ~LogDynamicBuffer();
+
+  // Reads the contents of the buffer starting from 'from_pos'.  Upon
+  // return, 'dest_buf' is filled with the data.  The actual amount of
+  // data read is returned; it is <= 'buf_size'.
+  int Read(int from_pos, char* dest_buf, int buf_size);
+
+  // Writes 'data' to the buffer, making it larger if necessary.  If
+  // the data is too big to fit in the buffer, it doesn't get written at
+  // all.  In that case, the buffer seals itself and stops accepting
+  // any further writes.  Returns the amount of data written (either
+  // 'data_size', or 0 if 'data' is too big).
+  int Write(const char* data, int data_size);
+
+ private:
+  void AllocateBlock(int index) {
+    blocks_[index] = NewArray<char>(block_size_);
+  }
+
+  int BlockIndex(int pos) const { return pos / block_size_; }
+
+  int BlocksCount() const { return BlockIndex(max_size_) + 1; }
+
+  int PosInBlock(int pos) const { return pos % block_size_; }
+
+  int Seal();
+
+  int WriteInternal(const char* data, int data_size);
+
+  const int block_size_;
+  const int max_size_;
+  const char* seal_;
+  const int seal_size_;
+  ScopedVector<char*> blocks_;
+  int write_pos_;
+  int block_index_;
+  int block_write_pos_;
+  bool is_sealed_;
+};
+
+
+// Functions and data for performing output of log messages.
+class Log : public AllStatic {
+ public:
+  // Opens stdout for logging.
+  static void OpenStdout();
+
+  // Opens file for logging.
+  static void OpenFile(const char* name);
+
+  // Opens memory buffer for logging.
+  static void OpenMemoryBuffer();
+
+  // Disables logging, but preserves acquired resources.
+  static void stop() { is_stopped_ = true; }
+
+  // Frees all resources acquired in Open... functions.
+  static void Close();
+
+  // See description in include/v8.h.
+  static int GetLogLines(int from_pos, char* dest_buf, int max_size);
+
+  // Returns whether logging is enabled.
+  static bool IsEnabled() {
+    return !is_stopped_ && (output_handle_ != NULL || output_buffer_ != NULL);
+  }
+
+  // Size of buffer used for formatting log messages.
+  static const int kMessageBufferSize = 2048;
+
+ private:
+  typedef int (*WritePtr)(const char* msg, int length);
+
+  // Initialization function called from Open... functions.
+  static void Init();
+
+  // Write functions assume that mutex_ is acquired by the caller.
+  static WritePtr Write;
+
+  // Implementation of writing to a log file.
+  static int WriteToFile(const char* msg, int length) {
+    ASSERT(output_handle_ != NULL);
+    int rv = fwrite(msg, 1, length, output_handle_);
+    ASSERT(length == rv);
+    return rv;
+  }
+
+  // Implementation of writing to a memory buffer.
+  static int WriteToMemory(const char* msg, int length) {
+    ASSERT(output_buffer_ != NULL);
+    return output_buffer_->Write(msg, length);
+  }
+
+  // Whether logging is stopped (e.g. due to insufficient resources).
+  static bool is_stopped_;
+
+  // When logging is active, either output_handle_ or output_buffer_ is used
+  // to store a pointer to the log destination. If logging was opened via OpenStdout
+  // or OpenFile, then output_handle_ is used. If logging was opened
+  // via OpenMemoryBuffer, then output_buffer_ is used.
+  // mutex_ should be acquired before using output_handle_ or output_buffer_.
+  static FILE* output_handle_;
+
+  static LogDynamicBuffer* output_buffer_;
+
+  // Size of dynamic buffer block (and dynamic buffer initial size).
+  static const int kDynamicBufferBlockSize = 65536;
+
+  // Maximum size of dynamic buffer.
+  static const int kMaxDynamicBufferSize = 50 * 1024 * 1024;
+
+  // Message to "seal" dynamic buffer with.
+  static const char* kDynamicBufferSeal;
+
+  // mutex_ is a Mutex used for enforcing exclusive
+  // access to the formatting buffer and the log file or log memory buffer.
+  static Mutex* mutex_;
+
+  // Buffer used for formatting log messages. This is a singleton buffer and
+  // mutex_ should be acquired before using it.
+  static char* message_buffer_;
+
+  friend class LogMessageBuilder;
+  friend class LogRecordCompressor;
+};
+
+
+// A utility class for performing backward reference compression
+// of string ends. It operates using a window of previous strings.
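+//
+// Illustrative example (based on the back-reference formats in log-utils.cc):
+// if the previous record shares its tail with the record two lines above it,
+// starting at character 10 of that record, it is emitted as its distinct
+// head followed by "#2:10" instead of repeating the whole tail.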
+class LogRecordCompressor {
+ public:
+  // 'window_size' is the size of the backward lookup window.
+  explicit LogRecordCompressor(int window_size)
+      : buffer_(window_size + kNoCompressionWindowSize),
+        kMaxBackwardReferenceSize(
+            GetBackwardReferenceSize(window_size, Log::kMessageBufferSize)),
+        curr_(-1), prev_(-1) {
+  }
+
+  ~LogRecordCompressor();
+
+  // Fills vector with a compressed version of the previous record.
+  // Returns false if there is no previous record.
+  bool RetrievePreviousCompressed(Vector<char>* prev_record);
+
+  // Stores a record if it differs from the previous one (or there is no
+  // previous one).  Returns true if the record has been stored.
+  bool Store(const Vector<const char>& record);
+
+ private:
+  // The minimum size of the buffer: room for the current and the
+  // previous record.  Since there is no room for the predecessors of the
+  // previous record, it can't be compressed at all.
+  static const int kNoCompressionWindowSize = 2;
+
+  // Formatting strings for back references.
+  static const char* kLineBackwardReferenceFormat;
+  static const char* kBackwardReferenceFormat;
+
+  static int GetBackwardReferenceSize(int distance, int pos);
+
+  static void PrintBackwardReference(Vector<char> dest, int distance, int pos);
+
+  ScopedVector< Vector<const char> > buffer_;
+  const int kMaxBackwardReferenceSize;
+  int curr_;
+  int prev_;
+};
+
+
+// Utility class for formatting log messages. It fills the message into the
+// static buffer in Log.
+class LogMessageBuilder BASE_EMBEDDED {
+ public:
+  // Create a message builder starting from position 0. This acquires the mutex
+  // in the log as well.
+  explicit LogMessageBuilder();
+  ~LogMessageBuilder() { }
+
+  // Append string data to the log message.
+  void Append(const char* format, ...);
+
+  // Append string data to the log message.
+  void AppendVA(const char* format, va_list args);
+
+  // Append a character to the log message.
+  void Append(const char c);
+
+  // Append a heap string.
+  void Append(String* str);
+
+  // Appends an address, compressing it if needed by offsetting
+  // from Logger::last_address_.
+  void AppendAddress(Address addr);
+
+  // Appends an address, compressing it if needed.
+  void AppendAddress(Address addr, Address bias);
+
+  void AppendDetailed(String* str, bool show_impl_info);
+
+  // Append a portion of a string.
+  void AppendStringPart(const char* str, int len);
+
+  // Stores the log message into the compressor; returns true if the
+  // message was stored (i.e. doesn't repeat the previous one).
+  bool StoreInCompressor(LogRecordCompressor* compressor);
+
+  // Sets the log message to the compressed version of the previous message.
+  // Returns false if there is no previous message.
+  bool RetrieveCompressedPrevious(LogRecordCompressor* compressor) {
+    return RetrieveCompressedPrevious(compressor, "");
+  }
+
+  // Does the same as the version without arguments, and sets a prefix.
+  bool RetrieveCompressedPrevious(LogRecordCompressor* compressor,
+                                  const char* prefix);
+
+  // Write the log message to the log file currently opened.
+  void WriteToLogFile();
+
+  // Writes a null-terminated string to the log file currently opened.
+  void WriteCStringToLogFile(const char* str);
+
+  // A handler that is called when Log::Write fails.
+  typedef void (*WriteFailureHandler)();
+
+  static void set_write_failure_handler(WriteFailureHandler handler) {
+    write_failure_handler = handler;
+  }
+
+ private:
+  static WriteFailureHandler write_failure_handler;
+
+  ScopedLock sl;
+  int pos_;
+};
+
+#endif  // ENABLE_LOGGING_AND_PROFILING
+
+} }  // namespace v8::internal
+
+#endif  // V8_LOG_UTILS_H_
diff --git a/src/log.cc b/src/log.cc
new file mode 100644
index 0000000..d225c3b
--- /dev/null
+++ b/src/log.cc
@@ -0,0 +1,1294 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdarg.h>
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "log.h"
+#include "macro-assembler.h"
+#include "serialize.h"
+#include "string-stream.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+//
+// Sliding state window.  Updates counters to keep track of the last
+// window of kBufferSize states.  This is useful for tracking where we
+// spend our time.
+//
+class SlidingStateWindow {
+ public:
+  SlidingStateWindow();
+  ~SlidingStateWindow();
+  void AddState(StateTag state);
+
+ private:
+  static const int kBufferSize = 256;
+  int current_index_;
+  bool is_full_;
+  byte buffer_[kBufferSize];
+
+
+  void IncrementStateCounter(StateTag state) {
+    Counters::state_counters[state].Increment();
+  }
+
+
+  void DecrementStateCounter(StateTag state) {
+    Counters::state_counters[state].Decrement();
+  }
+};
+
+
+//
+// The Profiler samples pc and sp values for the main thread.
+// Each sample is appended to a circular buffer.
+// An independent thread removes data and writes it to the log.
+// This design minimizes the time spent in the sampler.
+//
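+// Lifecycle sketch (illustrative): the Logger creates a Profiler and calls
+// Engage(), which starts the worker thread and registers with the Ticker.
+// The sampler calls Insert() on every tick, while Run() loops on Remove()
+// and writes tick events to the log; Disengage() stops the sampling and
+// joins the worker thread.
+//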
+class Profiler: public Thread {
+ public:
+  Profiler();
+  void Engage();
+  void Disengage();
+
+  // Inserts collected profiling data into buffer.
+  void Insert(TickSample* sample) {
+    if (paused_)
+      return;
+
+    if (Succ(head_) == tail_) {
+      overflow_ = true;
+    } else {
+      buffer_[head_] = *sample;
+      head_ = Succ(head_);
+      buffer_semaphore_->Signal();  // Signal that we have an element.
+    }
+  }
+
+  // Waits for a signal and removes profiling data.
+  bool Remove(TickSample* sample) {
+    buffer_semaphore_->Wait();  // Wait for an element.
+    *sample = buffer_[tail_];
+    bool result = overflow_;
+    tail_ = Succ(tail_);
+    overflow_ = false;
+    return result;
+  }
+
+  void Run();
+
+  // Pause and Resume TickSample data collection.
+  static bool paused() { return paused_; }
+  static void pause() { paused_ = true; }
+  static void resume() { paused_ = false; }
+
+ private:
+  // Returns the next index in the cyclic buffer.
+  int Succ(int index) { return (index + 1) % kBufferSize; }
+
+  // Cyclic buffer for communicating profiling samples
+  // between the signal handler and the worker thread.
+  static const int kBufferSize = 128;
+  TickSample buffer_[kBufferSize];  // Buffer storage.
+  int head_;  // Index to the buffer head.
+  int tail_;  // Index to the buffer tail.
+  bool overflow_;  // Tells whether a buffer overflow has occurred.
+  Semaphore* buffer_semaphore_;  // Semaphore used for buffer synchronization.
+
+  // Tells whether worker thread should continue running.
+  bool running_;
+
+  // Tells whether we are currently recording tick samples.
+  static bool paused_;
+};
+
+bool Profiler::paused_ = false;
+
+
+//
+// StackTracer implementation
+//
+void StackTracer::Trace(TickSample* sample) {
+  if (sample->state == GC) {
+    sample->frames_count = 0;
+    return;
+  }
+
+  const Address js_entry_sp = Top::js_entry_sp(Top::GetCurrentThread());
+  if (js_entry_sp == 0) {
+    // Not executing JS now.
+    sample->frames_count = 0;
+    return;
+  }
+
+  SafeStackTraceFrameIterator it(
+      reinterpret_cast<Address>(sample->fp),
+      reinterpret_cast<Address>(sample->sp),
+      reinterpret_cast<Address>(sample->sp),
+      js_entry_sp);
+  int i = 0;
+  while (!it.done() && i < TickSample::kMaxFramesCount) {
+    sample->stack[i++] = it.frame()->pc();
+    it.Advance();
+  }
+  sample->frames_count = i;
+}
+
+
+//
+// Ticker used to provide ticks to the profiler and the sliding state
+// window.
+//
+class Ticker: public Sampler {
+ public:
+  explicit Ticker(int interval):
+      Sampler(interval, FLAG_prof), window_(NULL), profiler_(NULL) {}
+
+  ~Ticker() { if (IsActive()) Stop(); }
+
+  void SampleStack(TickSample* sample) {
+    StackTracer::Trace(sample);
+  }
+
+  void Tick(TickSample* sample) {
+    if (profiler_) profiler_->Insert(sample);
+    if (window_) window_->AddState(sample->state);
+  }
+
+  void SetWindow(SlidingStateWindow* window) {
+    window_ = window;
+    if (!IsActive()) Start();
+  }
+
+  void ClearWindow() {
+    window_ = NULL;
+    if (!profiler_ && IsActive()) Stop();
+  }
+
+  void SetProfiler(Profiler* profiler) {
+    profiler_ = profiler;
+    if (!FLAG_prof_lazy && !IsActive()) Start();
+  }
+
+  void ClearProfiler() {
+    profiler_ = NULL;
+    if (!window_ && IsActive()) Stop();
+  }
+
+ private:
+  SlidingStateWindow* window_;
+  Profiler* profiler_;
+};
+
+
+//
+// SlidingStateWindow implementation.
+//
+SlidingStateWindow::SlidingStateWindow(): current_index_(0), is_full_(false) {
+  for (int i = 0; i < kBufferSize; i++) {
+    buffer_[i] = static_cast<byte>(OTHER);
+  }
+  Logger::ticker_->SetWindow(this);
+}
+
+
+SlidingStateWindow::~SlidingStateWindow() {
+  Logger::ticker_->ClearWindow();
+}
+
+
+void SlidingStateWindow::AddState(StateTag state) {
+  if (is_full_) {
+    DecrementStateCounter(static_cast<StateTag>(buffer_[current_index_]));
+  } else if (current_index_ == kBufferSize - 1) {
+    is_full_ = true;
+  }
+  buffer_[current_index_] = static_cast<byte>(state);
+  IncrementStateCounter(state);
+  ASSERT(IsPowerOf2(kBufferSize));
+  current_index_ = (current_index_ + 1) & (kBufferSize - 1);
+}
+
+
+//
+// Profiler implementation.
+//
+Profiler::Profiler() {
+  buffer_semaphore_ = OS::CreateSemaphore(0);
+  head_ = 0;
+  tail_ = 0;
+  overflow_ = false;
+  running_ = false;
+}
+
+
+void Profiler::Engage() {
+  OS::LogSharedLibraryAddresses();
+
+  // Start thread processing the profiler buffer.
+  running_ = true;
+  Start();
+
+  // Register to get ticks.
+  Logger::ticker_->SetProfiler(this);
+
+  Logger::ProfilerBeginEvent();
+  Logger::LogAliases();
+}
+
+
+void Profiler::Disengage() {
+  // Stop receiving ticks.
+  Logger::ticker_->ClearProfiler();
+
+  // Terminate the worker thread by setting running_ to false,
+  // inserting a fake element in the queue and then waiting for
+  // the thread to terminate.
+  running_ = false;
+  TickSample sample;
+  // Reset 'paused_' flag, otherwise semaphore may not be signalled.
+  resume();
+  Insert(&sample);
+  Join();
+
+  LOG(UncheckedStringEvent("profiler", "end"));
+}
+
+
+void Profiler::Run() {
+  TickSample sample;
+  bool overflow = Logger::profiler_->Remove(&sample);
+  while (running_) {
+    LOG(TickEvent(&sample, overflow));
+    overflow = Logger::profiler_->Remove(&sample);
+  }
+}
+
+
+//
+// Logger class implementation.
+//
+Ticker* Logger::ticker_ = NULL;
+Profiler* Logger::profiler_ = NULL;
+VMState* Logger::current_state_ = NULL;
+VMState Logger::bottom_state_(EXTERNAL);
+SlidingStateWindow* Logger::sliding_state_window_ = NULL;
+const char** Logger::log_events_ = NULL;
+CompressionHelper* Logger::compression_helper_ = NULL;
+bool Logger::is_logging_ = false;
+
+#define DECLARE_LONG_EVENT(ignore1, long_name, ignore2) long_name,
+const char* kLongLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
+  LOG_EVENTS_AND_TAGS_LIST(DECLARE_LONG_EVENT)
+};
+#undef DECLARE_LONG_EVENT
+
+#define DECLARE_SHORT_EVENT(ignore1, ignore2, short_name) short_name,
+const char* kCompressedLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
+  LOG_EVENTS_AND_TAGS_LIST(DECLARE_SHORT_EVENT)
+};
+#undef DECLARE_SHORT_EVENT
+
+
+void Logger::ProfilerBeginEvent() {
+  if (!Log::IsEnabled()) return;
+  LogMessageBuilder msg;
+  msg.Append("profiler,\"begin\",%d\n", kSamplingIntervalMs);
+  if (FLAG_compress_log) {
+    msg.Append("profiler,\"compression\",%d\n", kCompressionWindowSize);
+  }
+  msg.WriteToLogFile();
+}
+
+
+void Logger::LogAliases() {
+  if (!Log::IsEnabled() || !FLAG_compress_log) return;
+  LogMessageBuilder msg;
+  for (int i = 0; i < NUMBER_OF_LOG_EVENTS; ++i) {
+    msg.Append("alias,%s,%s\n",
+               kCompressedLogEventsNames[i], kLongLogEventsNames[i]);
+  }
+  msg.WriteToLogFile();
+}
+
+#endif  // ENABLE_LOGGING_AND_PROFILING
+
+
+void Logger::Preamble(const char* content) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_code) return;
+  LogMessageBuilder msg;
+  msg.WriteCStringToLogFile(content);
+#endif
+}
+
+
+void Logger::StringEvent(const char* name, const char* value) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (FLAG_log) UncheckedStringEvent(name, value);
+#endif
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+void Logger::UncheckedStringEvent(const char* name, const char* value) {
+  if (!Log::IsEnabled()) return;
+  LogMessageBuilder msg;
+  msg.Append("%s,\"%s\"\n", name, value);
+  msg.WriteToLogFile();
+}
+#endif
+
+
+void Logger::IntEvent(const char* name, int value) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log) return;
+  LogMessageBuilder msg;
+  msg.Append("%s,%d\n", name, value);
+  msg.WriteToLogFile();
+#endif
+}
+
+
+void Logger::HandleEvent(const char* name, Object** location) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_handles) return;
+  LogMessageBuilder msg;
+  msg.Append("%s,0x%" V8PRIxPTR "\n", name, location);
+  msg.WriteToLogFile();
+#endif
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+// ApiEvent is private so all the calls come from the Logger class.  It is the
+// caller's responsibility to ensure that log is enabled and that
+// FLAG_log_api is true.
+void Logger::ApiEvent(const char* format, ...) {
+  ASSERT(Log::IsEnabled() && FLAG_log_api);
+  LogMessageBuilder msg;
+  va_list ap;
+  va_start(ap, format);
+  msg.AppendVA(format, ap);
+  va_end(ap);
+  msg.WriteToLogFile();
+}
+#endif
+
+
+void Logger::ApiNamedSecurityCheck(Object* key) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_api) return;
+  if (key->IsString()) {
+    SmartPointer<char> str =
+        String::cast(key)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+    ApiEvent("api,check-security,\"%s\"\n", *str);
+  } else if (key->IsUndefined()) {
+    ApiEvent("api,check-security,undefined\n");
+  } else {
+    ApiEvent("api,check-security,['no-name']\n");
+  }
+#endif
+}
+
+
+void Logger::SharedLibraryEvent(const char* library_path,
+                                uintptr_t start,
+                                uintptr_t end) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_prof) return;
+  LogMessageBuilder msg;
+  msg.Append("shared-library,\"%s\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
+             library_path,
+             start,
+             end);
+  msg.WriteToLogFile();
+#endif
+}
+
+
+void Logger::SharedLibraryEvent(const wchar_t* library_path,
+                                uintptr_t start,
+                                uintptr_t end) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_prof) return;
+  LogMessageBuilder msg;
+  msg.Append("shared-library,\"%ls\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
+             library_path,
+             start,
+             end);
+  msg.WriteToLogFile();
+#endif
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+void Logger::LogRegExpSource(Handle<JSRegExp> regexp) {
+  // Prints "/" + re.source + "/" +
+  //      (re.global?"g":"") + (re.ignorecase?"i":"") + (re.multiline?"m":"")
+  LogMessageBuilder msg;
+
+  Handle<Object> source = GetProperty(regexp, "source");
+  if (!source->IsString()) {
+    msg.Append("no source");
+    return;
+  }
+
+  switch (regexp->TypeTag()) {
+    case JSRegExp::ATOM:
+      msg.Append('a');
+      break;
+    default:
+      break;
+  }
+  msg.Append('/');
+  msg.AppendDetailed(*Handle<String>::cast(source), false);
+  msg.Append('/');
+
+  // global flag
+  Handle<Object> global = GetProperty(regexp, "global");
+  if (global->IsTrue()) {
+    msg.Append('g');
+  }
+  // ignorecase flag
+  Handle<Object> ignorecase = GetProperty(regexp, "ignoreCase");
+  if (ignorecase->IsTrue()) {
+    msg.Append('i');
+  }
+  // multiline flag
+  Handle<Object> multiline = GetProperty(regexp, "multiline");
+  if (multiline->IsTrue()) {
+    msg.Append('m');
+  }
+
+  msg.WriteToLogFile();
+}
+#endif  // ENABLE_LOGGING_AND_PROFILING
+
+
+void Logger::RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_regexp) return;
+  LogMessageBuilder msg;
+  msg.Append("regexp-compile,");
+  LogRegExpSource(regexp);
+  msg.Append(in_cache ? ",hit\n" : ",miss\n");
+  msg.WriteToLogFile();
+#endif
+}
+
+
+void Logger::LogRuntime(Vector<const char> format, JSArray* args) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_runtime) return;
+  HandleScope scope;
+  LogMessageBuilder msg;
+  for (int i = 0; i < format.length(); i++) {
+    char c = format[i];
+    if (c == '%' && i <= format.length() - 2) {
+      i++;
+      ASSERT('0' <= format[i] && format[i] <= '9');
+      Object* obj = args->GetElement(format[i] - '0');
+      i++;
+      switch (format[i]) {
+        case 's':
+          msg.AppendDetailed(String::cast(obj), false);
+          break;
+        case 'S':
+          msg.AppendDetailed(String::cast(obj), true);
+          break;
+        case 'r':
+          Logger::LogRegExpSource(Handle<JSRegExp>(JSRegExp::cast(obj)));
+          break;
+        case 'x':
+          msg.Append("0x%x", Smi::cast(obj)->value());
+          break;
+        case 'i':
+          msg.Append("%i", Smi::cast(obj)->value());
+          break;
+        default:
+          UNREACHABLE();
+      }
+    } else {
+      msg.Append(c);
+    }
+  }
+  msg.Append('\n');
+  msg.WriteToLogFile();
+#endif
+}
+
+
+void Logger::ApiIndexedSecurityCheck(uint32_t index) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_api) return;
+  ApiEvent("api,check-security,%u\n", index);
+#endif
+}
+
+
+void Logger::ApiNamedPropertyAccess(const char* tag,
+                                    JSObject* holder,
+                                    Object* name) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  ASSERT(name->IsString());
+  if (!Log::IsEnabled() || !FLAG_log_api) return;
+  String* class_name_obj = holder->class_name();
+  SmartPointer<char> class_name =
+      class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+  SmartPointer<char> property_name =
+      String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+  Logger::ApiEvent("api,%s,\"%s\",\"%s\"\n", tag, *class_name, *property_name);
+#endif
+}
+
+void Logger::ApiIndexedPropertyAccess(const char* tag,
+                                      JSObject* holder,
+                                      uint32_t index) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_api) return;
+  String* class_name_obj = holder->class_name();
+  SmartPointer<char> class_name =
+      class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+  Logger::ApiEvent("api,%s,\"%s\",%u\n", tag, *class_name, index);
+#endif
+}
+
+void Logger::ApiObjectAccess(const char* tag, JSObject* object) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_api) return;
+  String* class_name_obj = object->class_name();
+  SmartPointer<char> class_name =
+      class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+  Logger::ApiEvent("api,%s,\"%s\"\n", tag, *class_name);
+#endif
+}
+
+
+void Logger::ApiEntryCall(const char* name) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_api) return;
+  Logger::ApiEvent("api,%s\n", name);
+#endif
+}
+
+
+void Logger::NewEvent(const char* name, void* object, size_t size) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log) return;
+  LogMessageBuilder msg;
+  msg.Append("new,%s,0x%" V8PRIxPTR ",%u\n", name, object,
+             static_cast<unsigned int>(size));
+  msg.WriteToLogFile();
+#endif
+}
+
+
+void Logger::DeleteEvent(const char* name, void* object) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log) return;
+  LogMessageBuilder msg;
+  msg.Append("delete,%s,0x%" V8PRIxPTR "\n", name, object);
+  msg.WriteToLogFile();
+#endif
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+// A class that contains all common code dealing with record compression.
+class CompressionHelper {
+ public:
+  explicit CompressionHelper(int window_size)
+      : compressor_(window_size), repeat_count_(0) { }
+
+  // Handles storing the message in the compressor, retrieving the previous
+  // one and prefixing it with a repeat count, if needed.
+  // Returns true if the message needs to be written to the log.
+  bool HandleMessage(LogMessageBuilder* msg) {
+    if (!msg->StoreInCompressor(&compressor_)) {
+      // Current message repeats the previous one, don't write it.
+      ++repeat_count_;
+      return false;
+    }
+    if (repeat_count_ == 0) {
+      return msg->RetrieveCompressedPrevious(&compressor_);
+    }
+    OS::SNPrintF(prefix_, "%s,%d,",
+                 Logger::log_events_[Logger::REPEAT_META_EVENT],
+                 repeat_count_ + 1);
+    repeat_count_ = 0;
+    return msg->RetrieveCompressedPrevious(&compressor_, prefix_.start());
+  }
+
+ private:
+  LogRecordCompressor compressor_;
+  int repeat_count_;
+  EmbeddedVector<char, 20> prefix_;
+};
+
+#endif  // ENABLE_LOGGING_AND_PROFILING
+
+
+void Logger::CodeCreateEvent(LogEventsAndTags tag,
+                             Code* code,
+                             const char* comment) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_code) return;
+  LogMessageBuilder msg;
+  msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
+  msg.AppendAddress(code->address());
+  msg.Append(",%d,\"", code->ExecutableSize());
+  for (const char* p = comment; *p != '\0'; p++) {
+    if (*p == '"') {
+      msg.Append('\\');
+    }
+    msg.Append(*p);
+  }
+  msg.Append('"');
+  if (FLAG_compress_log) {
+    ASSERT(compression_helper_ != NULL);
+    if (!compression_helper_->HandleMessage(&msg)) return;
+  }
+  msg.Append('\n');
+  msg.WriteToLogFile();
+#endif
+}
+
+
+void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_code) return;
+  LogMessageBuilder msg;
+  SmartPointer<char> str =
+      name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+  msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
+  msg.AppendAddress(code->address());
+  msg.Append(",%d,\"%s\"", code->ExecutableSize(), *str);
+  if (FLAG_compress_log) {
+    ASSERT(compression_helper_ != NULL);
+    if (!compression_helper_->HandleMessage(&msg)) return;
+  }
+  msg.Append('\n');
+  msg.WriteToLogFile();
+#endif
+}
+
+
+void Logger::CodeCreateEvent(LogEventsAndTags tag,
+                             Code* code, String* name,
+                             String* source, int line) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_code) return;
+  LogMessageBuilder msg;
+  SmartPointer<char> str =
+      name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+  SmartPointer<char> sourcestr =
+      source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+  msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
+  msg.AppendAddress(code->address());
+  msg.Append(",%d,\"%s %s:%d\"",
+             code->ExecutableSize(), *str, *sourcestr, line);
+  if (FLAG_compress_log) {
+    ASSERT(compression_helper_ != NULL);
+    if (!compression_helper_->HandleMessage(&msg)) return;
+  }
+  msg.Append('\n');
+  msg.WriteToLogFile();
+#endif
+}
+
+
+void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_code) return;
+  LogMessageBuilder msg;
+  msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
+  msg.AppendAddress(code->address());
+  msg.Append(",%d,\"args_count: %d\"", code->ExecutableSize(), args_count);
+  if (FLAG_compress_log) {
+    ASSERT(compression_helper_ != NULL);
+    if (!compression_helper_->HandleMessage(&msg)) return;
+  }
+  msg.Append('\n');
+  msg.WriteToLogFile();
+#endif
+}
+
+
+void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_code) return;
+  LogMessageBuilder msg;
+  msg.Append("%s,%s,",
+             log_events_[CODE_CREATION_EVENT], log_events_[REG_EXP_TAG]);
+  msg.AppendAddress(code->address());
+  msg.Append(",%d,\"", code->ExecutableSize());
+  msg.AppendDetailed(source, false);
+  msg.Append('\"');
+  if (FLAG_compress_log) {
+    ASSERT(compression_helper_ != NULL);
+    if (!compression_helper_->HandleMessage(&msg)) return;
+  }
+  msg.Append('\n');
+  msg.WriteToLogFile();
+#endif
+}
+
+
+void Logger::CodeMoveEvent(Address from, Address to) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  static Address prev_to_ = NULL;
+  if (!Log::IsEnabled() || !FLAG_log_code) return;
+  LogMessageBuilder msg;
+  msg.Append("%s,", log_events_[CODE_MOVE_EVENT]);
+  msg.AppendAddress(from);
+  msg.Append(',');
+  msg.AppendAddress(to, prev_to_);
+  prev_to_ = to;
+  if (FLAG_compress_log) {
+    ASSERT(compression_helper_ != NULL);
+    if (!compression_helper_->HandleMessage(&msg)) return;
+  }
+  msg.Append('\n');
+  msg.WriteToLogFile();
+#endif
+}
+
+
+void Logger::CodeDeleteEvent(Address from) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_code) return;
+  LogMessageBuilder msg;
+  msg.Append("%s,", log_events_[CODE_DELETE_EVENT]);
+  msg.AppendAddress(from);
+  if (FLAG_compress_log) {
+    ASSERT(compression_helper_ != NULL);
+    if (!compression_helper_->HandleMessage(&msg)) return;
+  }
+  msg.Append('\n');
+  msg.WriteToLogFile();
+#endif
+}
+
+
+void Logger::ResourceEvent(const char* name, const char* tag) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log) return;
+  LogMessageBuilder msg;
+  msg.Append("%s,%s,", name, tag);
+
+  uint32_t sec, usec;
+  if (OS::GetUserTime(&sec, &usec) != -1) {
+    msg.Append("%d,%d,", sec, usec);
+  }
+  msg.Append("%.0f", OS::TimeCurrentMillis());
+
+  msg.Append('\n');
+  msg.WriteToLogFile();
+#endif
+}
+
+
+void Logger::SuspectReadEvent(String* name, Object* obj) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_suspect) return;
+  LogMessageBuilder msg;
+  String* class_name = obj->IsJSObject()
+                       ? JSObject::cast(obj)->class_name()
+                       : Heap::empty_string();
+  msg.Append("suspect-read,");
+  msg.Append(class_name);
+  msg.Append(',');
+  msg.Append('"');
+  msg.Append(name);
+  msg.Append('"');
+  msg.Append('\n');
+  msg.WriteToLogFile();
+#endif
+}
+
+
+void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_gc) return;
+  LogMessageBuilder msg;
+  // Using non-relative system time in order to be able to synchronize with
+  // external memory profiling events (e.g. DOM memory size).
+  msg.Append("heap-sample-begin,\"%s\",\"%s\",%.0f\n",
+             space, kind, OS::TimeCurrentMillis());
+  msg.WriteToLogFile();
+#endif
+}
+
+
+void Logger::HeapSampleStats(const char* space, const char* kind,
+                             int capacity, int used) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_gc) return;
+  LogMessageBuilder msg;
+  msg.Append("heap-sample-stats,\"%s\",\"%s\",%d,%d\n",
+             space, kind, capacity, used);
+  msg.WriteToLogFile();
+#endif
+}
+
+
+void Logger::HeapSampleEndEvent(const char* space, const char* kind) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_gc) return;
+  LogMessageBuilder msg;
+  msg.Append("heap-sample-end,\"%s\",\"%s\"\n", space, kind);
+  msg.WriteToLogFile();
+#endif
+}
+
+
+void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_gc) return;
+  LogMessageBuilder msg;
+  msg.Append("heap-sample-item,%s,%d,%d\n", type, number, bytes);
+  msg.WriteToLogFile();
+#endif
+}
+
+
+void Logger::HeapSampleJSConstructorEvent(const char* constructor,
+                                          int number, int bytes) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_gc) return;
+  LogMessageBuilder msg;
+  msg.Append("heap-js-cons-item,%s,%d,%d\n", constructor, number, bytes);
+  msg.WriteToLogFile();
+#endif
+}
+
+
+void Logger::HeapSampleJSRetainersEvent(
+    const char* constructor, const char* event) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_gc) return;
+  // Event starts with comma, so we don't have it in the format string.
+  static const char* event_text = "heap-js-ret-item,%s";
+  // We take placeholder strings into account, but it's OK to be conservative.
+  static const int event_text_len = strlen(event_text);
+  const int cons_len = strlen(constructor), event_len = strlen(event);
+  int pos = 0;
+  // Retainer lists can be long. We may need to split them into multiple events.
+  do {
+    LogMessageBuilder msg;
+    msg.Append(event_text, constructor);
+    int to_write = event_len - pos;
+    if (to_write > Log::kMessageBufferSize - (cons_len + event_text_len)) {
+      int cut_pos = pos + Log::kMessageBufferSize - (cons_len + event_text_len);
+      ASSERT(cut_pos < event_len);
+      while (cut_pos > pos && event[cut_pos] != ',') --cut_pos;
+      if (event[cut_pos] != ',') {
+        // Crash in debug mode, skip in release mode.
+        ASSERT(false);
+        return;
+      }
+      // Append a piece of event that fits, without trailing comma.
+      msg.AppendStringPart(event + pos, cut_pos - pos);
+      // Start next piece with comma.
+      pos = cut_pos;
+    } else {
+      msg.Append("%s", event + pos);
+      pos += event_len;
+    }
+    msg.Append('\n');
+    msg.WriteToLogFile();
+  } while (pos < event_len);
+#endif
+}
+
+
+void Logger::DebugTag(const char* call_site_tag) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log) return;
+  LogMessageBuilder msg;
+  msg.Append("debug-tag,%s\n", call_site_tag);
+  msg.WriteToLogFile();
+#endif
+}
+
+
+void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log) return;
+  StringBuilder s(parameter.length() + 1);
+  for (int i = 0; i < parameter.length(); ++i) {
+    s.AddCharacter(static_cast<char>(parameter[i]));
+  }
+  char* parameter_string = s.Finalize();
+  LogMessageBuilder msg;
+  msg.Append("debug-queue-event,%s,%15.3f,%s\n",
+             event_type,
+             OS::TimeCurrentMillis(),
+             parameter_string);
+  DeleteArray(parameter_string);
+  msg.WriteToLogFile();
+#endif
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+void Logger::TickEvent(TickSample* sample, bool overflow) {
+  if (!Log::IsEnabled() || !FLAG_prof) return;
+  static Address prev_sp = NULL;
+  LogMessageBuilder msg;
+  msg.Append("%s,", log_events_[TICK_EVENT]);
+  Address prev_addr = reinterpret_cast<Address>(sample->pc);
+  msg.AppendAddress(prev_addr);
+  msg.Append(',');
+  msg.AppendAddress(reinterpret_cast<Address>(sample->sp), prev_sp);
+  prev_sp = reinterpret_cast<Address>(sample->sp);
+  msg.Append(",%d", static_cast<int>(sample->state));
+  if (overflow) {
+    msg.Append(",overflow");
+  }
+  for (int i = 0; i < sample->frames_count; ++i) {
+    msg.Append(',');
+    msg.AppendAddress(sample->stack[i], prev_addr);
+    prev_addr = sample->stack[i];
+  }
+  if (FLAG_compress_log) {
+    ASSERT(compression_helper_ != NULL);
+    if (!compression_helper_->HandleMessage(&msg)) return;
+  }
+  msg.Append('\n');
+  msg.WriteToLogFile();
+}
+
+
+int Logger::GetActiveProfilerModules() {
+  int result = PROFILER_MODULE_NONE;
+  if (!profiler_->paused()) {
+    result |= PROFILER_MODULE_CPU;
+  }
+  if (FLAG_log_gc) {
+    result |= PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS;
+  }
+  return result;
+}
+
+
+void Logger::PauseProfiler(int flags) {
+  if (!Log::IsEnabled()) return;
+  const int active_modules = GetActiveProfilerModules();
+  const int modules_to_disable = active_modules & flags;
+  if (modules_to_disable == PROFILER_MODULE_NONE) return;
+
+  if (modules_to_disable & PROFILER_MODULE_CPU) {
+    profiler_->pause();
+    if (FLAG_prof_lazy) {
+      if (!FLAG_sliding_state_window) ticker_->Stop();
+      FLAG_log_code = false;
+      // Must be the same message as Log::kDynamicBufferSeal.
+      LOG(UncheckedStringEvent("profiler", "pause"));
+    }
+  }
+  if (modules_to_disable &
+      (PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
+    FLAG_log_gc = false;
+  }
+  // Turn off logging if no active modules remain.
+  if ((active_modules & ~flags) == PROFILER_MODULE_NONE) {
+    is_logging_ = false;
+  }
+}
+
+
+void Logger::ResumeProfiler(int flags) {
+  if (!Log::IsEnabled()) return;
+  const int modules_to_enable = ~GetActiveProfilerModules() & flags;
+  if (modules_to_enable != PROFILER_MODULE_NONE) {
+    is_logging_ = true;
+  }
+  if (modules_to_enable & PROFILER_MODULE_CPU) {
+    if (FLAG_prof_lazy) {
+      LOG(UncheckedStringEvent("profiler", "resume"));
+      FLAG_log_code = true;
+      LogCompiledFunctions();
+      if (!FLAG_sliding_state_window) ticker_->Start();
+    }
+    profiler_->resume();
+  }
+  if (modules_to_enable &
+      (PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
+    FLAG_log_gc = true;
+  }
+}
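+
+// Illustrative note: the 'flags' arguments above are bitmasks of the
+// PROFILER_MODULE_* constants, so e.g. PauseProfiler(PROFILER_MODULE_CPU)
+// pauses only tick sampling and leaves GC heap-stats logging untouched.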
+
+
+// This function can be called when Log's mutex is acquired,
+// either from main or Profiler's thread.
+void Logger::StopLoggingAndProfiling() {
+  Log::stop();
+  PauseProfiler(PROFILER_MODULE_CPU);
+}
+
+
+bool Logger::IsProfilerSamplerActive() {
+  return ticker_->IsActive();
+}
+
+
+int Logger::GetLogLines(int from_pos, char* dest_buf, int max_size) {
+  return Log::GetLogLines(from_pos, dest_buf, max_size);
+}
+
+
+void Logger::LogCompiledFunctions() {
+  HandleScope scope;
+  Handle<SharedFunctionInfo>* sfis = NULL;
+  int compiled_funcs_count = 0;
+
+  {
+    AssertNoAllocation no_alloc;
+
+    HeapIterator iterator;
+    while (iterator.has_next()) {
+      HeapObject* obj = iterator.next();
+      ASSERT(obj != NULL);
+      if (obj->IsSharedFunctionInfo()
+          && SharedFunctionInfo::cast(obj)->is_compiled()) {
+        ++compiled_funcs_count;
+      }
+    }
+
+    sfis = NewArray< Handle<SharedFunctionInfo> >(compiled_funcs_count);
+    iterator.reset();
+
+    int i = 0;
+    while (iterator.has_next()) {
+      HeapObject* obj = iterator.next();
+      ASSERT(obj != NULL);
+      if (obj->IsSharedFunctionInfo()
+          && SharedFunctionInfo::cast(obj)->is_compiled()) {
+        sfis[i++] = Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(obj));
+      }
+    }
+  }
+
+  // During iteration, there can be heap allocation due to
+  // GetScriptLineNumber call.
+  for (int i = 0; i < compiled_funcs_count; ++i) {
+    Handle<SharedFunctionInfo> shared = sfis[i];
+    Handle<String> name(String::cast(shared->name()));
+    Handle<String> func_name(name->length() > 0 ?
+                             *name : shared->inferred_name());
+    if (shared->script()->IsScript()) {
+      Handle<Script> script(Script::cast(shared->script()));
+      if (script->name()->IsString()) {
+        Handle<String> script_name(String::cast(script->name()));
+        int line_num = GetScriptLineNumber(script, shared->start_position());
+        if (line_num > 0) {
+          LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG,
+                              shared->code(), *func_name,
+                              *script_name, line_num + 1));
+        } else {
+          // Can't distinguish eval and script here, so always use Script.
+          LOG(CodeCreateEvent(Logger::SCRIPT_TAG,
+                              shared->code(), *script_name));
+        }
+        continue;
+      }
+    }
+    // If no script or script has no name.
+    LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG, shared->code(), *func_name));
+  }
+
+  DeleteArray(sfis);
+}
+
+#endif
+
+
+bool Logger::Setup() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // --log-all enables all the log flags.
+  if (FLAG_log_all) {
+    FLAG_log_runtime = true;
+    FLAG_log_api = true;
+    FLAG_log_code = true;
+    FLAG_log_gc = true;
+    FLAG_log_suspect = true;
+    FLAG_log_handles = true;
+    FLAG_log_regexp = true;
+  }
+
+  // --prof implies --log-code.
+  if (FLAG_prof) FLAG_log_code = true;
+
+  // --prof_lazy controls --log-code, implies --noprof_auto.
+  if (FLAG_prof_lazy) {
+    FLAG_log_code = false;
+    FLAG_prof_auto = false;
+  }
+
+  bool start_logging = FLAG_log || FLAG_log_runtime || FLAG_log_api
+      || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
+      || FLAG_log_regexp || FLAG_log_state_changes;
+
+  bool open_log_file = start_logging || FLAG_prof_lazy;
+
+  // If we're logging anything, we need to open the log file.
+  if (open_log_file) {
+    if (strcmp(FLAG_logfile, "-") == 0) {
+      Log::OpenStdout();
+    } else if (strcmp(FLAG_logfile, "*") == 0) {
+      Log::OpenMemoryBuffer();
+    } else if (strchr(FLAG_logfile, '%') != NULL) {
+      // If there's a '%' in the log file name we have to expand
+      // placeholders.
+      HeapStringAllocator allocator;
+      StringStream stream(&allocator);
+      for (const char* p = FLAG_logfile; *p; p++) {
+        if (*p == '%') {
+          p++;
+          switch (*p) {
+            case '\0':
+              // If there's a % at the end of the string we back up
+              // one character so we can escape the loop properly.
+              p--;
+              break;
+            case 't': {
+              // %t expands to the current time in milliseconds.
+              double time = OS::TimeCurrentMillis();
+              stream.Add("%.0f", FmtElm(time));
+              break;
+            }
+            case '%':
+              // %% expands (contracts really) to %.
+              stream.Put('%');
+              break;
+            default:
+              // All other %'s expand to themselves.
+              stream.Put('%');
+              stream.Put(*p);
+              break;
+          }
+        } else {
+          stream.Put(*p);
+        }
+      }
+      SmartPointer<const char> expanded = stream.ToCString();
+      Log::OpenFile(*expanded);
+    } else {
+      Log::OpenFile(FLAG_logfile);
+    }
+  }
+
+  current_state_ = &bottom_state_;
+
+  ticker_ = new Ticker(kSamplingIntervalMs);
+
+  if (FLAG_sliding_state_window && sliding_state_window_ == NULL) {
+    sliding_state_window_ = new SlidingStateWindow();
+  }
+
+  log_events_ = FLAG_compress_log ?
+      kCompressedLogEventsNames : kLongLogEventsNames;
+  if (FLAG_compress_log) {
+    compression_helper_ = new CompressionHelper(kCompressionWindowSize);
+  }
+
+  is_logging_ = start_logging;
+
+  if (FLAG_prof) {
+    profiler_ = new Profiler();
+    if (!FLAG_prof_auto) {
+      profiler_->pause();
+    } else {
+      is_logging_ = true;
+    }
+    profiler_->Engage();
+  }
+
+  LogMessageBuilder::set_write_failure_handler(StopLoggingAndProfiling);
+
+  return true;
+
+#else
+  return false;
+#endif
+}
+
+
+void Logger::TearDown() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  LogMessageBuilder::set_write_failure_handler(NULL);
+
+  // Stop the profiler before closing the file.
+  if (profiler_ != NULL) {
+    profiler_->Disengage();
+    delete profiler_;
+    profiler_ = NULL;
+  }
+
+  delete compression_helper_;
+  compression_helper_ = NULL;
+
+  delete sliding_state_window_;
+  sliding_state_window_ = NULL;
+
+  delete ticker_;
+  ticker_ = NULL;
+
+  Log::Close();
+#endif
+}
+
+
+void Logger::EnableSlidingStateWindow() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // If the ticker is NULL, Logger::Setup has not been called yet.  In
+  // that case, we set the sliding_state_window flag so that the
+  // sliding window computation will be started when Logger::Setup is
+  // called.
+  if (ticker_ == NULL) {
+    FLAG_sliding_state_window = true;
+    return;
+  }
+  // Otherwise, if the sliding state window computation has not been
+  // started we do it now.
+  if (sliding_state_window_ == NULL) {
+    sliding_state_window_ = new SlidingStateWindow();
+  }
+#endif
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/log.h b/src/log.h
new file mode 100644
index 0000000..07a0429
--- /dev/null
+++ b/src/log.h
@@ -0,0 +1,349 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LOG_H_
+#define V8_LOG_H_
+
+#include "platform.h"
+#include "log-utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Logger is used for collecting logging information from V8 during
+// execution. The result is dumped to a file.
+//
+// Available command line flags:
+//
+//  --log
+// Minimal logging (no API, code, or GC sample events), default is off.
+//
+// --log-all
+// Log all events to the file, default is off.  This is the same as combining
+// --log-api, --log-code, --log-gc, and --log-regexp.
+//
+// --log-api
+// Log API events to the logfile, default is off.  --log-api implies --log.
+//
+// --log-code
+// Log code (create, move, and delete) events to the logfile, default is off.
+// --log-code implies --log.
+//
+// --log-gc
+// Log GC heap samples (which can be processed by hp2ps) after each GC, default
+// is off.  --log-gc implies --log.
+//
+// --log-regexp
+// Log creation and use of regular expressions, default is off.
+// --log-regexp implies --log.
+//
+// --logfile <filename>
+// Specify the name of the logfile, default is "v8.log".
+//
+// --prof
+// Collect statistical profiling information (ticks), default is off.  The
+// tick profiler requires code events, so --prof implies --log-code.
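+//
+// Illustrative invocation (an assumption, not taken from this change): a
+// shell started with
+//   --log-code --logfile v8-%t.log
+// logs code events to a file whose name has '%t' replaced by the current
+// time in milliseconds (see the placeholder expansion in Logger::Setup).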
+
+// Forward declarations.
+class Ticker;
+class Profiler;
+class Semaphore;
+class SlidingStateWindow;
+class LogMessageBuilder;
+class CompressionHelper;
+
+#undef LOG
+#ifdef ENABLE_LOGGING_AND_PROFILING
+#define LOG(Call)                           \
+  do {                                      \
+    if (v8::internal::Logger::is_logging()) \
+      v8::internal::Logger::Call;           \
+  } while (false)
+#else
+#define LOG(Call) ((void) 0)
+#endif
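+
+// Illustrative usage sketch (not taken from the original source): LOG wraps
+// a Logger call so that it is only evaluated when logging is enabled, e.g.
+//   LOG(StringEvent("bootstrap", "begin"));
+//   LOG(IntEvent("compiled-functions", count));  // 'count' is hypothetical.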
+
+
+class VMState BASE_EMBEDDED {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ public:
+  inline explicit VMState(StateTag state);
+  inline ~VMState();
+
+  StateTag state() { return state_; }
+
+ private:
+  bool disabled_;
+  StateTag state_;
+  VMState* previous_;
+#else
+ public:
+  explicit VMState(StateTag state) {}
+#endif
+};
+
+
+#define LOG_EVENTS_AND_TAGS_LIST(V) \
+  V(CODE_CREATION_EVENT,            "code-creation",          "cc")       \
+  V(CODE_MOVE_EVENT,                "code-move",              "cm")       \
+  V(CODE_DELETE_EVENT,              "code-delete",            "cd")       \
+  V(TICK_EVENT,                     "tick",                   "t")        \
+  V(REPEAT_META_EVENT,              "repeat",                 "r")        \
+  V(BUILTIN_TAG,                    "Builtin",                "bi")       \
+  V(CALL_DEBUG_BREAK_TAG,           "CallDebugBreak",         "cdb")      \
+  V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn", "cdbsi")    \
+  V(CALL_IC_TAG,                    "CallIC",                 "cic")      \
+  V(CALL_INITIALIZE_TAG,            "CallInitialize",         "ci")       \
+  V(CALL_MEGAMORPHIC_TAG,           "CallMegamorphic",        "cmm")      \
+  V(CALL_MISS_TAG,                  "CallMiss",               "cm")       \
+  V(CALL_NORMAL_TAG,                "CallNormal",             "cn")       \
+  V(CALL_PRE_MONOMORPHIC_TAG,       "CallPreMonomorphic",     "cpm")      \
+  V(EVAL_TAG,                       "Eval",                   "e")        \
+  V(FUNCTION_TAG,                   "Function",               "f")        \
+  V(KEYED_LOAD_IC_TAG,              "KeyedLoadIC",            "klic")     \
+  V(KEYED_STORE_IC_TAG,             "KeyedStoreIC",           "ksic")     \
+  V(LAZY_COMPILE_TAG,               "LazyCompile",            "lc")       \
+  V(LOAD_IC_TAG,                    "LoadIC",                 "lic")      \
+  V(REG_EXP_TAG,                    "RegExp",                 "re")       \
+  V(SCRIPT_TAG,                     "Script",                 "sc")       \
+  V(STORE_IC_TAG,                   "StoreIC",                "sic")      \
+  V(STUB_TAG,                       "Stub",                   "s")
+
+class Logger {
+ public:
+#define DECLARE_ENUM(enum_item, ignore1, ignore2) enum_item,
+  enum LogEventsAndTags {
+    LOG_EVENTS_AND_TAGS_LIST(DECLARE_ENUM)
+    NUMBER_OF_LOG_EVENTS
+  };
+#undef DECLARE_ENUM
+
+  // Acquires resources for logging if the right flags are set.
+  static bool Setup();
+
+  // Frees resources acquired in Setup.
+  static void TearDown();
+
+  // Enable the computation of a sliding window of states.
+  static void EnableSlidingStateWindow();
+
+  // Write a raw string to the log to be used as a preamble.
+  // No check is made that the 'preamble' is actually at the beginning
+  // of the log. The preamble is used to write code events saved in the
+  // snapshot.
+  static void Preamble(const char* content);
+
+  // Emits an event with a string value -> (name, value).
+  static void StringEvent(const char* name, const char* value);
+
+  // Emits an event with an int value -> (name, value).
+  static void IntEvent(const char* name, int value);
+
+  // Emits an event with a handle value -> (name, location).
+  static void HandleEvent(const char* name, Object** location);
+
+  // Emits memory management events for C allocated structures.
+  static void NewEvent(const char* name, void* object, size_t size);
+  static void DeleteEvent(const char* name, void* object);
+
+  // Emits an event with a tag, and some resource usage information.
+  // -> (name, tag, <rusage information>).
+  // Currently, the resource usage information is a process time stamp
+  // and a real time timestamp.
+  static void ResourceEvent(const char* name, const char* tag);
+
+  // Emits an event that an undefined property was read from an
+  // object.
+  static void SuspectReadEvent(String* name, Object* obj);
+
+  // Emits an event when a message is put on or read from a debugging queue.
+  // DebugTag lets us put a call-site specific label on the event.
+  static void DebugTag(const char* call_site_tag);
+  static void DebugEvent(const char* event_type, Vector<uint16_t> parameter);
+
+
+  // ==== Events logged by --log-api. ====
+  static void ApiNamedSecurityCheck(Object* key);
+  static void ApiIndexedSecurityCheck(uint32_t index);
+  static void ApiNamedPropertyAccess(const char* tag,
+                                     JSObject* holder,
+                                     Object* name);
+  static void ApiIndexedPropertyAccess(const char* tag,
+                                       JSObject* holder,
+                                       uint32_t index);
+  static void ApiObjectAccess(const char* tag, JSObject* obj);
+  static void ApiEntryCall(const char* name);
+
+
+  // ==== Events logged by --log-code. ====
+  // Emits a code create event.
+  static void CodeCreateEvent(LogEventsAndTags tag,
+                              Code* code, const char* source);
+  static void CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name);
+  static void CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name,
+                              String* source, int line);
+  static void CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count);
+  // Emits a code create event for a RegExp.
+  static void RegExpCodeCreateEvent(Code* code, String* source);
+  // Emits a code move event.
+  static void CodeMoveEvent(Address from, Address to);
+  // Emits a code delete event.
+  static void CodeDeleteEvent(Address from);
+
+  // ==== Events logged by --log-gc. ====
+  // Heap sampling events: start, end, and individual types.
+  static void HeapSampleBeginEvent(const char* space, const char* kind);
+  static void HeapSampleEndEvent(const char* space, const char* kind);
+  static void HeapSampleItemEvent(const char* type, int number, int bytes);
+  static void HeapSampleJSConstructorEvent(const char* constructor,
+                                           int number, int bytes);
+  static void HeapSampleJSRetainersEvent(const char* constructor,
+                                         const char* event);
+  static void HeapSampleStats(const char* space, const char* kind,
+                              int capacity, int used);
+
+  static void SharedLibraryEvent(const char* library_path,
+                                 uintptr_t start,
+                                 uintptr_t end);
+  static void SharedLibraryEvent(const wchar_t* library_path,
+                                 uintptr_t start,
+                                 uintptr_t end);
+
+  // ==== Events logged by --log-regexp ====
+  // Regexp compilation and execution events.
+
+  static void RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache);
+
+  // Log an event reported from generated code
+  static void LogRuntime(Vector<const char> format, JSArray* args);
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  static StateTag state() {
+    return current_state_ ? current_state_->state() : OTHER;
+  }
+
+  static bool is_logging() {
+    return is_logging_;
+  }
+
+  // Pause/Resume collection of profiling data.
+  // When data collection is paused, CPU tick events are discarded until
+  // data collection is resumed.
+  static void PauseProfiler(int flags);
+  static void ResumeProfiler(int flags);
+  static int GetActiveProfilerModules();
+
+  // If logging is performed into a memory buffer, this allows
+  // retrieving previously written messages. See v8.h.
+  static int GetLogLines(int from_pos, char* dest_buf, int max_size);
+
+  // Logs all compiled functions found in the heap.
+  static void LogCompiledFunctions();
+
+ private:
+
+  // Profiler's sampling interval (in milliseconds).
+  static const int kSamplingIntervalMs = 1;
+
+  // Size of window used for log records compression.
+  static const int kCompressionWindowSize = 4;
+
+  // Emits the profiler's first message.
+  static void ProfilerBeginEvent();
+
+  // Emits aliases for compressed messages.
+  static void LogAliases();
+
+  // Emits the source code of a regexp. Used by regexp events.
+  static void LogRegExpSource(Handle<JSRegExp> regexp);
+
+  // Emits a profiler tick event. Used by the profiler thread.
+  static void TickEvent(TickSample* sample, bool overflow);
+
+  static void ApiEvent(const char* name, ...);
+
+  // Logs a StringEvent regardless of whether FLAG_log is true.
+  static void UncheckedStringEvent(const char* name, const char* value);
+
+  // Stops logging and profiling in case of insufficient resources.
+  static void StopLoggingAndProfiling();
+
+  // Returns whether profiler's sampler is active.
+  static bool IsProfilerSamplerActive();
+
+  // The sampler used by the profiler and the sliding state window.
+  static Ticker* ticker_;
+
+  // When the statistical profiler is active, profiler_ points to a
+  // Profiler that handles the collection of samples.
+  static Profiler* profiler_;
+
+  // A stack of VM states.
+  static VMState* current_state_;
+
+  // Singleton bottom or default vm state.
+  static VMState bottom_state_;
+
+  // SlidingStateWindow instance keeping a sliding window of the most
+  // recent VM states.
+  static SlidingStateWindow* sliding_state_window_;
+
+  // An array of log event names.
+  static const char** log_events_;
+
+  // An instance of helper created if log compression is enabled.
+  static CompressionHelper* compression_helper_;
+
+  // Internal implementation classes with access to
+  // private members.
+  friend class CompressionHelper;
+  friend class EventLog;
+  friend class TimeLog;
+  friend class Profiler;
+  friend class SlidingStateWindow;
+  friend class VMState;
+
+  friend class LoggerTestHelper;
+
+  static bool is_logging_;
+#else
+  static bool is_logging() { return false; }
+#endif
+};
+
+
+// Class that extracts stack trace, used for profiling.
+class StackTracer : public AllStatic {
+ public:
+  static void Trace(TickSample* sample);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_LOG_H_
diff --git a/src/macro-assembler.h b/src/macro-assembler.h
new file mode 100644
index 0000000..63a6d6e
--- /dev/null
+++ b/src/macro-assembler.h
@@ -0,0 +1,88 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MACRO_ASSEMBLER_H_
+#define V8_MACRO_ASSEMBLER_H_
+
+
+// Helper types to make boolean flag easier to read at call-site.
+enum InvokeFlag {
+  CALL_FUNCTION,
+  JUMP_FUNCTION
+};
+
+
+enum CodeLocation {
+  IN_JAVASCRIPT,
+  IN_JS_ENTRY,
+  IN_C_ENTRY
+};
+
+
+enum HandlerType {
+  TRY_CATCH_HANDLER,
+  TRY_FINALLY_HANDLER,
+  JS_ENTRY_HANDLER
+};
+
+
+// Flags used for the AllocateInNewSpace functions.
+enum AllocationFlags {
+  // No special flags.
+  NO_ALLOCATION_FLAGS = 0,
+  // Return the pointer to the allocated memory already tagged as a heap
+  // object.
+  TAG_OBJECT = 1 << 0,
+  // The content of the result register already contains the allocation top in
+  // new space.
+  RESULT_CONTAINS_TOP = 1 << 1
+};
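+
+// Illustrative note: these values are bit flags, so callers can combine
+// them, e.g. TAG_OBJECT | RESULT_CONTAINS_TOP.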
+
+
+#if V8_TARGET_ARCH_IA32
+#include "assembler.h"
+#include "ia32/assembler-ia32.h"
+#include "ia32/assembler-ia32-inl.h"
+#include "code.h"  // must be after assembler_*.h
+#include "ia32/macro-assembler-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "assembler.h"
+#include "x64/assembler-x64.h"
+#include "x64/assembler-x64-inl.h"
+#include "code.h"  // must be after assembler_*.h
+#include "x64/macro-assembler-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/constants-arm.h"
+#include "assembler.h"
+#include "arm/assembler-arm.h"
+#include "arm/assembler-arm-inl.h"
+#include "code.h"  // must be after assembler_*.h
+#include "arm/macro-assembler-arm.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+#endif  // V8_MACRO_ASSEMBLER_H_
diff --git a/src/macros.py b/src/macros.py
new file mode 100644
index 0000000..c75f0ea
--- /dev/null
+++ b/src/macros.py
@@ -0,0 +1,130 @@
+# Copyright 2006-2009 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Dictionary that is passed as defines for js2c.py.
+# Used for defines that must be defined for all native js files.
+
+const NONE        = 0;
+const READ_ONLY   = 1;
+const DONT_ENUM   = 2;
+const DONT_DELETE = 4;
+
+# Constants used for getter and setter operations.
+const GETTER = 0;
+const SETTER = 1;
+
+# These definitions must match the index of the properties in objects.h.
+const kApiTagOffset               = 0;
+const kApiPropertyListOffset      = 1;
+const kApiSerialNumberOffset      = 2;
+const kApiConstructorOffset       = 2;
+const kApiPrototypeTemplateOffset = 5;
+const kApiParentTemplateOffset    = 6;
+
+const NO_HINT     = 0;
+const NUMBER_HINT = 1;
+const STRING_HINT = 2;
+
+const kFunctionTag  = 0;
+const kNewObjectTag = 1;
+
+# For date.js.
+const HoursPerDay      = 24;
+const MinutesPerHour   = 60;
+const SecondsPerMinute = 60;
+const msPerSecond      = 1000;
+const msPerMinute      = 60000;
+const msPerHour        = 3600000;
+const msPerDay         = 86400000;
+const msPerMonth       = 2592000000;
+
+# For apinatives.js
+const kUninitialized = -1;
+
+# Note: kDayZeroInJulianDay = ToJulianDay(1970, 0, 1).
+const kInvalidDate        = 'Invalid Date';
+const kDayZeroInJulianDay = 2440588;
+const kMonthMask          = 0x1e0;
+const kDayMask            = 0x01f;
+const kYearShift          = 9;
+const kMonthShift         = 5;
+
+# Type query macros.
+macro IS_NULL(arg)              = (arg === null);
+macro IS_NULL_OR_UNDEFINED(arg) = (arg == null);
+macro IS_UNDEFINED(arg)         = (typeof(arg) === 'undefined');
+macro IS_FUNCTION(arg)          = (typeof(arg) === 'function');
+macro IS_NUMBER(arg)            = (typeof(arg) === 'number');
+macro IS_STRING(arg)            = (typeof(arg) === 'string');
+macro IS_OBJECT(arg)            = (typeof(arg) === 'object');
+macro IS_BOOLEAN(arg)           = (typeof(arg) === 'boolean');
+macro IS_ARRAY(arg)             = (%_IsArray(arg));
+macro IS_REGEXP(arg)            = (%_ClassOf(arg) === 'RegExp');
+macro IS_DATE(arg)              = (%_ClassOf(arg) === 'Date');
+macro IS_NUMBER_WRAPPER(arg)    = (%_ClassOf(arg) === 'Number');
+macro IS_STRING_WRAPPER(arg)    = (%_ClassOf(arg) === 'String');
+macro IS_BOOLEAN_WRAPPER(arg)   = (%_ClassOf(arg) === 'Boolean');
+macro IS_ERROR(arg)             = (%_ClassOf(arg) === 'Error');
+macro IS_SCRIPT(arg)            = (%_ClassOf(arg) === 'Script');
+macro IS_ARGUMENTS(arg)         = (%_ClassOf(arg) === 'Arguments');
+macro IS_GLOBAL(arg)            = (%_ClassOf(arg) === 'global');
+macro FLOOR(arg)                = %Math_floor(arg);
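+
+# Illustrative expansion (assumption about js2c.py's textual substitution):
+# a native source line such as
+#   if (IS_STRING(x)) return x;
+# is rewritten during the build to
+#   if ((typeof(x) === 'string')) return x;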
+
+# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
+macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
+macro TO_INTEGER(arg)    = (%_IsSmi(%IS_VAR(arg)) ? arg : ToInteger(arg));
+macro TO_INT32(arg)      = (%_IsSmi(%IS_VAR(arg)) ? arg : ToInt32(arg));
+
+# Macros implemented in Python.
+python macro CHAR_CODE(str) = ord(str[1]);
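+
+# Illustrative note (assumption about how the argument text is passed): str
+# includes the surrounding quotes, so CHAR_CODE("a") is replaced at build
+# time by ord('a'), i.e. 97.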
+
+# Accessors for original global properties that ensure they have been loaded.
+const ORIGINAL_REGEXP = (global.RegExp, $RegExp);
+const ORIGINAL_DATE   = (global.Date, $Date);
+
+# Constants used on an array to implement the properties of the RegExp object.
+const REGEXP_NUMBER_OF_CAPTURES = 0;
+const REGEXP_FIRST_CAPTURE = 3;
+
+# We can't put macros in macros so we use constants here.
+# REGEXP_NUMBER_OF_CAPTURES
+macro NUMBER_OF_CAPTURES(array) = ((array)[0]);
+
+# Gets the value of a Date object. If arg is not a Date object
+# a type error is thrown.
+macro DATE_VALUE(arg) = (%_ClassOf(arg) === 'Date' ? %_ValueOf(arg) : ThrowDateTypeError());
+
+# Last input and last subject are after the captures so we can omit them on
+# results returned from global searches.  Beware - these evaluate their
+# arguments twice.
+macro LAST_SUBJECT(array) = ((array)[1]);
+macro LAST_INPUT(array) = ((array)[2]);
+
+# REGEXP_FIRST_CAPTURE
+macro CAPTURE(index) = (3 + (index));
+const CAPTURE0 = 3;
+const CAPTURE1 = 4;
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
new file mode 100644
index 0000000..cbd47a8
--- /dev/null
+++ b/src/mark-compact.cc
@@ -0,0 +1,1809 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "execution.h"
+#include "global-handles.h"
+#include "ic-inl.h"
+#include "mark-compact.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// MarkCompactCollector
+
+bool MarkCompactCollector::force_compaction_ = false;
+bool MarkCompactCollector::compacting_collection_ = false;
+bool MarkCompactCollector::compact_on_next_gc_ = false;
+
+int MarkCompactCollector::previous_marked_count_ = 0;
+GCTracer* MarkCompactCollector::tracer_ = NULL;
+
+
+#ifdef DEBUG
+MarkCompactCollector::CollectorState MarkCompactCollector::state_ = IDLE;
+
+// Counters used for debugging the marking phase of mark-compact or mark-sweep
+// collection.
+int MarkCompactCollector::live_bytes_ = 0;
+int MarkCompactCollector::live_young_objects_ = 0;
+int MarkCompactCollector::live_old_data_objects_ = 0;
+int MarkCompactCollector::live_old_pointer_objects_ = 0;
+int MarkCompactCollector::live_code_objects_ = 0;
+int MarkCompactCollector::live_map_objects_ = 0;
+int MarkCompactCollector::live_cell_objects_ = 0;
+int MarkCompactCollector::live_lo_objects_ = 0;
+#endif
+
+void MarkCompactCollector::CollectGarbage() {
+  // Make sure that Prepare() has been called. The individual steps below will
+  // update the state as they proceed.
+  ASSERT(state_ == PREPARE_GC);
+
+  // Prepare has selected whether to compact the old generation or not.
+  // Tell the tracer.
+  if (IsCompacting()) tracer_->set_is_compacting();
+
+  MarkLiveObjects();
+
+  if (FLAG_collect_maps) ClearNonLiveTransitions();
+
+  SweepLargeObjectSpace();
+
+  if (IsCompacting()) {
+    EncodeForwardingAddresses();
+
+    UpdatePointers();
+
+    RelocateObjects();
+
+    RebuildRSets();
+
+  } else {
+    SweepSpaces();
+  }
+
+  Finish();
+
+  // Save the count of marked objects remaining after the collection and
+  // null out the GC tracer.
+  previous_marked_count_ = tracer_->marked_count();
+  ASSERT(previous_marked_count_ == 0);
+  tracer_ = NULL;
+}
+
+
+void MarkCompactCollector::Prepare(GCTracer* tracer) {
+  // Rather than passing the tracer around we stash it in a static member
+  // variable.
+  tracer_ = tracer;
+
+#ifdef DEBUG
+  ASSERT(state_ == IDLE);
+  state_ = PREPARE_GC;
+#endif
+  ASSERT(!FLAG_always_compact || !FLAG_never_compact);
+
+  compacting_collection_ =
+      FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
+  compact_on_next_gc_ = false;
+
+  if (FLAG_never_compact) compacting_collection_ = false;
+  if (FLAG_collect_maps) CreateBackPointers();
+
+#ifdef DEBUG
+  if (compacting_collection_) {
+    // We will write bookkeeping information to the remembered set area
+    // starting now.
+    Page::set_rset_state(Page::NOT_IN_USE);
+  }
+#endif
+
+  PagedSpaces spaces;
+  while (PagedSpace* space = spaces.next()) {
+    space->PrepareForMarkCompact(compacting_collection_);
+  }
+
+#ifdef DEBUG
+  live_bytes_ = 0;
+  live_young_objects_ = 0;
+  live_old_pointer_objects_ = 0;
+  live_old_data_objects_ = 0;
+  live_code_objects_ = 0;
+  live_map_objects_ = 0;
+  live_cell_objects_ = 0;
+  live_lo_objects_ = 0;
+#endif
+}
+
+
+void MarkCompactCollector::Finish() {
+#ifdef DEBUG
+  ASSERT(state_ == SWEEP_SPACES || state_ == REBUILD_RSETS);
+  state_ = IDLE;
+#endif
+  // The stub cache is not traversed during GC; clear the cache to
+  // force lazy re-initialization of it. This must be done after the
+  // GC, because it relies on the new address of certain old space
+  // objects (empty string, illegal builtin).
+  StubCache::Clear();
+
+  // If we've just compacted old space there's no reason to check the
+  // fragmentation limit. Just return.
+  if (HasCompacted()) return;
+
+  // We compact the old generation on the next GC if it has gotten too
+  // fragmented (i.e., we could recover an expected amount of space by
+  // reclaiming the waste and free list blocks).
+  static const int kFragmentationLimit = 15;        // Percent.
+  static const int kFragmentationAllowed = 1 * MB;  // Absolute.
+  int old_gen_recoverable = 0;
+  int old_gen_used = 0;
+
+  OldSpaces spaces;
+  while (OldSpace* space = spaces.next()) {
+    old_gen_recoverable += space->Waste() + space->AvailableFree();
+    old_gen_used += space->Size();
+  }
+
+  int old_gen_fragmentation =
+      static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
+  if (old_gen_fragmentation > kFragmentationLimit &&
+      old_gen_recoverable > kFragmentationAllowed) {
+    compact_on_next_gc_ = true;
+  }
+}
+
+
+// -------------------------------------------------------------------------
+// Phase 1: tracing and marking live objects.
+//   before: all objects are in normal state.
+//   after: a live object's map pointer is marked as '00'.
+
+// Marking all live objects in the heap as part of mark-sweep or mark-compact
+// collection.  Before marking, all objects are in their normal state.  After
+// marking, live objects' map pointers are marked indicating that the object
+// has been found reachable.
+//
+// The marking algorithm is a (mostly) depth-first (because of possible stack
+// overflow) traversal of the graph of objects reachable from the roots.  It
+// uses an explicit stack of pointers rather than recursion.  The young
+// generation's inactive ('from') space is used as a marking stack.  The
+// objects in the marking stack are the ones that have been reached and marked
+// but their children have not yet been visited.
+//
+// The marking stack can overflow during traversal.  In that case, we set an
+// overflow flag.  When the overflow flag is set, we continue marking objects
+// reachable from the objects on the marking stack, but no longer push them on
+// the marking stack.  Instead, we mark them as both marked and overflowed.
+// When the stack is in the overflowed state, objects marked as overflowed
+// have been reached and marked but their children have not been visited yet.
+// After emptying the marking stack, we clear the overflow flag and traverse
+// the heap looking for objects marked as overflowed, push them on the stack,
+// and continue with marking.  This process repeats until all reachable
+// objects have been marked.
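+//
+// (The explicit stack is the marking_stack defined below; EmptyMarkingStack
+// and RefillMarkingStack implement the overflow handling described above.)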
+
+static MarkingStack marking_stack;
+
+
+static inline HeapObject* ShortCircuitConsString(Object** p) {
+  // Optimization: If the heap object pointed to by p is a non-symbol
+  // cons string whose right substring is Heap::empty_string, update
+  // it in place to its left substring.  Return the updated value.
+  //
+  // Here we assume that if we change *p, we replace it with a heap object
+  // (i.e., the left substring of a cons string is always a heap object).
+  //
+  // The check performed is:
+  //   object->IsConsString() && !object->IsSymbol() &&
+  //   (ConsString::cast(object)->second() == Heap::empty_string())
+  // except the maps for the object and its possible substrings might be
+  // marked.
+  HeapObject* object = HeapObject::cast(*p);
+  MapWord map_word = object->map_word();
+  map_word.ClearMark();
+  InstanceType type = map_word.ToMap()->instance_type();
+  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
+
+  Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
+  if (second != Heap::raw_unchecked_empty_string()) {
+    return object;
+  }
+
+  // Since we don't have the object's start, it is impossible to update the
+  // remembered set.  Therefore, we only replace the string with its left
+  // substring when the remembered set does not change.
+  Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
+  if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object;
+
+  *p = first;
+  return HeapObject::cast(first);
+}
+
+
+// Helper class for marking pointers in HeapObjects.
+class MarkingVisitor : public ObjectVisitor {
+ public:
+  void VisitPointer(Object** p) {
+    MarkObjectByPointer(p);
+  }
+
+  void VisitPointers(Object** start, Object** end) {
+    // Mark all objects pointed to in [start, end).
+    const int kMinRangeForMarkingRecursion = 64;
+    if (end - start >= kMinRangeForMarkingRecursion) {
+      if (VisitUnmarkedObjects(start, end)) return;
+      // We are close to a stack overflow, so just mark the objects.
+    }
+    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
+  }
+
+  void VisitCodeTarget(RelocInfo* rinfo) {
+    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+    Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
+    if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
+      IC::Clear(rinfo->pc());
+      // Please note targets for cleared inline caches do not have to be
+      // marked since they are contained in Heap::non_monomorphic_cache().
+    } else {
+      MarkCompactCollector::MarkObject(code);
+    }
+  }
+
+  void VisitDebugTarget(RelocInfo* rinfo) {
+    ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) &&
+           rinfo->IsCallInstruction());
+    HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
+    MarkCompactCollector::MarkObject(code);
+    // When compacting we convert the call to a real object pointer.
+    if (IsCompacting()) rinfo->set_call_object(code);
+  }
+
+ private:
+  // Mark object pointed to by p.
+  void MarkObjectByPointer(Object** p) {
+    if (!(*p)->IsHeapObject()) return;
+    HeapObject* object = ShortCircuitConsString(p);
+    MarkCompactCollector::MarkObject(object);
+  }
+
+  // Tells whether the mark sweep collection will perform compaction.
+  bool IsCompacting() { return MarkCompactCollector::IsCompacting(); }
+
+  // Visit an unmarked object.
+  void VisitUnmarkedObject(HeapObject* obj) {
+#ifdef DEBUG
+    ASSERT(Heap::Contains(obj));
+    ASSERT(!obj->IsMarked());
+#endif
+    Map* map = obj->map();
+    MarkCompactCollector::SetMark(obj);
+    // Mark the map pointer and the body.
+    MarkCompactCollector::MarkObject(map);
+    obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), this);
+  }
+
+  // Visit all unmarked objects pointed to by [start, end).
+  // Returns false if the operation fails (lack of stack space).
+  inline bool VisitUnmarkedObjects(Object** start, Object** end) {
+    // Return false if we are close to the stack limit.
+    StackLimitCheck check;
+    if (check.HasOverflowed()) return false;
+
+    // Visit the unmarked objects.
+    for (Object** p = start; p < end; p++) {
+      if (!(*p)->IsHeapObject()) continue;
+      HeapObject* obj = HeapObject::cast(*p);
+      if (obj->IsMarked()) continue;
+      VisitUnmarkedObject(obj);
+    }
+    return true;
+  }
+};
+
+
+// Visitor class for marking heap roots.
+class RootMarkingVisitor : public ObjectVisitor {
+ public:
+  void VisitPointer(Object** p) {
+    MarkObjectByPointer(p);
+  }
+
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
+  }
+
+  MarkingVisitor* stack_visitor() { return &stack_visitor_; }
+
+ private:
+  MarkingVisitor stack_visitor_;
+
+  void MarkObjectByPointer(Object** p) {
+    if (!(*p)->IsHeapObject()) return;
+
+    // Replace flat cons strings in place.
+    HeapObject* object = ShortCircuitConsString(p);
+    if (object->IsMarked()) return;
+
+    Map* map = object->map();
+    // Mark the object.
+    MarkCompactCollector::SetMark(object);
+    // Mark the map pointer and body, and push them on the marking stack.
+    MarkCompactCollector::MarkObject(map);
+    object->IterateBody(map->instance_type(), object->SizeFromMap(map),
+                        &stack_visitor_);
+
+    // Mark all the objects reachable from the map and body.  May leave
+    // overflowed objects in the heap.
+    MarkCompactCollector::EmptyMarkingStack(&stack_visitor_);
+  }
+};
+
+
+// Helper class for pruning the symbol table.
+class SymbolTableCleaner : public ObjectVisitor {
+ public:
+  SymbolTableCleaner() : pointers_removed_(0) { }
+  void VisitPointers(Object** start, Object** end) {
+    // Visit all HeapObject pointers in [start, end).
+    for (Object** p = start; p < end; p++) {
+      if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
+        // Check if the symbol being pruned is an external symbol. We need to
+        // delete the associated external data as this symbol is going away.
+
+        // Since the object is not marked we can access its map word safely
+        // without having to worry about marking bits in the object header.
+        Map* map = HeapObject::cast(*p)->map();
+        // Since no objects have yet been moved we can safely access the map of
+        // the object.
+        uint32_t type = map->instance_type();
+        bool is_external = (type & kStringRepresentationMask) ==
+                           kExternalStringTag;
+        if (is_external) {
+          bool is_two_byte = (type & kStringEncodingMask) == kTwoByteStringTag;
+          byte* resource_addr = reinterpret_cast<byte*>(*p) +
+                                ExternalString::kResourceOffset -
+                                kHeapObjectTag;
+          if (is_two_byte) {
+            v8::String::ExternalStringResource** resource =
+                reinterpret_cast<v8::String::ExternalStringResource**>
+                (resource_addr);
+            delete *resource;
+            // Clear the resource pointer in the symbol.
+            *resource = NULL;
+          } else {
+            v8::String::ExternalAsciiStringResource** resource =
+                reinterpret_cast<v8::String::ExternalAsciiStringResource**>
+                (resource_addr);
+            delete *resource;
+            // Clear the resource pointer in the symbol.
+            *resource = NULL;
+          }
+        }
+        // Set the entry to null_value (as deleted).
+        *p = Heap::raw_unchecked_null_value();
+        pointers_removed_++;
+      }
+    }
+  }
+
+  int PointersRemoved() {
+    return pointers_removed_;
+  }
+ private:
+  int pointers_removed_;
+};
+
+
+void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
+  ASSERT(!object->IsMarked());
+  ASSERT(Heap::Contains(object));
+  if (object->IsMap()) {
+    Map* map = Map::cast(object);
+    if (FLAG_cleanup_caches_in_maps_at_gc) {
+      map->ClearCodeCache();
+    }
+    SetMark(map);
+    if (FLAG_collect_maps &&
+        map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
+        map->instance_type() <= JS_FUNCTION_TYPE) {
+      MarkMapContents(map);
+    } else {
+      marking_stack.Push(map);
+    }
+  } else {
+    SetMark(object);
+    marking_stack.Push(object);
+  }
+}
+
+
+void MarkCompactCollector::MarkMapContents(Map* map) {
+  MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(
+      *HeapObject::RawField(map, Map::kInstanceDescriptorsOffset)));
+
+  // Mark the Object* fields of the Map.
+  // Since the descriptor array has been marked already, it is fine
+  // that one of these fields contains a pointer to it.
+  MarkingVisitor visitor;  // Has no state or contents.
+  visitor.VisitPointers(HeapObject::RawField(map, Map::kPrototypeOffset),
+                        HeapObject::RawField(map, Map::kSize));
+}
+
+
+void MarkCompactCollector::MarkDescriptorArray(
+    DescriptorArray* descriptors) {
+  if (descriptors->IsMarked()) return;
+  // Empty descriptor array is marked as a root before any maps are marked.
+  ASSERT(descriptors != Heap::raw_unchecked_empty_descriptor_array());
+  SetMark(descriptors);
+
+  FixedArray* contents = reinterpret_cast<FixedArray*>(
+      descriptors->get(DescriptorArray::kContentArrayIndex));
+  ASSERT(contents->IsHeapObject());
+  ASSERT(!contents->IsMarked());
+  ASSERT(contents->IsFixedArray());
+  ASSERT(contents->length() >= 2);
+  SetMark(contents);
+  // Contents contains (value, details) pairs.  If the details say
+  // that the type of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
+  // or NULL_DESCRIPTOR, we don't mark the value as live.  Only for
+  // type MAP_TRANSITION is the value an Object* (a Map*).
+  for (int i = 0; i < contents->length(); i += 2) {
+    // If the pair (value, details) at index i, i+1 is not
+    // a transition or null descriptor, mark the value.
+    PropertyDetails details(Smi::cast(contents->get(i + 1)));
+    if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
+      HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
+      if (object->IsHeapObject() && !object->IsMarked()) {
+        SetMark(object);
+        marking_stack.Push(object);
+      }
+    }
+  }
+  // The DescriptorArray descriptors contains a pointer to its contents array,
+  // but the contents array is already marked.
+  marking_stack.Push(descriptors);
+}
+
+
+void MarkCompactCollector::CreateBackPointers() {
+  HeapObjectIterator iterator(Heap::map_space());
+  while (iterator.has_next()) {
+    Object* next_object = iterator.next();
+    if (next_object->IsMap()) {  // Could also be ByteArray on free list.
+      Map* map = Map::cast(next_object);
+      if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
+          map->instance_type() <= JS_FUNCTION_TYPE) {
+        map->CreateBackPointers();
+      } else {
+        ASSERT(map->instance_descriptors() == Heap::empty_descriptor_array());
+      }
+    }
+  }
+}
+
+
+static int OverflowObjectSize(HeapObject* obj) {
+  // Recover the normal map pointer, it might be marked as live and
+  // overflowed.
+  MapWord map_word = obj->map_word();
+  map_word.ClearMark();
+  map_word.ClearOverflow();
+  return obj->SizeFromMap(map_word.ToMap());
+}
+
+
+// Fill the marking stack with overflowed objects returned by the given
+// iterator.  Stop when the marking stack is filled or the end of the space
+// is reached, whichever comes first.
+template<class T>
+static void ScanOverflowedObjects(T* it) {
+  // The caller should ensure that the marking stack is initially not full,
+  // so that we don't waste effort pointlessly scanning for objects.
+  ASSERT(!marking_stack.is_full());
+
+  while (it->has_next()) {
+    HeapObject* object = it->next();
+    if (object->IsOverflowed()) {
+      object->ClearOverflow();
+      ASSERT(object->IsMarked());
+      ASSERT(Heap::Contains(object));
+      marking_stack.Push(object);
+      if (marking_stack.is_full()) return;
+    }
+  }
+}
+
+
+bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
+  return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked();
+}
+
+
+class SymbolMarkingVisitor : public ObjectVisitor {
+ public:
+  void VisitPointers(Object** start, Object** end) {
+    MarkingVisitor marker;
+    for (Object** p = start; p < end; p++) {
+      if (!(*p)->IsHeapObject()) continue;
+
+      HeapObject* object = HeapObject::cast(*p);
+      // If the object is marked, we have marked or are in the process
+      // of marking subparts.
+      if (object->IsMarked()) continue;
+
+      // The object is unmarked, so we do not need to unmark it to use
+      // its map.
+      Map* map = object->map();
+      object->IterateBody(map->instance_type(),
+                          object->SizeFromMap(map),
+                          &marker);
+    }
+  }
+};
+
+
+void MarkCompactCollector::MarkSymbolTable() {
+  // Objects reachable from symbols are marked as live so as to ensure
+  // that if the symbol itself remains alive after GC for any reason,
+  // and if it is a sliced string or a cons string backed by an
+  // external string (even indirectly), then the external string does
+  // not receive a weak reference callback.
+  SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
+  // Mark the symbol table itself.
+  SetMark(symbol_table);
+  // Explicitly mark the prefix.
+  MarkingVisitor marker;
+  symbol_table->IteratePrefix(&marker);
+  ProcessMarkingStack(&marker);
+  // Mark subparts of the symbols but not the symbols themselves
+  // (unless reachable from another symbol).
+  SymbolMarkingVisitor symbol_marker;
+  symbol_table->IterateElements(&symbol_marker);
+  ProcessMarkingStack(&marker);
+}
+
+
+void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
+  // Mark the heap roots including global variables, stack variables,
+  // etc., and all objects reachable from them.
+  Heap::IterateStrongRoots(visitor);
+
+  // Handle the symbol table specially.
+  MarkSymbolTable();
+
+  // There may be overflowed objects in the heap.  Visit them now.
+  while (marking_stack.overflowed()) {
+    RefillMarkingStack();
+    EmptyMarkingStack(visitor->stack_visitor());
+  }
+}
+
+
+void MarkCompactCollector::MarkObjectGroups() {
+  List<ObjectGroup*>* object_groups = GlobalHandles::ObjectGroups();
+
+  for (int i = 0; i < object_groups->length(); i++) {
+    ObjectGroup* entry = object_groups->at(i);
+    if (entry == NULL) continue;
+
+    List<Object**>& objects = entry->objects_;
+    bool group_marked = false;
+    for (int j = 0; j < objects.length(); j++) {
+      Object* object = *objects[j];
+      if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
+        group_marked = true;
+        break;
+      }
+    }
+
+    if (!group_marked) continue;
+
+    // An object in the group is marked, so mark as gray all white heap
+    // objects in the group.
+    for (int j = 0; j < objects.length(); ++j) {
+      if ((*objects[j])->IsHeapObject()) {
+        MarkObject(HeapObject::cast(*objects[j]));
+      }
+    }
+    // Once the entire group has been colored gray, set the object group
+    // to NULL so it won't be processed again.
+    delete object_groups->at(i);
+    object_groups->at(i) = NULL;
+  }
+}
+
+
+// Mark all objects reachable from the objects on the marking stack.
+// Before: the marking stack contains zero or more heap object pointers.
+// After: the marking stack is empty, and all objects reachable from the
+// marking stack have been marked, or are overflowed in the heap.
+void MarkCompactCollector::EmptyMarkingStack(MarkingVisitor* visitor) {
+  while (!marking_stack.is_empty()) {
+    HeapObject* object = marking_stack.Pop();
+    ASSERT(object->IsHeapObject());
+    ASSERT(Heap::Contains(object));
+    ASSERT(object->IsMarked());
+    ASSERT(!object->IsOverflowed());
+
+    // Because the object is marked, we have to recover the original map
+    // pointer and use it to mark the object's body.
+    MapWord map_word = object->map_word();
+    map_word.ClearMark();
+    Map* map = map_word.ToMap();
+    MarkObject(map);
+    object->IterateBody(map->instance_type(), object->SizeFromMap(map),
+                        visitor);
+  }
+}
+
+
+// Sweep the heap for overflowed objects, clear their overflow bits, and
+// push them on the marking stack.  Stop early if the marking stack fills
+// before sweeping completes.  If sweeping completes, there are no remaining
+// overflowed objects in the heap, so the overflow flag on the marking stack
+// is cleared.
+void MarkCompactCollector::RefillMarkingStack() {
+  ASSERT(marking_stack.overflowed());
+
+  SemiSpaceIterator new_it(Heap::new_space(), &OverflowObjectSize);
+  ScanOverflowedObjects(&new_it);
+  if (marking_stack.is_full()) return;
+
+  HeapObjectIterator old_pointer_it(Heap::old_pointer_space(),
+                                    &OverflowObjectSize);
+  ScanOverflowedObjects(&old_pointer_it);
+  if (marking_stack.is_full()) return;
+
+  HeapObjectIterator old_data_it(Heap::old_data_space(), &OverflowObjectSize);
+  ScanOverflowedObjects(&old_data_it);
+  if (marking_stack.is_full()) return;
+
+  HeapObjectIterator code_it(Heap::code_space(), &OverflowObjectSize);
+  ScanOverflowedObjects(&code_it);
+  if (marking_stack.is_full()) return;
+
+  HeapObjectIterator map_it(Heap::map_space(), &OverflowObjectSize);
+  ScanOverflowedObjects(&map_it);
+  if (marking_stack.is_full()) return;
+
+  HeapObjectIterator cell_it(Heap::cell_space(), &OverflowObjectSize);
+  ScanOverflowedObjects(&cell_it);
+  if (marking_stack.is_full()) return;
+
+  LargeObjectIterator lo_it(Heap::lo_space(), &OverflowObjectSize);
+  ScanOverflowedObjects(&lo_it);
+  if (marking_stack.is_full()) return;
+
+  marking_stack.clear_overflowed();
+}
+
+
+// Mark all objects reachable (transitively) from objects on the marking
+// stack.  Before: the marking stack contains zero or more heap object
+// pointers.  After: the marking stack is empty and there are no overflowed
+// objects in the heap.
+void MarkCompactCollector::ProcessMarkingStack(MarkingVisitor* visitor) {
+  EmptyMarkingStack(visitor);
+  while (marking_stack.overflowed()) {
+    RefillMarkingStack();
+    EmptyMarkingStack(visitor);
+  }
+}
+
+
+void MarkCompactCollector::ProcessObjectGroups(MarkingVisitor* visitor) {
+  bool work_to_do = true;
+  ASSERT(marking_stack.is_empty());
+  while (work_to_do) {
+    MarkObjectGroups();
+    work_to_do = !marking_stack.is_empty();
+    ProcessMarkingStack(visitor);
+  }
+}
+
+
+void MarkCompactCollector::MarkLiveObjects() {
+#ifdef DEBUG
+  ASSERT(state_ == PREPARE_GC);
+  state_ = MARK_LIVE_OBJECTS;
+#endif
+  // The to space contains live objects; the from space is used as a marking
+  // stack.
+  marking_stack.Initialize(Heap::new_space()->FromSpaceLow(),
+                           Heap::new_space()->FromSpaceHigh());
+
+  ASSERT(!marking_stack.overflowed());
+
+  RootMarkingVisitor root_visitor;
+  MarkRoots(&root_visitor);
+
+  // The objects reachable from the roots are marked, while unreachable
+  // objects are unmarked.  Mark objects reachable from object groups
+  // containing at least one marked object, and continue until no new
+  // objects are reachable from the object groups.
+  ProcessObjectGroups(root_visitor.stack_visitor());
+
+  // The objects reachable from the roots or object groups are marked,
+  // while unreachable objects are unmarked.  Mark objects reachable
+  // only from weak global handles.
+  //
+  // First we identify nonlive weak handles and mark them as pending
+  // destruction.
+  GlobalHandles::IdentifyWeakHandles(&IsUnmarkedHeapObject);
+  // Then we mark the objects and process the transitive closure.
+  GlobalHandles::IterateWeakRoots(&root_visitor);
+  while (marking_stack.overflowed()) {
+    RefillMarkingStack();
+    EmptyMarkingStack(root_visitor.stack_visitor());
+  }
+
+  // Repeat the object groups to mark unmarked groups reachable from the
+  // weak roots.
+  ProcessObjectGroups(root_visitor.stack_visitor());
+
+  // Prune the symbol table removing all symbols only pointed to by the
+  // symbol table.  Cannot use symbol_table() here because the symbol
+  // table is marked.
+  SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
+  SymbolTableCleaner v;
+  symbol_table->IterateElements(&v);
+  symbol_table->ElementsRemoved(v.PointersRemoved());
+
+  // Remove object groups after marking phase.
+  GlobalHandles::RemoveObjectGroups();
+}
+
+
+static int CountMarkedCallback(HeapObject* obj) {
+  MapWord map_word = obj->map_word();
+  map_word.ClearMark();
+  return obj->SizeFromMap(map_word.ToMap());
+}
+
+
+#ifdef DEBUG
+void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
+  live_bytes_ += obj->Size();
+  if (Heap::new_space()->Contains(obj)) {
+    live_young_objects_++;
+  } else if (Heap::map_space()->Contains(obj)) {
+    ASSERT(obj->IsMap());
+    live_map_objects_++;
+  } else if (Heap::cell_space()->Contains(obj)) {
+    ASSERT(obj->IsJSGlobalPropertyCell());
+    live_cell_objects_++;
+  } else if (Heap::old_pointer_space()->Contains(obj)) {
+    live_old_pointer_objects_++;
+  } else if (Heap::old_data_space()->Contains(obj)) {
+    live_old_data_objects_++;
+  } else if (Heap::code_space()->Contains(obj)) {
+    live_code_objects_++;
+  } else if (Heap::lo_space()->Contains(obj)) {
+    live_lo_objects_++;
+  } else {
+    UNREACHABLE();
+  }
+}
+#endif  // DEBUG
+
+
+void MarkCompactCollector::SweepLargeObjectSpace() {
+#ifdef DEBUG
+  ASSERT(state_ == MARK_LIVE_OBJECTS);
+  state_ =
+      compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
+#endif
+  // Deallocate unmarked objects and clear marked bits for marked objects.
+  Heap::lo_space()->FreeUnmarkedObjects();
+}
+
+// Safe to use during marking phase only.
+bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
+  MapWord metamap = object->map_word();
+  metamap.ClearMark();
+  return metamap.ToMap()->instance_type() == MAP_TYPE;
+}
+
+void MarkCompactCollector::ClearNonLiveTransitions() {
+  HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback);
+  // Iterate over the map space, setting map transitions that go from
+  // a marked map to an unmarked map to null transitions.  At the same time,
+  // set all the prototype fields of maps back to their original value,
+  // dropping the back pointers temporarily stored in the prototype field.
+  // Setting the prototype field requires following the linked list of
+  // back pointers, reversing them all at once.  This allows us to find
+  // those maps with map transitions that need to be nulled, and only
+  // scan the descriptor arrays of those maps, not all maps.
+// All of these actions are carried out only on maps of JSObjects
+  // and related subtypes.
+  while (map_iterator.has_next()) {
+    Map* map = reinterpret_cast<Map*>(map_iterator.next());
+    if (!map->IsMarked() && map->IsByteArray()) continue;
+
+    ASSERT(SafeIsMap(map));
+    // Only JSObject and subtypes have map transitions and back pointers.
+    if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
+    if (map->instance_type() > JS_FUNCTION_TYPE) continue;
+    // Follow the chain of back pointers to find the prototype.
+    Map* current = map;
+    while (SafeIsMap(current)) {
+      current = reinterpret_cast<Map*>(current->prototype());
+      ASSERT(current->IsHeapObject());
+    }
+    Object* real_prototype = current;
+
+    // Follow back pointers, setting them to prototype,
+    // clearing map transitions when necessary.
+    current = map;
+    bool on_dead_path = !current->IsMarked();
+    Object* next;
+    while (SafeIsMap(current)) {
+      next = current->prototype();
+      // There should never be a dead map above a live map.
+      ASSERT(on_dead_path || current->IsMarked());
+
+      // A live map above a dead map indicates a dead transition.
+      // This test will always be false on the first iteration.
+      if (on_dead_path && current->IsMarked()) {
+        on_dead_path = false;
+        current->ClearNonLiveTransitions(real_prototype);
+      }
+      *HeapObject::RawField(current, Map::kPrototypeOffset) =
+          real_prototype;
+      current = reinterpret_cast<Map*>(next);
+    }
+  }
+}
+
+// -------------------------------------------------------------------------
+// Phase 2: Encode forwarding addresses.
+// When compacting, forwarding addresses for objects in old space and map
+// space are encoded in their map pointer word (along with an encoding of
+// their map pointers).
+//
+//  31             21 20              10 9               0
+// +-----------------+------------------+-----------------+
+// |forwarding offset|page offset of map|page index of map|
+// +-----------------+------------------+-----------------+
+//  11 bits           11 bits            10 bits
+//
+// An address range [start, end) can have both live and non-live objects.
+// Maximal non-live regions are marked so they can be skipped on subsequent
+// sweeps of the heap.  A distinguished map-pointer encoding is used to mark
+// free regions of one-word size (in which case the next word is the start
+// of a live object).  A second distinguished map-pointer encoding is used
+// to mark free regions larger than one word, and the size of the free
+// region (including the first word) is written to the second word of the
+// region.
+//
+// Any valid map page offset must lie in the object area of the page, so map
+// page offsets less than Page::kObjectStartOffset are invalid.  We use a
+// pair of distinguished invalid map encodings (for single word and multiple
+// words) to indicate free regions in the page found during computation of
+// forwarding addresses and skipped over in subsequent sweeps.
+static const uint32_t kSingleFreeEncoding = 0;
+static const uint32_t kMultiFreeEncoding = 1;
+
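+// Purely as an illustrative sketch of the diagram above (the actual packing
+// is implemented by the MapWord helpers EncodeAddress, DecodeMapAddress and
+// DecodeOffset used later in this file), the three fields unpack as:
+//
+//   uint32_t page_index  = encoded & ((1 << 10) - 1);          // bits 0..9
+//   uint32_t page_offset = (encoded >> 10) & ((1 << 11) - 1);  // bits 10..20
+//   uint32_t fwd_offset  = encoded >> 21;                      // bits 21..31
+//
+// where the offset fields are presumably stored in scaled (aligned) units
+// rather than raw bytes; the exact scaling is defined by MapWord.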
+
+// Encode a free region, defined by the given start address and size, in the
+// first word or two of the region.
+void EncodeFreeRegion(Address free_start, int free_size) {
+  ASSERT(free_size >= kIntSize);
+  if (free_size == kIntSize) {
+    Memory::uint32_at(free_start) = kSingleFreeEncoding;
+  } else {
+    ASSERT(free_size >= 2 * kIntSize);
+    Memory::uint32_at(free_start) = kMultiFreeEncoding;
+    Memory::int_at(free_start + kIntSize) = free_size;
+  }
+
+#ifdef DEBUG
+  // Zap the body of the free region.
+  if (FLAG_enable_slow_asserts) {
+    for (int offset = 2 * kIntSize;
+         offset < free_size;
+         offset += kPointerSize) {
+      Memory::Address_at(free_start + offset) = kZapValue;
+    }
+  }
+#endif
+}
+
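+// For example, assuming 32-bit words (kIntSize == 4): a one-word hole at
+// address A is encoded as the single word kSingleFreeEncoding, and the word
+// at A + 4 is the start of a live object; a 24-byte hole is encoded as
+// kMultiFreeEncoding at A followed by the int 24 at A + 4, with the rest of
+// the hole zapped when slow asserts are enabled in debug builds.
+// IterateLiveObjectsInRange below skips free regions using exactly these
+// two encodings.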
+
+// Try to promote all objects in new space.  Heap numbers and sequential
+// strings are promoted to the old data space and all others to the old
+// pointer space (see Heap::TargetSpace).  Objects too large for a paged
+// space, or objects whose promotion fails, are relocated within new space.
+inline Object* MCAllocateFromNewSpace(HeapObject* object, int object_size) {
+  Object* forwarded;
+  if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
+    forwarded = Failure::Exception();
+  } else {
+    OldSpace* target_space = Heap::TargetSpace(object);
+    ASSERT(target_space == Heap::old_pointer_space() ||
+           target_space == Heap::old_data_space());
+    forwarded = target_space->MCAllocateRaw(object_size);
+  }
+  if (forwarded->IsFailure()) {
+    forwarded = Heap::new_space()->MCAllocateRaw(object_size);
+  }
+  return forwarded;
+}
+
+
+// Allocation functions for the paged spaces call the space's MCAllocateRaw.
+inline Object* MCAllocateFromOldPointerSpace(HeapObject* ignore,
+                                             int object_size) {
+  return Heap::old_pointer_space()->MCAllocateRaw(object_size);
+}
+
+
+inline Object* MCAllocateFromOldDataSpace(HeapObject* ignore, int object_size) {
+  return Heap::old_data_space()->MCAllocateRaw(object_size);
+}
+
+
+inline Object* MCAllocateFromCodeSpace(HeapObject* ignore, int object_size) {
+  return Heap::code_space()->MCAllocateRaw(object_size);
+}
+
+
+inline Object* MCAllocateFromMapSpace(HeapObject* ignore, int object_size) {
+  return Heap::map_space()->MCAllocateRaw(object_size);
+}
+
+
+inline Object* MCAllocateFromCellSpace(HeapObject* ignore, int object_size) {
+  return Heap::cell_space()->MCAllocateRaw(object_size);
+}
+
+
+// The forwarding address is encoded at the same offset as the current
+// to-space object, but in from space.
+inline void EncodeForwardingAddressInNewSpace(HeapObject* old_object,
+                                              int object_size,
+                                              Object* new_object,
+                                              int* ignored) {
+  int offset =
+      Heap::new_space()->ToSpaceOffsetForAddress(old_object->address());
+  Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset) =
+      HeapObject::cast(new_object)->address();
+}
+
+
+// The forwarding address is encoded in the map pointer of the object as an
+// offset (in terms of live bytes) from the address of the first live object
+// in the page.
+inline void EncodeForwardingAddressInPagedSpace(HeapObject* old_object,
+                                                int object_size,
+                                                Object* new_object,
+                                                int* offset) {
+  // Record the forwarding address of the first live object if necessary.
+  if (*offset == 0) {
+    Page::FromAddress(old_object->address())->mc_first_forwarded =
+        HeapObject::cast(new_object)->address();
+  }
+
+  MapWord encoding =
+      MapWord::EncodeAddress(old_object->map()->address(), *offset);
+  old_object->set_map_word(encoding);
+  *offset += object_size;
+  ASSERT(*offset <= Page::kObjectAreaSize);
+}
+
+
+// Most non-live objects are ignored.
+inline void IgnoreNonLiveObject(HeapObject* object) {}
+
+
+// A code deletion event is logged for non-live code objects.
+inline void LogNonLiveCodeObject(HeapObject* object) {
+  if (object->IsCode()) LOG(CodeDeleteEvent(object->address()));
+}
+
+
+// Function template that, given a range of addresses (e.g., a semispace or a
+// paged space page), iterates through the objects in the range to clear
+// mark bits and compute and encode forwarding addresses.  As a side effect,
+// maximal free chunks are marked so that they can be skipped on subsequent
+// sweeps.
+//
+// The template parameters are an allocation function, a forwarding address
+// encoding function, and a function to process non-live objects.
+template<MarkCompactCollector::AllocationFunction Alloc,
+         MarkCompactCollector::EncodingFunction Encode,
+         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
+inline void EncodeForwardingAddressesInRange(Address start,
+                                             Address end,
+                                             int* offset) {
+  // The start address of the current free region while sweeping the space.
+  // This address is set when a transition from live to non-live objects is
+  // encountered.  A value (an encoding of the 'next free region' pointer)
+  // is written to memory at this address when a transition from non-live to
+  // live objects is encountered.
+  Address free_start = NULL;
+
+  // A flag giving the state of the previously swept object.  Initially true
+  // to ensure that free_start is initialized to a proper address before
+  // trying to write to it.
+  bool is_prev_alive = true;
+
+  int object_size;  // Will be set on each iteration of the loop.
+  for (Address current = start; current < end; current += object_size) {
+    HeapObject* object = HeapObject::FromAddress(current);
+    if (object->IsMarked()) {
+      object->ClearMark();
+      MarkCompactCollector::tracer()->decrement_marked_count();
+      object_size = object->Size();
+
+      Object* forwarded = Alloc(object, object_size);
+      // Allocation cannot fail, because we are compacting the space.
+      ASSERT(!forwarded->IsFailure());
+      Encode(object, object_size, forwarded, offset);
+
+#ifdef DEBUG
+      if (FLAG_gc_verbose) {
+        PrintF("forward %p -> %p.\n", object->address(),
+               HeapObject::cast(forwarded)->address());
+      }
+#endif
+      if (!is_prev_alive) {  // Transition from non-live to live.
+        EncodeFreeRegion(free_start, current - free_start);
+        is_prev_alive = true;
+      }
+    } else {  // Non-live object.
+      object_size = object->Size();
+      ProcessNonLive(object);
+      if (is_prev_alive) {  // Transition from live to non-live.
+        free_start = current;
+        is_prev_alive = false;
+      }
+    }
+  }
+
+  // If we ended on a free region, mark it.
+  if (!is_prev_alive) EncodeFreeRegion(free_start, end - free_start);
+}
+
+
+// Functions to encode the forwarding pointers in each compactable space.
+void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
+  int ignored;
+  EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
+                                   EncodeForwardingAddressInNewSpace,
+                                   IgnoreNonLiveObject>(
+      Heap::new_space()->bottom(),
+      Heap::new_space()->top(),
+      &ignored);
+}
+
+
+template<MarkCompactCollector::AllocationFunction Alloc,
+         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
+void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
+    PagedSpace* space) {
+  PageIterator it(space, PageIterator::PAGES_IN_USE);
+  while (it.has_next()) {
+    Page* p = it.next();
+    // The offset of each live object in the page from the first live object
+    // in the page.
+    int offset = 0;
+    EncodeForwardingAddressesInRange<Alloc,
+                                     EncodeForwardingAddressInPagedSpace,
+                                     ProcessNonLive>(
+        p->ObjectAreaStart(),
+        p->AllocationTop(),
+        &offset);
+  }
+}
+
+
+static void SweepSpace(NewSpace* space) {
+  HeapObject* object;
+  for (Address current = space->bottom();
+       current < space->top();
+       current += object->Size()) {
+    object = HeapObject::FromAddress(current);
+    if (object->IsMarked()) {
+      object->ClearMark();
+      MarkCompactCollector::tracer()->decrement_marked_count();
+    } else {
+      // We give non-live objects a map that will correctly give their size,
+      // since their existing map might not be live after the collection.
+      int size = object->Size();
+      if (size >= ByteArray::kHeaderSize) {
+        object->set_map(Heap::raw_unchecked_byte_array_map());
+        ByteArray::cast(object)->set_length(ByteArray::LengthFor(size));
+      } else {
+        ASSERT(size == kPointerSize);
+        object->set_map(Heap::raw_unchecked_one_pointer_filler_map());
+      }
+      ASSERT(object->Size() == size);
+    }
+    // The object is now unmarked for the call to Size() at the top of the
+    // loop.
+  }
+}
+
+
+static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
+  PageIterator it(space, PageIterator::PAGES_IN_USE);
+  while (it.has_next()) {
+    Page* p = it.next();
+
+    bool is_previous_alive = true;
+    Address free_start = NULL;
+    HeapObject* object;
+
+    for (Address current = p->ObjectAreaStart();
+         current < p->AllocationTop();
+         current += object->Size()) {
+      object = HeapObject::FromAddress(current);
+      if (object->IsMarked()) {
+        object->ClearMark();
+        MarkCompactCollector::tracer()->decrement_marked_count();
+        if (!is_previous_alive) {  // Transition from free to live.
+          dealloc(free_start, current - free_start);
+          is_previous_alive = true;
+        }
+      } else {
+        if (object->IsCode()) {
+          // Notify the logger that compiled code has been collected.
+          LOG(CodeDeleteEvent(Code::cast(object)->address()));
+        }
+        if (is_previous_alive) {  // Transition from live to free.
+          free_start = current;
+          is_previous_alive = false;
+        }
+      }
+      // The object is now unmarked for the call to Size() at the top of the
+      // loop.
+    }
+
+    // If the last region was not live we need to deallocate from
+    // free_start to the allocation top in the page.
+    if (!is_previous_alive) {
+      int free_size = p->AllocationTop() - free_start;
+      if (free_size > 0) {
+        dealloc(free_start, free_size);
+      }
+    }
+  }
+}
+
+
+void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
+                                                     int size_in_bytes) {
+  Heap::ClearRSetRange(start, size_in_bytes);
+  Heap::old_pointer_space()->Free(start, size_in_bytes);
+}
+
+
+void MarkCompactCollector::DeallocateOldDataBlock(Address start,
+                                                  int size_in_bytes) {
+  Heap::old_data_space()->Free(start, size_in_bytes);
+}
+
+
+void MarkCompactCollector::DeallocateCodeBlock(Address start,
+                                               int size_in_bytes) {
+  Heap::code_space()->Free(start, size_in_bytes);
+}
+
+
+void MarkCompactCollector::DeallocateMapBlock(Address start,
+                                              int size_in_bytes) {
+  // Objects in map space are frequently assumed to have size Map::kSize and a
+  // valid map in their first word.  Thus, we break the free block up into
+  // chunks and free them separately.
+  ASSERT(size_in_bytes % Map::kSize == 0);
+  Heap::ClearRSetRange(start, size_in_bytes);
+  Address end = start + size_in_bytes;
+  for (Address a = start; a < end; a += Map::kSize) {
+    Heap::map_space()->Free(a);
+  }
+}
+
+
+void MarkCompactCollector::DeallocateCellBlock(Address start,
+                                               int size_in_bytes) {
+  // Free-list elements in cell space are assumed to have a fixed size.
+  // We break the free block into chunks and add them to the free list
+  // individually.
+  int size = Heap::cell_space()->object_size_in_bytes();
+  ASSERT(size_in_bytes % size == 0);
+  Heap::ClearRSetRange(start, size_in_bytes);
+  Address end = start + size_in_bytes;
+  for (Address a = start; a < end; a += size) {
+    Heap::cell_space()->Free(a);
+  }
+}
+
+
+void MarkCompactCollector::EncodeForwardingAddresses() {
+  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
+  // Objects in the active semispace of the young generation may be
+  // relocated to the inactive semispace (if not promoted).  Set the
+  // relocation info to the beginning of the inactive semispace.
+  Heap::new_space()->MCResetRelocationInfo();
+
+  // Compute the forwarding pointers in each space.
+  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
+                                        IgnoreNonLiveObject>(
+      Heap::old_pointer_space());
+
+  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
+                                        IgnoreNonLiveObject>(
+      Heap::old_data_space());
+
+  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
+                                        LogNonLiveCodeObject>(
+      Heap::code_space());
+
+  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
+                                        IgnoreNonLiveObject>(
+      Heap::cell_space());
+
+
+  // Compute new space next to last, after the old and code spaces have been
+  // compacted.  Objects in new space can be promoted to the old spaces.
+  EncodeForwardingAddressesInNewSpace();
+
+  // Compute map space last because computing forwarding addresses
+  // overwrites non-live objects.  Objects in the other spaces rely on
+  // non-live map pointers to get the sizes of non-live objects.
+  EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
+                                        IgnoreNonLiveObject>(
+      Heap::map_space());
+
+  // Write relocation info to the top page, so we can use it later.  This is
+  // done after promoting objects from the new space so we get the correct
+  // allocation top.
+  Heap::old_pointer_space()->MCWriteRelocationInfoToPage();
+  Heap::old_data_space()->MCWriteRelocationInfoToPage();
+  Heap::code_space()->MCWriteRelocationInfoToPage();
+  Heap::map_space()->MCWriteRelocationInfoToPage();
+  Heap::cell_space()->MCWriteRelocationInfoToPage();
+}
+
+
+void MarkCompactCollector::SweepSpaces() {
+  ASSERT(state_ == SWEEP_SPACES);
+  ASSERT(!IsCompacting());
+  // Noncompacting collections simply sweep the spaces to clear the mark
+  // bits and free the nonlive blocks (for old and map spaces).  We sweep
+  // the map space last because freeing non-live maps overwrites them and
+  // the other spaces rely on possibly non-live maps to get the sizes for
+  // non-live objects.
+  SweepSpace(Heap::old_pointer_space(), &DeallocateOldPointerBlock);
+  SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock);
+  SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
+  SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
+  SweepSpace(Heap::new_space());
+  SweepSpace(Heap::map_space(), &DeallocateMapBlock);
+}
+
+
+// Iterate the live objects in a range of addresses (e.g., a page or a
+// semispace) whose free (non-live) regions have been marked with the
+// distinguished free-region encodings, and skip those regions.  The callback
+// function is used to get the size of each live object.
+int MarkCompactCollector::IterateLiveObjectsInRange(
+    Address start,
+    Address end,
+    HeapObjectCallback size_func) {
+  int live_objects = 0;
+  Address current = start;
+  while (current < end) {
+    uint32_t encoded_map = Memory::uint32_at(current);
+    if (encoded_map == kSingleFreeEncoding) {
+      current += kPointerSize;
+    } else if (encoded_map == kMultiFreeEncoding) {
+      current += Memory::int_at(current + kIntSize);
+    } else {
+      live_objects++;
+      current += size_func(HeapObject::FromAddress(current));
+    }
+  }
+  return live_objects;
+}
+
+
+int MarkCompactCollector::IterateLiveObjects(NewSpace* space,
+                                             HeapObjectCallback size_f) {
+  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
+  return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
+}
+
+
+int MarkCompactCollector::IterateLiveObjects(PagedSpace* space,
+                                             HeapObjectCallback size_f) {
+  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
+  int total = 0;
+  PageIterator it(space, PageIterator::PAGES_IN_USE);
+  while (it.has_next()) {
+    Page* p = it.next();
+    total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
+                                       p->AllocationTop(),
+                                       size_f);
+  }
+  return total;
+}
+
+
+// -------------------------------------------------------------------------
+// Phase 3: Update pointers
+
+// Helper class for updating pointers in HeapObjects.
+class UpdatingVisitor: public ObjectVisitor {
+ public:
+  void VisitPointer(Object** p) {
+    UpdatePointer(p);
+  }
+
+  void VisitPointers(Object** start, Object** end) {
+    // Update all HeapObject pointers in [start, end).
+    for (Object** p = start; p < end; p++) UpdatePointer(p);
+  }
+
+  void VisitCodeTarget(RelocInfo* rinfo) {
+    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+    VisitPointer(&target);
+    rinfo->set_target_address(
+        reinterpret_cast<Code*>(target)->instruction_start());
+  }
+
+ private:
+  void UpdatePointer(Object** p) {
+    if (!(*p)->IsHeapObject()) return;
+
+    HeapObject* obj = HeapObject::cast(*p);
+    Address old_addr = obj->address();
+    Address new_addr;
+    ASSERT(!Heap::InFromSpace(obj));
+
+    if (Heap::new_space()->Contains(obj)) {
+      Address forwarding_pointer_addr =
+          Heap::new_space()->FromSpaceLow() +
+          Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
+      new_addr = Memory::Address_at(forwarding_pointer_addr);
+
+#ifdef DEBUG
+      ASSERT(Heap::old_pointer_space()->Contains(new_addr) ||
+             Heap::old_data_space()->Contains(new_addr) ||
+             Heap::new_space()->FromSpaceContains(new_addr) ||
+             Heap::lo_space()->Contains(HeapObject::FromAddress(new_addr)));
+
+      if (Heap::new_space()->FromSpaceContains(new_addr)) {
+        ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
+               Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
+      }
+#endif
+
+    } else if (Heap::lo_space()->Contains(obj)) {
+      // Don't move objects in the large object space.
+      return;
+
+    } else {
+#ifdef DEBUG
+      PagedSpaces spaces;
+      PagedSpace* original_space = spaces.next();
+      while (original_space != NULL) {
+        if (original_space->Contains(obj)) break;
+        original_space = spaces.next();
+      }
+      ASSERT(original_space != NULL);
+#endif
+      new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
+      ASSERT(original_space->Contains(new_addr));
+      ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
+             original_space->MCSpaceOffsetForAddress(old_addr));
+    }
+
+    *p = HeapObject::FromAddress(new_addr);
+
+#ifdef DEBUG
+    if (FLAG_gc_verbose) {
+      PrintF("update %p : %p -> %p\n",
+             reinterpret_cast<Address>(p), old_addr, new_addr);
+    }
+#endif
+  }
+};
+
+
+void MarkCompactCollector::UpdatePointers() {
+#ifdef DEBUG
+  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
+  state_ = UPDATE_POINTERS;
+#endif
+  UpdatingVisitor updating_visitor;
+  Heap::IterateRoots(&updating_visitor);
+  GlobalHandles::IterateWeakRoots(&updating_visitor);
+
+  int live_maps = IterateLiveObjects(Heap::map_space(),
+                                     &UpdatePointersInOldObject);
+  int live_pointer_olds = IterateLiveObjects(Heap::old_pointer_space(),
+                                             &UpdatePointersInOldObject);
+  int live_data_olds = IterateLiveObjects(Heap::old_data_space(),
+                                          &UpdatePointersInOldObject);
+  int live_codes = IterateLiveObjects(Heap::code_space(),
+                                      &UpdatePointersInOldObject);
+  int live_cells = IterateLiveObjects(Heap::cell_space(),
+                                      &UpdatePointersInOldObject);
+  int live_news = IterateLiveObjects(Heap::new_space(),
+                                     &UpdatePointersInNewObject);
+
+  // Large objects do not move, so the map word can be updated directly.
+  LargeObjectIterator it(Heap::lo_space());
+  while (it.has_next()) UpdatePointersInNewObject(it.next());
+
+  USE(live_maps);
+  USE(live_pointer_olds);
+  USE(live_data_olds);
+  USE(live_codes);
+  USE(live_cells);
+  USE(live_news);
+  ASSERT(live_maps == live_map_objects_);
+  ASSERT(live_data_olds == live_old_data_objects_);
+  ASSERT(live_pointer_olds == live_old_pointer_objects_);
+  ASSERT(live_codes == live_code_objects_);
+  ASSERT(live_cells == live_cell_objects_);
+  ASSERT(live_news == live_young_objects_);
+}
+
+
+int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
+  // Keep the old map pointer.
+  Map* old_map = obj->map();
+  ASSERT(old_map->IsHeapObject());
+
+  Address forwarded = GetForwardingAddressInOldSpace(old_map);
+
+  ASSERT(Heap::map_space()->Contains(old_map));
+  ASSERT(Heap::map_space()->Contains(forwarded));
+#ifdef DEBUG
+  if (FLAG_gc_verbose) {
+    PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
+           forwarded);
+  }
+#endif
+  // Update the map pointer.
+  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded)));
+
+  // We have to compute the object size relying on the old map because
+  // map objects are not relocated yet.
+  int obj_size = obj->SizeFromMap(old_map);
+
+  // Update pointers in the object body.
+  UpdatingVisitor updating_visitor;
+  obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
+  return obj_size;
+}
+
+
+int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
+  // Decode the map pointer.
+  MapWord encoding = obj->map_word();
+  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
+  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
+
+  // At this point, the first word of map_addr is also encoded, so we
+  // cannot cast it to Map* using Map::cast.
+  Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
+  int obj_size = obj->SizeFromMap(map);
+  InstanceType type = map->instance_type();
+
+  // Update map pointer.
+  Address new_map_addr = GetForwardingAddressInOldSpace(map);
+  int offset = encoding.DecodeOffset();
+  obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));
+
+#ifdef DEBUG
+  if (FLAG_gc_verbose) {
+    PrintF("update %p : %p -> %p\n", obj->address(),
+           map_addr, new_map_addr);
+  }
+#endif
+
+  // Update pointers in the object body.
+  UpdatingVisitor updating_visitor;
+  obj->IterateBody(type, obj_size, &updating_visitor);
+  return obj_size;
+}
+
+
+Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
+  // The object should be in old space or map space.
+  MapWord encoding = obj->map_word();
+
+  // Offset to the first live object's forwarding address.
+  int offset = encoding.DecodeOffset();
+  Address obj_addr = obj->address();
+
+  // Find the first live object's forwarding address.
+  Page* p = Page::FromAddress(obj_addr);
+  Address first_forwarded = p->mc_first_forwarded;
+
+  // Find the page containing the first forwarded address.
+  Page* forwarded_page = Page::FromAddress(first_forwarded);
+  int forwarded_offset = forwarded_page->Offset(first_forwarded);
+
+  // Find the end of allocation in the page of first_forwarded.
+  Address mc_top = forwarded_page->mc_relocation_top;
+  int mc_top_offset = forwarded_page->Offset(mc_top);
+
+  // Check if the current object's forwarding address is in the same page
+  // as the first live object's forwarding address.
+  if (forwarded_offset + offset < mc_top_offset) {
+    // In the same page.
+    return first_forwarded + offset;
+  }
+
+  // It must be in the next page.  NOTE: this may cross chunks.
+  Page* next_page = forwarded_page->next_page();
+  ASSERT(next_page->is_valid());
+
+  offset -= (mc_top_offset - forwarded_offset);
+  offset += Page::kObjectStartOffset;
+
+  ASSERT_PAGE_OFFSET(offset);
+  ASSERT(next_page->OffsetToAddress(offset) < next_page->mc_relocation_top);
+
+  return next_page->OffsetToAddress(offset);
+}
+
+
+// -------------------------------------------------------------------------
+// Phase 4: Relocate objects
+
+void MarkCompactCollector::RelocateObjects() {
+#ifdef DEBUG
+  ASSERT(state_ == UPDATE_POINTERS);
+  state_ = RELOCATE_OBJECTS;
+#endif
+  // Relocate objects; always relocate map objects first.  Relocating
+  // objects in the other spaces relies on map objects to get object sizes.
+  int live_maps = IterateLiveObjects(Heap::map_space(), &RelocateMapObject);
+  int live_pointer_olds = IterateLiveObjects(Heap::old_pointer_space(),
+                                             &RelocateOldPointerObject);
+  int live_data_olds = IterateLiveObjects(Heap::old_data_space(),
+                                          &RelocateOldDataObject);
+  int live_codes = IterateLiveObjects(Heap::code_space(), &RelocateCodeObject);
+  int live_cells = IterateLiveObjects(Heap::cell_space(), &RelocateCellObject);
+  int live_news = IterateLiveObjects(Heap::new_space(), &RelocateNewObject);
+
+  USE(live_maps);
+  USE(live_data_olds);
+  USE(live_pointer_olds);
+  USE(live_codes);
+  USE(live_cells);
+  USE(live_news);
+  ASSERT(live_maps == live_map_objects_);
+  ASSERT(live_data_olds == live_old_data_objects_);
+  ASSERT(live_pointer_olds == live_old_pointer_objects_);
+  ASSERT(live_codes == live_code_objects_);
+  ASSERT(live_cells == live_cell_objects_);
+  ASSERT(live_news == live_young_objects_);
+
+  // Flip from and to spaces
+  Heap::new_space()->Flip();
+
+  // Set age_mark to bottom in to space
+  Address mark = Heap::new_space()->bottom();
+  Heap::new_space()->set_age_mark(mark);
+
+  Heap::new_space()->MCCommitRelocationInfo();
+#ifdef DEBUG
+  // It is safe to use the remembered sets as remembered sets again, on a
+  // page-by-page basis, after committing the m-c forwarding pointers.
+  Page::set_rset_state(Page::IN_USE);
+#endif
+  PagedSpaces spaces;
+  while (PagedSpace* space = spaces.next()) space->MCCommitRelocationInfo();
+}
+
+
+int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
+  // Recover map pointer.
+  MapWord encoding = obj->map_word();
+  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
+  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
+
+  // Get forwarding address before resetting map pointer
+  Address new_addr = GetForwardingAddressInOldSpace(obj);
+
+  // Reset map pointer.  The meta map object may not be copied yet so
+  // Map::cast does not yet work.
+  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));
+
+  Address old_addr = obj->address();
+
+  if (new_addr != old_addr) {
+    memmove(new_addr, old_addr, Map::kSize);  // copy contents
+  }
+
+#ifdef DEBUG
+  if (FLAG_gc_verbose) {
+    PrintF("relocate %p -> %p\n", old_addr, new_addr);
+  }
+#endif
+
+  return Map::kSize;
+}
+
+
+static inline int RestoreMap(HeapObject* obj,
+                             PagedSpace* space,
+                             Address new_addr,
+                             Address map_addr) {
+  // This must be a non-map object, and the function relies on the
+  // assumption that the Map space is compacted before the other paged
+  // spaces (see RelocateObjects).
+
+  // Reset map pointer.
+  obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));
+
+  int obj_size = obj->Size();
+  ASSERT_OBJECT_SIZE(obj_size);
+
+  ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
+         space->MCSpaceOffsetForAddress(obj->address()));
+
+#ifdef DEBUG
+  if (FLAG_gc_verbose) {
+    PrintF("relocate %p -> %p\n", obj->address(), new_addr);
+  }
+#endif
+
+  return obj_size;
+}
+
+
+int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
+                                                   PagedSpace* space) {
+  // Recover map pointer.
+  MapWord encoding = obj->map_word();
+  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
+  ASSERT(Heap::map_space()->Contains(map_addr));
+
+  // Get forwarding address before resetting map pointer.
+  Address new_addr = GetForwardingAddressInOldSpace(obj);
+
+  // Reset the map pointer.
+  int obj_size = RestoreMap(obj, space, new_addr, map_addr);
+
+  Address old_addr = obj->address();
+
+  if (new_addr != old_addr) {
+    memmove(new_addr, old_addr, obj_size);  // Copy contents
+  }
+
+  ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
+
+  return obj_size;
+}
+
+
+int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
+  return RelocateOldNonCodeObject(obj, Heap::old_pointer_space());
+}
+
+
+int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
+  return RelocateOldNonCodeObject(obj, Heap::old_data_space());
+}
+
+
+int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
+  return RelocateOldNonCodeObject(obj, Heap::cell_space());
+}
+
+
+int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
+  // Recover map pointer.
+  MapWord encoding = obj->map_word();
+  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
+  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
+
+  // Get forwarding address before resetting map pointer
+  Address new_addr = GetForwardingAddressInOldSpace(obj);
+
+  // Reset the map pointer.
+  int obj_size = RestoreMap(obj, Heap::code_space(), new_addr, map_addr);
+
+  Address old_addr = obj->address();
+
+  if (new_addr != old_addr) {
+    memmove(new_addr, old_addr, obj_size);  // Copy contents.
+  }
+
+  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
+  if (copied_to->IsCode()) {
+    // May also update inline cache target.
+    Code::cast(copied_to)->Relocate(new_addr - old_addr);
+    // Notify the logger that compiled code has moved.
+    LOG(CodeMoveEvent(old_addr, new_addr));
+  }
+
+  return obj_size;
+}
+
+
+int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
+  int obj_size = obj->Size();
+
+  // Get forwarding address
+  Address old_addr = obj->address();
+  int offset = Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
+
+  Address new_addr =
+    Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset);
+
+#ifdef DEBUG
+  if (Heap::new_space()->FromSpaceContains(new_addr)) {
+    ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
+           Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
+  } else {
+    ASSERT(Heap::TargetSpace(obj) == Heap::old_pointer_space() ||
+           Heap::TargetSpace(obj) == Heap::old_data_space());
+  }
+#endif
+
+  // New and old addresses cannot overlap.
+  memcpy(reinterpret_cast<void*>(new_addr),
+         reinterpret_cast<void*>(old_addr),
+         obj_size);
+
+#ifdef DEBUG
+  if (FLAG_gc_verbose) {
+    PrintF("relocate %p -> %p\n", old_addr, new_addr);
+  }
+#endif
+
+  return obj_size;
+}
+
+
+// -------------------------------------------------------------------------
+// Phase 5: rebuild remembered sets
+
+void MarkCompactCollector::RebuildRSets() {
+#ifdef DEBUG
+  ASSERT(state_ == RELOCATE_OBJECTS);
+  state_ = REBUILD_RSETS;
+#endif
+  Heap::RebuildRSets();
+}
+
+} }  // namespace v8::internal
diff --git a/src/mark-compact.h b/src/mark-compact.h
new file mode 100644
index 0000000..2da2b1f
--- /dev/null
+++ b/src/mark-compact.h
@@ -0,0 +1,434 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MARK_COMPACT_H_
+#define V8_MARK_COMPACT_H_
+
+namespace v8 {
+namespace internal {
+
+// Callback function that returns whether an object is alive.  The heap size
+// of the object is returned in size. It optionally updates the offset
+// to the first live object in the page (only used for old and map objects).
+typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
+
+// Callback function for non-live blocks in the old generation.
+typedef void (*DeallocateFunction)(Address start, int size_in_bytes);
+
+
+// Forward declarations.
+class RootMarkingVisitor;
+class MarkingVisitor;
+
+
+// -------------------------------------------------------------------------
+// Mark-Compact collector
+//
+// All methods are static.
+
+class MarkCompactCollector: public AllStatic {
+ public:
+  // Type of functions to compute forwarding addresses of objects in
+  // compacted spaces.  Given an object and its size, return a (non-failure)
+  // Object* that will be the object after forwarding.  There is a separate
+  // allocation function for each (compactable) space based on the location
+  // of the object before compaction.
+  typedef Object* (*AllocationFunction)(HeapObject* object, int object_size);
+
+  // Type of functions to encode the forwarding address for an object.
+  // Given the object, its size, and the new (non-failure) object it will be
+  // forwarded to, encode the forwarding address.  For paged spaces, the
+  // 'offset' input/output parameter contains the offset of the forwarded
+  // object from the forwarding address of the previous live object in the
+  // page as input, and is updated to contain the offset to be used for the
+  // next live object in the same page.  For spaces using a different
+  // encoding (i.e., contiguous spaces), the offset parameter is ignored.
+  typedef void (*EncodingFunction)(HeapObject* old_object,
+                                   int object_size,
+                                   Object* new_object,
+                                   int* offset);
+
+  // Type of functions to process non-live objects.
+  typedef void (*ProcessNonLiveFunction)(HeapObject* object);
+
+  // Set the global force_compaction flag.  It must be set before Prepare
+  // is called in order to take effect.
+  static void SetForceCompaction(bool value) {
+    force_compaction_ = value;
+  }
+
+  // Prepares for GC by resetting relocation info in old and map spaces and
+  // choosing spaces to compact.
+  static void Prepare(GCTracer* tracer);
+
+  // Performs a global garbage collection.
+  static void CollectGarbage();
+
+  // True if the last full GC performed heap compaction.
+  static bool HasCompacted() { return compacting_collection_; }
+
+  // True after the Prepare phase if the compaction is taking place.
+  static bool IsCompacting() { return compacting_collection_; }
+
+  // The number of objects left marked at the end of the last
+  // completed full GC (expected to be zero).
+  static int previous_marked_count() { return previous_marked_count_; }
+
+  // During a full GC, there is a stack-allocated GCTracer that is used for
+  // bookkeeping information.  Return a pointer to that tracer.
+  static GCTracer* tracer() { return tracer_; }
+
+#ifdef DEBUG
+  // Checks whether a mark-compact collection is in progress.
+  static bool in_use() { return state_ > PREPARE_GC; }
+#endif
+
+ private:
+#ifdef DEBUG
+  enum CollectorState {
+    IDLE,
+    PREPARE_GC,
+    MARK_LIVE_OBJECTS,
+    SWEEP_SPACES,
+    ENCODE_FORWARDING_ADDRESSES,
+    UPDATE_POINTERS,
+    RELOCATE_OBJECTS,
+    REBUILD_RSETS
+  };
+
+  // The current stage of the collector.
+  static CollectorState state_;
+#endif
+
+  // Global flag that forces a compaction.
+  static bool force_compaction_;
+
+  // Global flag indicating whether spaces were compacted on the last GC.
+  static bool compacting_collection_;
+
+  // Global flag indicating whether spaces will be compacted on the next GC.
+  static bool compact_on_next_gc_;
+
+  // The number of objects left marked at the end of the last completed full
+  // GC (expected to be zero).
+  static int previous_marked_count_;
+
+  // A pointer to the current stack-allocated GC tracer object during a full
+  // collection (NULL before and after).
+  static GCTracer* tracer_;
+
+  // Finishes GC, performs heap verification if enabled.
+  static void Finish();
+
+  // -----------------------------------------------------------------------
+  // Phase 1: Marking live objects.
+  //
+  //  Before: The heap has been prepared for garbage collection by
+  //          MarkCompactCollector::Prepare() and is otherwise in its
+  //          normal state.
+  //
+  //   After: Live objects are marked and non-live objects are unmarked.
+
+
+  friend class RootMarkingVisitor;
+  friend class MarkingVisitor;
+
+  // Marking operations for objects reachable from roots.
+  static void MarkLiveObjects();
+
+  static void MarkUnmarkedObject(HeapObject* obj);
+
+  static inline void MarkObject(HeapObject* obj) {
+    if (!obj->IsMarked()) MarkUnmarkedObject(obj);
+  }
+
+  static inline void SetMark(HeapObject* obj) {
+    tracer_->increment_marked_count();
+#ifdef DEBUG
+    UpdateLiveObjectCount(obj);
+#endif
+    obj->SetMark();
+  }
+
+  // Creates back pointers for all map transitions and stores them in
+  // the prototype field.  The original prototype pointers are restored
+  // in ClearNonLiveTransitions().  All JSObject maps
+  // connected by map transitions have the same prototype object, which
+  // is why we can use this field temporarily for back pointers.
+  static void CreateBackPointers();
+
+  // Mark a Map and its DescriptorArray together, skipping transitions.
+  static void MarkMapContents(Map* map);
+  static void MarkDescriptorArray(DescriptorArray* descriptors);
+
+  // Mark the heap roots and all objects reachable from them.
+  static void MarkRoots(RootMarkingVisitor* visitor);
+
+  // Mark the symbol table specially.  References to symbols from the
+  // symbol table are weak.
+  static void MarkSymbolTable();
+
+  // Mark objects in object groups that have at least one object in the
+  // group marked.
+  static void MarkObjectGroups();
+
+  // Mark all objects in an object group with at least one marked
+  // object, then all objects reachable from marked objects in object
+  // groups, and repeat.
+  static void ProcessObjectGroups(MarkingVisitor* visitor);
+
+  // Mark objects reachable (transitively) from objects in the marking stack
+  // or overflowed in the heap.
+  static void ProcessMarkingStack(MarkingVisitor* visitor);
+
+  // Mark objects reachable (transitively) from objects in the marking
+  // stack.  This function empties the marking stack, but may leave
+  // overflowed objects in the heap, in which case the marking stack's
+  // overflow flag will be set.
+  static void EmptyMarkingStack(MarkingVisitor* visitor);
+
+  // Refill the marking stack with overflowed objects from the heap.  This
+  // function either leaves the marking stack full or clears the overflow
+  // flag on the marking stack.
+  static void RefillMarkingStack();
+
+  // Callback function for telling whether the object *p is an unmarked
+  // heap object.
+  static bool IsUnmarkedHeapObject(Object** p);
+
+#ifdef DEBUG
+  static void UpdateLiveObjectCount(HeapObject* obj);
+#endif
+
+  // We sweep the large object space in the same way whether we are
+  // compacting or not, because the large object space is never compacted.
+  static void SweepLargeObjectSpace();
+
+  // Test whether a (possibly marked) object is a Map.
+  static inline bool SafeIsMap(HeapObject* object);
+
+  // Map transitions from a live map to a dead map must be killed.
+  // We replace them with a null descriptor, with the same key.
+  static void ClearNonLiveTransitions();
+
+  // -----------------------------------------------------------------------
+  // Phase 2: Sweeping to clear mark bits and free non-live objects for
+  // a non-compacting collection, or else computing and encoding
+  // forwarding addresses for a compacting collection.
+  //
+  //  Before: Live objects are marked and non-live objects are unmarked.
+  //
+  //   After: (Non-compacting collection.)  Live objects are unmarked,
+  //          non-live regions have been added to their space's free
+  //          list.
+  //
+  //   After: (Compacting collection.)  The forwarding address of live
+  //          objects in the paged spaces is encoded in their map word
+  //          along with their (non-forwarded) map pointer.
+  //
+  //          The forwarding address of live objects in the new space is
+  //          written to their map word's offset in the inactive
+  //          semispace.
+  //
+  //          Bookkeeping data is written to the remembered-set area of
+  //          each paged-space page that contains live objects after
+  //          compaction:
+  //
+  //          The 3rd word of the page (first word of the remembered
+  //          set) contains the relocation top address, the address of
+  //          the first word after the end of the last live object in
+  //          the page after compaction.
+  //
+  //          The 4th word contains the zero-based index of the page in
+  //          its space.  This word is only used for map space pages, in
+  //          order to encode the map addresses in 21 bits to free 11
+  //          bits per map word for the forwarding address.
+  //
+  //          The 5th word contains the (nonencoded) forwarding address
+  //          of the first live object in the page.
+  //
+  //          In both the new space and the paged spaces, a linked list
+  //          of live regions is constructed (linked through
+  //          pointers in the non-live region immediately following each
+  //          live region) to speed further passes of the collector.
+
+  // Encodes forwarding addresses of objects in compactable parts of the
+  // heap.
+  static void EncodeForwardingAddresses();
+
+  // Encodes the forwarding addresses of objects in new space.
+  static void EncodeForwardingAddressesInNewSpace();
+
+  // Function template to encode the forwarding addresses of objects in
+  // paged spaces, parameterized by allocation and non-live processing
+  // functions.
+  template<AllocationFunction Alloc, ProcessNonLiveFunction ProcessNonLive>
+  static void EncodeForwardingAddressesInPagedSpace(PagedSpace* space);
+
+  // Iterates live objects in a space, passes live objects
+  // to a callback function which returns the heap size of the object.
+  // Returns the number of live objects iterated.
+  static int IterateLiveObjects(NewSpace* space, HeapObjectCallback size_f);
+  static int IterateLiveObjects(PagedSpace* space, HeapObjectCallback size_f);
+
+  // Iterates the live objects between a range of addresses, returning the
+  // number of live objects.
+  static int IterateLiveObjectsInRange(Address start, Address end,
+                                       HeapObjectCallback size_func);
+
+  // Callback functions for deallocating non-live blocks in the old
+  // generation.
+  static void DeallocateOldPointerBlock(Address start, int size_in_bytes);
+  static void DeallocateOldDataBlock(Address start, int size_in_bytes);
+  static void DeallocateCodeBlock(Address start, int size_in_bytes);
+  static void DeallocateMapBlock(Address start, int size_in_bytes);
+  static void DeallocateCellBlock(Address start, int size_in_bytes);
+
+  // If we are not compacting the heap, we simply sweep the spaces except
+  // for the large object space, clearing mark bits and adding unmarked
+  // regions to each space's free list.
+  static void SweepSpaces();
+
+  // -----------------------------------------------------------------------
+  // Phase 3: Updating pointers in live objects.
+  //
+  //  Before: Same as after phase 2 (compacting collection).
+  //
+  //   After: All pointers in live objects, including encoded map
+  //          pointers, are updated to point to their target's new
+  //          location.  The remembered set area of each paged-space
+  //          page containing live objects still contains bookkeeping
+  //          information.
+
+  friend class UpdatingVisitor;  // helper for updating visited objects
+
+  // Updates pointers in all spaces.
+  static void UpdatePointers();
+
+  // Updates pointers in an object in new space.
+  // Returns the heap size of the object.
+  static int UpdatePointersInNewObject(HeapObject* obj);
+
+  // Updates pointers in an object in old spaces.
+  // Returns the heap size of the object.
+  static int UpdatePointersInOldObject(HeapObject* obj);
+
+  // Calculates the forwarding address of an object in an old space.
+  static Address GetForwardingAddressInOldSpace(HeapObject* obj);
+
+  // -----------------------------------------------------------------------
+  // Phase 4: Relocating objects.
+  //
+  //  Before: Pointers to live objects are updated to point to their
+  //          target's new location.  The remembered set area of each
+  //          paged-space page containing live objects still contains
+  //          bookkeeping information.
+  //
+  //   After: Objects have been moved to their new addresses. The
+  //          remembered set area of each paged-space page containing
+  //          live objects still contains bookkeeping information.
+
+  // Relocates objects in all spaces.
+  static void RelocateObjects();
+
+  // Converts a code object's inline targets to addresses; the conversion from
+  // address to target happens in the marking phase.
+  static int ConvertCodeICTargetToAddress(HeapObject* obj);
+
+  // Relocate a map object.
+  static int RelocateMapObject(HeapObject* obj);
+
+  // Relocates an old object.
+  static int RelocateOldPointerObject(HeapObject* obj);
+  static int RelocateOldDataObject(HeapObject* obj);
+
+  // Relocate a property cell object.
+  static int RelocateCellObject(HeapObject* obj);
+
+  // Helper function.
+  static inline int RelocateOldNonCodeObject(HeapObject* obj,
+                                             PagedSpace* space);
+
+  // Relocates an object in the code space.
+  static int RelocateCodeObject(HeapObject* obj);
+
+  // Copy a new object.
+  static int RelocateNewObject(HeapObject* obj);
+
+  // -----------------------------------------------------------------------
+  // Phase 5: Rebuilding remembered sets.
+  //
+  //  Before: The heap is in a normal state except that remembered sets
+  //          in the paged spaces are not correct.
+  //
+  //   After: The heap is in a normal state.
+
+  // Rebuild remembered set in old and map spaces.
+  static void RebuildRSets();
+
+#ifdef DEBUG
+  // -----------------------------------------------------------------------
+  // Debugging variables, functions and classes
+  // Counters used for debugging the marking phase of mark-compact or
+  // mark-sweep collection.
+
+  // Number of live objects in Heap::to_space_.
+  static int live_young_objects_;
+
+  // Number of live objects in Heap::old_pointer_space_.
+  static int live_old_pointer_objects_;
+
+  // Number of live objects in Heap::old_data_space_.
+  static int live_old_data_objects_;
+
+  // Number of live objects in Heap::code_space_.
+  static int live_code_objects_;
+
+  // Number of live objects in Heap::map_space_.
+  static int live_map_objects_;
+
+  // Number of live objects in Heap::cell_space_.
+  static int live_cell_objects_;
+
+  // Number of live objects in Heap::lo_space_.
+  static int live_lo_objects_;
+
+  // Number of live bytes in this collection.
+  static int live_bytes_;
+
+  friend class MarkObjectVisitor;
+  static void VisitObject(HeapObject* obj);
+
+  friend class UnmarkObjectVisitor;
+  static void UnmarkObject(HeapObject* obj);
+#endif
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_MARK_COMPACT_H_
diff --git a/src/math.js b/src/math.js
new file mode 100644
index 0000000..e3d266e
--- /dev/null
+++ b/src/math.js
@@ -0,0 +1,250 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Keep reference to original values of some global properties.  This
+// has the added benefit that the code in this file is isolated from
+// changes to these properties.
+const $Infinity = global.Infinity;
+const $floor = MathFloor;
+const $random = MathRandom;
+const $abs = MathAbs;
+
+// Instance class name can only be set on functions. That is the only
+// purpose for MathConstructor.
+function MathConstructor() {}
+%FunctionSetInstanceClassName(MathConstructor, 'Math');
+const $Math = new MathConstructor();
+$Math.__proto__ = global.Object.prototype;
+%SetProperty(global, "Math", $Math, DONT_ENUM);
+
+// ECMA 262 - 15.8.2.1
+function MathAbs(x) {
+  if (%_IsSmi(x)) return x >= 0 ? x : -x;
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  return %Math_abs(x);
+}
+
+// ECMA 262 - 15.8.2.2
+function MathAcos(x) {
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  return %Math_acos(x);
+}
+
+// ECMA 262 - 15.8.2.3
+function MathAsin(x) {
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  return %Math_asin(x);
+}
+
+// ECMA 262 - 15.8.2.4
+function MathAtan(x) {
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  return %Math_atan(x);
+}
+
+// ECMA 262 - 15.8.2.5
+// The naming of y and x matches the spec, as does the order in which
+// ToNumber (valueOf) is called.
+function MathAtan2(y, x) {
+  if (!IS_NUMBER(y)) y = ToNumber(y);
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  return %Math_atan2(y, x);
+}
+
+// ECMA 262 - 15.8.2.6
+function MathCeil(x) {
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  return %Math_ceil(x);
+}
+
+// ECMA 262 - 15.8.2.7
+function MathCos(x) {
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  return %_Math_cos(x);
+}
+
+// ECMA 262 - 15.8.2.8
+function MathExp(x) {
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  return %Math_exp(x);
+}
+
+// ECMA 262 - 15.8.2.9
+function MathFloor(x) {
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  // It's more common to call this with a positive number that's out
+  // of range than negative numbers; check the upper bound first.
+  if (x <= 0x7FFFFFFF && x > 0) {
+    // Numbers in the range [0, 2^31) can be floored by converting
+    // them to an unsigned 32-bit value using the shift operator.
+    // We avoid doing so for -0, because the result of Math.floor(-0)
+    // has to be -0, which wouldn't be the case with the shift.
+    return x << 0;
+  } else {
+    return %Math_floor(x);
+  }
+}
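The fast path above works because the shift operator truncates its operand to an int32. A standalone sketch (plain JavaScript, no V8 intrinsics) of why the guards restrict it to positive values below 2^31:

// Truncation via "<< 0" equals the floor only for positive values that fit
// in a signed 32-bit integer; the guards in MathFloor exclude everything else.
3.7 << 0;           // 3, same as Math.floor(3.7)
(-0) << 0;          // +0, but Math.floor(-0) must be -0: excluded by "x > 0"
(-3.7) << 0;        // -3, but Math.floor(-3.7) is -4: excluded by "x > 0"
2147483648.5 << 0;  // wraps to -2147483648: excluded by the 0x7FFFFFFF bound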
+
+// ECMA 262 - 15.8.2.10
+function MathLog(x) {
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  return %Math_log(x);
+}
+
+// ECMA 262 - 15.8.2.11
+function MathMax(arg1, arg2) {  // length == 2
+  var r = -$Infinity;
+  var length = %_ArgumentsLength();
+  for (var i = 0; i < length; i++) {
+    var n = ToNumber(%_Arguments(i));
+    if (NUMBER_IS_NAN(n)) return n;
+    // Make sure +0 is considered greater than -0.
+    if (n > r || (r === 0 && n === 0 && !%_IsSmi(r))) r = n;
+  }
+  return r;
+}
+
+// ECMA 262 - 15.8.2.12
+function MathMin(arg1, arg2) {  // length == 2
+  var r = $Infinity;
+  var length = %_ArgumentsLength();
+  for (var i = 0; i < length; i++) {
+    var n = ToNumber(%_Arguments(i));
+    if (NUMBER_IS_NAN(n)) return n;
+    // Make sure -0 is considered less than +0.
+    if (n < r || (r === 0 && n === 0 && !%_IsSmi(n))) r = n;
+  }
+  return r;
+}
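MathMax and MathMin need the extra zero checks above because the comparison operators treat +0 and -0 as equal. A standalone sketch of the ordering they implement (plain JavaScript; it detects -0 via the sign of 1/x instead of the %_IsSmi intrinsic used above):

// +0 and -0 compare equal, so "n > r" alone cannot pick the right zero.
// ECMA-262 requires Math.max(0, -0) === +0 and Math.min(0, -0) === -0.
function maxPreferringPositiveZero(a, b) {
  if (a !== b) return a > b ? a : b;   // NaN handling omitted in this sketch
  // a === b: they may still be +0 and -0; detect -0 via division.
  return (1 / a === -Infinity) ? b : a;
}
maxPreferringPositiveZero(-0, 0);   // +0  (1 / result === Infinity)
maxPreferringPositiveZero(0, -0);   // +0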
+
+// ECMA 262 - 15.8.2.13
+function MathPow(x, y) {
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  if (!IS_NUMBER(y)) y = ToNumber(y);
+  return %Math_pow(x, y);
+}
+
+// ECMA 262 - 15.8.2.14
+function MathRandom() {
+  return %_RandomPositiveSmi() / 0x40000000;
+}
+
+// ECMA 262 - 15.8.2.15
+function MathRound(x) {
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  return %Math_round(x);
+}
+
+// ECMA 262 - 15.8.2.16
+function MathSin(x) {
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  return %_Math_sin(x);
+}
+
+// ECMA 262 - 15.8.2.17
+function MathSqrt(x) {
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  return %Math_sqrt(x);
+}
+
+// ECMA 262 - 15.8.2.18
+function MathTan(x) {
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  return %Math_tan(x);
+}
+
+
+// -------------------------------------------------------------------
+
+function SetupMath() {
+  // Setup math constants.
+  // ECMA-262, section 15.8.1.1.
+  %OptimizeObjectForAddingMultipleProperties($Math, 8);
+  %SetProperty($Math,
+               "E",
+               2.7182818284590452354,
+               DONT_ENUM |  DONT_DELETE | READ_ONLY);
+  // ECMA-262, section 15.8.1.2.
+  %SetProperty($Math,
+               "LN10",
+               2.302585092994046,
+               DONT_ENUM |  DONT_DELETE | READ_ONLY);
+  // ECMA-262, section 15.8.1.3.
+  %SetProperty($Math,
+               "LN2",
+               0.6931471805599453,
+               DONT_ENUM |  DONT_DELETE | READ_ONLY);
+  // ECMA-262, section 15.8.1.4.
+  %SetProperty($Math,
+               "LOG2E",
+               1.4426950408889634,
+               DONT_ENUM |  DONT_DELETE | READ_ONLY);
+  %SetProperty($Math,
+               "LOG10E",
+               0.43429448190325176,
+               DONT_ENUM |  DONT_DELETE | READ_ONLY);
+  %SetProperty($Math,
+               "PI",
+               3.1415926535897932,
+               DONT_ENUM |  DONT_DELETE | READ_ONLY);
+  %SetProperty($Math,
+               "SQRT1_2",
+               0.7071067811865476,
+               DONT_ENUM |  DONT_DELETE | READ_ONLY);
+  %SetProperty($Math,
+               "SQRT2",
+               1.4142135623730951,
+               DONT_ENUM |  DONT_DELETE | READ_ONLY);
+  %TransformToFastProperties($Math);
+
+  // Setup non-enumerable functions of the Math object and
+  // set their names.
+  InstallFunctionsOnHiddenPrototype($Math, DONT_ENUM, $Array(
+    "random", MathRandom,
+    "abs", MathAbs,
+    "acos", MathAcos,
+    "asin", MathAsin,
+    "atan", MathAtan,
+    "ceil", MathCeil,
+    "cos", MathCos,
+    "exp", MathExp,
+    "floor", MathFloor,
+    "log", MathLog,
+    "round", MathRound,
+    "sin", MathSin,
+    "sqrt", MathSqrt,
+    "tan", MathTan,
+    "atan2", MathAtan2,
+    "pow", MathPow,
+    "max", MathMax,
+    "min", MathMin
+  ));
+};
+
+
+SetupMath();
diff --git a/src/memory.h b/src/memory.h
new file mode 100644
index 0000000..c64699e
--- /dev/null
+++ b/src/memory.h
@@ -0,0 +1,70 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MEMORY_H_
+#define V8_MEMORY_H_
+
+namespace v8 {
+namespace internal {
+
+// Memory provides an interface to 'raw' memory. It encapsulates the casts
+// that typically are needed when incompatible pointer types are used.
+
+class Memory {
+ public:
+  static uint16_t& uint16_at(Address addr)  {
+    return *reinterpret_cast<uint16_t*>(addr);
+  }
+
+  static uint32_t& uint32_at(Address addr)  {
+    return *reinterpret_cast<uint32_t*>(addr);
+  }
+
+  static int32_t& int32_at(Address addr)  {
+    return *reinterpret_cast<int32_t*>(addr);
+  }
+
+  static uint64_t& uint64_at(Address addr)  {
+    return *reinterpret_cast<uint64_t*>(addr);
+  }
+
+  static int& int_at(Address addr)  {
+    return *reinterpret_cast<int*>(addr);
+  }
+
+  static Address& Address_at(Address addr)  {
+    return *reinterpret_cast<Address*>(addr);
+  }
+
+  static Object*& Object_at(Address addr)  {
+    return *reinterpret_cast<Object**>(addr);
+  }
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_MEMORY_H_
diff --git a/src/messages.cc b/src/messages.cc
new file mode 100644
index 0000000..e16b1b2
--- /dev/null
+++ b/src/messages.cc
@@ -0,0 +1,177 @@
+
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "api.h"
+#include "execution.h"
+#include "spaces-inl.h"
+#include "top.h"
+
+namespace v8 {
+namespace internal {
+
+
+// If no message listeners have been registered this one is called
+// by default.
+void MessageHandler::DefaultMessageReport(const MessageLocation* loc,
+                                          Handle<Object> message_obj) {
+  SmartPointer<char> str = GetLocalizedMessage(message_obj);
+  if (loc == NULL) {
+    PrintF("%s\n", *str);
+  } else {
+    HandleScope scope;
+    Handle<Object> data(loc->script()->name());
+    SmartPointer<char> data_str;
+    if (data->IsString())
+      data_str = Handle<String>::cast(data)->ToCString(DISALLOW_NULLS);
+    PrintF("%s:%i: %s\n", *data_str ? *data_str : "<unknown>",
+           loc->start_pos(), *str);
+  }
+}
+
+
+void MessageHandler::ReportMessage(const char* msg) {
+  PrintF("%s\n", msg);
+}
+
+
+Handle<Object> MessageHandler::MakeMessageObject(
+    const char* type,
+    MessageLocation* loc,
+    Vector< Handle<Object> > args,
+    Handle<String> stack_trace) {
+  // Build error message object
+  v8::HandleScope scope;  // Instantiate a closeable HandleScope for EscapeFrom.
+  Handle<Object> type_str = Factory::LookupAsciiSymbol(type);
+  Handle<Object> array = Factory::NewJSArray(args.length());
+  for (int i = 0; i < args.length(); i++)
+    SetElement(Handle<JSArray>::cast(array), i, args[i]);
+
+  Handle<JSFunction> fun(Top::global_context()->make_message_fun());
+  int start, end;
+  Handle<Object> script;
+  if (loc) {
+    start = loc->start_pos();
+    end = loc->end_pos();
+    script = GetScriptWrapper(loc->script());
+  } else {
+    start = end = 0;
+    script = Factory::undefined_value();
+  }
+  Handle<Object> start_handle(Smi::FromInt(start));
+  Handle<Object> end_handle(Smi::FromInt(end));
+  Handle<Object> stack_trace_val = stack_trace.is_null()
+    ? Factory::undefined_value()
+    : Handle<Object>::cast(stack_trace);
+  const int argc = 6;
+  Object** argv[argc] = { type_str.location(),
+                          array.location(),
+                          start_handle.location(),
+                          end_handle.location(),
+                          script.location(),
+                          stack_trace_val.location() };
+
+  // Setup a catch handler to catch exceptions in creating the message. This
+  // handler is non-verbose to avoid calling MakeMessage recursively in case of
+  // an exception.
+  v8::TryCatch catcher;
+  catcher.SetVerbose(false);
+  catcher.SetCaptureMessage(false);
+
+  // Format the message.
+  bool caught_exception = false;
+  Handle<Object> message =
+      Execution::Call(fun, Factory::undefined_value(), argc, argv,
+                      &caught_exception);
+
+  // If creating the message (in JS code) resulted in an exception, we
+  // skip doing the callback. This usually only happens in case of
+  // stack overflow exceptions being thrown by the parser when the
+  // stack is almost full.
+  if (caught_exception) return Handle<Object>();
+
+  return message.EscapeFrom(&scope);
+}
+
+
+void MessageHandler::ReportMessage(MessageLocation* loc,
+                                   Handle<Object> message) {
+  v8::Local<v8::Message> api_message_obj = v8::Utils::MessageToLocal(message);
+
+  v8::NeanderArray global_listeners(Factory::message_listeners());
+  int global_length = global_listeners.length();
+  if (global_length == 0) {
+    DefaultMessageReport(loc, message);
+  } else {
+    for (int i = 0; i < global_length; i++) {
+      HandleScope scope;
+      if (global_listeners.get(i)->IsUndefined()) continue;
+      v8::NeanderObject listener(JSObject::cast(global_listeners.get(i)));
+      Handle<Proxy> callback_obj(Proxy::cast(listener.get(0)));
+      v8::MessageCallback callback =
+          FUNCTION_CAST<v8::MessageCallback>(callback_obj->proxy());
+      Handle<Object> callback_data(listener.get(1));
+      callback(api_message_obj, v8::Utils::ToLocal(callback_data));
+    }
+  }
+}
+
+
+Handle<String> MessageHandler::GetMessage(Handle<Object> data) {
+  Handle<String> fmt_str = Factory::LookupAsciiSymbol("FormatMessage");
+  Handle<JSFunction> fun =
+      Handle<JSFunction>(
+          JSFunction::cast(Top::builtins()->GetProperty(*fmt_str)));
+  Object** argv[1] = { data.location() };
+
+  bool caught_exception;
+  Handle<Object> result =
+      Execution::TryCall(fun, Top::builtins(), 1, argv, &caught_exception);
+
+  if (caught_exception || !result->IsString()) {
+    return Factory::LookupAsciiSymbol("<error>");
+  }
+  Handle<String> result_string = Handle<String>::cast(result);
+  // A string that has been obtained from JS code in this way is
+  // likely to be a complicated ConsString of some sort.  We flatten it
+  // here to improve the efficiency of converting it to a C string and
+  // other operations that are likely to take place (see GetLocalizedMessage
+  // for example).
+  FlattenString(result_string);
+  return result_string;
+}
+
+
+SmartPointer<char> MessageHandler::GetLocalizedMessage(Handle<Object> data) {
+  HandleScope scope;
+  return GetMessage(data)->ToCString(DISALLOW_NULLS);
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/messages.h b/src/messages.h
new file mode 100644
index 0000000..80ce8eb
--- /dev/null
+++ b/src/messages.h
@@ -0,0 +1,112 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The infrastructure used for (localized) message reporting in V8.
+//
+// Note: there's a big unresolved issue about ownership of the data
+// structures used by this framework.
+
+#ifndef V8_MESSAGES_H_
+#define V8_MESSAGES_H_
+
+#include "handles-inl.h"
+
+// Forward declaration of MessageLocation.
+namespace v8 {
+namespace internal {
+class MessageLocation;
+} }  // namespace v8::internal
+
+
+class V8Message {
+ public:
+  V8Message(char* type,
+            v8::internal::Handle<v8::internal::JSArray> args,
+            const v8::internal::MessageLocation* loc) :
+      type_(type), args_(args), loc_(loc) { }
+  char* type() const { return type_; }
+  v8::internal::Handle<v8::internal::JSArray> args() const { return args_; }
+  const v8::internal::MessageLocation* loc() const { return loc_; }
+ private:
+  char* type_;
+  v8::internal::Handle<v8::internal::JSArray> const args_;
+  const v8::internal::MessageLocation* loc_;
+};
+
+
+namespace v8 {
+namespace internal {
+
+struct Language;
+class SourceInfo;
+
+class MessageLocation {
+ public:
+  MessageLocation(Handle<Script> script,
+                  int start_pos,
+                  int end_pos)
+      : script_(script),
+        start_pos_(start_pos),
+        end_pos_(end_pos) { }
+  MessageLocation() : start_pos_(-1), end_pos_(-1) { }
+
+  Handle<Script> script() const { return script_; }
+  int start_pos() const { return start_pos_; }
+  int end_pos() const { return end_pos_; }
+
+ private:
+  Handle<Script> script_;
+  int start_pos_;
+  int end_pos_;
+};
+
+
+// A message handler is a convenience interface for accessing the list
+// of message listeners registered in an environment
+class MessageHandler {
+ public:
+  // Report a message (w/o JS heap allocation).
+  static void ReportMessage(const char* msg);
+
+  // Returns a message object for the API to use.
+  static Handle<Object> MakeMessageObject(const char* type,
+                                          MessageLocation* loc,
+                                          Vector< Handle<Object> > args,
+                                          Handle<String> stack_trace);
+
+  // Report a formatted message (needs JS allocation).
+  static void ReportMessage(MessageLocation* loc, Handle<Object> message);
+
+  static void DefaultMessageReport(const MessageLocation* loc,
+                                   Handle<Object> message_obj);
+  static Handle<String> GetMessage(Handle<Object> data);
+  static SmartPointer<char> GetLocalizedMessage(Handle<Object> data);
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_MESSAGES_H_
diff --git a/src/messages.js b/src/messages.js
new file mode 100644
index 0000000..2720792
--- /dev/null
+++ b/src/messages.js
@@ -0,0 +1,897 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// -------------------------------------------------------------------
+
+// Lazily initialized.
+var kVowelSounds = 0;
+var kCapitalVowelSounds = 0;
+
+// If this object gets passed to an error constructor the error will
+// get an accessor for .message that constructs a descriptive error
+// message on access.
+var kAddMessageAccessorsMarker = { };
+
+
+function GetInstanceName(cons) {
+  if (cons.length == 0) {
+    return "";
+  }
+  var first = %StringToLowerCase(StringCharAt.call(cons, 0));
+  if (kVowelSounds === 0) {
+    kVowelSounds = {a: true, e: true, i: true, o: true, u: true, y: true};
+    kCapitalVowelSounds = {a: true, e: true, i: true, o: true, u: true, h: true,
+        f: true, l: true, m: true, n: true, r: true, s: true, x: true, y: true};
+  }
+  var vowel_mapping = kVowelSounds;
+  if (cons.length > 1 && (StringCharAt.call(cons, 0) != first)) {
+    // First char is upper case
+    var second = %StringToLowerCase(StringCharAt.call(cons, 1));
+    // Second char is upper case
+    if (StringCharAt.call(cons, 1) != second) {
+      vowel_mapping = kCapitalVowelSounds;
+    }
+  }
+  var s = vowel_mapping[first] ? "an " : "a ";
+  return s + cons;
+}
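GetInstanceName chooses between "a" and "an" based on how the constructor name is pronounced; the second table covers names that start with two capitals, where the first letter is read by its letter name ("F", "L", "M", "X", ... begin with a vowel sound). A few traced results, assuming the String helpers above behave like their standard counterparts:

// Expected results of GetInstanceName, traced from the tables above:
GetInstanceName("Object");          // "an Object"          - 'o' is a vowel
GetInstanceName("RegExp");          // "a RegExp"           - 'r' is not
GetInstanceName("XMLHttpRequest");  // "an XMLHttpRequest"  - "X" is read "ex"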
+
+
+var kMessages = 0;
+
+
+function FormatString(format, args) {
+  var result = format;
+  for (var i = 0; i < args.length; i++) {
+    var str;
+    try { str = ToDetailString(args[i]); }
+    catch (e) { str = "#<error>"; }
+    result = ArrayJoin.call(StringSplit.call(result, "%" + i), str);
+  }
+  return result;
+}
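FormatString fills the positional placeholders %0, %1, ... used by the kMessages templates below. A standalone sketch with ordinary string methods (the original calls the cached StringSplit/ArrayJoin helpers rather than methods looked up on the prototypes):

// Standalone equivalent of the substitution loop above.
function formatString(format, args) {
  var result = format;
  for (var i = 0; i < args.length; i++) {
    // split/join replaces every occurrence of "%i" with the i-th argument.
    result = result.split("%" + i).join(String(args[i]));
  }
  return result;
}
formatString("Cannot read property '%0' of %1", ["foo", "undefined"]);
// -> "Cannot read property 'foo' of undefined"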
+
+
+function ToDetailString(obj) {
+  if (obj != null && IS_OBJECT(obj) && obj.toString === $Object.prototype.toString) {
+    var constructor = obj.constructor;
+    if (!constructor) return ToString(obj);
+    var constructorName = constructor.name;
+    if (!constructorName) return ToString(obj);
+    return "#<" + GetInstanceName(constructorName) + ">";
+  } else {
+    return ToString(obj);
+  }
+}
+
+
+function MakeGenericError(constructor, type, args) {
+  if (IS_UNDEFINED(args)) {
+    args = [];
+  }
+  var e = new constructor(kAddMessageAccessorsMarker);
+  e.type = type;
+  e.arguments = args;
+  return e;
+}
+
+
+/**
+ * Setup the Script function and constructor.
+ */
+%FunctionSetInstanceClassName(Script, 'Script');
+%SetProperty(Script.prototype, 'constructor', Script, DONT_ENUM);
+%SetCode(Script, function(x) {
+  // Script objects can only be created by the VM.
+  throw new $Error("Not supported");
+});
+
+
+// Helper functions; called from the runtime system.
+function FormatMessage(message) {
+  if (kMessages === 0) {
+    kMessages = {
+      // Error
+      cyclic_proto:                 "Cyclic __proto__ value",
+      // TypeError
+      unexpected_token:             "Unexpected token %0",
+      unexpected_token_number:      "Unexpected number",
+      unexpected_token_string:      "Unexpected string",
+      unexpected_token_identifier:  "Unexpected identifier",
+      unexpected_eos:               "Unexpected end of input",
+      malformed_regexp:             "Invalid regular expression: /%0/: %1",
+      unterminated_regexp:          "Invalid regular expression: missing /",
+      regexp_flags:                 "Cannot supply flags when constructing one RegExp from another",
+      invalid_lhs_in_assignment:    "Invalid left-hand side in assignment",
+      invalid_lhs_in_for_in:        "Invalid left-hand side in for-in",
+      invalid_lhs_in_postfix_op:    "Invalid left-hand side expression in postfix operation",
+      invalid_lhs_in_prefix_op:     "Invalid left-hand side expression in prefix operation",
+      multiple_defaults_in_switch:  "More than one default clause in switch statement",
+      newline_after_throw:          "Illegal newline after throw",
+      redeclaration:                "%0 '%1' has already been declared",
+      no_catch_or_finally:          "Missing catch or finally after try",
+      unknown_label:                "Undefined label '%0'",
+      uncaught_exception:           "Uncaught %0",
+      stack_trace:                  "Stack Trace:\n%0",
+      called_non_callable:          "%0 is not a function",
+      undefined_method:             "Object %1 has no method '%0'",
+      property_not_function:        "Property '%0' of object %1 is not a function",
+      cannot_convert_to_primitive:  "Cannot convert object to primitive value",
+      not_constructor:              "%0 is not a constructor",
+      not_defined:                  "%0 is not defined",
+      non_object_property_load:     "Cannot read property '%0' of %1",
+      non_object_property_store:    "Cannot set property '%0' of %1",
+      non_object_property_call:     "Cannot call method '%0' of %1",
+      with_expression:              "%0 has no properties",
+      illegal_invocation:           "Illegal invocation",
+      no_setter_in_callback:        "Cannot set property %0 of %1 which has only a getter",
+      apply_non_function:           "Function.prototype.apply was called on %0, which is a %1 and not a function",
+      apply_wrong_args:             "Function.prototype.apply: Arguments list has wrong type",
+      invalid_in_operator_use:      "Cannot use 'in' operator to search for '%0' in %1",
+      instanceof_function_expected: "Expecting a function in instanceof check, but got %0",
+      instanceof_nonobject_proto:   "Function has non-object prototype '%0' in instanceof check",
+      null_to_object:               "Cannot convert null to object",
+      reduce_no_initial:            "Reduce of empty array with no initial value",
+      // RangeError
+      invalid_array_length:         "Invalid array length",
+      stack_overflow:               "Maximum call stack size exceeded",
+      apply_overflow:               "Function.prototype.apply cannot support %0 arguments",
+      // SyntaxError
+      unable_to_parse:              "Parse error",
+      duplicate_regexp_flag:        "Duplicate RegExp flag %0",
+      invalid_regexp:               "Invalid RegExp pattern /%0/",
+      illegal_break:                "Illegal break statement",
+      illegal_continue:             "Illegal continue statement",
+      illegal_return:               "Illegal return statement",
+      error_loading_debugger:       "Error loading debugger",
+      no_input_to_regexp:           "No input to %0",
+      result_not_primitive:         "Result of %0 must be a primitive, was %1",
+      invalid_json:                 "String '%0' is not valid JSON",
+      circular_structure:           "Converting circular structure to JSON",
+      object_keys_non_object:       "Object.keys called on non-object"
+    };
+  }
+  var format = kMessages[message.type];
+  if (!format) return "<unknown message " + message.type + ">";
+  return FormatString(format, message.args);
+}
+
+
+function GetLineNumber(message) {
+  if (message.startPos == -1) return -1;
+  var location = message.script.locationFromPosition(message.startPos, true);
+  if (location == null) return -1;
+  return location.line + 1;
+}
+
+
+// Returns the source code line containing the given source
+// position, or the empty string if the position is invalid.
+function GetSourceLine(message) {
+  var location = message.script.locationFromPosition(message.startPos, true);
+  if (location == null) return "";
+  location.restrict();
+  return location.sourceText();
+}
+
+
+function MakeTypeError(type, args) {
+  return MakeGenericError($TypeError, type, args);
+}
+
+
+function MakeRangeError(type, args) {
+  return MakeGenericError($RangeError, type, args);
+}
+
+
+function MakeSyntaxError(type, args) {
+  return MakeGenericError($SyntaxError, type, args);
+}
+
+
+function MakeReferenceError(type, args) {
+  return MakeGenericError($ReferenceError, type, args);
+}
+
+
+function MakeEvalError(type, args) {
+  return MakeGenericError($EvalError, type, args);
+}
+
+
+function MakeError(type, args) {
+  return MakeGenericError($Error, type, args);
+}
+
+/**
+ * Find a line number given a specific source position.
+ * @param {number} position The source position.
+ * @return {number} 0 if input too small, -1 if input too large,
+ *     else the line number.
+ */
+Script.prototype.lineFromPosition = function(position) {
+  var lower = 0;
+  var upper = this.lineCount() - 1;
+
+  // We'll never find invalid positions so bail right away.
+  if (position > this.line_ends[upper]) {
+    return -1;
+  }
+
+  // This means we don't have to safe-guard indexing line_ends[i - 1].
+  if (position <= this.line_ends[0]) {
+    return 0;
+  }
+
+  // Binary search to find line # from position range.
+  while (upper >= 1) {
+    var i = (lower + upper) >> 1;
+
+    if (position > this.line_ends[i]) {
+      lower = i + 1;
+    } else if (position <= this.line_ends[i - 1]) {
+      upper = i - 1;
+    } else {
+      return i;
+    }
+  }
+  return -1;
+}
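lineFromPosition is a binary search over this.line_ends, where entry i holds the position of the last character of line i. The same search, as a standalone sketch over a plain array:

// lineEnds[i] = position of the last character (the newline) of line i.
function lineFromPosition(lineEnds, position) {
  var lower = 0;
  var upper = lineEnds.length - 1;
  if (position > lineEnds[upper]) return -1;   // beyond the end of the source
  if (position <= lineEnds[0]) return 0;       // no need to guard lineEnds[i - 1]
  while (upper >= 1) {
    var i = (lower + upper) >> 1;
    if (position > lineEnds[i]) lower = i + 1;
    else if (position <= lineEnds[i - 1]) upper = i - 1;
    else return i;
  }
  return -1;
}
// Source "ab\ncd\ne" has line ends at positions 2, 5 and 6:
lineFromPosition([2, 5, 6], 4);   // 1 (position of 'd', on the second line)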
+
+/**
+ * Get information on a specific source position.
+ * @param {number} position The source position
+ * @param {boolean} include_resource_offset Set to true to have the resource
+ *     offset added to the location
+ * @return {SourceLocation}
+ *     If line is negative or not in the source null is returned.
+ */
+Script.prototype.locationFromPosition = function (position,
+                                                  include_resource_offset) {
+  var line = this.lineFromPosition(position);
+  if (line == -1) return null;
+
+  // Determine start, end and column.
+  var start = line == 0 ? 0 : this.line_ends[line - 1] + 1;
+  var end = this.line_ends[line];
+  if (end > 0 && StringCharAt.call(this.source, end - 1) == '\r') end--;
+  var column = position - start;
+
+  // Adjust according to the offset within the resource.
+  if (include_resource_offset) {
+    line += this.line_offset;
+    if (line == this.line_offset) {
+      column += this.column_offset;
+    }
+  }
+
+  return new SourceLocation(this, position, line, column, start, end);
+};
+
+
+/**
+ * Get information on a specific source line and column possibly offset by a
+ * fixed source position. This function is used to find a source position from
+ * a line and column position. The fixed source position offset is typically
+ * used to find a source position in a function based on a line and column in
+ * the source for the function alone. The offset passed will then be the
+ * start position of the source for the function within the full script source.
+ * @param {number} opt_line The line within the source. Default value is 0
+ * @param {number} opt_column The column within the line. Default value is 0
+ * @param {number} opt_offset_position The offset from the beginning of the
+ *     source from where the line and column calculation starts. Default value is 0
+ * @return {SourceLocation}
+ *     If line is negative or not in the source null is returned.
+ */
+Script.prototype.locationFromLine = function (opt_line, opt_column, opt_offset_position) {
+  // Default is the first line in the script. Lines in the script are relative
+  // to the offset within the resource.
+  var line = 0;
+  if (!IS_UNDEFINED(opt_line)) {
+    line = opt_line - this.line_offset;
+  }
+
+  // Default is first column. If on the first line add the offset within the
+  // resource.
+  var column = opt_column || 0;
+  if (line == 0) {
+    column -= this.column_offset;
+  }
+
+  var offset_position = opt_offset_position || 0;
+  if (line < 0 || column < 0 || offset_position < 0) return null;
+  if (line == 0) {
+    return this.locationFromPosition(offset_position + column, false);
+  } else {
+    // Find the line where the offset position is located.
+    var offset_line = this.lineFromPosition(offset_position);
+
+    if (offset_line == -1 || offset_line + line >= this.lineCount()) {
+      return null;
+    }
+
+    return this.locationFromPosition(this.line_ends[offset_line + line - 1] + 1 + column);  // line > 0 here.
+  }
+}
+
+
+/**
+ * Get a slice of source code from the script. The boundaries for the slice is
+ * specified in lines.
+ * @param {number} opt_from_line The first line (zero bound) in the slice.
+ *     Default is 0
+ * @param {number} opt_to_line The last line (zero bound) in the slice (non
+ *     inclusive). Default is the number of lines in the script
+ * @return {SourceSlice} The source slice or null if the parameters were
+ *     invalid
+ */
+Script.prototype.sourceSlice = function (opt_from_line, opt_to_line) {
+  var from_line = IS_UNDEFINED(opt_from_line) ? this.line_offset : opt_from_line;
+  var to_line = IS_UNDEFINED(opt_to_line) ? this.line_offset + this.lineCount() : opt_to_line;
+
+  // Adjust according to the offset within the resource.
+  from_line -= this.line_offset;
+  to_line -= this.line_offset;
+  if (from_line < 0) from_line = 0;
+  if (to_line > this.lineCount()) to_line = this.lineCount();
+
+  // Check parameters.
+  if (from_line >= this.lineCount() ||
+      to_line < 0 ||
+      from_line > to_line) {
+    return null;
+  }
+
+  var from_position = from_line == 0 ? 0 : this.line_ends[from_line - 1] + 1;
+  var to_position = to_line == 0 ? 0 : this.line_ends[to_line - 1] + 1;
+
+  // Return a source slice with line numbers re-adjusted to the resource.
+  return new SourceSlice(this, from_line + this.line_offset, to_line + this.line_offset,
+                         from_position, to_position);
+}
+
+
+Script.prototype.sourceLine = function (opt_line) {
+  // Default is the first line in the script. Lines in the script are relative
+  // to the offset within the resource.
+  var line = 0;
+  if (!IS_UNDEFINED(opt_line)) {
+    line = opt_line - this.line_offset;
+  }
+
+  // Check parameter.
+  if (line < 0 || this.lineCount() <= line) {
+    return null;
+  }
+
+  // Return the source line.
+  var start = line == 0 ? 0 : this.line_ends[line - 1] + 1;
+  var end = this.line_ends[line];
+  return StringSubstring.call(this.source, start, end);
+}
+
+
+/**
+ * Returns the number of source lines.
+ * @return {number}
+ *     Number of source lines.
+ */
+Script.prototype.lineCount = function() {
+  // Return number of source lines.
+  return this.line_ends.length;
+};
+
+
+/**
+ * Class for source location. A source location is a position within some
+ * source with the following properties:
+ *   script   : script object for the source
+ *   line     : source line number
+ *   column   : source column within the line
+ *   position : position within the source
+ *   start    : position of start of source context (inclusive)
+ *   end      : position of end of source context (not inclusive)
+ * Source text for the source context is the character interval [start, end[. In
+ * most cases end will point to a newline character. It might point just past
+ * the final position of the source if the last source line does not end with a
+ * newline character.
+ * @param {Script} script The Script object for which this is a location
+ * @param {number} position Source position for the location
+ * @param {number} line The line number for the location
+ * @param {number} column The column within the line for the location
+ * @param {number} start Source position for start of source context
+ * @param {number} end Source position for end of source context
+ * @constructor
+ */
+function SourceLocation(script, position, line, column, start, end) {
+  this.script = script;
+  this.position = position;
+  this.line = line;
+  this.column = column;
+  this.start = start;
+  this.end = end;
+}
+
+
+const kLineLengthLimit = 78;
+
+/**
+ * Restrict source location start and end positions to make the source slice
+ * no more than a certain number of characters wide.
+ * @param {number} opt_limit The width limit of the source text with a default
+ *     of 78
+ * @param {number} opt_before The number of characters to prefer before the
+ *     position with a default value of 10 less than the limit
+ */
+SourceLocation.prototype.restrict = function (opt_limit, opt_before) {
+  // Find the actual limit to use.
+  var limit;
+  var before;
+  if (!IS_UNDEFINED(opt_limit)) {
+    limit = opt_limit;
+  } else {
+    limit = kLineLengthLimit;
+  }
+  if (!IS_UNDEFINED(opt_before)) {
+    before = opt_before;
+  } else {
+    // If no before is specified, center for small limits and prefer more
+    // source before the position than after for longer limits.
+    if (limit <= 20) {
+      before = $floor(limit / 2);
+    } else {
+      before = limit - 10;
+    }
+  }
+  if (before >= limit) {
+    before = limit - 1;
+  }
+
+  // If the [start, end[ interval is too big we restrict
+  // it in one or both ends. We make sure to always produce
+  // restricted intervals of maximum allowed size.
+  if (this.end - this.start > limit) {
+    var start_limit = this.position - before;
+    var end_limit = this.position + limit - before;
+    if (this.start < start_limit && end_limit < this.end) {
+      this.start = start_limit;
+      this.end = end_limit;
+    } else if (this.start < start_limit) {
+      this.start = this.end - limit;
+    } else {
+      this.end = this.start + limit;
+    }
+  }
+};
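The clamping above keeps at most `limit` characters of context, trying to place `before` of them ahead of the error position and sliding the window when it would fall off either end of [start, end[. A standalone sketch of the same rule:

// Returns the restricted [start, end[ pair; mirrors the logic of restrict().
function restrictRange(start, end, position, limit, before) {
  if (end - start <= limit) return [start, end];
  var startLimit = position - before;
  var endLimit = position + limit - before;
  if (start < startLimit && endLimit < end) return [startLimit, endLimit];
  if (start < startLimit) return [end - limit, end];   // preferred window runs past the end
  return [start, start + limit];                       // preferred window starts before `start`
}
restrictRange(0, 200, 150, 20, 5);   // [145, 165]: 5 chars before position 150, 15 after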
+
+
+/**
+ * Get the source text for a SourceLocation
+ * @return {String}
+ *     Source text for this location.
+ */
+SourceLocation.prototype.sourceText = function () {
+  return StringSubstring.call(this.script.source, this.start, this.end);
+};
+
+
+/**
+ * Class for a source slice. A source slice is a part of a script source with
+ * the following properties:
+ *   script        : script object for the source
+ *   from_line     : line number for the first line in the slice
+ *   to_line       : source line number for the last line in the slice
+ *   from_position : position of the first character in the slice
+ *   to_position   : position of the last character in the slice
+ * The to_line and to_position are not included in the slice; that is, the
+ * lines in the slice are [from_line, to_line[. Likewise, the characters in
+ * the slice are [from_position, to_position[.
+ * @param {Script} script The Script object for the source slice
+ * @param {number} from_line
+ * @param {number} to_line
+ * @param {number} from_position
+ * @param {number} to_position
+ * @constructor
+ */
+function SourceSlice(script, from_line, to_line, from_position, to_position) {
+  this.script = script;
+  this.from_line = from_line;
+  this.to_line = to_line;
+  this.from_position = from_position;
+  this.to_position = to_position;
+}
+
+
+/**
+ * Get the source text for a SourceSlice
+ * @return {String} Source text for this slice. The last line will include
+ *     the line terminating characters (if any)
+ */
+SourceSlice.prototype.sourceText = function () {
+  return StringSubstring.call(this.script.source, this.from_position, this.to_position);
+};
+
+
+// Returns the offset of the given position within the containing
+// line.
+function GetPositionInLine(message) {
+  var location = message.script.locationFromPosition(message.startPos, false);
+  if (location == null) return -1;
+  location.restrict();
+  return message.startPos - location.start;
+}
+
+
+function ErrorMessage(type, args, startPos, endPos, script, stackTrace) {
+  this.startPos = startPos;
+  this.endPos = endPos;
+  this.type = type;
+  this.args = args;
+  this.script = script;
+  this.stackTrace = stackTrace;
+}
+
+
+function MakeMessage(type, args, startPos, endPos, script, stackTrace) {
+  return new ErrorMessage(type, args, startPos, endPos, script, stackTrace);
+}
+
+
+function GetStackTraceLine(recv, fun, pos, isGlobal) {
+  return FormatSourcePosition(new CallSite(recv, fun, pos));
+}
+
+// ----------------------------------------------------------------------------
+// Error implementation
+
+// Defines accessors for a property that is calculated the first time
+// the property is read.
+function DefineOneShotAccessor(obj, name, fun) {
+  // Note that the accessors consistently operate on 'obj', not 'this'.
+  // Since the object may occur in someone else's prototype chain we
+  // can't rely on 'this' being the same as 'obj'.
+  var hasBeenSet = false;
+  var value;
+  obj.__defineGetter__(name, function () {
+    if (hasBeenSet) {
+      return value;
+    }
+    hasBeenSet = true;
+    value = fun(obj);
+    return value;
+  });
+  obj.__defineSetter__(name, function (v) {
+    hasBeenSet = true;
+    value = v;
+  });
+}
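DefineOneShotAccessor is the lazy-initialization helper used below for the 'message' and 'stack' properties: the getter computes the value on first read and caches it, and an explicit assignment replaces the pending computation. A usage sketch with a hypothetical object (not from the original file):

// The expensive function runs at most once, and only if the property is read.
var o = {};
var calls = 0;
DefineOneShotAccessor(o, 'answer', function (obj) { calls++; return 42; });
o.answer;      // 42; computed now, calls === 1
o.answer;      // 42; cached, calls is still 1
o.answer = 7;  // the setter stores 7 and marks the value as set
o.answer;      // 7; the stored value wins, the function is never called again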
+
+function CallSite(receiver, fun, pos) {
+  this.receiver = receiver;
+  this.fun = fun;
+  this.pos = pos;
+}
+
+CallSite.prototype.getThis = function () {
+  return this.receiver;
+};
+
+CallSite.prototype.getTypeName = function () {
+  var constructor = this.receiver.constructor;
+  if (!constructor)
+    return $Object.prototype.toString.call(this.receiver);
+  var constructorName = constructor.name;
+  if (!constructorName)
+    return $Object.prototype.toString.call(this.receiver);
+  return constructorName;
+};
+
+CallSite.prototype.isToplevel = function () {
+  if (this.receiver == null)
+    return true;
+  return IS_GLOBAL(this.receiver);
+};
+
+CallSite.prototype.isEval = function () {
+  var script = %FunctionGetScript(this.fun);
+  return script && script.compilation_type == 1;
+};
+
+CallSite.prototype.getEvalOrigin = function () {
+  var script = %FunctionGetScript(this.fun);
+  if (!script || script.compilation_type != 1)
+    return null;
+  return new CallSite(null, script.eval_from_function,
+      script.eval_from_position);
+};
+
+CallSite.prototype.getFunction = function () {
+  return this.fun;
+};
+
+CallSite.prototype.getFunctionName = function () {
+  // See if the function knows its own name.
+  var name = this.fun.name;
+  if (name) {
+    return name;
+  }
+  // Fall back to the name inferred by the parser, if any.
+  name = %FunctionGetInferredName(this.fun);
+  if (name) {
+    return name;
+  }
+  // Maybe this is an evaluation?
+  var script = %FunctionGetScript(this.fun);
+  if (script && script.compilation_type == 1)
+    return "eval";
+  return null;
+};
+
+CallSite.prototype.getMethodName = function () {
+  // See if we can find a unique property on the receiver that holds
+  // this function.
+  var ownName = this.fun.name;
+  if (ownName && this.receiver && this.receiver[ownName] === this.fun)
+    // To handle DontEnum properties we guess that the method has
+    // the same name as the function.
+    return ownName;
+  var name = null;
+  for (var prop in this.receiver) {
+    if (this.receiver[prop] === this.fun) {
+      // If we find more than one match bail out to avoid confusion
+      if (name)
+        return null;
+      name = prop;
+    }
+  }
+  if (name)
+    return name;
+  return null;
+};
+
+CallSite.prototype.getFileName = function () {
+  var script = %FunctionGetScript(this.fun);
+  return script ? script.name : null;
+};
+
+CallSite.prototype.getLineNumber = function () {
+  if (this.pos == -1)
+    return null;
+  var script = %FunctionGetScript(this.fun);
+  var location = null;
+  if (script) {
+    location = script.locationFromPosition(this.pos, true);
+  }
+  return location ? location.line + 1 : null;
+};
+
+CallSite.prototype.getColumnNumber = function () {
+  if (this.pos == -1)
+    return null;
+  var script = %FunctionGetScript(this.fun);
+  var location = null;
+  if (script) {
+    location = script.locationFromPosition(this.pos, true);
+  }
+  return location ? location.column : null;
+};
+
+CallSite.prototype.isNative = function () {
+  var script = %FunctionGetScript(this.fun);
+  return script ? (script.type == 0) : false;
+};
+
+CallSite.prototype.getPosition = function () {
+  return this.pos;
+};
+
+CallSite.prototype.isConstructor = function () {
+  var constructor = this.receiver ? this.receiver.constructor : null;
+  if (!constructor)
+    return false;
+  return this.fun === constructor;
+};
+
+function FormatSourcePosition(frame) {
+  var fileLocation = "";
+  if (frame.isNative()) {
+    fileLocation = "native";
+  } else if (frame.isEval()) {
+    fileLocation = "eval at " + FormatSourcePosition(frame.getEvalOrigin());
+  } else {
+    var fileName = frame.getFileName();
+    if (fileName) {
+      fileLocation += fileName;
+      var lineNumber = frame.getLineNumber();
+      if (lineNumber != null) {
+        fileLocation += ":" + lineNumber;
+        var columnNumber = frame.getColumnNumber();
+        if (columnNumber) {
+          fileLocation += ":" + columnNumber;
+        }
+      }
+    }
+  }
+  if (!fileLocation) {
+    fileLocation = "unknown source";
+  }
+  var line = "";
+  var functionName = frame.getFunction().name;
+  var methodName = frame.getMethodName();
+  var addPrefix = true;
+  var isConstructor = frame.isConstructor();
+  var isMethodCall = !(frame.isToplevel() || isConstructor);
+  if (isMethodCall) {
+    line += frame.getTypeName() + ".";
+    if (functionName) {
+      line += functionName;
+      if (methodName && (methodName != functionName)) {
+        line += " [as " + methodName + "]";
+      }
+    } else {
+      line += methodName || "<anonymous>";
+    }
+  } else if (isConstructor) {
+    line += "new " + (functionName || "<anonymous>");
+  } else if (functionName) {
+    line += functionName;
+  } else {
+    line += fileLocation;
+    addPrefix = false;
+  }
+  if (addPrefix) {
+    line += " (" + fileLocation + ")";
+  }
+  return line;
+}
+
+function FormatStackTrace(error, frames) {
+  var lines = [];
+  try {
+    lines.push(error.toString());
+  } catch (e) {
+    try {
+      lines.push("<error: " + e + ">");
+    } catch (ee) {
+      lines.push("<error>");
+    }
+  }
+  for (var i = 0; i < frames.length; i++) {
+    var frame = frames[i];
+    var line;
+    try {
+      line = FormatSourcePosition(frame);
+    } catch (e) {
+      try {
+        line = "<error: " + e + ">";
+      } catch (ee) {
+        // Any code that reaches this point is seriously nasty!
+        line = "<error>";
+      }
+    }
+    lines.push("    at " + line);
+  }
+  return lines.join("\n");
+}
+
+function FormatRawStackTrace(error, raw_stack) {
+  var frames = [ ];
+  for (var i = 0; i < raw_stack.length; i += 3) {
+    var recv = raw_stack[i];
+    var fun = raw_stack[i+1];
+    var pc = raw_stack[i+2];
+    var pos = %FunctionGetPositionForOffset(fun, pc);
+    frames.push(new CallSite(recv, fun, pos));
+  }
+  if (IS_FUNCTION($Error.prepareStackTrace)) {
+    return $Error.prepareStackTrace(error, frames);
+  } else {
+    return FormatStackTrace(error, frames);
+  }
+}
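+
+// Usage sketch: user code may install Error.prepareStackTrace to receive the
+// CallSite objects built above instead of the default string formatting, e.g.
+//
+//   Error.prepareStackTrace = function (error, frames) {
+//     var out = [];
+//     for (var i = 0; i < frames.length; i++) {
+//       out.push(frames[i].getFileName() + ':' + frames[i].getLineNumber());
+//     }
+//     return out;
+//   };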
+
+function DefineError(f) {
+  // Store the error function in both the global object
+  // and the runtime object. The function is fetched
+  // from the runtime object when throwing errors from
+  // within the runtime system to avoid strange side
+  // effects when overwriting the error functions from
+  // user code.
+  var name = f.name;
+  %SetProperty(global, name, f, DONT_ENUM);
+  this['$' + name] = f;
+  // Configure the error function.
+  if (name == 'Error') {
+    // The prototype of the Error object must itself be an error.
+    // However, it can't be an instance of the Error object because
+    // it hasn't been properly configured yet.  Instead we create a
+    // special not-a-true-error-but-close-enough object.
+    function ErrorPrototype() {}
+    %FunctionSetPrototype(ErrorPrototype, $Object.prototype);
+    %FunctionSetInstanceClassName(ErrorPrototype, 'Error');
+    %FunctionSetPrototype(f, new ErrorPrototype());
+  } else {
+    %FunctionSetPrototype(f, new $Error());
+  }
+  %FunctionSetInstanceClassName(f, 'Error');
+  %SetProperty(f.prototype, 'constructor', f, DONT_ENUM);
+  f.prototype.name = name;
+  %SetCode(f, function(m) {
+    if (%_IsConstructCall()) {
+      if (m === kAddMessageAccessorsMarker) {
+        DefineOneShotAccessor(this, 'message', function (obj) {
+          return FormatMessage({type: obj.type, args: obj.arguments});
+        });
+      } else if (!IS_UNDEFINED(m)) {
+        this.message = ToString(m);
+      }
+      captureStackTrace(this, f);
+    } else {
+      return new f(m);
+    }
+  });
+}
+
+function captureStackTrace(obj, cons_opt) {
+  var stackTraceLimit = $Error.stackTraceLimit;
+  if (!stackTraceLimit) return;
+  if (stackTraceLimit < 0 || stackTraceLimit > 10000)
+    stackTraceLimit = 10000;
+  var raw_stack = %CollectStackTrace(cons_opt ? cons_opt : captureStackTrace,
+      stackTraceLimit);
+  DefineOneShotAccessor(obj, 'stack', function (obj) {
+    return FormatRawStackTrace(obj, raw_stack);
+  });
+};
+
+$Math.__proto__ = global.Object.prototype;
+
+DefineError(function Error() { });
+DefineError(function TypeError() { });
+DefineError(function RangeError() { });
+DefineError(function SyntaxError() { });
+DefineError(function ReferenceError() { });
+DefineError(function EvalError() { });
+DefineError(function URIError() { });
+
+$Error.captureStackTrace = captureStackTrace;
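+
+// Usage sketch: with captureStackTrace exposed on Error, user code can bound
+// the number of collected frames and hide its own helper frame, e.g.
+//
+//   Error.stackTraceLimit = 20;
+//   function fail(msg) {
+//     var err = new Error(msg);
+//     Error.captureStackTrace(err, fail);  // frames up to and including fail are omitted
+//     return err;
+//   }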
+
+// Setup extra properties of the Error.prototype object.
+$Error.prototype.message = '';
+
+%SetProperty($Error.prototype, 'toString', function toString() {
+  var type = this.type;
+  if (type && !this.hasOwnProperty("message")) {
+    return this.name + ": " + FormatMessage({ type: type, args: this.arguments });
+  }
+  var message = this.message;
+  return this.name + (message ? (": " + message) : "");
+}, DONT_ENUM);
+
+
+// Boilerplate for exceptions for stack overflows. Used from
+// Top::StackOverflow().
+const kStackOverflowBoilerplate = MakeRangeError('stack_overflow', []);
diff --git a/src/mirror-delay.js b/src/mirror-delay.js
new file mode 100644
index 0000000..c4ab7b8
--- /dev/null
+++ b/src/mirror-delay.js
@@ -0,0 +1,2277 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Touch the RegExp and Date functions to make sure that date-delay.js and
+// regexp-delay.js have been loaded. This is required as the mirrors use
+// functions within these files through the builtins object.
+RegExp;
+Date;
+
+
+// Handle id counters.
+var next_handle_ = 0;
+var next_transient_handle_ = -1;
+
+// Mirror cache.
+var mirror_cache_ = [];
+
+
+/**
+ * Clear the mirror handle cache.
+ */
+function ClearMirrorCache() {
+  next_handle_ = 0;
+  mirror_cache_ = [];
+}
+
+
+/**
+ * Returns the mirror for a specified value or object.
+ *
+ * @param {value or Object} value the value or object to retrieve the mirror for
+ * @param {boolean} opt_transient indicate whether this object is transient and
+ *    should not be added to the mirror cache. The default is not transient.
+ * @returns {Mirror} the mirror reflecting the passed value or object
+ */
+function MakeMirror(value, opt_transient) {
+  var mirror;
+
+  // Look for non transient mirrors in the mirror cache.
+  if (!opt_transient) {
+    for (var id in mirror_cache_) {
+      mirror = mirror_cache_[id];
+      if (mirror.value() === value) {
+        return mirror;
+      }
+      // Special check for NaN as NaN == NaN is false.
+      if (mirror.isNumber() && isNaN(mirror.value()) &&
+          typeof value == 'number' && isNaN(value)) {
+        return mirror;
+      }
+    }
+  }
+  
+  if (IS_UNDEFINED(value)) {
+    mirror = new UndefinedMirror();
+  } else if (IS_NULL(value)) {
+    mirror = new NullMirror();
+  } else if (IS_BOOLEAN(value)) {
+    mirror = new BooleanMirror(value);
+  } else if (IS_NUMBER(value)) {
+    mirror = new NumberMirror(value);
+  } else if (IS_STRING(value)) {
+    mirror = new StringMirror(value);
+  } else if (IS_ARRAY(value)) {
+    mirror = new ArrayMirror(value);
+  } else if (IS_DATE(value)) {
+    mirror = new DateMirror(value);
+  } else if (IS_FUNCTION(value)) {
+    mirror = new FunctionMirror(value);
+  } else if (IS_REGEXP(value)) {
+    mirror = new RegExpMirror(value);
+  } else if (IS_ERROR(value)) {
+    mirror = new ErrorMirror(value);
+  } else if (IS_SCRIPT(value)) {
+    mirror = new ScriptMirror(value);
+  } else {
+    mirror = new ObjectMirror(value, OBJECT_TYPE, opt_transient);
+  }
+
+  mirror_cache_[mirror.handle()] = mirror;
+  return mirror;
+}
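+
+
+// Usage sketch: MakeMirror dispatches on the type of the value and caches
+// non-transient mirrors, so asking again for the same value yields the same
+// mirror (and handle), e.g.
+//
+//   var a = [1, 2, 3];
+//   var m = MakeMirror(a);           // ArrayMirror with a fresh handle
+//   MakeMirror(a) === m;             // true: served from mirror_cache_
+//   LookupMirror(m.handle()) === m;  // true as well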
+
+
+/**
+ * Returns the mirror for a specified mirror handle.
+ *
+ * @param {number} handle the handle to find the mirror for
+ * @returns {Mirror or undefined} the mirror with the requested handle or
+ *     undefined if no mirror with the requested handle was found
+ */
+function LookupMirror(handle) {
+  return mirror_cache_[handle];
+}
+
+  
+/**
+ * Returns the mirror for the undefined value.
+ *
+ * @returns {Mirror} the mirror reflects the undefined value
+ */
+function GetUndefinedMirror() {
+  return MakeMirror(void 0);
+}
+
+
+/**
+ * Inherit the prototype methods from one constructor into another.
+ *
+ * The Function.prototype.inherits from lang.js rewritten as a standalone
+ * function (not on Function.prototype). NOTE: If this file is to be loaded
+ * during bootstrapping this function needs to be rewritten using some native
+ * functions as prototype setup using normal JavaScript does not work as
+ * expected during bootstrapping (see mirror.js in r114903).
+ *
+ * @param {function} ctor Constructor function which needs to inherit the
+ *     prototype
+ * @param {function} superCtor Constructor function to inherit prototype from
+ */
+function inherits(ctor, superCtor) {
+  var tempCtor = function(){};
+  tempCtor.prototype = superCtor.prototype;
+  ctor.super_ = superCtor.prototype;
+  ctor.prototype = new tempCtor();
+  ctor.prototype.constructor = ctor;
+}
+
+
+// Type names of the different mirrors.
+const UNDEFINED_TYPE = 'undefined';
+const NULL_TYPE = 'null';
+const BOOLEAN_TYPE = 'boolean';
+const NUMBER_TYPE = 'number';
+const STRING_TYPE = 'string';
+const OBJECT_TYPE = 'object';
+const FUNCTION_TYPE = 'function';
+const REGEXP_TYPE = 'regexp';
+const ERROR_TYPE = 'error';
+const PROPERTY_TYPE = 'property';
+const FRAME_TYPE = 'frame';
+const SCRIPT_TYPE = 'script';
+const CONTEXT_TYPE = 'context';
+const SCOPE_TYPE = 'scope';
+
+// Maximum length when sending strings through the JSON protocol.
+const kMaxProtocolStringLength = 80;
+
+// Different kind of properties.
+PropertyKind = {};
+PropertyKind.Named   = 1;
+PropertyKind.Indexed = 2;
+
+
+// A copy of the PropertyType enum from global.h
+PropertyType = {};
+PropertyType.Normal             = 0;
+PropertyType.Field              = 1;
+PropertyType.ConstantFunction   = 2;
+PropertyType.Callbacks          = 3;
+PropertyType.Interceptor        = 4;
+PropertyType.MapTransition      = 5;
+PropertyType.ConstantTransition = 6;
+PropertyType.NullDescriptor     = 7;
+
+
+// Different attributes for a property.
+PropertyAttribute = {};
+PropertyAttribute.None       = NONE;
+PropertyAttribute.ReadOnly   = READ_ONLY;
+PropertyAttribute.DontEnum   = DONT_ENUM;
+PropertyAttribute.DontDelete = DONT_DELETE;
+
+
+// A copy of the scope types from runtime.cc.
+ScopeType = { Global: 0,
+              Local: 1,
+              With: 2,
+              Closure: 3,
+              Catch: 4 };
+
+
+// Mirror hierarchy:
+//   - Mirror
+//     - ValueMirror
+//       - UndefinedMirror
+//       - NullMirror
+//       - NumberMirror
+//       - StringMirror
+//       - ObjectMirror
+//         - FunctionMirror
+//           - UnresolvedFunctionMirror
+//         - ArrayMirror
+//         - DateMirror
+//         - RegExpMirror
+//         - ErrorMirror
+//     - PropertyMirror
+//     - FrameMirror
+//     - ScriptMirror
+
+
+/**
+ * Base class for all mirror objects.
+ * @param {string} type The type of the mirror
+ * @constructor
+ */
+function Mirror(type) {
+  this.type_ = type;
+};
+
+
+Mirror.prototype.type = function() {
+  return this.type_;
+};
+
+
+/**
+ * Check whether the mirror reflects a value.
+ * @returns {boolean} True if the mirror reflects a value.
+ */
+Mirror.prototype.isValue = function() {
+  return this instanceof ValueMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects the undefined value.
+ * @returns {boolean} True if the mirror reflects the undefined value.
+ */
+Mirror.prototype.isUndefined = function() {
+  return this instanceof UndefinedMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects the null value.
+ * @returns {boolean} True if the mirror reflects the null value
+ */
+Mirror.prototype.isNull = function() {
+  return this instanceof NullMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects a boolean value.
+ * @returns {boolean} True if the mirror reflects a boolean value
+ */
+Mirror.prototype.isBoolean = function() {
+  return this instanceof BooleanMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects a number value.
+ * @returns {boolean} True if the mirror reflects a number value
+ */
+Mirror.prototype.isNumber = function() {
+  return this instanceof NumberMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects a string value.
+ * @returns {boolean} True if the mirror reflects a string value
+ */
+Mirror.prototype.isString = function() {
+  return this instanceof StringMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects an object.
+ * @returns {boolean} True if the mirror reflects an object
+ */
+Mirror.prototype.isObject = function() {
+  return this instanceof ObjectMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects a function.
+ * @returns {boolean} True if the mirror reflects a function
+ */
+Mirror.prototype.isFunction = function() {
+  return this instanceof FunctionMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects an unresolved function.
+ * @returns {boolean} True if the mirror reflects an unresolved function
+ */
+Mirror.prototype.isUnresolvedFunction = function() {
+  return this instanceof UnresolvedFunctionMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects an array.
+ * @returns {boolean} True if the mirror reflects an array
+ */
+Mirror.prototype.isArray = function() {
+  return this instanceof ArrayMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects a date.
+ * @returns {boolean} True if the mirror reflects a date
+ */
+Mirror.prototype.isDate = function() {
+  return this instanceof DateMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects a regular expression.
+ * @returns {boolean} True if the mirror reflects a regular expression
+ */
+Mirror.prototype.isRegExp = function() {
+  return this instanceof RegExpMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects an error.
+ * @returns {boolean} True if the mirror reflects an error
+ */
+Mirror.prototype.isError = function() {
+  return this instanceof ErrorMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects a property.
+ * @returns {boolean} True if the mirror reflects a property
+ */
+Mirror.prototype.isProperty = function() {
+  return this instanceof PropertyMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects a stack frame.
+ * @returns {boolean} True if the mirror reflects a stack frame
+ */
+Mirror.prototype.isFrame = function() {
+  return this instanceof FrameMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects a script.
+ * @returns {boolean} True if the mirror reflects a script
+ */
+Mirror.prototype.isScript = function() {
+  return this instanceof ScriptMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects a context.
+ * @returns {boolean} True if the mirror reflects a context
+ */
+Mirror.prototype.isContext = function() {
+  return this instanceof ContextMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects a scope.
+ * @returns {boolean} True if the mirror reflects a scope
+ */
+Mirror.prototype.isScope = function() {
+  return this instanceof ScopeMirror;
+}
+
+
+/**
+ * Allocate a handle id for this object.
+ */
+Mirror.prototype.allocateHandle_ = function() {
+  this.handle_ = next_handle_++;
+}
+
+
+/**
+ * Allocate a transient handle id for this object. Transient handles are
+ * negative.
+ */
+Mirror.prototype.allocateTransientHandle_ = function() {
+  this.handle_ = next_transient_handle_--;
+}
+
+
+Mirror.prototype.toText = function() {
+  // Simple text representation used when there is no specialization in a subclass.
+  return "#<" + builtins.GetInstanceName(this.constructor.name) + ">";
+}
+
+
+/**
+ * Base class for all value mirror objects.
+ * @param {string} type The type of the mirror
+ * @param {value} value The value reflected by this mirror
+ * @param {boolean} transient indicate whether this object is transient with a
+ *    transient handle
+ * @constructor
+ * @extends Mirror
+ */
+function ValueMirror(type, value, transient) {
+  Mirror.call(this, type);
+  this.value_ = value;
+  if (!transient) {
+    this.allocateHandle_();
+  } else {
+    this.allocateTransientHandle_();
+  }
+}
+inherits(ValueMirror, Mirror);
+
+
+Mirror.prototype.handle = function() {
+  return this.handle_;
+};
+
+
+/**
+ * Check whether this is a primitive value.
+ * @return {boolean} True if the mirror reflects a primitive value
+ */
+ValueMirror.prototype.isPrimitive = function() {
+  var type = this.type();
+  return type === 'undefined' ||
+         type === 'null' ||
+         type === 'boolean' ||
+         type === 'number' ||
+         type === 'string';
+};
+
+
+/**
+ * Get the actual value reflected by this mirror.
+ * @return {value} The value reflected by this mirror
+ */
+ValueMirror.prototype.value = function() {
+  return this.value_;
+};
+
+
+/**
+ * Mirror object for Undefined.
+ * @constructor
+ * @extends ValueMirror
+ */
+function UndefinedMirror() {
+  ValueMirror.call(this, UNDEFINED_TYPE, void 0);
+}
+inherits(UndefinedMirror, ValueMirror);
+
+
+UndefinedMirror.prototype.toText = function() {
+  return 'undefined';
+}
+
+
+/**
+ * Mirror object for null.
+ * @constructor
+ * @extends ValueMirror
+ */
+function NullMirror() {
+  ValueMirror.call(this, NULL_TYPE, null);
+}
+inherits(NullMirror, ValueMirror);
+
+
+NullMirror.prototype.toText = function() {
+  return 'null';
+}
+
+
+/**
+ * Mirror object for boolean values.
+ * @param {boolean} value The boolean value reflected by this mirror
+ * @constructor
+ * @extends ValueMirror
+ */
+function BooleanMirror(value) {
+  ValueMirror.call(this, BOOLEAN_TYPE, value);
+}
+inherits(BooleanMirror, ValueMirror);
+
+
+BooleanMirror.prototype.toText = function() {
+  return this.value_ ? 'true' : 'false';
+}
+
+
+/**
+ * Mirror object for number values.
+ * @param {number} value The number value reflected by this mirror
+ * @constructor
+ * @extends ValueMirror
+ */
+function NumberMirror(value) {
+  ValueMirror.call(this, NUMBER_TYPE, value);
+}
+inherits(NumberMirror, ValueMirror);
+
+
+NumberMirror.prototype.toText = function() {
+  return %NumberToString(this.value_);
+}
+
+
+/**
+ * Mirror object for string values.
+ * @param {string} value The string value reflected by this mirror
+ * @constructor
+ * @extends ValueMirror
+ */
+function StringMirror(value) {
+  ValueMirror.call(this, STRING_TYPE, value);
+}
+inherits(StringMirror, ValueMirror);
+
+
+StringMirror.prototype.length = function() {
+  return this.value_.length;
+};
+
+
+StringMirror.prototype.toText = function() {
+  if (this.length() > kMaxProtocolStringLength) {
+    return this.value_.substring(0, kMaxProtocolStringLength) +
+           '... (length: ' + this.length() + ')';
+  } else {
+    return this.value_;
+  }
+}
+
+
+/**
+ * Mirror object for objects.
+ * @param {object} value The object reflected by this mirror
+ * @param {boolean} transient indicate whether this object is transient with a
+ *    transient handle
+ * @constructor
+ * @extends ValueMirror
+ */
+function ObjectMirror(value, type, transient) {
+  ValueMirror.call(this, type || OBJECT_TYPE, value, transient);
+}
+inherits(ObjectMirror, ValueMirror);
+
+
+ObjectMirror.prototype.className = function() {
+  return %_ClassOf(this.value_);
+};
+
+
+ObjectMirror.prototype.constructorFunction = function() {
+  return MakeMirror(%DebugGetProperty(this.value_, 'constructor'));
+};
+
+
+ObjectMirror.prototype.prototypeObject = function() {
+  return MakeMirror(%DebugGetProperty(this.value_, 'prototype'));
+};
+
+
+ObjectMirror.prototype.protoObject = function() {
+  return MakeMirror(%DebugGetPrototype(this.value_));
+};
+
+
+ObjectMirror.prototype.hasNamedInterceptor = function() {
+  // Get information on interceptors for this object.
+  var x = %DebugInterceptorInfo(this.value_);
+  return (x & 2) != 0;
+};
+
+
+ObjectMirror.prototype.hasIndexedInterceptor = function() {
+  // Get information on interceptors for this object.
+  var x = %DebugInterceptorInfo(this.value_);
+  return (x & 1) != 0;
+};
+
+
+/**
+ * Return the property names for this object.
+ * @param {number} kind Indicate whether named, indexed or both kinds of
+ *     properties are requested
+ * @param {number} limit Limit the number of names returned to the specified
+ *     value
+ * @return {Array} Property names for this object
+ */
+ObjectMirror.prototype.propertyNames = function(kind, limit) {
+  // Find kind and limit and allocate array for the result
+  kind = kind || PropertyKind.Named | PropertyKind.Indexed;
+
+  var propertyNames;
+  var elementNames;
+  var total = 0;
+  
+  // Find all the named properties.
+  if (kind & PropertyKind.Named) {
+    // Get the local property names.
+    propertyNames = %DebugLocalPropertyNames(this.value_);
+    total += propertyNames.length;
+
+    // Get names for named interceptor properties if any.
+    if (this.hasNamedInterceptor() && (kind & PropertyKind.Named)) {
+      var namedInterceptorNames =
+          %DebugNamedInterceptorPropertyNames(this.value_);
+      if (namedInterceptorNames) {
+        propertyNames = propertyNames.concat(namedInterceptorNames);
+        total += namedInterceptorNames.length;
+      }
+    }
+  }
+
+  // Find all the indexed properties.
+  if (kind & PropertyKind.Indexed) {
+    // Get the local element names.
+    elementNames = %DebugLocalElementNames(this.value_);
+    total += elementNames.length;
+
+    // Get names for indexed interceptor properties.
+    if (this.hasIndexedInterceptor() && (kind & PropertyKind.Indexed)) {
+      var indexedInterceptorNames =
+          %DebugIndexedInterceptorElementNames(this.value_);
+      if (indexedInterceptorNames) {
+        elementNames = elementNames.concat(indexedInterceptorNames);
+        total += indexedInterceptorNames.length;
+      }
+    }
+  }
+  limit = Math.min(limit || total, total);
+
+  var names = new Array(limit);
+  var index = 0;
+
+  // Copy names for named properties.
+  if (kind & PropertyKind.Named) {
+    for (var i = 0; index < limit && i < propertyNames.length; i++) {
+      names[index++] = propertyNames[i];
+    }
+  }
+
+  // Copy names for indexed properties.
+  if (kind & PropertyKind.Indexed) {
+    for (var i = 0; index < limit && i < elementNames.length; i++) {
+      names[index++] = elementNames[i];
+    }
+  }
+
+  return names;
+};
+
+
+/**
+ * Return the properties for this object as an array of PropertyMirror objects.
+ * @param {number} kind Indicate whether named, indexed or both kinds of
+ *     properties are requested
+ * @param {number} limit Limit the number of properties returned to the
+ *     specified value
+ * @return {Array} Property mirrors for this object
+ */
+ObjectMirror.prototype.properties = function(kind, limit) {
+  var names = this.propertyNames(kind, limit);
+  var properties = new Array(names.length);
+  for (var i = 0; i < names.length; i++) {
+    properties[i] = this.property(names[i]);
+  }
+
+  return properties;
+};
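+
+
+// Usage sketch: restrict the lookup to named properties and cap the result,
+// then turn the names into PropertyMirror objects (assumes a debugger context
+// where the %Debug* runtime calls are available), e.g.
+//
+//   var obj = MakeMirror({a: 1, b: 2});
+//   obj.propertyNames(PropertyKind.Named, 10);           // ['a', 'b']
+//   var props = obj.properties(PropertyKind.Named, 10);
+//   props[0].name();   // 'a'
+//   props[0].value();  // NumberMirror reflecting 1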
+
+
+ObjectMirror.prototype.property = function(name) {
+  var details = %DebugGetPropertyDetails(this.value_, %ToString(name));
+  if (details) {
+    return new PropertyMirror(this, name, details);
+  }
+
+  // Nothing found.
+  return GetUndefinedMirror();
+};
+
+
+
+/**
+ * Try to find a property from its value.
+ * @param {Mirror} value The property value to look for
+ * @return {PropertyMirror} The property with the specified value. If no
+ *     property was found with the specified value UndefinedMirror is returned
+ */
+ObjectMirror.prototype.lookupProperty = function(value) {
+  var properties = this.properties();
+
+  // Look for property value in properties.
+  for (var i = 0; i < properties.length; i++) {
+
+    // Skip properties which are defined through accessors.
+    var property = properties[i];
+    if (property.propertyType() != PropertyType.Callbacks) {
+      if (%_ObjectEquals(property.value_, value.value_)) {
+        return property;
+      }
+    }
+  }
+
+  // Nothing found.
+  return GetUndefinedMirror();
+};
+
+
+/**
+ * Returns objects which have direct references to this object
+ * @param {number} opt_max_objects Optional parameter specifying the maximum
+ *     number of referencing objects to return.
+ * @return {Array} The objects which have direct references to this object.
+ */
+ObjectMirror.prototype.referencedBy = function(opt_max_objects) {
+  // Find all objects with direct references to this object.
+  var result = %DebugReferencedBy(this.value_,
+                                  Mirror.prototype, opt_max_objects || 0);
+
+  // Make mirrors for all the references found.
+  for (var i = 0; i < result.length; i++) {
+    result[i] = MakeMirror(result[i]);
+  }
+
+  return result;
+};
+
+
+ObjectMirror.prototype.toText = function() {
+  var name;
+  var ctor = this.constructorFunction();
+  if (ctor.isUndefined()) {
+    name = this.className();
+  } else {
+    name = ctor.name();
+    if (!name) {
+      name = this.className();
+    }
+  }
+  return '#<' + builtins.GetInstanceName(name) + '>';
+};
+
+
+/**
+ * Mirror object for functions.
+ * @param {function} value The function object reflected by this mirror.
+ * @constructor
+ * @extends ObjectMirror
+ */
+function FunctionMirror(value) {
+  ObjectMirror.call(this, value, FUNCTION_TYPE);
+  this.resolved_ = true;
+}
+inherits(FunctionMirror, ObjectMirror);
+
+
+/**
+ * Returns whether the function is resolved.
+ * @return {boolean} True if the function is resolved. Unresolved functions can
+ *     only originate as functions from stack frames
+ */
+FunctionMirror.prototype.resolved = function() {
+  return this.resolved_;
+};
+
+
+/**
+ * Returns the name of the function.
+ * @return {string} Name of the function
+ */
+FunctionMirror.prototype.name = function() {
+  return %FunctionGetName(this.value_);
+};
+
+
+/**
+ * Returns the inferred name of the function.
+ * @return {string} Name of the function
+ */
+FunctionMirror.prototype.inferredName = function() {
+  return %FunctionGetInferredName(this.value_);
+};
+
+
+/**
+ * Returns the source code for the function.
+ * @return {string or undefined} The source code for the function. If the
+ *     function is not resolved undefined will be returned.
+ */
+FunctionMirror.prototype.source = function() {
+  // Return source if function is resolved. Otherwise just fall through to
+  // return undefined.
+  if (this.resolved()) {
+    return builtins.FunctionSourceString(this.value_);
+  }
+};
+
+
+/**
+ * Returns the script object for the function.
+ * @return {ScriptMirror or undefined} Script object for the function or
+ *     undefined if the function has no script
+ */
+FunctionMirror.prototype.script = function() {
+  // Return script if function is resolved. Otherwise just fall through
+  // to return undefined.
+  if (this.resolved()) {
+    var script = %FunctionGetScript(this.value_);
+    if (script) {
+      return MakeMirror(script);
+    }
+  }
+};
+
+
+/**
+ * Returns objects constructed by this function.
+ * @param {number} opt_max_instances Optional parameter specifying the maximum
+ *     number of instances to return.
+ * @return {Array or undefined} The objects constructed by this function.
+ */
+FunctionMirror.prototype.constructedBy = function(opt_max_instances) {
+  if (this.resolved()) {
+    // Find all objects constructed from this function.
+    var result = %DebugConstructedBy(this.value_, opt_max_instances || 0);
+
+    // Make mirrors for all the instances found.
+    for (var i = 0; i < result.length; i++) {
+      result[i] = MakeMirror(result[i]);
+    }
+
+    return result;
+  } else {
+    return [];
+  }
+};
+
+
+FunctionMirror.prototype.toText = function() {
+  return this.source();
+}
+
+
+/**
+ * Mirror object for unresolved functions.
+ * @param {string} value The name for the unresolved function reflected by this
+ *     mirror.
+ * @constructor
+ * @extends ObjectMirror
+ */
+function UnresolvedFunctionMirror(value) {
+  // Construct this using the ValueMirror as an unresolved function is not a
+  // real object but just a string.
+  ValueMirror.call(this, FUNCTION_TYPE, value);
+  this.propertyCount_ = 0;
+  this.elementCount_ = 0;
+  this.resolved_ = false;
+}
+inherits(UnresolvedFunctionMirror, FunctionMirror);
+
+
+UnresolvedFunctionMirror.prototype.className = function() {
+  return 'Function';
+};
+
+
+UnresolvedFunctionMirror.prototype.constructorFunction = function() {
+  return GetUndefinedMirror();
+};
+
+
+UnresolvedFunctionMirror.prototype.prototypeObject = function() {
+  return GetUndefinedMirror();
+};
+
+
+UnresolvedFunctionMirror.prototype.protoObject = function() {
+  return GetUndefinedMirror();
+};
+
+
+UnresolvedFunctionMirror.prototype.name = function() {
+  return this.value_;
+};
+
+
+UnresolvedFunctionMirror.prototype.inferredName = function() {
+  return undefined;
+};
+
+
+UnresolvedFunctionMirror.prototype.propertyNames = function(kind, limit) {
+  return [];
+}
+
+
+/**
+ * Mirror object for arrays.
+ * @param {Array} value The Array object reflected by this mirror
+ * @constructor
+ * @extends ObjectMirror
+ */
+function ArrayMirror(value) {
+  ObjectMirror.call(this, value);
+}
+inherits(ArrayMirror, ObjectMirror);
+
+
+ArrayMirror.prototype.length = function() {
+  return this.value_.length;
+};
+
+
+ArrayMirror.prototype.indexedPropertiesFromRange = function(opt_from_index, opt_to_index) {
+  var from_index = opt_from_index || 0;
+  var to_index = opt_to_index || this.length() - 1;
+  if (from_index > to_index) return new Array();
+  var values = new Array(to_index - from_index + 1);
+  for (var i = from_index; i <= to_index; i++) {
+    var details = %DebugGetPropertyDetails(this.value_, %ToString(i));
+    var value;
+    if (details) {
+      value = new PropertyMirror(this, i, details);
+    } else {
+      value = GetUndefinedMirror();
+    }
+    values[i - from_index] = value;
+  }
+  return values;
+}
+
+
+/**
+ * Mirror object for dates.
+ * @param {Date} value The Date object reflected by this mirror
+ * @constructor
+ * @extends ObjectMirror
+ */
+function DateMirror(value) {
+  ObjectMirror.call(this, value);
+}
+inherits(DateMirror, ObjectMirror);
+
+
+DateMirror.prototype.toText = function() {
+  var s = JSON.stringify(this.value_);
+  return s.substring(1, s.length - 1);  // cut quotes
+}
+
+
+/**
+ * Mirror object for regular expressions.
+ * @param {RegExp} value The RegExp object reflected by this mirror
+ * @constructor
+ * @extends ObjectMirror
+ */
+function RegExpMirror(value) {
+  ObjectMirror.call(this, value, REGEXP_TYPE);
+}
+inherits(RegExpMirror, ObjectMirror);
+
+
+/**
+ * Returns the source to the regular expression.
+ * @return {string or undefined} The source to the regular expression
+ */
+RegExpMirror.prototype.source = function() {
+  return this.value_.source;
+};
+
+
+/**
+ * Returns whether this regular expression has the global (g) flag set.
+ * @return {boolean} Value of the global flag
+ */
+RegExpMirror.prototype.global = function() {
+  return this.value_.global;
+};
+
+
+/**
+ * Returns whether this regular expression has the ignore case (i) flag set.
+ * @return {boolean} Value of the ignore case flag
+ */
+RegExpMirror.prototype.ignoreCase = function() {
+  return this.value_.ignoreCase;
+};
+
+
+/**
+ * Returns whether this regular expression has the multiline (m) flag set.
+ * @return {boolean} Value of the multiline flag
+ */
+RegExpMirror.prototype.multiline = function() {
+  return this.value_.multiline;
+};
+
+
+RegExpMirror.prototype.toText = function() {
+  // Format as /source/; flags are not included.
+  return "/" + this.source() + "/";
+}
+
+
+/**
+ * Mirror object for error objects.
+ * @param {Error} value The error object reflected by this mirror
+ * @constructor
+ * @extends ObjectMirror
+ */
+function ErrorMirror(value) {
+  ObjectMirror.call(this, value, ERROR_TYPE);
+}
+inherits(ErrorMirror, ObjectMirror);
+
+
+/**
+ * Returns the message for this error object.
+ * @return {string or undefined} The message for this error object
+ */
+ErrorMirror.prototype.message = function() {
+  return this.value_.message;
+};
+
+
+ErrorMirror.prototype.toText = function() {
+  // Use the same text representation as in messages.js.
+  var str;
+  try {
+    str = builtins.ToDetailString(this.value_);
+  } catch (e) {
+    str = '#<an Error>';
+  }
+  return str;
+}
+
+
+/**
+ * Base mirror object for properties.
+ * @param {ObjectMirror} mirror The mirror object having this property
+ * @param {string} name The name of the property
+ * @param {Array} details Details about the property
+ * @constructor
+ * @extends Mirror
+ */
+function PropertyMirror(mirror, name, details) {
+  Mirror.call(this, PROPERTY_TYPE);
+  this.mirror_ = mirror;
+  this.name_ = name;
+  this.value_ = details[0];
+  this.details_ = details[1];
+  if (details.length > 2) {
+    this.exception_ = details[2]
+    this.getter_ = details[3];
+    this.setter_ = details[4];
+  }
+}
+inherits(PropertyMirror, Mirror);
+
+
+PropertyMirror.prototype.isReadOnly = function() {
+  return (this.attributes() & PropertyAttribute.ReadOnly) != 0;
+}
+
+
+PropertyMirror.prototype.isEnum = function() {
+  return (this.attributes() & PropertyAttribute.DontEnum) == 0;
+}
+
+
+PropertyMirror.prototype.canDelete = function() {
+  return (this.attributes() & PropertyAttribute.DontDelete) == 0;
+}
+
+
+PropertyMirror.prototype.name = function() {
+  return this.name_;
+}
+
+
+PropertyMirror.prototype.isIndexed = function() {
+  for (var i = 0; i < this.name_.length; i++) {
+    if (this.name_[i] < '0' || '9' < this.name_[i]) {
+      return false;
+    }
+  }
+  return true;
+}
+
+
+PropertyMirror.prototype.value = function() {
+  return MakeMirror(this.value_, false);
+}
+
+
+/**
+ * Returns whether this property value is an exception.
+ * @return {boolean} True if this property value is an exception
+ */
+PropertyMirror.prototype.isException = function() {
+  return this.exception_ ? true : false;
+}
+
+
+PropertyMirror.prototype.attributes = function() {
+  return %DebugPropertyAttributesFromDetails(this.details_);
+}
+
+
+PropertyMirror.prototype.propertyType = function() {
+  return %DebugPropertyTypeFromDetails(this.details_);
+}
+
+
+PropertyMirror.prototype.insertionIndex = function() {
+  return %DebugPropertyIndexFromDetails(this.details_);
+}
+
+
+/**
+ * Returns whether this property has a getter defined through __defineGetter__.
+ * @return {boolean} True if this property has a getter
+ */
+PropertyMirror.prototype.hasGetter = function() {
+  return this.getter_ ? true : false;
+}
+
+
+/**
+ * Returns whether this property has a setter defined through __defineSetter__.
+ * @return {boolean} True if this property has a setter
+ */
+PropertyMirror.prototype.hasSetter = function() {
+  return this.setter_ ? true : false;
+}
+
+
+/**
+ * Returns the getter for this property defined through __defineGetter__.
+ * @return {Mirror} FunctionMirror reflecting the getter function or
+ *     UndefinedMirror if there is no getter for this property
+ */
+PropertyMirror.prototype.getter = function() {
+  if (this.hasGetter()) {
+    return MakeMirror(this.getter_);
+  } else {
+    return GetUndefinedMirror();
+  }
+}
+
+
+/**
+ * Returns the setter for this property defined through __defineSetter__.
+ * @return {Mirror} FunctionMirror reflecting the setter function or
+ *     UndefinedMirror if there is no setter for this property
+ */
+PropertyMirror.prototype.setter = function() {
+  if (this.hasSetter()) {
+    return MakeMirror(this.setter_);
+  } else {
+    return GetUndefinedMirror();
+  }
+}
+
+
+/**
+ * Returns whether this property is natively implemented by the host or
+ * defined through JavaScript code.
+ * @return {boolean} True if the property is natively implemented
+ */
+PropertyMirror.prototype.isNative = function() {
+  return (this.propertyType() == PropertyType.Interceptor) ||
+         ((this.propertyType() == PropertyType.Callbacks) &&
+          !this.hasGetter() && !this.hasSetter());
+}
+
+
+const kFrameDetailsFrameIdIndex = 0;
+const kFrameDetailsReceiverIndex = 1;
+const kFrameDetailsFunctionIndex = 2;
+const kFrameDetailsArgumentCountIndex = 3;
+const kFrameDetailsLocalCountIndex = 4;
+const kFrameDetailsSourcePositionIndex = 5;
+const kFrameDetailsConstructCallIndex = 6;
+const kFrameDetailsDebuggerFrameIndex = 7;
+const kFrameDetailsFirstDynamicIndex = 8;
+
+const kFrameDetailsNameIndex = 0;
+const kFrameDetailsValueIndex = 1;
+const kFrameDetailsNameValueSize = 2;
+
+/**
+ * Wrapper for the frame details information retrieved from the VM. The frame
+ * details from the VM are an array with the following content. See runtime.cc
+ * Runtime_GetFrameDetails.
+ *     0: Id
+ *     1: Receiver
+ *     2: Function
+ *     3: Argument count
+ *     4: Local count
+ *     5: Source position
+ *     6: Construct call
+ *     Arguments name, value
+ *     Locals name, value
+ * @param {number} break_id Current break id
+ * @param {number} index Frame number
+ * @constructor
+ */
+function FrameDetails(break_id, index) {
+  this.break_id_ = break_id;
+  this.details_ = %GetFrameDetails(break_id, index);
+}
+
+
+FrameDetails.prototype.frameId = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kFrameDetailsFrameIdIndex];
+}
+
+
+FrameDetails.prototype.receiver = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kFrameDetailsReceiverIndex];
+}
+
+
+FrameDetails.prototype.func = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kFrameDetailsFunctionIndex];
+}
+
+
+FrameDetails.prototype.isConstructCall = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kFrameDetailsConstructCallIndex];
+}
+
+
+FrameDetails.prototype.isDebuggerFrame = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kFrameDetailsDebuggerFrameIndex];
+}
+
+
+FrameDetails.prototype.argumentCount = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kFrameDetailsArgumentCountIndex];
+}
+
+
+FrameDetails.prototype.argumentName = function(index) {
+  %CheckExecutionState(this.break_id_);
+  if (index >= 0 && index < this.argumentCount()) {
+    return this.details_[kFrameDetailsFirstDynamicIndex +
+                         index * kFrameDetailsNameValueSize +
+                         kFrameDetailsNameIndex]
+  }
+}
+
+
+FrameDetails.prototype.argumentValue = function(index) {
+  %CheckExecutionState(this.break_id_);
+  if (index >= 0 && index < this.argumentCount()) {
+    return this.details_[kFrameDetailsFirstDynamicIndex +
+                         index * kFrameDetailsNameValueSize +
+                         kFrameDetailsValueIndex]
+  }
+}
+
+
+FrameDetails.prototype.localCount = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kFrameDetailsLocalCountIndex];
+}
+
+
+FrameDetails.prototype.sourcePosition = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kFrameDetailsSourcePositionIndex];
+}
+
+
+FrameDetails.prototype.localName = function(index) {
+  %CheckExecutionState(this.break_id_);
+  if (index >= 0 && index < this.localCount()) {
+    var locals_offset = kFrameDetailsFirstDynamicIndex +
+                        this.argumentCount() * kFrameDetailsNameValueSize;
+    return this.details_[locals_offset +
+                         index * kFrameDetailsNameValueSize +
+                         kFrameDetailsNameIndex]
+  }
+}
+
+
+FrameDetails.prototype.localValue = function(index) {
+  %CheckExecutionState(this.break_id_);
+  if (index >= 0 && index < this.localCount()) {
+    var locals_offset = kFrameDetailsFirstDynamicIndex +
+                        this.argumentCount() * kFrameDetailsNameValueSize;
+    return this.details_[locals_offset +
+                         index * kFrameDetailsNameValueSize +
+                         kFrameDetailsValueIndex]
+  }
+}
+
+
+FrameDetails.prototype.scopeCount = function() {
+  return %GetScopeCount(this.break_id_, this.frameId());
+}
+
+
+/**
+ * Mirror object for stack frames.
+ * @param {number} break_id The break id in the VM for which this frame is
+ *     valid
+ * @param {number} index The frame index (top frame is index 0)
+ * @constructor
+ * @extends Mirror
+ */
+function FrameMirror(break_id, index) {
+  Mirror.call(this, FRAME_TYPE);
+  this.break_id_ = break_id;
+  this.index_ = index;
+  this.details_ = new FrameDetails(break_id, index);
+}
+inherits(FrameMirror, Mirror);
+
+
+FrameMirror.prototype.index = function() {
+  return this.index_;
+};
+
+
+FrameMirror.prototype.func = function() {
+  // Get the function for this frame from the VM.
+  var f = this.details_.func();
+  
+  // Create a function mirror. NOTE: MakeMirror cannot be used here as the
+  // value returned from the VM might be a string if the function for the
+  // frame is unresolved.
+  if (IS_FUNCTION(f)) {
+    return MakeMirror(f);
+  } else {
+    return new UnresolvedFunctionMirror(f);
+  }
+};
+
+
+FrameMirror.prototype.receiver = function() {
+  return MakeMirror(this.details_.receiver());
+};
+
+
+FrameMirror.prototype.isConstructCall = function() {
+  return this.details_.isConstructCall();
+};
+
+
+FrameMirror.prototype.isDebuggerFrame = function() {
+  return this.details_.isDebuggerFrame();
+};
+
+
+FrameMirror.prototype.argumentCount = function() {
+  return this.details_.argumentCount();
+};
+
+
+FrameMirror.prototype.argumentName = function(index) {
+  return this.details_.argumentName(index);
+};
+
+
+FrameMirror.prototype.argumentValue = function(index) {
+  return MakeMirror(this.details_.argumentValue(index));
+};
+
+
+FrameMirror.prototype.localCount = function() {
+  return this.details_.localCount();
+};
+
+
+FrameMirror.prototype.localName = function(index) {
+  return this.details_.localName(index);
+};
+
+
+FrameMirror.prototype.localValue = function(index) {
+  return MakeMirror(this.details_.localValue(index));
+};
+
+
+FrameMirror.prototype.sourcePosition = function() {
+  return this.details_.sourcePosition();
+};
+
+
+FrameMirror.prototype.sourceLocation = function() {
+  if (this.func().resolved() && this.func().script()) {
+    return this.func().script().locationFromPosition(this.sourcePosition(),
+                                                     true);
+  }
+};
+
+
+FrameMirror.prototype.sourceLine = function() {
+  if (this.func().resolved()) {
+    var location = this.sourceLocation();
+    if (location) {
+      return location.line;
+    }
+  }
+};
+
+
+FrameMirror.prototype.sourceColumn = function() {
+  if (this.func().resolved()) {
+    var location = this.sourceLocation();
+    if (location) {
+      return location.column;
+    }
+  }
+};
+
+
+FrameMirror.prototype.sourceLineText = function() {
+  if (this.func().resolved()) {
+    var location = this.sourceLocation();
+    if (location) {
+      return location.sourceText();
+    }
+  }
+};
+
+
+FrameMirror.prototype.scopeCount = function() {
+  return this.details_.scopeCount();
+};
+
+
+FrameMirror.prototype.scope = function(index) {
+  return new ScopeMirror(this, index);
+};
+
+
+FrameMirror.prototype.evaluate = function(source, disable_break) {
+  var result = %DebugEvaluate(this.break_id_, this.details_.frameId(),
+                              source, Boolean(disable_break));
+  return MakeMirror(result);
+};
+
+
+FrameMirror.prototype.invocationText = function() {
+  // Format frame invocation (receiver, function and arguments).
+  var result = '';
+  var func = this.func();
+  var receiver = this.receiver();
+  if (this.isConstructCall()) {
+    // For constructor frames display new followed by the function name.
+    result += 'new ';
+    result += func.name() ? func.name() : '[anonymous]';
+  } else if (this.isDebuggerFrame()) {
+    result += '[debugger]';
+  } else {
+    // If the receiver has a className which is 'global' don't display it.
+    var display_receiver = !receiver.className || receiver.className() != 'global';
+    if (display_receiver) {
+      result += receiver.toText();
+    }
+    // Try to find the function as a property in the receiver. Include the
+    // prototype chain in the lookup.
+    var property = GetUndefinedMirror();
+    if (!receiver.isUndefined()) {
+      for (var r = receiver; !r.isNull() && property.isUndefined(); r = r.protoObject()) {
+        property = r.lookupProperty(func);
+      }
+    }
+    if (!property.isUndefined()) {
+      // The function invoked was found on the receiver. Use the property name
+      // for the backtrace.
+      if (!property.isIndexed()) {
+        if (display_receiver) {
+          result += '.';
+        }
+        result += property.name();
+      } else {
+        result += '[';
+        result += property.name();
+        result += ']';
+      }
+      // Also known as - if the name in the function doesn't match the name
+      // under which it was looked up.
+      if (func.name() && func.name() != property.name()) {
+        result += '(aka ' + func.name() + ')';
+      }
+    } else {
+      // The function invoked was not found on the receiver. Use the function
+      // name if available for the backtrace.
+      if (display_receiver) {
+        result += '.';
+      }
+      result += func.name() ? func.name() : '[anonymous]';
+    }
+  }
+
+  // Render arguments for normal frames.
+  if (!this.isDebuggerFrame()) {
+    result += '(';
+    for (var i = 0; i < this.argumentCount(); i++) {
+      if (i != 0) result += ', ';
+      if (this.argumentName(i)) {
+        result += this.argumentName(i);
+        result += '=';
+      }
+      result += this.argumentValue(i).toText();
+    }
+    result += ')';
+  }
+
+  return result;
+}
+
+
+FrameMirror.prototype.sourceAndPositionText = function() {
+  // Format source and position.
+  var result = '';
+  var func = this.func();
+  if (func.resolved()) {
+    if (func.script()) {
+      if (func.script().name()) {
+        result += func.script().name();
+      } else {
+        result += '[unnamed]';
+      }
+      if (!this.isDebuggerFrame()) {
+        var location = this.sourceLocation();
+        result += ' line ';
+        result += !IS_UNDEFINED(location) ? (location.line + 1) : '?';
+        result += ' column ';
+        result += !IS_UNDEFINED(location) ? (location.column + 1) : '?';
+        if (!IS_UNDEFINED(this.sourcePosition())) {
+          result += ' (position ' + (this.sourcePosition() + 1) + ')';
+        }
+      }
+    } else {
+      result += '[no source]';
+    }
+  } else {
+    result += '[unresolved]';
+  }
+
+  return result;
+}
+
+
+FrameMirror.prototype.localsText = function() {
+  // Format local variables.
+  var result = '';
+  var locals_count = this.localCount()
+  if (locals_count > 0) {
+    for (var i = 0; i < locals_count; ++i) {
+      result += '      var ';
+      result += this.localName(i);
+      result += ' = ';
+      result += this.localValue(i).toText();
+      if (i < locals_count - 1) result += '\n';
+    }
+  }
+
+  return result;
+}
+
+
+FrameMirror.prototype.toText = function(opt_locals) {
+  var result = '';
+  result += '#' + (this.index() <= 9 ? '0' : '') + this.index();
+  result += ' ';
+  result += this.invocationText();
+  result += ' ';
+  result += this.sourceAndPositionText();
+  if (opt_locals) {
+    result += '\n';
+    result += this.localsText();
+  }
+  return result;
+}
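+
+
+// Usage sketch: formatting a frame for a textual backtrace while execution is
+// broken (frame is assumed to be a FrameMirror for the current break_id), e.g.
+//
+//   frame.invocationText();         // e.g. 'o.method(x=1)'
+//   frame.sourceAndPositionText();  // e.g. 'my_script.js line 10 column 3 (position 42)'
+//   frame.toText(true);             // '#00 ...' including the locals listing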
+
+
+const kScopeDetailsTypeIndex = 0;
+const kScopeDetailsObjectIndex = 1;
+
+function ScopeDetails(frame, index) {
+  this.break_id_ = frame.break_id_;
+  this.details_ = %GetScopeDetails(frame.break_id_,
+                                   frame.details_.frameId(),
+                                   index);
+}
+
+
+ScopeDetails.prototype.type = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kScopeDetailsTypeIndex];
+}
+
+
+ScopeDetails.prototype.object = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kScopeDetailsObjectIndex];
+}
+
+
+/**
+ * Mirror object for scope.
+ * @param {FrameMirror} frame The frame this scope is a part of
+ * @param {number} index The scope index in the frame
+ * @constructor
+ * @extends Mirror
+ */
+function ScopeMirror(frame, index) {
+  Mirror.call(this, SCOPE_TYPE);
+  this.frame_index_ = frame.index_;
+  this.scope_index_ = index;
+  this.details_ = new ScopeDetails(frame, index);
+}
+inherits(ScopeMirror, Mirror);
+
+
+ScopeMirror.prototype.frameIndex = function() {
+  return this.frame_index_;
+};
+
+
+ScopeMirror.prototype.scopeIndex = function() {
+  return this.scope_index_;
+};
+
+
+ScopeMirror.prototype.scopeType = function() {
+  return this.details_.type();
+};
+
+
+ScopeMirror.prototype.scopeObject = function() {
+  // For local and closure scopes create a transient mirror, as these objects
+  // are created on the fly by materializing the local or closure scope and
+  // therefore do not preserve identity.
+  var transient = this.scopeType() == ScopeType.Local ||
+                  this.scopeType() == ScopeType.Closure;
+  return MakeMirror(this.details_.object(), transient);
+};
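+
+
+// Usage sketch: walking the scope chain of a frame (frame is assumed to be a
+// FrameMirror), from the innermost scope outwards, e.g.
+//
+//   for (var i = 0; i < frame.scopeCount(); i++) {
+//     var scope = frame.scope(i);
+//     scope.scopeType();                  // one of the ScopeType.* values
+//     scope.scopeObject().properties();   // the variables visible in that scope
+//   }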
+
+
+/**
+ * Mirror object for script source.
+ * @param {Script} script The script object
+ * @constructor
+ * @extends Mirror
+ */
+function ScriptMirror(script) {
+  Mirror.call(this, SCRIPT_TYPE);
+  this.script_ = script;
+  this.context_ = new ContextMirror(script.context_data);
+  this.allocateHandle_();
+}
+inherits(ScriptMirror, Mirror);
+
+
+ScriptMirror.prototype.value = function() {
+  return this.script_;
+};
+
+
+ScriptMirror.prototype.name = function() {
+  return this.script_.name;
+};
+
+
+ScriptMirror.prototype.id = function() {
+  return this.script_.id;
+};
+
+
+ScriptMirror.prototype.source = function() {
+  return this.script_.source;
+};
+
+
+ScriptMirror.prototype.lineOffset = function() {
+  return this.script_.line_offset;
+};
+
+
+ScriptMirror.prototype.columnOffset = function() {
+  return this.script_.column_offset;
+};
+
+
+ScriptMirror.prototype.data = function() {
+  return this.script_.data;
+};
+
+
+ScriptMirror.prototype.scriptType = function() {
+  return this.script_.type;
+};
+
+
+ScriptMirror.prototype.compilationType = function() {
+  return this.script_.compilation_type;
+};
+
+
+ScriptMirror.prototype.lineCount = function() {
+  return this.script_.lineCount();
+};
+
+
+ScriptMirror.prototype.locationFromPosition = function(
+    position, include_resource_offset) {
+  return this.script_.locationFromPosition(position, include_resource_offset);
+}
+
+
+ScriptMirror.prototype.sourceSlice = function (opt_from_line, opt_to_line) {
+  return this.script_.sourceSlice(opt_from_line, opt_to_line);
+}
+
+
+ScriptMirror.prototype.context = function() {
+  return this.context_;
+};
+
+
+ScriptMirror.prototype.evalFromFunction = function() {
+  return MakeMirror(this.script_.eval_from_function);
+};
+
+
+ScriptMirror.prototype.evalFromLocation = function() {
+  var eval_from_function = this.evalFromFunction();
+  if (!eval_from_function.isUndefined()) {
+    var position = this.script_.eval_from_position;
+    return eval_from_function.script().locationFromPosition(position, true);
+  }
+};
+
+
+ScriptMirror.prototype.toText = function() {
+  var result = '';
+  result += this.name();
+  result += ' (lines: ';
+  if (this.lineOffset() > 0) {
+    result += this.lineOffset();
+    result += '-';
+    result += this.lineOffset() + this.lineCount() - 1;
+  } else {
+    result += this.lineCount();
+  }
+  result += ')';
+  return result;
+}
+
+
+/**
+ * Mirror object for context.
+ * @param {Object} data The context data
+ * @constructor
+ * @extends Mirror
+ */
+function ContextMirror(data) {
+  Mirror.call(this, CONTEXT_TYPE);
+  this.data_ = data;
+  this.allocateHandle_();
+}
+inherits(ContextMirror, Mirror);
+
+
+ContextMirror.prototype.data = function() {
+  return this.data_;
+};
+
+
+/**
+ * Returns a mirror serializer
+ *
+ * @param {boolean} details Set to true to include details
+ * @param {Object} options Options controlling the serialization
+ *     The following options can be set:
+ *       includeSource: include the full source of scripts
+ * @returns {MirrorSerializer} mirror serializer
+ */
+function MakeMirrorSerializer(details, options) {
+  return new JSONProtocolSerializer(details, options);
+}
+
+
+/**
+ * Object for serializing a mirror object and its direct references.
+ * @param {boolean} details Indicates whether to include details for the mirror
+ *     serialized
+ * @constructor
+ */
+function JSONProtocolSerializer(details, options) {
+  this.details_ = details;
+  this.options_ = options;
+  this.mirrors_ = [ ];
+}
+
+
+/**
+ * Returns a serialization of an object reference. The referenced object is
+ * added to the serialization state.
+ *
+ * @param {Mirror} mirror The mirror to serialize
+ * @returns {String} JSON serialization
+ */
+JSONProtocolSerializer.prototype.serializeReference = function(mirror) {
+  return this.serialize_(mirror, true, true);
+}
+
+
+/**
+ * Returns a serialization of an object value. The referenced objects are
+ * added to the serialization state.
+ *
+ * @param {Mirror} mirror The mirror to serialize
+ * @returns {String} JSON serialization
+ */
+JSONProtocolSerializer.prototype.serializeValue = function(mirror) {
+  var json = this.serialize_(mirror, false, true);
+  return json;
+}
+
+
+/**
+ * Returns a serialization of all the objects referenced.
+ *
+ * @returns {Array.<Object>} Array of the referenced objects converted to
+ *     protocol objects.
+ */
+JSONProtocolSerializer.prototype.serializeReferencedObjects = function() {
+  // Collect the protocol representation of the referenced objects in an array.
+  var content = [];
+  
+  // Get the number of referenced objects.
+  var count = this.mirrors_.length;
+  
+  for (var i = 0; i < count; i++) {
+    content.push(this.serialize_(this.mirrors_[i], false, false));
+  }
+
+  return content;
+}
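+
+
+// Usage sketch: serialize a value and then the objects it references, roughly
+// how a protocol response pairs a 'body' with its 'refs', e.g.
+//
+//   var serializer = MakeMirrorSerializer(true, { includeSource: false });
+//   var body = serializer.serializeValue(MakeMirror({x: 42}));
+//   var refs = serializer.serializeReferencedObjects();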
+
+
+JSONProtocolSerializer.prototype.includeSource_ = function() {
+  return this.options_ && this.options_.includeSource;
+}
+
+
+JSONProtocolSerializer.prototype.inlineRefs_ = function() {
+  return this.options_ && this.options_.inlineRefs;
+}
+
+
+JSONProtocolSerializer.prototype.add_ = function(mirror) {
+  // If this mirror is already in the list just return.
+  for (var i = 0; i < this.mirrors_.length; i++) {
+    if (this.mirrors_[i] === mirror) {
+      return;
+    }
+  }
+  
+  // Add the mirror to the list of mirrors to be serialized.
+  this.mirrors_.push(mirror);
+}
+
+
+/**
+ * Formats mirror object to protocol reference object with some data that can
+ * be used to display the value in debugger.
+ * @param {Mirror} mirror Mirror to serialize.
+ * @return {Object} Protocol reference object.
+ */
+JSONProtocolSerializer.prototype.serializeReferenceWithDisplayData_ = 
+    function(mirror) {
+  var o = {};
+  o.ref = mirror.handle();
+  o.type = mirror.type();
+  switch (mirror.type()) {
+    case UNDEFINED_TYPE:
+    case NULL_TYPE:
+    case BOOLEAN_TYPE:
+    case NUMBER_TYPE:
+      o.value = mirror.value();
+      break;
+    case STRING_TYPE:
+      // Limit string length.
+      o.value = mirror.toText();
+      break;
+    case FUNCTION_TYPE:
+      o.name = mirror.name();
+      o.inferredName = mirror.inferredName();
+      if (mirror.script()) {
+        o.scriptId = mirror.script().id();
+      }
+      break;
+    case ERROR_TYPE:
+    case REGEXP_TYPE:
+      o.value = mirror.toText();
+      break;
+    case OBJECT_TYPE:
+      o.className = mirror.className();
+      break;
+  }
+  return o;
+};
+
+
+JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
+                                                       details) {
+  // If serializing a reference to a mirror just return the reference and add
+  // the mirror to the referenced mirrors.
+  if (reference &&
+      (mirror.isValue() || mirror.isScript() || mirror.isContext())) {
+    if (this.inlineRefs_() && mirror.isValue()) {
+      return this.serializeReferenceWithDisplayData_(mirror);
+    } else {
+      this.add_(mirror);
+      return {'ref' : mirror.handle()};
+    }
+  }
+  
+  // Collect the JSON property/value pairs.
+  var content = {};
+
+  // Add the mirror handle.
+  if (mirror.isValue() || mirror.isScript() || mirror.isContext()) {
+    content.handle = mirror.handle();
+  }
+
+  // Always add the type.
+  content.type = mirror.type();
+
+  switch (mirror.type()) {
+    case UNDEFINED_TYPE:
+    case NULL_TYPE:
+      // Undefined and null are represented just by their type.
+      break;
+
+    case BOOLEAN_TYPE:
+      // Boolean values are simply represented by their value.
+      content.value = mirror.value();
+      break;
+
+    case NUMBER_TYPE:
+      // Number values are simply represented by their value.
+      content.value = NumberToJSON_(mirror.value());
+      break;
+
+    case STRING_TYPE:
+      // String values might have their value cropped to keep down size.
+      if (mirror.length() > kMaxProtocolStringLength) {
+        var str = mirror.value().substring(0, kMaxProtocolStringLength);
+        content.value = str;
+        content.fromIndex = 0;
+        content.toIndex = kMaxProtocolStringLength;
+      } else {
+        content.value = mirror.value();
+      }
+      content.length = mirror.length();
+      break;
+
+    case OBJECT_TYPE:
+    case FUNCTION_TYPE:
+    case ERROR_TYPE:
+    case REGEXP_TYPE:
+      // Add object representation.
+      this.serializeObject_(mirror, content, details);
+      break;
+
+    case PROPERTY_TYPE:
+      throw new Error('PropertyMirror cannot be serialized independently');
+      break;
+
+    case FRAME_TYPE:
+      // Add object representation.
+      this.serializeFrame_(mirror, content);
+      break;
+
+    case SCOPE_TYPE:
+      // Add object representation.
+      this.serializeScope_(mirror, content);
+      break;
+
+    case SCRIPT_TYPE:
+      // Script is represented by id, name and source attributes.
+      if (mirror.name()) {
+        content.name = mirror.name();
+      }
+      content.id = mirror.id();
+      content.lineOffset = mirror.lineOffset();
+      content.columnOffset = mirror.columnOffset();
+      content.lineCount = mirror.lineCount();
+      if (mirror.data()) {
+        content.data = mirror.data();
+      }
+      if (this.includeSource_()) {
+        content.source = mirror.source();
+      } else {
+        var sourceStart = mirror.source().substring(0, 80);
+        content.sourceStart = sourceStart;
+      }
+      content.sourceLength = mirror.source().length;
+      content.scriptType = mirror.scriptType();
+      content.compilationType = mirror.compilationType();
+      // For compilation type eval emit information on the script from which
+      // eval was called if a script is present.
+      if (mirror.compilationType() == 1 &&
+          mirror.evalFromFunction().script()) {
+        content.evalFromScript =
+            this.serializeReference(mirror.evalFromFunction().script());
+        var evalFromLocation = mirror.evalFromLocation();
+        content.evalFromLocation = { line: evalFromLocation.line,
+                                     column: evalFromLocation.column };
+      }
+      if (mirror.context()) {
+        content.context = this.serializeReference(mirror.context());
+      }
+      break;
+
+    case CONTEXT_TYPE:
+      content.data = mirror.data();
+      break;
+  }
+
+  // Always add the text representation.
+  content.text = mirror.toText();
+  
+  // Return the composed protocol object.
+  return content;
+}
+
+
+/**
+ * Serialize object information to the following JSON format.
+ *
+ *   {"className":"<class name>",
+ *    "constructorFunction":{"ref":<number>},
+ *    "protoObject":{"ref":<number>},
+ *    "prototypeObject":{"ref":<number>},
+ *    "namedInterceptor":<boolean>,
+ *    "indexedInterceptor":<boolean>,
+ *    "properties":[<properties>]}
+ */
+JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content,
+                                                             details) {
+  // Add general object properties.
+  content.className = mirror.className();
+  content.constructorFunction =
+      this.serializeReference(mirror.constructorFunction());
+  content.protoObject = this.serializeReference(mirror.protoObject());
+  content.prototypeObject = this.serializeReference(mirror.prototypeObject());
+
+  // Add flags to indicate whether there are interceptors.
+  if (mirror.hasNamedInterceptor()) {
+    content.namedInterceptor = true;
+  }
+  if (mirror.hasIndexedInterceptor()) {
+    content.indexedInterceptor = true;
+  }
+  
+  // Add function specific properties.
+  if (mirror.isFunction()) {
+    // Add function specific properties.
+    content.name = mirror.name();
+    if (!IS_UNDEFINED(mirror.inferredName())) {
+      content.inferredName = mirror.inferredName();
+    }
+    content.resolved = mirror.resolved();
+    if (mirror.resolved()) {
+      content.source = mirror.source();
+    }
+    if (mirror.script()) {
+      content.script = this.serializeReference(mirror.script());
+    }
+  }
+
+  // Add date specific properties.
+  if (mirror.isDate()) {
+    // Add date specific properties.
+    content.value = mirror.value();
+  }
+
+  // Add actual properties - named properties followed by indexed properties.
+  var propertyNames = mirror.propertyNames(PropertyKind.Named);
+  var propertyIndexes = mirror.propertyNames(PropertyKind.Indexed);
+  var p = new Array(propertyNames.length + propertyIndexes.length);
+  for (var i = 0; i < propertyNames.length; i++) {
+    var propertyMirror = mirror.property(propertyNames[i]);
+    p[i] = this.serializeProperty_(propertyMirror);
+    if (details) {
+      this.add_(propertyMirror.value());
+    }
+  }
+  for (var i = 0; i < propertyIndexes.length; i++) {
+    var propertyMirror = mirror.property(propertyIndexes[i]);
+    p[propertyNames.length + i] = this.serializeProperty_(propertyMirror);
+    if (details) {
+      this.add_(propertyMirror.value());
+    }
+  }
+  content.properties = p;
+}
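+
+// A rough illustration (handle numbers invented): for a plain object with a
+// single property 'hello', the content filled in above might end up as
+//
+//   { className: 'Object',
+//     constructorFunction: { ref: 2 },
+//     protoObject: { ref: 3 },
+//     prototypeObject: { ref: 4 },
+//     properties: [ { name: 'hello', ref: 5 } ] }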
+
+
+/**
+ * Serialize property information to the following JSON format for building the
+ * array of properties.
+ *
+ *   {"name":"<property name>",
+ *    "attributes":<number>,
+ *    "propertyType":<number>,
+ *    "ref":<number>}
+ *
+ * If the attribute for the property is PropertyAttribute.None it is not added.
+ * If the propertyType for the property is PropertyType.Normal it is not added.
+ * Here are a couple of examples.
+ *
+ *   {"name":"hello","ref":1}
+ *   {"name":"length","attributes":7,"propertyType":3,"ref":2}
+ *
+ * @param {PropertyMirror} propertyMirror The property to serialize.
+ * @returns {Object} Protocol object representing the property.
+ */
+JSONProtocolSerializer.prototype.serializeProperty_ = function(propertyMirror) {
+  var result = {};
+  
+  result.name = propertyMirror.name();
+  var propertyValue = propertyMirror.value();
+  if (this.inlineRefs_() && propertyValue.isValue()) {
+    result.value = this.serializeReferenceWithDisplayData_(propertyValue);
+  } else {
+    if (propertyMirror.attributes() != PropertyAttribute.None) {
+      result.attributes = propertyMirror.attributes();
+    }
+    if (propertyMirror.propertyType() != PropertyType.Normal) {
+      result.propertyType = propertyMirror.propertyType();
+    }
+    result.ref = propertyValue.handle();
+  }
+  return result;
+}
+
+
+JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
+  content.index = mirror.index();
+  content.receiver = this.serializeReference(mirror.receiver());
+  var func = mirror.func();
+  content.func = this.serializeReference(func);
+  if (func.script()) {
+    content.script = this.serializeReference(func.script());
+  }
+  content.constructCall = mirror.isConstructCall();
+  content.debuggerFrame = mirror.isDebuggerFrame();
+  var x = new Array(mirror.argumentCount());
+  for (var i = 0; i < mirror.argumentCount(); i++) {
+    var arg = {};
+    var argument_name = mirror.argumentName(i);
+    if (argument_name) {
+      arg.name = argument_name;
+    }
+    arg.value = this.serializeReference(mirror.argumentValue(i));
+    x[i] = arg;
+  }
+  content.arguments = x;
+  var x = new Array(mirror.localCount());
+  for (var i = 0; i < mirror.localCount(); i++) {
+    var local = {};
+    local.name = mirror.localName(i);
+    local.value = this.serializeReference(mirror.localValue(i));
+    x[i] = local;
+  }
+  content.locals = x;
+  content.position = mirror.sourcePosition();
+  var line = mirror.sourceLine();
+  if (!IS_UNDEFINED(line)) {
+    content.line = line;
+  }
+  var column = mirror.sourceColumn();
+  if (!IS_UNDEFINED(column)) {
+    content.column = column;
+  }
+  var source_line_text = mirror.sourceLineText();
+  if (!IS_UNDEFINED(source_line_text)) {
+    content.sourceLineText = source_line_text;
+  }
+  
+  content.scopes = [];
+  for (var i = 0; i < mirror.scopeCount(); i++) {
+    var scope = mirror.scope(i);
+    content.scopes.push({
+      type: scope.scopeType(),
+      index: i
+    });
+  }
+}
+
+
+JSONProtocolSerializer.prototype.serializeScope_ = function(mirror, content) {
+  content.index = mirror.scopeIndex();
+  content.frameIndex = mirror.frameIndex();
+  content.type = mirror.scopeType();
+  content.object = this.inlineRefs_() ?
+                   this.serializeValue(mirror.scopeObject()) :
+                   this.serializeReference(mirror.scopeObject());
+}
+
+
+/**
+ * Convert a number to a protocol value. For all finite numbers the number
+ * itself is returned. For the non-finite numbers NaN, Infinity and
+ * -Infinity the string representation "NaN", "Infinity" or "-Infinity"
+ * (not including the quotes) is returned.
+ *
+ * @param {number} value The number value to convert to a protocol value.
+ * @returns {number|string} Protocol value.
+ */
+function NumberToJSON_(value) {
+  if (isNaN(value)) {
+    return 'NaN';
+  }
+  if (!isFinite(value)) {
+    if (value > 0) {
+      return 'Infinity';
+    } else {
+      return '-Infinity';
+    }
+  }
+  return value; 
+}
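+
+// A few illustrative results of the conversion above:
+//
+//   NumberToJSON_(1.5)   -> 1.5
+//   NumberToJSON_(NaN)   -> 'NaN'
+//   NumberToJSON_(1/0)   -> 'Infinity'
+//   NumberToJSON_(-1/0)  -> '-Infinity'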
diff --git a/src/mksnapshot.cc b/src/mksnapshot.cc
new file mode 100644
index 0000000..80789eb
--- /dev/null
+++ b/src/mksnapshot.cc
@@ -0,0 +1,186 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <signal.h>
+#include <string>
+#include <map>
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "natives.h"
+#include "platform.h"
+#include "serialize.h"
+
+// Use an explicit namespace alias to avoid clashing with types in namespace v8.
+namespace i = v8::internal;
+using namespace v8;
+
+static const unsigned int kMaxCounters = 256;
+
+// A single counter in a counter collection.
+class Counter {
+ public:
+  static const int kMaxNameSize = 64;
+  int32_t* Bind(const char* name) {
+    int i;
+    for (i = 0; i < kMaxNameSize - 1 && name[i]; i++) {
+      name_[i] = name[i];
+    }
+    name_[i] = '\0';
+    return &counter_;
+  }
+ private:
+  int32_t counter_;
+  uint8_t name_[kMaxNameSize];
+};
+
+
+// A set of counters and associated information.  An instance of this
+// class is stored directly in the memory-mapped counters file if
+// the --save-counters option is used.
+class CounterCollection {
+ public:
+  CounterCollection() {
+    magic_number_ = 0xDEADFACE;
+    max_counters_ = kMaxCounters;
+    max_name_size_ = Counter::kMaxNameSize;
+    counters_in_use_ = 0;
+  }
+  Counter* GetNextCounter() {
+    if (counters_in_use_ == kMaxCounters) return NULL;
+    return &counters_[counters_in_use_++];
+  }
+ private:
+  uint32_t magic_number_;
+  uint32_t max_counters_;
+  uint32_t max_name_size_;
+  uint32_t counters_in_use_;
+  Counter counters_[kMaxCounters];
+};
+
+
+// We statically allocate a set of local counters to be used if we
+// don't want to store the stats in a memory-mapped file.
+static CounterCollection local_counters;
+static CounterCollection* counters = &local_counters;
+
+
+typedef std::map<std::string, int*> CounterMap;
+typedef std::map<std::string, int*>::iterator CounterMapIterator;
+static CounterMap counter_table_;
+
+// Callback invoked when v8 has a counter to track.
+static int* counter_callback(const char* name) {
+  std::string counter = name;
+  // See if this counter name is already known.
+  if (counter_table_.find(counter) != counter_table_.end())
+    return counter_table_[counter];
+
+  Counter* ctr = counters->GetNextCounter();
+  if (ctr == NULL) return NULL;
+  int* ptr = ctr->Bind(name);
+  counter_table_[counter] = ptr;
+  return ptr;
+}
+
+
+// Write C++ code to the file given by filename that defines Snapshot::data_
+// to contain the snapshot bytes and Snapshot::size_ to hold their count.
+// Only the first size bytes are written.
+static int WriteInternalSnapshotToFile(const char* filename,
+                                       const v8::internal::byte* bytes,
+                                       int size) {
+  FILE* f = i::OS::FOpen(filename, "wb");
+  if (f == NULL) {
+    i::OS::PrintError("Cannot open file %s for writing.\n", filename);
+    return 0;
+  }
+  fprintf(f, "// Autogenerated snapshot file. Do not edit.\n\n");
+  fprintf(f, "#include \"v8.h\"\n");
+  fprintf(f, "#include \"platform.h\"\n\n");
+  fprintf(f, "#include \"snapshot.h\"\n\n");
+  fprintf(f, "namespace v8 {\nnamespace internal {\n\n");
+  fprintf(f, "const byte Snapshot::data_[] = {");
+  int written = 0;
+  written += fprintf(f, "0x%x", bytes[0]);
+  for (int i = 1; i < size; ++i) {
+    written += fprintf(f, ",0x%x", bytes[i]);
+    // The following is needed to keep the line length low on Visual C++:
+    if (i % 512 == 0) fprintf(f, "\n");
+  }
+  fprintf(f, "};\n\n");
+  fprintf(f, "int Snapshot::size_ = %d;\n\n", size);
+  fprintf(f, "} }  // namespace v8::internal\n");
+  fclose(f);
+  return written;
+}
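+
+// As a rough illustration (byte values and size invented), the generated file
+// looks roughly like:
+//
+//   // Autogenerated snapshot file. Do not edit.
+//   #include "v8.h"
+//   ...
+//   const byte Snapshot::data_[] = {0x12,0x34};
+//   int Snapshot::size_ = 2;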
+
+
+int main(int argc, char** argv) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // By default, log code create information in the snapshot.
+  i::FLAG_log_code = true;
+#endif
+  // Print the usage if an error occurs when parsing the command line
+  // flags or if the help flag is set.
+  int result = i::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
+  if (result > 0 || argc != 2 || i::FLAG_help) {
+    ::printf("Usage: %s [flag] ... outfile\n", argv[0]);
+    i::FlagList::PrintHelp();
+    return !i::FLAG_help;
+  }
+
+  v8::V8::SetCounterFunction(counter_callback);
+  v8::HandleScope scope;
+
+  const int kExtensionCount = 1;
+  const char* extension_list[kExtensionCount] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(kExtensionCount, extension_list);
+
+  i::Serializer::Enable();
+  v8::Context::New(&extensions);
+
+  // Make sure all builtin scripts are cached.
+  { HandleScope scope;
+    for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) {
+      i::Bootstrapper::NativesSourceLookup(i);
+    }
+  }
+  // Get rid of unreferenced scripts with a global GC.
+  i::Heap::CollectAllGarbage(false);
+  i::Serializer ser;
+  ser.Serialize();
+  v8::internal::byte* bytes;
+  int len;
+  ser.Finalize(&bytes, &len);
+
+  WriteInternalSnapshotToFile(argv[1], bytes, len);
+
+  i::DeleteArray(bytes);
+
+  return 0;
+}
diff --git a/src/natives.h b/src/natives.h
new file mode 100644
index 0000000..fdfd213
--- /dev/null
+++ b/src/natives.h
@@ -0,0 +1,63 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_NATIVES_H_
+#define V8_NATIVES_H_
+
+namespace v8 {
+namespace internal {
+
+typedef bool (*NativeSourceCallback)(Vector<const char> name,
+                                     Vector<const char> source,
+                                     int index);
+
+enum NativeType {
+  CORE, D8
+};
+
+template <NativeType type>
+class NativesCollection {
+ public:
+  // Number of built-in scripts.
+  static int GetBuiltinsCount();
+  // Number of delayed/lazy loading scripts.
+  static int GetDelayCount();
+
+  // These are used to access the built-in scripts.
+  // Delayed scripts have indexes in the interval [0, GetDelayCount()).
+  // Non-delayed scripts have indexes in the interval
+  // [GetDelayCount(), GetNativesCount()).
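+  // For example, if GetDelayCount() returned 2 and there were 5 native
+  // scripts in total, indexes 0-1 would be the delayed scripts and indexes
+  // 2-4 the non-delayed ones (the numbers are purely illustrative).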
+  static int GetIndex(const char* name);
+  static Vector<const char> GetScriptSource(int index);
+  static Vector<const char> GetScriptName(int index);
+};
+
+typedef NativesCollection<CORE> Natives;
+
+} }  // namespace v8::internal
+
+#endif  // V8_NATIVES_H_
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
new file mode 100644
index 0000000..288cc21
--- /dev/null
+++ b/src/objects-debug.cc
@@ -0,0 +1,1167 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "disassembler.h"
+#include "disasm.h"
+#include "macro-assembler.h"
+#include "jsregexp.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef DEBUG
+
+static const char* TypeToString(InstanceType type);
+
+
+void Object::Print() {
+  if (IsSmi()) {
+    Smi::cast(this)->SmiPrint();
+  } else if (IsFailure()) {
+    Failure::cast(this)->FailurePrint();
+  } else {
+    HeapObject::cast(this)->HeapObjectPrint();
+  }
+  Flush();
+}
+
+
+void Object::PrintLn() {
+  Print();
+  PrintF("\n");
+}
+
+
+void Object::Verify() {
+  if (IsSmi()) {
+    Smi::cast(this)->SmiVerify();
+  } else if (IsFailure()) {
+    Failure::cast(this)->FailureVerify();
+  } else {
+    HeapObject::cast(this)->HeapObjectVerify();
+  }
+}
+
+
+void Object::VerifyPointer(Object* p) {
+  if (p->IsHeapObject()) {
+    HeapObject::VerifyHeapPointer(p);
+  } else {
+    ASSERT(p->IsSmi());
+  }
+}
+
+
+void Smi::SmiVerify() {
+  ASSERT(IsSmi());
+}
+
+
+void Failure::FailureVerify() {
+  ASSERT(IsFailure());
+}
+
+
+void HeapObject::PrintHeader(const char* id) {
+  PrintF("%p: [%s]\n", this, id);
+}
+
+
+void HeapObject::HeapObjectPrint() {
+  InstanceType instance_type = map()->instance_type();
+
+  HandleScope scope;
+  if (instance_type < FIRST_NONSTRING_TYPE) {
+    String::cast(this)->StringPrint();
+    return;
+  }
+
+  switch (instance_type) {
+    case MAP_TYPE:
+      Map::cast(this)->MapPrint();
+      break;
+    case HEAP_NUMBER_TYPE:
+      HeapNumber::cast(this)->HeapNumberPrint();
+      break;
+    case FIXED_ARRAY_TYPE:
+      FixedArray::cast(this)->FixedArrayPrint();
+      break;
+    case BYTE_ARRAY_TYPE:
+      ByteArray::cast(this)->ByteArrayPrint();
+      break;
+    case PIXEL_ARRAY_TYPE:
+      PixelArray::cast(this)->PixelArrayPrint();
+      break;
+    case FILLER_TYPE:
+      PrintF("filler");
+      break;
+    case JS_OBJECT_TYPE:  // fall through
+    case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+    case JS_ARRAY_TYPE:
+    case JS_REGEXP_TYPE:
+      JSObject::cast(this)->JSObjectPrint();
+      break;
+    case ODDBALL_TYPE:
+      Oddball::cast(this)->to_string()->Print();
+      break;
+    case JS_FUNCTION_TYPE:
+      JSFunction::cast(this)->JSFunctionPrint();
+      break;
+    case JS_GLOBAL_PROXY_TYPE:
+      JSGlobalProxy::cast(this)->JSGlobalProxyPrint();
+      break;
+    case JS_GLOBAL_OBJECT_TYPE:
+      JSGlobalObject::cast(this)->JSGlobalObjectPrint();
+      break;
+    case JS_BUILTINS_OBJECT_TYPE:
+      JSBuiltinsObject::cast(this)->JSBuiltinsObjectPrint();
+      break;
+    case JS_VALUE_TYPE:
+      PrintF("Value wrapper around:");
+      JSValue::cast(this)->value()->Print();
+      break;
+    case CODE_TYPE:
+      Code::cast(this)->CodePrint();
+      break;
+    case PROXY_TYPE:
+      Proxy::cast(this)->ProxyPrint();
+      break;
+    case SHARED_FUNCTION_INFO_TYPE:
+      SharedFunctionInfo::cast(this)->SharedFunctionInfoPrint();
+      break;
+    case JS_GLOBAL_PROPERTY_CELL_TYPE:
+      JSGlobalPropertyCell::cast(this)->JSGlobalPropertyCellPrint();
+      break;
+#define MAKE_STRUCT_CASE(NAME, Name, name) \
+  case NAME##_TYPE:                        \
+    Name::cast(this)->Name##Print();       \
+    break;
+  STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+
+    default:
+      PrintF("UNKNOWN TYPE %d", map()->instance_type());
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void HeapObject::HeapObjectVerify() {
+  InstanceType instance_type = map()->instance_type();
+
+  if (instance_type < FIRST_NONSTRING_TYPE) {
+    String::cast(this)->StringVerify();
+    return;
+  }
+
+  switch (instance_type) {
+    case MAP_TYPE:
+      Map::cast(this)->MapVerify();
+      break;
+    case HEAP_NUMBER_TYPE:
+      HeapNumber::cast(this)->HeapNumberVerify();
+      break;
+    case FIXED_ARRAY_TYPE:
+      FixedArray::cast(this)->FixedArrayVerify();
+      break;
+    case BYTE_ARRAY_TYPE:
+      ByteArray::cast(this)->ByteArrayVerify();
+      break;
+    case PIXEL_ARRAY_TYPE:
+      PixelArray::cast(this)->PixelArrayVerify();
+      break;
+    case CODE_TYPE:
+      Code::cast(this)->CodeVerify();
+      break;
+    case ODDBALL_TYPE:
+      Oddball::cast(this)->OddballVerify();
+      break;
+    case JS_OBJECT_TYPE:
+    case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+      JSObject::cast(this)->JSObjectVerify();
+      break;
+    case JS_VALUE_TYPE:
+      JSValue::cast(this)->JSValueVerify();
+      break;
+    case JS_FUNCTION_TYPE:
+      JSFunction::cast(this)->JSFunctionVerify();
+      break;
+    case JS_GLOBAL_PROXY_TYPE:
+      JSGlobalProxy::cast(this)->JSGlobalProxyVerify();
+      break;
+    case JS_GLOBAL_OBJECT_TYPE:
+      JSGlobalObject::cast(this)->JSGlobalObjectVerify();
+      break;
+    case JS_BUILTINS_OBJECT_TYPE:
+      JSBuiltinsObject::cast(this)->JSBuiltinsObjectVerify();
+      break;
+    case JS_GLOBAL_PROPERTY_CELL_TYPE:
+      JSGlobalPropertyCell::cast(this)->JSGlobalPropertyCellVerify();
+      break;
+    case JS_ARRAY_TYPE:
+      JSArray::cast(this)->JSArrayVerify();
+      break;
+    case JS_REGEXP_TYPE:
+      JSRegExp::cast(this)->JSRegExpVerify();
+      break;
+    case FILLER_TYPE:
+      break;
+    case PROXY_TYPE:
+      Proxy::cast(this)->ProxyVerify();
+      break;
+    case SHARED_FUNCTION_INFO_TYPE:
+      SharedFunctionInfo::cast(this)->SharedFunctionInfoVerify();
+      break;
+
+#define MAKE_STRUCT_CASE(NAME, Name, name) \
+  case NAME##_TYPE:                        \
+    Name::cast(this)->Name##Verify();      \
+    break;
+    STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void HeapObject::VerifyHeapPointer(Object* p) {
+  ASSERT(p->IsHeapObject());
+  ASSERT(Heap::Contains(HeapObject::cast(p)));
+}
+
+
+void HeapNumber::HeapNumberVerify() {
+  ASSERT(IsHeapNumber());
+}
+
+
+void ByteArray::ByteArrayPrint() {
+  PrintF("byte array, data starts at %p", GetDataStartAddress());
+}
+
+
+void PixelArray::PixelArrayPrint() {
+  PrintF("pixel array");
+}
+
+
+void ByteArray::ByteArrayVerify() {
+  ASSERT(IsByteArray());
+}
+
+
+void PixelArray::PixelArrayVerify() {
+  ASSERT(IsPixelArray());
+}
+
+
+void JSObject::PrintProperties() {
+  if (HasFastProperties()) {
+    DescriptorArray* descs = map()->instance_descriptors();
+    for (int i = 0; i < descs->number_of_descriptors(); i++) {
+      PrintF("   ");
+      descs->GetKey(i)->StringPrint();
+      PrintF(": ");
+      switch (descs->GetType(i)) {
+        case FIELD: {
+          int index = descs->GetFieldIndex(i);
+          FastPropertyAt(index)->ShortPrint();
+          PrintF(" (field at offset %d)\n", index);
+          break;
+        }
+        case CONSTANT_FUNCTION:
+          descs->GetConstantFunction(i)->ShortPrint();
+          PrintF(" (constant function)\n");
+          break;
+        case CALLBACKS:
+          descs->GetCallbacksObject(i)->ShortPrint();
+          PrintF(" (callback)\n");
+          break;
+        case MAP_TRANSITION:
+          PrintF(" (map transition)\n");
+          break;
+        case CONSTANT_TRANSITION:
+          PrintF(" (constant transition)\n");
+          break;
+        case NULL_DESCRIPTOR:
+          PrintF(" (null descriptor)\n");
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+    }
+  } else {
+    property_dictionary()->Print();
+  }
+}
+
+
+void JSObject::PrintElements() {
+  switch (GetElementsKind()) {
+    case FAST_ELEMENTS: {
+      // Print in array notation for non-sparse arrays.
+      FixedArray* p = FixedArray::cast(elements());
+      for (int i = 0; i < p->length(); i++) {
+        PrintF("   %d: ", i);
+        p->get(i)->ShortPrint();
+        PrintF("\n");
+      }
+      break;
+    }
+    case PIXEL_ELEMENTS: {
+      PixelArray* p = PixelArray::cast(elements());
+      for (int i = 0; i < p->length(); i++) {
+        PrintF("   %d: %d\n", i, p->get(i));
+      }
+      break;
+    }
+    case DICTIONARY_ELEMENTS:
+      elements()->Print();
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void JSObject::JSObjectPrint() {
+  PrintF("%p: [JSObject]\n", this);
+  PrintF(" - map = %p\n", map());
+  PrintF(" - prototype = %p\n", GetPrototype());
+  PrintF(" {\n");
+  PrintProperties();
+  PrintElements();
+  PrintF(" }\n");
+}
+
+
+void JSObject::JSObjectVerify() {
+  VerifyHeapPointer(properties());
+  VerifyHeapPointer(elements());
+  if (HasFastProperties()) {
+    CHECK_EQ(map()->unused_property_fields(),
+             (map()->inobject_properties() + properties()->length() -
+              map()->NextFreePropertyIndex()));
+  }
+}
+
+
+static const char* TypeToString(InstanceType type) {
+  switch (type) {
+    case INVALID_TYPE: return "INVALID";
+    case MAP_TYPE: return "MAP";
+    case HEAP_NUMBER_TYPE: return "HEAP_NUMBER";
+    case SHORT_SYMBOL_TYPE:
+    case MEDIUM_SYMBOL_TYPE:
+    case LONG_SYMBOL_TYPE: return "SYMBOL";
+    case SHORT_ASCII_SYMBOL_TYPE:
+    case MEDIUM_ASCII_SYMBOL_TYPE:
+    case LONG_ASCII_SYMBOL_TYPE: return "ASCII_SYMBOL";
+    case SHORT_SLICED_SYMBOL_TYPE:
+    case MEDIUM_SLICED_SYMBOL_TYPE:
+    case LONG_SLICED_SYMBOL_TYPE: return "SLICED_SYMBOL";
+    case SHORT_SLICED_ASCII_SYMBOL_TYPE:
+    case MEDIUM_SLICED_ASCII_SYMBOL_TYPE:
+    case LONG_SLICED_ASCII_SYMBOL_TYPE: return "SLICED_ASCII_SYMBOL";
+    case SHORT_CONS_SYMBOL_TYPE:
+    case MEDIUM_CONS_SYMBOL_TYPE:
+    case LONG_CONS_SYMBOL_TYPE: return "CONS_SYMBOL";
+    case SHORT_CONS_ASCII_SYMBOL_TYPE:
+    case MEDIUM_CONS_ASCII_SYMBOL_TYPE:
+    case LONG_CONS_ASCII_SYMBOL_TYPE: return "CONS_ASCII_SYMBOL";
+    case SHORT_EXTERNAL_ASCII_SYMBOL_TYPE:
+    case MEDIUM_EXTERNAL_ASCII_SYMBOL_TYPE:
+    case LONG_EXTERNAL_ASCII_SYMBOL_TYPE:
+    case SHORT_EXTERNAL_SYMBOL_TYPE:
+    case MEDIUM_EXTERNAL_SYMBOL_TYPE:
+    case LONG_EXTERNAL_SYMBOL_TYPE: return "EXTERNAL_SYMBOL";
+    case SHORT_ASCII_STRING_TYPE:
+    case MEDIUM_ASCII_STRING_TYPE:
+    case LONG_ASCII_STRING_TYPE: return "ASCII_STRING";
+    case SHORT_STRING_TYPE:
+    case MEDIUM_STRING_TYPE:
+    case LONG_STRING_TYPE: return "TWO_BYTE_STRING";
+    case SHORT_CONS_STRING_TYPE:
+    case MEDIUM_CONS_STRING_TYPE:
+    case LONG_CONS_STRING_TYPE:
+    case SHORT_CONS_ASCII_STRING_TYPE:
+    case MEDIUM_CONS_ASCII_STRING_TYPE:
+    case LONG_CONS_ASCII_STRING_TYPE: return "CONS_STRING";
+    case SHORT_SLICED_STRING_TYPE:
+    case MEDIUM_SLICED_STRING_TYPE:
+    case LONG_SLICED_STRING_TYPE:
+    case SHORT_SLICED_ASCII_STRING_TYPE:
+    case MEDIUM_SLICED_ASCII_STRING_TYPE:
+    case LONG_SLICED_ASCII_STRING_TYPE: return "SLICED_STRING";
+    case SHORT_EXTERNAL_ASCII_STRING_TYPE:
+    case MEDIUM_EXTERNAL_ASCII_STRING_TYPE:
+    case LONG_EXTERNAL_ASCII_STRING_TYPE:
+    case SHORT_EXTERNAL_STRING_TYPE:
+    case MEDIUM_EXTERNAL_STRING_TYPE:
+    case LONG_EXTERNAL_STRING_TYPE: return "EXTERNAL_STRING";
+    case FIXED_ARRAY_TYPE: return "FIXED_ARRAY";
+    case BYTE_ARRAY_TYPE: return "BYTE_ARRAY";
+    case PIXEL_ARRAY_TYPE: return "PIXEL_ARRAY";
+    case FILLER_TYPE: return "FILLER";
+    case JS_OBJECT_TYPE: return "JS_OBJECT";
+    case JS_CONTEXT_EXTENSION_OBJECT_TYPE: return "JS_CONTEXT_EXTENSION_OBJECT";
+    case ODDBALL_TYPE: return "ODDBALL";
+    case JS_GLOBAL_PROPERTY_CELL_TYPE: return "JS_GLOBAL_PROPERTY_CELL";
+    case SHARED_FUNCTION_INFO_TYPE: return "SHARED_FUNCTION_INFO";
+    case JS_FUNCTION_TYPE: return "JS_FUNCTION";
+    case CODE_TYPE: return "CODE";
+    case JS_ARRAY_TYPE: return "JS_ARRAY";
+    case JS_REGEXP_TYPE: return "JS_REGEXP";
+    case JS_VALUE_TYPE: return "JS_VALUE";
+    case JS_GLOBAL_OBJECT_TYPE: return "JS_GLOBAL_OBJECT";
+    case JS_BUILTINS_OBJECT_TYPE: return "JS_BUILTINS_OBJECT";
+    case JS_GLOBAL_PROXY_TYPE: return "JS_GLOBAL_PROXY";
+    case PROXY_TYPE: return "PROXY";
+    case SMI_TYPE: return "SMI";
+#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return #NAME;
+  STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+  }
+  return "UNKNOWN";
+}
+
+
+void Map::MapPrint() {
+  HeapObject::PrintHeader("Map");
+  PrintF(" - type: %s\n", TypeToString(instance_type()));
+  PrintF(" - instance size: %d\n", instance_size());
+  PrintF(" - inobject properties: %d\n", inobject_properties());
+  PrintF(" - pre-allocated property fields: %d\n",
+      pre_allocated_property_fields());
+  PrintF(" - unused property fields: %d\n", unused_property_fields());
+  if (is_hidden_prototype()) {
+    PrintF(" - hidden_prototype\n");
+  }
+  if (has_named_interceptor()) {
+    PrintF(" - named_interceptor\n");
+  }
+  if (has_indexed_interceptor()) {
+    PrintF(" - indexed_interceptor\n");
+  }
+  if (is_undetectable()) {
+    PrintF(" - undetectable\n");
+  }
+  if (needs_loading()) {
+    PrintF(" - needs_loading\n");
+  }
+  if (has_instance_call_handler()) {
+    PrintF(" - instance_call_handler\n");
+  }
+  if (is_access_check_needed()) {
+    PrintF(" - access_check_needed\n");
+  }
+  PrintF(" - instance descriptors: ");
+  instance_descriptors()->ShortPrint();
+  PrintF("\n - prototype: ");
+  prototype()->ShortPrint();
+  PrintF("\n - constructor: ");
+  constructor()->ShortPrint();
+  PrintF("\n");
+}
+
+
+void Map::MapVerify() {
+  ASSERT(!Heap::InNewSpace(this));
+  ASSERT(FIRST_TYPE <= instance_type() && instance_type() <= LAST_TYPE);
+  ASSERT(kPointerSize <= instance_size()
+         && instance_size() < Heap::Capacity());
+  VerifyHeapPointer(prototype());
+  VerifyHeapPointer(instance_descriptors());
+}
+
+
+void FixedArray::FixedArrayPrint() {
+  HeapObject::PrintHeader("FixedArray");
+  PrintF(" - length: %d", length());
+  for (int i = 0; i < length(); i++) {
+    PrintF("\n  [%d]: ", i);
+    get(i)->ShortPrint();
+  }
+  PrintF("\n");
+}
+
+
+void FixedArray::FixedArrayVerify() {
+  for (int i = 0; i < length(); i++) {
+    Object* e = get(i);
+    if (e->IsHeapObject()) {
+      VerifyHeapPointer(e);
+    } else {
+      e->Verify();
+    }
+  }
+}
+
+
+void JSValue::JSValuePrint() {
+  HeapObject::PrintHeader("ValueObject");
+  value()->Print();
+}
+
+
+void JSValue::JSValueVerify() {
+  Object* v = value();
+  if (v->IsHeapObject()) {
+    VerifyHeapPointer(v);
+  }
+}
+
+
+void String::StringPrint() {
+  if (StringShape(this).IsSymbol()) {
+    PrintF("#");
+  } else if (StringShape(this).IsCons()) {
+    PrintF("c\"");
+  } else {
+    PrintF("\"");
+  }
+
+  for (int i = 0; i < length(); i++) {
+    PrintF("%c", Get(i));
+  }
+
+  if (!StringShape(this).IsSymbol()) PrintF("\"");
+}
+
+
+void String::StringVerify() {
+  CHECK(IsString());
+  CHECK(length() >= 0 && length() <= Smi::kMaxValue);
+  if (IsSymbol()) {
+    CHECK(!Heap::InNewSpace(this));
+  }
+}
+
+
+void JSFunction::JSFunctionPrint() {
+  HeapObject::PrintHeader("Function");
+  PrintF(" - map = 0x%p\n", map());
+  PrintF(" - is boilerplate: %s\n", IsBoilerplate() ? "yes" : "no");
+  PrintF(" - initial_map = ");
+  if (has_initial_map()) {
+    initial_map()->ShortPrint();
+  }
+  PrintF("\n - shared_info = ");
+  shared()->ShortPrint();
+  PrintF("\n   - name = ");
+  shared()->name()->Print();
+  PrintF("\n - context = ");
+  unchecked_context()->ShortPrint();
+  PrintF("\n - code = ");
+  code()->ShortPrint();
+  PrintF("\n");
+
+  PrintProperties();
+  PrintElements();
+
+  PrintF("\n");
+}
+
+
+void JSFunction::JSFunctionVerify() {
+  CHECK(IsJSFunction());
+  VerifyObjectField(kPrototypeOrInitialMapOffset);
+}
+
+
+void SharedFunctionInfo::SharedFunctionInfoPrint() {
+  HeapObject::PrintHeader("SharedFunctionInfo");
+  PrintF(" - name: ");
+  name()->ShortPrint();
+  PrintF("\n - expected_nof_properties: %d", expected_nof_properties());
+  PrintF("\n - instance class name = ");
+  instance_class_name()->Print();
+  PrintF("\n - code = ");
+  code()->ShortPrint();
+  PrintF("\n - source code = ");
+  GetSourceCode()->ShortPrint();
+  // Script files are often large and hard to read.
+  // PrintF("\n - script =");
+  // script()->Print();
+  PrintF("\n - function token position = %d", function_token_position());
+  PrintF("\n - start position = %d", start_position());
+  PrintF("\n - end position = %d", end_position());
+  PrintF("\n - is expression = %d", is_expression());
+  PrintF("\n - debug info = ");
+  debug_info()->ShortPrint();
+  PrintF("\n - length = %d", length());
+  PrintF("\n - has_only_this_property_assignments = %d",
+         has_only_this_property_assignments());
+  PrintF("\n - has_only_simple_this_property_assignments = %d",
+         has_only_simple_this_property_assignments());
+  PrintF("\n - this_property_assignments = ");
+  this_property_assignments()->ShortPrint();
+  PrintF("\n");
+}
+
+void SharedFunctionInfo::SharedFunctionInfoVerify() {
+  CHECK(IsSharedFunctionInfo());
+  VerifyObjectField(kNameOffset);
+  VerifyObjectField(kCodeOffset);
+  VerifyObjectField(kInstanceClassNameOffset);
+  VerifyObjectField(kExternalReferenceDataOffset);
+  VerifyObjectField(kScriptOffset);
+  VerifyObjectField(kDebugInfoOffset);
+}
+
+
+void JSGlobalProxy::JSGlobalProxyPrint() {
+  PrintF("global_proxy");
+  JSObjectPrint();
+  PrintF("context : ");
+  context()->ShortPrint();
+  PrintF("\n");
+}
+
+
+void JSGlobalProxy::JSGlobalProxyVerify() {
+  CHECK(IsJSGlobalProxy());
+  JSObjectVerify();
+  VerifyObjectField(JSGlobalProxy::kContextOffset);
+  // Make sure that this object has no properties, elements.
+  CHECK_EQ(0, properties()->length());
+  CHECK_EQ(0, elements()->length());
+}
+
+
+void JSGlobalObject::JSGlobalObjectPrint() {
+  PrintF("global ");
+  JSObjectPrint();
+  PrintF("global context : ");
+  global_context()->ShortPrint();
+  PrintF("\n");
+}
+
+
+void JSGlobalObject::JSGlobalObjectVerify() {
+  CHECK(IsJSGlobalObject());
+  JSObjectVerify();
+  for (int i = GlobalObject::kBuiltinsOffset;
+       i < JSGlobalObject::kSize;
+       i += kPointerSize) {
+    VerifyObjectField(i);
+  }
+}
+
+
+void JSBuiltinsObject::JSBuiltinsObjectPrint() {
+  PrintF("builtins ");
+  JSObjectPrint();
+}
+
+
+void JSBuiltinsObject::JSBuiltinsObjectVerify() {
+  CHECK(IsJSBuiltinsObject());
+  JSObjectVerify();
+  for (int i = GlobalObject::kBuiltinsOffset;
+       i < JSBuiltinsObject::kSize;
+       i += kPointerSize) {
+    VerifyObjectField(i);
+  }
+}
+
+
+void Oddball::OddballVerify() {
+  CHECK(IsOddball());
+  VerifyHeapPointer(to_string());
+  Object* number = to_number();
+  if (number->IsHeapObject()) {
+    ASSERT(number == Heap::nan_value());
+  } else {
+    ASSERT(number->IsSmi());
+    int value = Smi::cast(number)->value();
+    ASSERT(value == 0 || value == 1 || value == -1 ||
+           value == -2 || value == -3);
+  }
+}
+
+
+void JSGlobalPropertyCell::JSGlobalPropertyCellVerify() {
+  CHECK(IsJSGlobalPropertyCell());
+  VerifyObjectField(kValueOffset);
+}
+
+
+void JSGlobalPropertyCell::JSGlobalPropertyCellPrint() {
+  HeapObject::PrintHeader("JSGlobalPropertyCell");
+}
+
+
+void Code::CodePrint() {
+  HeapObject::PrintHeader("Code");
+#ifdef ENABLE_DISASSEMBLER
+  Disassemble(NULL);
+#endif
+}
+
+
+void Code::CodeVerify() {
+  CHECK(IsAligned(reinterpret_cast<intptr_t>(instruction_start()),
+                  static_cast<intptr_t>(kCodeAlignment)));
+  Address last_gc_pc = NULL;
+  for (RelocIterator it(this); !it.done(); it.next()) {
+    it.rinfo()->Verify();
+    // Ensure that GC will not iterate twice over the same pointer.
+    if (RelocInfo::IsGCRelocMode(it.rinfo()->rmode())) {
+      CHECK(it.rinfo()->pc() != last_gc_pc);
+      last_gc_pc = it.rinfo()->pc();
+    }
+  }
+}
+
+
+void JSArray::JSArrayVerify() {
+  JSObjectVerify();
+  ASSERT(length()->IsNumber() || length()->IsUndefined());
+  ASSERT(elements()->IsUndefined() || elements()->IsFixedArray());
+}
+
+
+void JSRegExp::JSRegExpVerify() {
+  JSObjectVerify();
+  ASSERT(data()->IsUndefined() || data()->IsFixedArray());
+  switch (TypeTag()) {
+    case JSRegExp::ATOM: {
+      FixedArray* arr = FixedArray::cast(data());
+      ASSERT(arr->get(JSRegExp::kAtomPatternIndex)->IsString());
+      break;
+    }
+    case JSRegExp::IRREGEXP: {
+      bool is_native = RegExpImpl::UsesNativeRegExp();
+
+      FixedArray* arr = FixedArray::cast(data());
+      Object* ascii_data = arr->get(JSRegExp::kIrregexpASCIICodeIndex);
+      // TheHole : Not compiled yet.
+      // JSObject: Compilation error.
+      // Code/ByteArray: Compiled code.
+      ASSERT(ascii_data->IsTheHole() || ascii_data->IsJSObject() ||
+          (is_native ? ascii_data->IsCode() : ascii_data->IsByteArray()));
+      Object* uc16_data = arr->get(JSRegExp::kIrregexpUC16CodeIndex);
+      ASSERT(uc16_data->IsTheHole() || uc16_data->IsJSObject() ||
+          (is_native ? uc16_data->IsCode() : uc16_data->IsByteArray()));
+      ASSERT(arr->get(JSRegExp::kIrregexpCaptureCountIndex)->IsSmi());
+      ASSERT(arr->get(JSRegExp::kIrregexpMaxRegisterCountIndex)->IsSmi());
+      break;
+    }
+    default:
+      ASSERT_EQ(JSRegExp::NOT_COMPILED, TypeTag());
+      ASSERT(data()->IsUndefined());
+      break;
+  }
+}
+
+
+void Proxy::ProxyPrint() {
+  PrintF("proxy to %p", proxy());
+}
+
+
+void Proxy::ProxyVerify() {
+  ASSERT(IsProxy());
+}
+
+
+void AccessorInfo::AccessorInfoVerify() {
+  CHECK(IsAccessorInfo());
+  VerifyPointer(getter());
+  VerifyPointer(setter());
+  VerifyPointer(name());
+  VerifyPointer(data());
+  VerifyPointer(flag());
+}
+
+void AccessorInfo::AccessorInfoPrint() {
+  HeapObject::PrintHeader("AccessorInfo");
+  PrintF("\n - getter: ");
+  getter()->ShortPrint();
+  PrintF("\n - setter: ");
+  setter()->ShortPrint();
+  PrintF("\n - name: ");
+  name()->ShortPrint();
+  PrintF("\n - data: ");
+  data()->ShortPrint();
+  PrintF("\n - flag: ");
+  flag()->ShortPrint();
+}
+
+void AccessCheckInfo::AccessCheckInfoVerify() {
+  CHECK(IsAccessCheckInfo());
+  VerifyPointer(named_callback());
+  VerifyPointer(indexed_callback());
+  VerifyPointer(data());
+}
+
+void AccessCheckInfo::AccessCheckInfoPrint() {
+  HeapObject::PrintHeader("AccessCheckInfo");
+  PrintF("\n - named_callback: ");
+  named_callback()->ShortPrint();
+  PrintF("\n - indexed_callback: ");
+  indexed_callback()->ShortPrint();
+  PrintF("\n - data: ");
+  data()->ShortPrint();
+}
+
+void InterceptorInfo::InterceptorInfoVerify() {
+  CHECK(IsInterceptorInfo());
+  VerifyPointer(getter());
+  VerifyPointer(setter());
+  VerifyPointer(query());
+  VerifyPointer(deleter());
+  VerifyPointer(enumerator());
+  VerifyPointer(data());
+}
+
+void InterceptorInfo::InterceptorInfoPrint() {
+  HeapObject::PrintHeader("InterceptorInfo");
+  PrintF("\n - getter: ");
+  getter()->ShortPrint();
+  PrintF("\n - setter: ");
+  setter()->ShortPrint();
+  PrintF("\n - query: ");
+  query()->ShortPrint();
+  PrintF("\n - deleter: ");
+  deleter()->ShortPrint();
+  PrintF("\n - enumerator: ");
+  enumerator()->ShortPrint();
+  PrintF("\n - data: ");
+  data()->ShortPrint();
+}
+
+void CallHandlerInfo::CallHandlerInfoVerify() {
+  CHECK(IsCallHandlerInfo());
+  VerifyPointer(callback());
+  VerifyPointer(data());
+}
+
+void CallHandlerInfo::CallHandlerInfoPrint() {
+  HeapObject::PrintHeader("CallHandlerInfo");
+  PrintF("\n - callback: ");
+  callback()->ShortPrint();
+  PrintF("\n - data: ");
+  data()->ShortPrint();
+}
+
+void TemplateInfo::TemplateInfoVerify() {
+  VerifyPointer(tag());
+  VerifyPointer(property_list());
+}
+
+void FunctionTemplateInfo::FunctionTemplateInfoVerify() {
+  CHECK(IsFunctionTemplateInfo());
+  TemplateInfoVerify();
+  VerifyPointer(serial_number());
+  VerifyPointer(call_code());
+  VerifyPointer(property_accessors());
+  VerifyPointer(prototype_template());
+  VerifyPointer(parent_template());
+  VerifyPointer(named_property_handler());
+  VerifyPointer(indexed_property_handler());
+  VerifyPointer(instance_template());
+  VerifyPointer(signature());
+  VerifyPointer(access_check_info());
+}
+
+void FunctionTemplateInfo::FunctionTemplateInfoPrint() {
+  HeapObject::PrintHeader("FunctionTemplateInfo");
+  PrintF("\n - tag: ");
+  tag()->ShortPrint();
+  PrintF("\n - property_list: ");
+  property_list()->ShortPrint();
+  PrintF("\n - serial_number: ");
+  serial_number()->ShortPrint();
+  PrintF("\n - call_code: ");
+  call_code()->ShortPrint();
+  PrintF("\n - property_accessors: ");
+  property_accessors()->ShortPrint();
+  PrintF("\n - prototype_template: ");
+  prototype_template()->ShortPrint();
+  PrintF("\n - parent_template: ");
+  parent_template()->ShortPrint();
+  PrintF("\n - named_property_handler: ");
+  named_property_handler()->ShortPrint();
+  PrintF("\n - indexed_property_handler: ");
+  indexed_property_handler()->ShortPrint();
+  PrintF("\n - instance_template: ");
+  instance_template()->ShortPrint();
+  PrintF("\n - signature: ");
+  signature()->ShortPrint();
+  PrintF("\n - access_check_info: ");
+  access_check_info()->ShortPrint();
+  PrintF("\n - hidden_prototype: %s", hidden_prototype() ? "true" : "false");
+  PrintF("\n - undetectable: %s", undetectable() ? "true" : "false");
+  PrintF("\n - need_access_check: %s", needs_access_check() ? "true" : "false");
+}
+
+void ObjectTemplateInfo::ObjectTemplateInfoVerify() {
+  CHECK(IsObjectTemplateInfo());
+  TemplateInfoVerify();
+  VerifyPointer(constructor());
+  VerifyPointer(internal_field_count());
+}
+
+void ObjectTemplateInfo::ObjectTemplateInfoPrint() {
+  HeapObject::PrintHeader("ObjectTemplateInfo");
+  PrintF("\n - constructor: ");
+  constructor()->ShortPrint();
+  PrintF("\n - internal_field_count: ");
+  internal_field_count()->ShortPrint();
+}
+
+void SignatureInfo::SignatureInfoVerify() {
+  CHECK(IsSignatureInfo());
+  VerifyPointer(receiver());
+  VerifyPointer(args());
+}
+
+void SignatureInfo::SignatureInfoPrint() {
+  HeapObject::PrintHeader("SignatureInfo");
+  PrintF("\n - receiver: ");
+  receiver()->ShortPrint();
+  PrintF("\n - args: ");
+  args()->ShortPrint();
+}
+
+void TypeSwitchInfo::TypeSwitchInfoVerify() {
+  CHECK(IsTypeSwitchInfo());
+  VerifyPointer(types());
+}
+
+void TypeSwitchInfo::TypeSwitchInfoPrint() {
+  HeapObject::PrintHeader("TypeSwitchInfo");
+  PrintF("\n - types: ");
+  types()->ShortPrint();
+}
+
+
+void Script::ScriptVerify() {
+  CHECK(IsScript());
+  VerifyPointer(source());
+  VerifyPointer(name());
+  line_offset()->SmiVerify();
+  column_offset()->SmiVerify();
+  VerifyPointer(data());
+  VerifyPointer(wrapper());
+  type()->SmiVerify();
+  VerifyPointer(line_ends());
+  VerifyPointer(id());
+}
+
+
+void Script::ScriptPrint() {
+  HeapObject::PrintHeader("Script");
+  PrintF("\n - source: ");
+  source()->ShortPrint();
+  PrintF("\n - name: ");
+  name()->ShortPrint();
+  PrintF("\n - line_offset: ");
+  line_offset()->ShortPrint();
+  PrintF("\n - column_offset: ");
+  column_offset()->ShortPrint();
+  PrintF("\n - type: ");
+  type()->ShortPrint();
+  PrintF("\n - id: ");
+  id()->ShortPrint();
+  PrintF("\n");
+}
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+void DebugInfo::DebugInfoVerify() {
+  CHECK(IsDebugInfo());
+  VerifyPointer(shared());
+  VerifyPointer(original_code());
+  VerifyPointer(code());
+  VerifyPointer(break_points());
+}
+
+
+void DebugInfo::DebugInfoPrint() {
+  HeapObject::PrintHeader("DebugInfo");
+  PrintF("\n - shared: ");
+  shared()->ShortPrint();
+  PrintF("\n - original_code: ");
+  original_code()->ShortPrint();
+  PrintF("\n - code: ");
+  code()->ShortPrint();
+  PrintF("\n - break_points: ");
+  break_points()->Print();
+}
+
+
+void BreakPointInfo::BreakPointInfoVerify() {
+  CHECK(IsBreakPointInfo());
+  code_position()->SmiVerify();
+  source_position()->SmiVerify();
+  statement_position()->SmiVerify();
+  VerifyPointer(break_point_objects());
+}
+
+
+void BreakPointInfo::BreakPointInfoPrint() {
+  HeapObject::PrintHeader("BreakPointInfo");
+  PrintF("\n - code_position: %d", code_position());
+  PrintF("\n - source_position: %d", source_position());
+  PrintF("\n - statement_position: %d", statement_position());
+  PrintF("\n - break_point_objects: ");
+  break_point_objects()->ShortPrint();
+}
+#endif
+
+
+void JSObject::IncrementSpillStatistics(SpillInformation* info) {
+  info->number_of_objects_++;
+  // Named properties
+  if (HasFastProperties()) {
+    info->number_of_objects_with_fast_properties_++;
+    info->number_of_fast_used_fields_   += map()->NextFreePropertyIndex();
+    info->number_of_fast_unused_fields_ += map()->unused_property_fields();
+  } else {
+    StringDictionary* dict = property_dictionary();
+    info->number_of_slow_used_properties_ += dict->NumberOfElements();
+    info->number_of_slow_unused_properties_ +=
+        dict->Capacity() - dict->NumberOfElements();
+  }
+  // Indexed properties
+  switch (GetElementsKind()) {
+    case FAST_ELEMENTS: {
+      info->number_of_objects_with_fast_elements_++;
+      int holes = 0;
+      FixedArray* e = FixedArray::cast(elements());
+      int len = e->length();
+      for (int i = 0; i < len; i++) {
+        if (e->get(i) == Heap::the_hole_value()) holes++;
+      }
+      info->number_of_fast_used_elements_   += len - holes;
+      info->number_of_fast_unused_elements_ += holes;
+      break;
+    }
+    case PIXEL_ELEMENTS: {
+      info->number_of_objects_with_fast_elements_++;
+      PixelArray* e = PixelArray::cast(elements());
+      info->number_of_fast_used_elements_ += e->length();
+      break;
+    }
+    case DICTIONARY_ELEMENTS: {
+      NumberDictionary* dict = element_dictionary();
+      info->number_of_slow_used_elements_ += dict->NumberOfElements();
+      info->number_of_slow_unused_elements_ +=
+          dict->Capacity() - dict->NumberOfElements();
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void JSObject::SpillInformation::Clear() {
+  number_of_objects_ = 0;
+  number_of_objects_with_fast_properties_ = 0;
+  number_of_objects_with_fast_elements_ = 0;
+  number_of_fast_used_fields_ = 0;
+  number_of_fast_unused_fields_ = 0;
+  number_of_slow_used_properties_ = 0;
+  number_of_slow_unused_properties_ = 0;
+  number_of_fast_used_elements_ = 0;
+  number_of_fast_unused_elements_ = 0;
+  number_of_slow_used_elements_ = 0;
+  number_of_slow_unused_elements_ = 0;
+}
+
+void JSObject::SpillInformation::Print() {
+  PrintF("\n  JSObject Spill Statistics (#%d):\n", number_of_objects_);
+
+  PrintF("    - fast properties (#%d): %d (used) %d (unused)\n",
+         number_of_objects_with_fast_properties_,
+         number_of_fast_used_fields_, number_of_fast_unused_fields_);
+
+  PrintF("    - slow properties (#%d): %d (used) %d (unused)\n",
+         number_of_objects_ - number_of_objects_with_fast_properties_,
+         number_of_slow_used_properties_, number_of_slow_unused_properties_);
+
+  PrintF("    - fast elements (#%d): %d (used) %d (unused)\n",
+         number_of_objects_with_fast_elements_,
+         number_of_fast_used_elements_, number_of_fast_unused_elements_);
+
+  PrintF("    - slow elements (#%d): %d (used) %d (unused)\n",
+         number_of_objects_ - number_of_objects_with_fast_elements_,
+         number_of_slow_used_elements_, number_of_slow_unused_elements_);
+
+  PrintF("\n");
+}
+
+
+void DescriptorArray::PrintDescriptors() {
+  PrintF("Descriptor array  %d\n", number_of_descriptors());
+  for (int i = 0; i < number_of_descriptors(); i++) {
+    PrintF(" %d: ", i);
+    Descriptor desc;
+    Get(i, &desc);
+    desc.Print();
+  }
+  PrintF("\n");
+}
+
+
+bool DescriptorArray::IsSortedNoDuplicates() {
+  String* current_key = NULL;
+  uint32_t current = 0;
+  for (int i = 0; i < number_of_descriptors(); i++) {
+    String* key = GetKey(i);
+    if (key == current_key) {
+      PrintDescriptors();
+      return false;
+    }
+    current_key = key;
+    uint32_t hash = GetKey(i)->Hash();
+    if (hash < current) {
+      PrintDescriptors();
+      return false;
+    }
+    current = hash;
+  }
+  return true;
+}
+
+
+#endif  // DEBUG
+
+} }  // namespace v8::internal
diff --git a/src/objects-inl.h b/src/objects-inl.h
new file mode 100644
index 0000000..29b886d
--- /dev/null
+++ b/src/objects-inl.h
@@ -0,0 +1,2887 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Review notes:
+//
+// - The use of macros in these inline functions may seem superfluous,
+// but it is absolutely needed to make sure gcc generates optimal
+// code. gcc is not happy when attempting to inline too deeply.
+//
+
+#ifndef V8_OBJECTS_INL_H_
+#define V8_OBJECTS_INL_H_
+
+#include "objects.h"
+#include "contexts.h"
+#include "conversions-inl.h"
+#include "property.h"
+
+namespace v8 {
+namespace internal {
+
+PropertyDetails::PropertyDetails(Smi* smi) {
+  value_ = smi->value();
+}
+
+
+Smi* PropertyDetails::AsSmi() {
+  return Smi::FromInt(value_);
+}
+
+
+PropertyDetails PropertyDetails::AsDeleted() {
+  PropertyDetails d(DONT_ENUM, NORMAL);
+  Smi* smi = Smi::FromInt(AsSmi()->value() | DeletedField::encode(1));
+  return PropertyDetails(smi);
+}
+
+
+#define CAST_ACCESSOR(type)                     \
+  type* type::cast(Object* object) {            \
+    ASSERT(object->Is##type());                 \
+    return reinterpret_cast<type*>(object);     \
+  }
+
+
+#define INT_ACCESSORS(holder, name, offset)                             \
+  int holder::name() { return READ_INT_FIELD(this, offset); }           \
+  void holder::set_##name(int value) { WRITE_INT_FIELD(this, offset, value); }
+
+
+#define ACCESSORS(holder, name, type, offset)                           \
+  type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \
+  void holder::set_##name(type* value, WriteBarrierMode mode) {         \
+    WRITE_FIELD(this, offset, value);                                   \
+    CONDITIONAL_WRITE_BARRIER(this, offset, mode);                      \
+  }
+
+
+
+#define SMI_ACCESSORS(holder, name, offset)             \
+  int holder::name() {                                  \
+    Object* value = READ_FIELD(this, offset);           \
+    return Smi::cast(value)->value();                   \
+  }                                                     \
+  void holder::set_##name(int value) {                  \
+    WRITE_FIELD(this, offset, Smi::FromInt(value));     \
+  }
+
+
+#define BOOL_GETTER(holder, field, name, offset)           \
+  bool holder::name() {                                    \
+    return BooleanBit::get(field(), offset);               \
+  }                                                        \
+
+
+#define BOOL_ACCESSORS(holder, field, name, offset)        \
+  bool holder::name() {                                    \
+    return BooleanBit::get(field(), offset);               \
+  }                                                        \
+  void holder::set_##name(bool value) {                    \
+    set_##field(BooleanBit::set(field(), offset, value));  \
+  }
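+
+// As an illustration (the holder and field below are hypothetical, not
+// declared in this header), SMI_ACCESSORS(Foo, bar, kBarOffset) expands to
+// roughly:
+//
+//   int Foo::bar() {
+//     Object* value = READ_FIELD(this, kBarOffset);
+//     return Smi::cast(value)->value();
+//   }
+//   void Foo::set_bar(int value) {
+//     WRITE_FIELD(this, kBarOffset, Smi::FromInt(value));
+//   }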
+
+
+bool Object::IsInstanceOf(FunctionTemplateInfo* expected) {
+  // There is a constraint on the object; check.
+  if (!this->IsJSObject()) return false;
+  // Fetch the constructor function of the object.
+  Object* cons_obj = JSObject::cast(this)->map()->constructor();
+  if (!cons_obj->IsJSFunction()) return false;
+  JSFunction* fun = JSFunction::cast(cons_obj);
+  // Iterate through the chain of inheriting function templates to
+  // see if the required one occurs.
+  for (Object* type = fun->shared()->function_data();
+       type->IsFunctionTemplateInfo();
+       type = FunctionTemplateInfo::cast(type)->parent_template()) {
+    if (type == expected) return true;
+  }
+  // Didn't find the required type in the inheritance chain.
+  return false;
+}
+
+
+bool Object::IsSmi() {
+  return HAS_SMI_TAG(this);
+}
+
+
+bool Object::IsHeapObject() {
+  return Internals::HasHeapObjectTag(this);
+}
+
+
+bool Object::IsHeapNumber() {
+  return Object::IsHeapObject()
+    && HeapObject::cast(this)->map()->instance_type() == HEAP_NUMBER_TYPE;
+}
+
+
+bool Object::IsString() {
+  return Object::IsHeapObject()
+    && HeapObject::cast(this)->map()->instance_type() < FIRST_NONSTRING_TYPE;
+}
+
+
+bool Object::IsSymbol() {
+  if (!this->IsHeapObject()) return false;
+  uint32_t type = HeapObject::cast(this)->map()->instance_type();
+  return (type & (kIsNotStringMask | kIsSymbolMask)) ==
+         (kStringTag | kSymbolTag);
+}
+
+
+bool Object::IsConsString() {
+  if (!this->IsHeapObject()) return false;
+  uint32_t type = HeapObject::cast(this)->map()->instance_type();
+  return (type & (kIsNotStringMask | kStringRepresentationMask)) ==
+         (kStringTag | kConsStringTag);
+}
+
+
+#ifdef DEBUG
+// These are for cast checks.  If you need one of these in release
+// mode you should consider using a StringShape before moving it out
+// of the ifdef.
+
+bool Object::IsSeqString() {
+  if (!IsString()) return false;
+  return StringShape(String::cast(this)).IsSequential();
+}
+
+
+bool Object::IsSeqAsciiString() {
+  if (!IsString()) return false;
+  return StringShape(String::cast(this)).IsSequential() &&
+         String::cast(this)->IsAsciiRepresentation();
+}
+
+
+bool Object::IsSeqTwoByteString() {
+  if (!IsString()) return false;
+  return StringShape(String::cast(this)).IsSequential() &&
+         String::cast(this)->IsTwoByteRepresentation();
+}
+
+
+bool Object::IsExternalString() {
+  if (!IsString()) return false;
+  return StringShape(String::cast(this)).IsExternal();
+}
+
+
+bool Object::IsExternalAsciiString() {
+  if (!IsString()) return false;
+  return StringShape(String::cast(this)).IsExternal() &&
+         String::cast(this)->IsAsciiRepresentation();
+}
+
+
+bool Object::IsExternalTwoByteString() {
+  if (!IsString()) return false;
+  return StringShape(String::cast(this)).IsExternal() &&
+         String::cast(this)->IsTwoByteRepresentation();
+}
+
+
+bool Object::IsSlicedString() {
+  if (!IsString()) return false;
+  return StringShape(String::cast(this)).IsSliced();
+}
+
+
+#endif  // DEBUG
+
+
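+// A StringShape caches a string's instance type so that a sequence of
+// representation and encoding tests only reads the map once.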
+StringShape::StringShape(String* str)
+  : type_(str->map()->instance_type()) {
+  set_valid();
+  ASSERT((type_ & kIsNotStringMask) == kStringTag);
+}
+
+
+StringShape::StringShape(Map* map)
+  : type_(map->instance_type()) {
+  set_valid();
+  ASSERT((type_ & kIsNotStringMask) == kStringTag);
+}
+
+
+StringShape::StringShape(InstanceType t)
+  : type_(static_cast<uint32_t>(t)) {
+  set_valid();
+  ASSERT((type_ & kIsNotStringMask) == kStringTag);
+}
+
+
+bool StringShape::IsSymbol() {
+  ASSERT(valid());
+  return (type_ & kIsSymbolMask) == kSymbolTag;
+}
+
+
+bool String::IsAsciiRepresentation() {
+  uint32_t type = map()->instance_type();
+  if ((type & kStringRepresentationMask) == kSlicedStringTag) {
+    return SlicedString::cast(this)->buffer()->IsAsciiRepresentation();
+  }
+  if ((type & kStringRepresentationMask) == kConsStringTag &&
+      ConsString::cast(this)->second()->length() == 0) {
+    return ConsString::cast(this)->first()->IsAsciiRepresentation();
+  }
+  return (type & kStringEncodingMask) == kAsciiStringTag;
+}
+
+
+bool String::IsTwoByteRepresentation() {
+  uint32_t type = map()->instance_type();
+  if ((type & kStringRepresentationMask) == kSlicedStringTag) {
+    return SlicedString::cast(this)->buffer()->IsTwoByteRepresentation();
+  } else if ((type & kStringRepresentationMask) == kConsStringTag &&
+             ConsString::cast(this)->second()->length() == 0) {
+    return ConsString::cast(this)->first()->IsTwoByteRepresentation();
+  }
+  return (type & kStringEncodingMask) == kTwoByteStringTag;
+}
+
+
+bool StringShape::IsCons() {
+  return (type_ & kStringRepresentationMask) == kConsStringTag;
+}
+
+
+bool StringShape::IsSliced() {
+  return (type_ & kStringRepresentationMask) == kSlicedStringTag;
+}
+
+
+bool StringShape::IsExternal() {
+  return (type_ & kStringRepresentationMask) == kExternalStringTag;
+}
+
+
+bool StringShape::IsSequential() {
+  return (type_ & kStringRepresentationMask) == kSeqStringTag;
+}
+
+
+StringRepresentationTag StringShape::representation_tag() {
+  uint32_t tag = (type_ & kStringRepresentationMask);
+  return static_cast<StringRepresentationTag>(tag);
+}
+
+
+uint32_t StringShape::full_representation_tag() {
+  return (type_ & (kStringRepresentationMask | kStringEncodingMask));
+}
+
+
+STATIC_CHECK((kStringRepresentationMask | kStringEncodingMask) ==
+             Internals::kFullStringRepresentationMask);
+
+
+uint32_t StringShape::size_tag() {
+  return (type_ & kStringSizeMask);
+}
+
+
+bool StringShape::IsSequentialAscii() {
+  return full_representation_tag() == (kSeqStringTag | kAsciiStringTag);
+}
+
+
+bool StringShape::IsSequentialTwoByte() {
+  return full_representation_tag() == (kSeqStringTag | kTwoByteStringTag);
+}
+
+
+bool StringShape::IsExternalAscii() {
+  return full_representation_tag() == (kExternalStringTag | kAsciiStringTag);
+}
+
+
+bool StringShape::IsExternalTwoByte() {
+  return full_representation_tag() == (kExternalStringTag | kTwoByteStringTag);
+}
+
+
+STATIC_CHECK((kExternalStringTag | kTwoByteStringTag) ==
+             Internals::kExternalTwoByteRepresentationTag);
+
+
+uc32 FlatStringReader::Get(int index) {
+  ASSERT(0 <= index && index <= length_);
+  if (is_ascii_) {
+    return static_cast<const byte*>(start_)[index];
+  } else {
+    return static_cast<const uc16*>(start_)[index];
+  }
+}
+
+
+bool Object::IsNumber() {
+  return IsSmi() || IsHeapNumber();
+}
+
+
+bool Object::IsByteArray() {
+  return Object::IsHeapObject()
+    && HeapObject::cast(this)->map()->instance_type() == BYTE_ARRAY_TYPE;
+}
+
+
+bool Object::IsPixelArray() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() == PIXEL_ARRAY_TYPE;
+}
+
+
+bool Object::IsFailure() {
+  return HAS_FAILURE_TAG(this);
+}
+
+
+bool Object::IsRetryAfterGC() {
+  return HAS_FAILURE_TAG(this)
+    && Failure::cast(this)->type() == Failure::RETRY_AFTER_GC;
+}
+
+
+bool Object::IsOutOfMemoryFailure() {
+  return HAS_FAILURE_TAG(this)
+    && Failure::cast(this)->IsOutOfMemoryException();
+}
+
+
+bool Object::IsException() {
+  return this == Failure::Exception();
+}
+
+
+bool Object::IsJSObject() {
+  return IsHeapObject()
+    && HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_OBJECT_TYPE;
+}
+
+
+bool Object::IsJSContextExtensionObject() {
+  return IsHeapObject()
+    && (HeapObject::cast(this)->map()->instance_type() ==
+        JS_CONTEXT_EXTENSION_OBJECT_TYPE);
+}
+
+
+bool Object::IsMap() {
+  return Object::IsHeapObject()
+    && HeapObject::cast(this)->map()->instance_type() == MAP_TYPE;
+}
+
+
+bool Object::IsFixedArray() {
+  return Object::IsHeapObject()
+    && HeapObject::cast(this)->map()->instance_type() == FIXED_ARRAY_TYPE;
+}
+
+
+bool Object::IsDescriptorArray() {
+  return IsFixedArray();
+}
+
+
+bool Object::IsContext() {
+  return Object::IsHeapObject()
+    && (HeapObject::cast(this)->map() == Heap::context_map() ||
+        HeapObject::cast(this)->map() == Heap::catch_context_map() ||
+        HeapObject::cast(this)->map() == Heap::global_context_map());
+}
+
+
+bool Object::IsCatchContext() {
+  return Object::IsHeapObject()
+    && HeapObject::cast(this)->map() == Heap::catch_context_map();
+}
+
+
+bool Object::IsGlobalContext() {
+  return Object::IsHeapObject()
+    && HeapObject::cast(this)->map() == Heap::global_context_map();
+}
+
+
+bool Object::IsJSFunction() {
+  return Object::IsHeapObject()
+    && HeapObject::cast(this)->map()->instance_type() == JS_FUNCTION_TYPE;
+}
+
+
+template <> inline bool Is<JSFunction>(Object* obj) {
+  return obj->IsJSFunction();
+}
+
+
+bool Object::IsCode() {
+  return Object::IsHeapObject()
+    && HeapObject::cast(this)->map()->instance_type() == CODE_TYPE;
+}
+
+
+bool Object::IsOddball() {
+  return Object::IsHeapObject()
+    && HeapObject::cast(this)->map()->instance_type() == ODDBALL_TYPE;
+}
+
+
+bool Object::IsJSGlobalPropertyCell() {
+  return Object::IsHeapObject()
+      && HeapObject::cast(this)->map()->instance_type()
+      == JS_GLOBAL_PROPERTY_CELL_TYPE;
+}
+
+
+bool Object::IsSharedFunctionInfo() {
+  return Object::IsHeapObject() &&
+      (HeapObject::cast(this)->map()->instance_type() ==
+       SHARED_FUNCTION_INFO_TYPE);
+}
+
+
+bool Object::IsJSValue() {
+  return Object::IsHeapObject()
+    && HeapObject::cast(this)->map()->instance_type() == JS_VALUE_TYPE;
+}
+
+
+bool Object::IsStringWrapper() {
+  return IsJSValue() && JSValue::cast(this)->value()->IsString();
+}
+
+
+bool Object::IsProxy() {
+  return Object::IsHeapObject()
+    && HeapObject::cast(this)->map()->instance_type() == PROXY_TYPE;
+}
+
+
+bool Object::IsBoolean() {
+  return IsTrue() || IsFalse();
+}
+
+
+bool Object::IsJSArray() {
+  return Object::IsHeapObject()
+    && HeapObject::cast(this)->map()->instance_type() == JS_ARRAY_TYPE;
+}
+
+
+bool Object::IsJSRegExp() {
+  return Object::IsHeapObject()
+    && HeapObject::cast(this)->map()->instance_type() == JS_REGEXP_TYPE;
+}
+
+
+template <> inline bool Is<JSArray>(Object* obj) {
+  return obj->IsJSArray();
+}
+
+
+bool Object::IsHashTable() {
+  return Object::IsHeapObject()
+    && HeapObject::cast(this)->map() == Heap::hash_table_map();
+}
+
+
+bool Object::IsDictionary() {
+  return IsHashTable() && this != Heap::symbol_table();
+}
+
+
+bool Object::IsSymbolTable() {
+  return IsHashTable() && this == Heap::raw_unchecked_symbol_table();
+}
+
+
+bool Object::IsCompilationCacheTable() {
+  return IsHashTable();
+}
+
+
+bool Object::IsMapCache() {
+  return IsHashTable();
+}
+
+
+bool Object::IsPrimitive() {
+  return IsOddball() || IsNumber() || IsString();
+}
+
+
+bool Object::IsJSGlobalProxy() {
+  bool result = IsHeapObject() &&
+                (HeapObject::cast(this)->map()->instance_type() ==
+                 JS_GLOBAL_PROXY_TYPE);
+  ASSERT(!result || IsAccessCheckNeeded());
+  return result;
+}
+
+
+bool Object::IsGlobalObject() {
+  if (!IsHeapObject()) return false;
+
+  InstanceType type = HeapObject::cast(this)->map()->instance_type();
+  return type == JS_GLOBAL_OBJECT_TYPE ||
+         type == JS_BUILTINS_OBJECT_TYPE;
+}
+
+
+bool Object::IsJSGlobalObject() {
+  return IsHeapObject() &&
+      (HeapObject::cast(this)->map()->instance_type() ==
+       JS_GLOBAL_OBJECT_TYPE);
+}
+
+
+bool Object::IsJSBuiltinsObject() {
+  return IsHeapObject() &&
+      (HeapObject::cast(this)->map()->instance_type() ==
+       JS_BUILTINS_OBJECT_TYPE);
+}
+
+
+bool Object::IsUndetectableObject() {
+  return IsHeapObject()
+    && HeapObject::cast(this)->map()->is_undetectable();
+}
+
+
+bool Object::IsAccessCheckNeeded() {
+  return IsHeapObject()
+    && HeapObject::cast(this)->map()->is_access_check_needed();
+}
+
+
+bool Object::IsStruct() {
+  if (!IsHeapObject()) return false;
+  switch (HeapObject::cast(this)->map()->instance_type()) {
+#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return true;
+  STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+    default: return false;
+  }
+}
+
+
+#define MAKE_STRUCT_PREDICATE(NAME, Name, name)                  \
+  bool Object::Is##Name() {                                      \
+    return Object::IsHeapObject()                                \
+      && HeapObject::cast(this)->map()->instance_type() == NAME##_TYPE; \
+  }
+  STRUCT_LIST(MAKE_STRUCT_PREDICATE)
+#undef MAKE_STRUCT_PREDICATE
+
+
+bool Object::IsUndefined() {
+  return this == Heap::undefined_value();
+}
+
+
+bool Object::IsTheHole() {
+  return this == Heap::the_hole_value();
+}
+
+
+bool Object::IsNull() {
+  return this == Heap::null_value();
+}
+
+
+bool Object::IsTrue() {
+  return this == Heap::true_value();
+}
+
+
+bool Object::IsFalse() {
+  return this == Heap::false_value();
+}
+
+
+double Object::Number() {
+  ASSERT(IsNumber());
+  return IsSmi()
+    ? static_cast<double>(reinterpret_cast<Smi*>(this)->value())
+    : reinterpret_cast<HeapNumber*>(this)->value();
+}
+
+
+
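+// Returns this object unchanged if it is already a smi, converts a heap
+// number whose value is an integer in smi range, and otherwise returns
+// Failure::Exception().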
+Object* Object::ToSmi() {
+  if (IsSmi()) return this;
+  if (IsHeapNumber()) {
+    double value = HeapNumber::cast(this)->value();
+    int int_value = FastD2I(value);
+    if (value == FastI2D(int_value) && Smi::IsValid(int_value)) {
+      return Smi::FromInt(int_value);
+    }
+  }
+  return Failure::Exception();
+}
+
+
+bool Object::HasSpecificClassOf(String* name) {
+  return this->IsJSObject() && (JSObject::cast(this)->class_name() == name);
+}
+
+
+Object* Object::GetElement(uint32_t index) {
+  return GetElementWithReceiver(this, index);
+}
+
+
+Object* Object::GetProperty(String* key) {
+  PropertyAttributes attributes;
+  return GetPropertyWithReceiver(this, key, &attributes);
+}
+
+
+Object* Object::GetProperty(String* key, PropertyAttributes* attributes) {
+  return GetPropertyWithReceiver(this, key, attributes);
+}
+
+
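+// FIELD_ADDR compensates for the heap object tag: a HeapObject pointer is
+// the object's address plus kHeapObjectTag, so subtracting the tag and
+// adding the field offset yields the raw address of the field.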
+#define FIELD_ADDR(p, offset) \
+  (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag)
+
+#define READ_FIELD(p, offset) \
+  (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)))
+
+#define WRITE_FIELD(p, offset, value) \
+  (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
+
+
+#define WRITE_BARRIER(object, offset) \
+  Heap::RecordWrite(object->address(), offset);
+
+// CONDITIONAL_WRITE_BARRIER must be issued after the actual
+// write due to the assert validating the written value.
+#define CONDITIONAL_WRITE_BARRIER(object, offset, mode) \
+  if (mode == UPDATE_WRITE_BARRIER) { \
+    Heap::RecordWrite(object->address(), offset); \
+  } else { \
+    ASSERT(mode == SKIP_WRITE_BARRIER); \
+    ASSERT(Heap::InNewSpace(object) || \
+           !Heap::InNewSpace(READ_FIELD(object, offset))); \
+  }
+
+#define READ_DOUBLE_FIELD(p, offset) \
+  (*reinterpret_cast<double*>(FIELD_ADDR(p, offset)))
+
+#define WRITE_DOUBLE_FIELD(p, offset, value) \
+  (*reinterpret_cast<double*>(FIELD_ADDR(p, offset)) = value)
+
+#define READ_INT_FIELD(p, offset) \
+  (*reinterpret_cast<int*>(FIELD_ADDR(p, offset)))
+
+#define WRITE_INT_FIELD(p, offset, value) \
+  (*reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value)
+
+#define READ_INTPTR_FIELD(p, offset) \
+  (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)))
+
+#define WRITE_INTPTR_FIELD(p, offset, value) \
+  (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)) = value)
+
+#define READ_UINT32_FIELD(p, offset) \
+  (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)))
+
+#define WRITE_UINT32_FIELD(p, offset, value) \
+  (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value)
+
+#define READ_SHORT_FIELD(p, offset) \
+  (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)))
+
+#define WRITE_SHORT_FIELD(p, offset, value) \
+  (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)) = value)
+
+#define READ_BYTE_FIELD(p, offset) \
+  (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)))
+
+#define WRITE_BYTE_FIELD(p, offset, value) \
+  (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value)
+
+
+Object** HeapObject::RawField(HeapObject* obj, int byte_offset) {
+  return &READ_FIELD(obj, byte_offset);
+}
+
+
+int Smi::value() {
+  return Internals::SmiValue(this);
+}
+
+
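+// A smi stores its value directly in the tagged word: the value is shifted
+// left by kSmiTagSize and the tag is or'ed into the low bits. With
+// kSmiTagSize == 1 and kSmiTag == 0, Smi::FromInt(5) yields the word 0xA.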
+Smi* Smi::FromInt(int value) {
+  ASSERT(Smi::IsValid(value));
+  intptr_t tagged_value =
+      (static_cast<intptr_t>(value) << kSmiTagSize) | kSmiTag;
+  return reinterpret_cast<Smi*>(tagged_value);
+}
+
+
+Smi* Smi::FromIntptr(intptr_t value) {
+  ASSERT(Smi::IsValid(value));
+  return reinterpret_cast<Smi*>((value << kSmiTagSize) | kSmiTag);
+}
+
+
+Failure::Type Failure::type() const {
+  return static_cast<Type>(value() & kFailureTypeTagMask);
+}
+
+
+bool Failure::IsInternalError() const {
+  return type() == INTERNAL_ERROR;
+}
+
+
+bool Failure::IsOutOfMemoryException() const {
+  return type() == OUT_OF_MEMORY_EXCEPTION;
+}
+
+
+int Failure::requested() const {
+  const int kShiftBits =
+      kFailureTypeTagSize + kSpaceTagSize - kObjectAlignmentBits;
+  STATIC_ASSERT(kShiftBits >= 0);
+  ASSERT(type() == RETRY_AFTER_GC);
+  return value() >> kShiftBits;
+}
+
+
+AllocationSpace Failure::allocation_space() const {
+  ASSERT_EQ(RETRY_AFTER_GC, type());
+  return static_cast<AllocationSpace>((value() >> kFailureTypeTagSize)
+                                      & kSpaceTagMask);
+}
+
+
+Failure* Failure::InternalError() {
+  return Construct(INTERNAL_ERROR);
+}
+
+
+Failure* Failure::Exception() {
+  return Construct(EXCEPTION);
+}
+
+
+Failure* Failure::OutOfMemoryException() {
+  return Construct(OUT_OF_MEMORY_EXCEPTION);
+}
+
+
+int Failure::value() const {
+  return static_cast<int>(reinterpret_cast<intptr_t>(this) >> kFailureTagSize);
+}
+
+
+Failure* Failure::RetryAfterGC(int requested_bytes) {
+  // Assert that the space encoding fits in the three bits allotted for it.
+  ASSERT((LAST_SPACE & ~kSpaceTagMask) == 0);
+  int requested = requested_bytes >> kObjectAlignmentBits;
+  int value = (requested << kSpaceTagSize) | NEW_SPACE;
+  ASSERT(value >> kSpaceTagSize == requested);
+  ASSERT(Smi::IsValid(value));
+  ASSERT(value == ((value << kFailureTypeTagSize) >> kFailureTypeTagSize));
+  ASSERT(Smi::IsValid(value << kFailureTypeTagSize));
+  return Construct(RETRY_AFTER_GC, value);
+}
+
+
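+// A failure is encoded directly in the tagged word. From least- to
+// most-significant bits it holds the failure tag, the failure type and a
+// type-specific payload:
+//   [ payload | type (kFailureTypeTagSize bits) | kFailureTag ]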
+Failure* Failure::Construct(Type type, int value) {
+  int info = (value << kFailureTypeTagSize) | type;
+  ASSERT(((info << kFailureTagSize) >> kFailureTagSize) == info);
+  return reinterpret_cast<Failure*>(
+      (static_cast<intptr_t>(info) << kFailureTagSize) | kFailureTag);
+}
+
+
+bool Smi::IsValid(intptr_t value) {
+#ifdef DEBUG
+  bool in_range = (value >= kMinValue) && (value <= kMaxValue);
+#endif
+  // To be representable as a tagged small integer, the two
+  // most-significant bits of 'value' must be either 00 or 11 due to
+  // sign-extension. To check this we add 01 to the two
+  // most-significant bits, and check if the most-significant bit is 0.
+  //
+  // CAUTION: The original code below:
+  // bool result = ((value + 0x40000000) & 0x80000000) == 0;
+  // may lead to incorrect results according to the C language spec, and
+  // in fact doesn't work correctly with gcc 4.1.1 in some cases: the
+  // compiler may produce undefined results in case of signed integer
+  // overflow. The computation must be done with unsigned ints.
+  bool result =
+      ((static_cast<unsigned int>(value) + 0x40000000U) & 0x80000000U) == 0;
+  ASSERT(result == in_range);
+  return result;
+}
+
+
+bool Smi::IsIntptrValid(intptr_t value) {
+#ifdef DEBUG
+  bool in_range = (value >= kMinValue) && (value <= kMaxValue);
+#endif
+  // See Smi::IsValid(int) for description.
+  bool result =
+      ((static_cast<uintptr_t>(value) + 0x40000000U) < 0x80000000U);
+  ASSERT(result == in_range);
+  return result;
+}
+
+
+MapWord MapWord::FromMap(Map* map) {
+  return MapWord(reinterpret_cast<uintptr_t>(map));
+}
+
+
+Map* MapWord::ToMap() {
+  return reinterpret_cast<Map*>(value_);
+}
+
+
+bool MapWord::IsForwardingAddress() {
+  return HAS_SMI_TAG(reinterpret_cast<Object*>(value_));
+}
+
+
+MapWord MapWord::FromForwardingAddress(HeapObject* object) {
+  Address raw = reinterpret_cast<Address>(object) - kHeapObjectTag;
+  return MapWord(reinterpret_cast<uintptr_t>(raw));
+}
+
+
+HeapObject* MapWord::ToForwardingAddress() {
+  ASSERT(IsForwardingAddress());
+  return HeapObject::FromAddress(reinterpret_cast<Address>(value_));
+}
+
+
+bool MapWord::IsMarked() {
+  return (value_ & kMarkingMask) == 0;
+}
+
+
+void MapWord::SetMark() {
+  value_ &= ~kMarkingMask;
+}
+
+
+void MapWord::ClearMark() {
+  value_ |= kMarkingMask;
+}
+
+
+bool MapWord::IsOverflowed() {
+  return (value_ & kOverflowMask) != 0;
+}
+
+
+void MapWord::SetOverflow() {
+  value_ |= kOverflowMask;
+}
+
+
+void MapWord::ClearOverflow() {
+  value_ &= ~kOverflowMask;
+}
+
+
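+// During a compacting collection the map word is reused to pack the map's
+// location (page index and offset within that page) together with the
+// object's offset among the live bytes on its own page; DecodeMapAddress
+// and DecodeOffset below recover the individual pieces.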
+MapWord MapWord::EncodeAddress(Address map_address, int offset) {
+  // Offset is the distance in live bytes from the first live object in the
+  // same page. The offset between two objects in the same page should not
+  // exceed the object area size of a page.
+  ASSERT(0 <= offset && offset < Page::kObjectAreaSize);
+
+  int compact_offset = offset >> kObjectAlignmentBits;
+  ASSERT(compact_offset < (1 << kForwardingOffsetBits));
+
+  Page* map_page = Page::FromAddress(map_address);
+  ASSERT_MAP_PAGE_INDEX(map_page->mc_page_index);
+
+  int map_page_offset =
+      map_page->Offset(map_address) >> kObjectAlignmentBits;
+
+  uintptr_t encoding =
+      (compact_offset << kForwardingOffsetShift) |
+      (map_page_offset << kMapPageOffsetShift) |
+      (map_page->mc_page_index << kMapPageIndexShift);
+  return MapWord(encoding);
+}
+
+
+Address MapWord::DecodeMapAddress(MapSpace* map_space) {
+  int map_page_index =
+      static_cast<int>((value_ & kMapPageIndexMask) >> kMapPageIndexShift);
+  ASSERT_MAP_PAGE_INDEX(map_page_index);
+
+  int map_page_offset = static_cast<int>(
+      ((value_ & kMapPageOffsetMask) >> kMapPageOffsetShift)
+      << kObjectAlignmentBits);
+
+  return (map_space->PageAddress(map_page_index) + map_page_offset);
+}
+
+
+int MapWord::DecodeOffset() {
+  // The offset field is represented in the kForwardingOffsetBits
+  // most-significant bits.
+  int offset = (value_ >> kForwardingOffsetShift) << kObjectAlignmentBits;
+  ASSERT(0 <= offset && offset < Page::kObjectAreaSize);
+  return offset;
+}
+
+
+MapWord MapWord::FromEncodedAddress(Address address) {
+  return MapWord(reinterpret_cast<uintptr_t>(address));
+}
+
+
+Address MapWord::ToEncodedAddress() {
+  return reinterpret_cast<Address>(value_);
+}
+
+
+#ifdef DEBUG
+void HeapObject::VerifyObjectField(int offset) {
+  VerifyPointer(READ_FIELD(this, offset));
+}
+#endif
+
+
+Map* HeapObject::map() {
+  return map_word().ToMap();
+}
+
+
+void HeapObject::set_map(Map* value) {
+  set_map_word(MapWord::FromMap(value));
+}
+
+
+MapWord HeapObject::map_word() {
+  return MapWord(reinterpret_cast<uintptr_t>(READ_FIELD(this, kMapOffset)));
+}
+
+
+void HeapObject::set_map_word(MapWord map_word) {
+  // WRITE_FIELD does not update the remembered set, but there is no need
+  // here.
+  WRITE_FIELD(this, kMapOffset, reinterpret_cast<Object*>(map_word.value_));
+}
+
+
+HeapObject* HeapObject::FromAddress(Address address) {
+  ASSERT_TAG_ALIGNED(address);
+  return reinterpret_cast<HeapObject*>(address + kHeapObjectTag);
+}
+
+
+Address HeapObject::address() {
+  return reinterpret_cast<Address>(this) - kHeapObjectTag;
+}
+
+
+int HeapObject::Size() {
+  return SizeFromMap(map());
+}
+
+
+void HeapObject::IteratePointers(ObjectVisitor* v, int start, int end) {
+  v->VisitPointers(reinterpret_cast<Object**>(FIELD_ADDR(this, start)),
+                   reinterpret_cast<Object**>(FIELD_ADDR(this, end)));
+}
+
+
+void HeapObject::IteratePointer(ObjectVisitor* v, int offset) {
+  v->VisitPointer(reinterpret_cast<Object**>(FIELD_ADDR(this, offset)));
+}
+
+
+bool HeapObject::IsMarked() {
+  return map_word().IsMarked();
+}
+
+
+void HeapObject::SetMark() {
+  ASSERT(!IsMarked());
+  MapWord first_word = map_word();
+  first_word.SetMark();
+  set_map_word(first_word);
+}
+
+
+void HeapObject::ClearMark() {
+  ASSERT(IsMarked());
+  MapWord first_word = map_word();
+  first_word.ClearMark();
+  set_map_word(first_word);
+}
+
+
+bool HeapObject::IsOverflowed() {
+  return map_word().IsOverflowed();
+}
+
+
+void HeapObject::SetOverflow() {
+  MapWord first_word = map_word();
+  first_word.SetOverflow();
+  set_map_word(first_word);
+}
+
+
+void HeapObject::ClearOverflow() {
+  ASSERT(IsOverflowed());
+  MapWord first_word = map_word();
+  first_word.ClearOverflow();
+  set_map_word(first_word);
+}
+
+
+double HeapNumber::value() {
+  return READ_DOUBLE_FIELD(this, kValueOffset);
+}
+
+
+void HeapNumber::set_value(double value) {
+  WRITE_DOUBLE_FIELD(this, kValueOffset, value);
+}
+
+
+ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
+
+
+Array* JSObject::elements() {
+  Object* array = READ_FIELD(this, kElementsOffset);
+  // In the assert below Dictionary is covered under FixedArray.
+  ASSERT(array->IsFixedArray() || array->IsPixelArray());
+  return reinterpret_cast<Array*>(array);
+}
+
+
+void JSObject::set_elements(Array* value, WriteBarrierMode mode) {
+  // In the assert below Dictionary is covered under FixedArray.
+  ASSERT(value->IsFixedArray() || value->IsPixelArray());
+  WRITE_FIELD(this, kElementsOffset, value);
+  CONDITIONAL_WRITE_BARRIER(this, kElementsOffset, mode);
+}
+
+
+void JSObject::initialize_properties() {
+  ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
+  WRITE_FIELD(this, kPropertiesOffset, Heap::empty_fixed_array());
+}
+
+
+void JSObject::initialize_elements() {
+  ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
+  WRITE_FIELD(this, kElementsOffset, Heap::empty_fixed_array());
+}
+
+
+ACCESSORS(Oddball, to_string, String, kToStringOffset)
+ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
+
+
+Object* JSGlobalPropertyCell::value() {
+  return READ_FIELD(this, kValueOffset);
+}
+
+
+void JSGlobalPropertyCell::set_value(Object* val, WriteBarrierMode ignored) {
+  // The write barrier is not used for global property cells.
+  ASSERT(!val->IsJSGlobalPropertyCell());
+  WRITE_FIELD(this, kValueOffset, val);
+}
+
+
+int JSObject::GetHeaderSize() {
+  InstanceType type = map()->instance_type();
+  // Check for the most common kind of JavaScript object before
+  // falling into the generic switch. This speeds up the internal
+  // field operations considerably on average.
+  if (type == JS_OBJECT_TYPE) return JSObject::kHeaderSize;
+  switch (type) {
+    case JS_GLOBAL_PROXY_TYPE:
+      return JSGlobalProxy::kSize;
+    case JS_GLOBAL_OBJECT_TYPE:
+      return JSGlobalObject::kSize;
+    case JS_BUILTINS_OBJECT_TYPE:
+      return JSBuiltinsObject::kSize;
+    case JS_FUNCTION_TYPE:
+      return JSFunction::kSize;
+    case JS_VALUE_TYPE:
+      return JSValue::kSize;
+    case JS_ARRAY_TYPE:
+      return JSValue::kSize;
+    case JS_REGEXP_TYPE:
+      return JSValue::kSize;
+    case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+      return JSObject::kHeaderSize;
+    default:
+      UNREACHABLE();
+      return 0;
+  }
+}
+
+
+int JSObject::GetInternalFieldCount() {
+  ASSERT(1 << kPointerSizeLog2 == kPointerSize);
+  // Make sure to adjust for the number of in-object properties. These
+  // properties do contribute to the size, but are not internal fields.
+  return ((Size() - GetHeaderSize()) >> kPointerSizeLog2) -
+         map()->inobject_properties();
+}
+
+
+Object* JSObject::GetInternalField(int index) {
+  ASSERT(index < GetInternalFieldCount() && index >= 0);
+  // Internal fields follow immediately after the header, whereas in-object
+  // properties are at the end of the object. Therefore there is no need
+  // to adjust the index here.
+  return READ_FIELD(this, GetHeaderSize() + (kPointerSize * index));
+}
+
+
+void JSObject::SetInternalField(int index, Object* value) {
+  ASSERT(index < GetInternalFieldCount() && index >= 0);
+  // Internal fields follow immediately after the header, whereas in-object
+  // properties are at the end of the object. Therefore there is no need
+  // to adjust the index here.
+  int offset = GetHeaderSize() + (kPointerSize * index);
+  WRITE_FIELD(this, offset, value);
+  WRITE_BARRIER(this, offset);
+}
+
+
+// Access fast-case object properties at index. The use of these routines
+// is needed to correctly distinguish between properties stored in-object and
+// properties stored in the properties array.
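+// After subtracting the in-object property count, a negative index selects
+// an in-object slot (addressed back from the end of the object) and a
+// non-negative index selects a slot in the properties array.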
+Object* JSObject::FastPropertyAt(int index) {
+  // Adjust for the number of properties stored in the object.
+  index -= map()->inobject_properties();
+  if (index < 0) {
+    int offset = map()->instance_size() + (index * kPointerSize);
+    return READ_FIELD(this, offset);
+  } else {
+    ASSERT(index < properties()->length());
+    return properties()->get(index);
+  }
+}
+
+
+Object* JSObject::FastPropertyAtPut(int index, Object* value) {
+  // Adjust for the number of properties stored in the object.
+  index -= map()->inobject_properties();
+  if (index < 0) {
+    int offset = map()->instance_size() + (index * kPointerSize);
+    WRITE_FIELD(this, offset, value);
+    WRITE_BARRIER(this, offset);
+  } else {
+    ASSERT(index < properties()->length());
+    properties()->set(index, value);
+  }
+  return value;
+}
+
+
+Object* JSObject::InObjectPropertyAt(int index) {
+  // Adjust for the number of properties stored in the object.
+  index -= map()->inobject_properties();
+  ASSERT(index < 0);
+  int offset = map()->instance_size() + (index * kPointerSize);
+  return READ_FIELD(this, offset);
+}
+
+
+Object* JSObject::InObjectPropertyAtPut(int index,
+                                        Object* value,
+                                        WriteBarrierMode mode) {
+  // Adjust for the number of properties stored in the object.
+  index -= map()->inobject_properties();
+  ASSERT(index < 0);
+  int offset = map()->instance_size() + (index * kPointerSize);
+  WRITE_FIELD(this, offset, value);
+  CONDITIONAL_WRITE_BARRIER(this, offset, mode);
+  return value;
+}
+
+
+
+void JSObject::InitializeBody(int object_size) {
+  Object* value = Heap::undefined_value();
+  for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
+    WRITE_FIELD(this, offset, value);
+  }
+}
+
+
+void Struct::InitializeBody(int object_size) {
+  Object* value = Heap::undefined_value();
+  for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
+    WRITE_FIELD(this, offset, value);
+  }
+}
+
+
+bool JSObject::HasFastProperties() {
+  return !properties()->IsDictionary();
+}
+
+
+bool Array::IndexFromObject(Object* object, uint32_t* index) {
+  if (object->IsSmi()) {
+    int value = Smi::cast(object)->value();
+    if (value < 0) return false;
+    *index = value;
+    return true;
+  }
+  if (object->IsHeapNumber()) {
+    double value = HeapNumber::cast(object)->value();
+    uint32_t uint_value = static_cast<uint32_t>(value);
+    if (value == static_cast<double>(uint_value)) {
+      *index = uint_value;
+      return true;
+    }
+  }
+  return false;
+}
+
+
+bool Object::IsStringObjectWithCharacterAt(uint32_t index) {
+  if (!this->IsJSValue()) return false;
+
+  JSValue* js_value = JSValue::cast(this);
+  if (!js_value->value()->IsString()) return false;
+
+  String* str = String::cast(js_value->value());
+  if (index >= static_cast<uint32_t>(str->length())) return false;
+
+  return true;
+}
+
+
+Object* FixedArray::get(int index) {
+  ASSERT(index >= 0 && index < this->length());
+  return READ_FIELD(this, kHeaderSize + index * kPointerSize);
+}
+
+
+void FixedArray::set(int index, Smi* value) {
+  ASSERT(reinterpret_cast<Object*>(value)->IsSmi());
+  int offset = kHeaderSize + index * kPointerSize;
+  WRITE_FIELD(this, offset, value);
+}
+
+
+void FixedArray::set(int index, Object* value) {
+  ASSERT(index >= 0 && index < this->length());
+  int offset = kHeaderSize + index * kPointerSize;
+  WRITE_FIELD(this, offset, value);
+  WRITE_BARRIER(this, offset);
+}
+
+
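+// The write barrier only has to record stores that may create pointers from
+// old space into new space, so stores into objects that are themselves in
+// new space can safely skip it.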
+WriteBarrierMode HeapObject::GetWriteBarrierMode() {
+  if (Heap::InNewSpace(this)) return SKIP_WRITE_BARRIER;
+  return UPDATE_WRITE_BARRIER;
+}
+
+
+void FixedArray::set(int index,
+                     Object* value,
+                     WriteBarrierMode mode) {
+  ASSERT(index >= 0 && index < this->length());
+  int offset = kHeaderSize + index * kPointerSize;
+  WRITE_FIELD(this, offset, value);
+  CONDITIONAL_WRITE_BARRIER(this, offset, mode);
+}
+
+
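+// fast_set stores without the write barrier; callers such as
+// DescriptorArray::Set first assert that the stored values are not in
+// new space.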
+void FixedArray::fast_set(FixedArray* array, int index, Object* value) {
+  ASSERT(index >= 0 && index < array->length());
+  WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
+}
+
+
+void FixedArray::set_undefined(int index) {
+  ASSERT(index >= 0 && index < this->length());
+  ASSERT(!Heap::InNewSpace(Heap::undefined_value()));
+  WRITE_FIELD(this, kHeaderSize + index * kPointerSize,
+              Heap::undefined_value());
+}
+
+
+void FixedArray::set_null(int index) {
+  ASSERT(index >= 0 && index < this->length());
+  ASSERT(!Heap::InNewSpace(Heap::null_value()));
+  WRITE_FIELD(this, kHeaderSize + index * kPointerSize, Heap::null_value());
+}
+
+
+void FixedArray::set_the_hole(int index) {
+  ASSERT(index >= 0 && index < this->length());
+  ASSERT(!Heap::InNewSpace(Heap::the_hole_value()));
+  WRITE_FIELD(this, kHeaderSize + index * kPointerSize, Heap::the_hole_value());
+}
+
+
+bool DescriptorArray::IsEmpty() {
+  ASSERT(this == Heap::empty_descriptor_array() ||
+         this->length() > 2);
+  return this == Heap::empty_descriptor_array();
+}
+
+
+void DescriptorArray::fast_swap(FixedArray* array, int first, int second) {
+  Object* tmp = array->get(first);
+  fast_set(array, first, array->get(second));
+  fast_set(array, second, tmp);
+}
+
+
+int DescriptorArray::Search(String* name) {
+  SLOW_ASSERT(IsSortedNoDuplicates());
+
+  // Check for empty descriptor array.
+  int nof = number_of_descriptors();
+  if (nof == 0) return kNotFound;
+
+  // Fast case: do linear search for small arrays.
+  const int kMaxElementsForLinearSearch = 8;
+  if (StringShape(name).IsSymbol() && nof < kMaxElementsForLinearSearch) {
+    return LinearSearch(name, nof);
+  }
+
+  // Slow case: perform binary search.
+  return BinarySearch(name, 0, nof - 1);
+}
+
+
+String* DescriptorArray::GetKey(int descriptor_number) {
+  ASSERT(descriptor_number < number_of_descriptors());
+  return String::cast(get(ToKeyIndex(descriptor_number)));
+}
+
+
+Object* DescriptorArray::GetValue(int descriptor_number) {
+  ASSERT(descriptor_number < number_of_descriptors());
+  return GetContentArray()->get(ToValueIndex(descriptor_number));
+}
+
+
+Smi* DescriptorArray::GetDetails(int descriptor_number) {
+  ASSERT(descriptor_number < number_of_descriptors());
+  return Smi::cast(GetContentArray()->get(ToDetailsIndex(descriptor_number)));
+}
+
+
+PropertyType DescriptorArray::GetType(int descriptor_number) {
+  ASSERT(descriptor_number < number_of_descriptors());
+  return PropertyDetails(GetDetails(descriptor_number)).type();
+}
+
+
+int DescriptorArray::GetFieldIndex(int descriptor_number) {
+  return Descriptor::IndexFromValue(GetValue(descriptor_number));
+}
+
+
+JSFunction* DescriptorArray::GetConstantFunction(int descriptor_number) {
+  return JSFunction::cast(GetValue(descriptor_number));
+}
+
+
+Object* DescriptorArray::GetCallbacksObject(int descriptor_number) {
+  ASSERT(GetType(descriptor_number) == CALLBACKS);
+  return GetValue(descriptor_number);
+}
+
+
+AccessorDescriptor* DescriptorArray::GetCallbacks(int descriptor_number) {
+  ASSERT(GetType(descriptor_number) == CALLBACKS);
+  Proxy* p = Proxy::cast(GetCallbacksObject(descriptor_number));
+  return reinterpret_cast<AccessorDescriptor*>(p->proxy());
+}
+
+
+bool DescriptorArray::IsProperty(int descriptor_number) {
+  return GetType(descriptor_number) < FIRST_PHANTOM_PROPERTY_TYPE;
+}
+
+
+bool DescriptorArray::IsTransition(int descriptor_number) {
+  PropertyType t = GetType(descriptor_number);
+  return t == MAP_TRANSITION || t == CONSTANT_TRANSITION;
+}
+
+
+bool DescriptorArray::IsNullDescriptor(int descriptor_number) {
+  return GetType(descriptor_number) == NULL_DESCRIPTOR;
+}
+
+
+bool DescriptorArray::IsDontEnum(int descriptor_number) {
+  return PropertyDetails(GetDetails(descriptor_number)).IsDontEnum();
+}
+
+
+void DescriptorArray::Get(int descriptor_number, Descriptor* desc) {
+  desc->Init(GetKey(descriptor_number),
+             GetValue(descriptor_number),
+             GetDetails(descriptor_number));
+}
+
+
+void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
+  // Range check.
+  ASSERT(descriptor_number < number_of_descriptors());
+
+  // Make sure none of the elements in desc are in new space.
+  ASSERT(!Heap::InNewSpace(desc->GetKey()));
+  ASSERT(!Heap::InNewSpace(desc->GetValue()));
+
+  fast_set(this, ToKeyIndex(descriptor_number), desc->GetKey());
+  FixedArray* content_array = GetContentArray();
+  fast_set(content_array, ToValueIndex(descriptor_number), desc->GetValue());
+  fast_set(content_array, ToDetailsIndex(descriptor_number),
+           desc->GetDetails().AsSmi());
+}
+
+
+void DescriptorArray::CopyFrom(int index, DescriptorArray* src, int src_index) {
+  Descriptor desc;
+  src->Get(src_index, &desc);
+  Set(index, &desc);
+}
+
+
+void DescriptorArray::Swap(int first, int second) {
+  fast_swap(this, ToKeyIndex(first), ToKeyIndex(second));
+  FixedArray* content_array = GetContentArray();
+  fast_swap(content_array, ToValueIndex(first), ToValueIndex(second));
+  fast_swap(content_array, ToDetailsIndex(first), ToDetailsIndex(second));
+}
+
+
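+// The entry at kMaxNumberKeyIndex does double duty: its low bits hold the
+// requires-slow-elements flag and the remaining bits hold the largest
+// number key added so far, shifted by kRequiresSlowElementsTagSize.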
+bool NumberDictionary::requires_slow_elements() {
+  Object* max_index_object = get(kMaxNumberKeyIndex);
+  if (!max_index_object->IsSmi()) return false;
+  return 0 !=
+      (Smi::cast(max_index_object)->value() & kRequiresSlowElementsMask);
+}
+
+uint32_t NumberDictionary::max_number_key() {
+  ASSERT(!requires_slow_elements());
+  Object* max_index_object = get(kMaxNumberKeyIndex);
+  if (!max_index_object->IsSmi()) return 0;
+  uint32_t value = static_cast<uint32_t>(Smi::cast(max_index_object)->value());
+  return value >> kRequiresSlowElementsTagSize;
+}
+
+void NumberDictionary::set_requires_slow_elements() {
+  set(kMaxNumberKeyIndex,
+      Smi::FromInt(kRequiresSlowElementsMask),
+      SKIP_WRITE_BARRIER);
+}
+
+
+// ------------------------------------
+// Cast operations
+
+
+CAST_ACCESSOR(FixedArray)
+CAST_ACCESSOR(DescriptorArray)
+CAST_ACCESSOR(SymbolTable)
+CAST_ACCESSOR(CompilationCacheTable)
+CAST_ACCESSOR(MapCache)
+CAST_ACCESSOR(String)
+CAST_ACCESSOR(SeqString)
+CAST_ACCESSOR(SeqAsciiString)
+CAST_ACCESSOR(SeqTwoByteString)
+CAST_ACCESSOR(ConsString)
+CAST_ACCESSOR(SlicedString)
+CAST_ACCESSOR(ExternalString)
+CAST_ACCESSOR(ExternalAsciiString)
+CAST_ACCESSOR(ExternalTwoByteString)
+CAST_ACCESSOR(JSObject)
+CAST_ACCESSOR(Smi)
+CAST_ACCESSOR(Failure)
+CAST_ACCESSOR(HeapObject)
+CAST_ACCESSOR(HeapNumber)
+CAST_ACCESSOR(Oddball)
+CAST_ACCESSOR(JSGlobalPropertyCell)
+CAST_ACCESSOR(SharedFunctionInfo)
+CAST_ACCESSOR(Map)
+CAST_ACCESSOR(JSFunction)
+CAST_ACCESSOR(GlobalObject)
+CAST_ACCESSOR(JSGlobalProxy)
+CAST_ACCESSOR(JSGlobalObject)
+CAST_ACCESSOR(JSBuiltinsObject)
+CAST_ACCESSOR(Code)
+CAST_ACCESSOR(JSArray)
+CAST_ACCESSOR(JSRegExp)
+CAST_ACCESSOR(Proxy)
+CAST_ACCESSOR(ByteArray)
+CAST_ACCESSOR(PixelArray)
+CAST_ACCESSOR(Struct)
+
+
+#define MAKE_STRUCT_CAST(NAME, Name, name) CAST_ACCESSOR(Name)
+  STRUCT_LIST(MAKE_STRUCT_CAST)
+#undef MAKE_STRUCT_CAST
+
+
+template <typename Shape, typename Key>
+HashTable<Shape, Key>* HashTable<Shape, Key>::cast(Object* obj) {
+  ASSERT(obj->IsHashTable());
+  return reinterpret_cast<HashTable*>(obj);
+}
+
+
+INT_ACCESSORS(Array, length, kLengthOffset)
+
+
+bool String::Equals(String* other) {
+  if (other == this) return true;
+  if (StringShape(this).IsSymbol() && StringShape(other).IsSymbol()) {
+    return false;
+  }
+  return SlowEquals(other);
+}
+
+
+int String::length() {
+  uint32_t len = READ_INT_FIELD(this, kLengthOffset);
+
+  ASSERT(kShortStringTag + kLongLengthShift == kShortLengthShift);
+  ASSERT(kMediumStringTag + kLongLengthShift == kMediumLengthShift);
+  ASSERT(kLongStringTag == 0);
+
+  return len >> (StringShape(this).size_tag() + kLongLengthShift);
+}
+
+
+void String::set_length(int value) {
+  ASSERT(kShortStringTag + kLongLengthShift == kShortLengthShift);
+  ASSERT(kMediumStringTag + kLongLengthShift == kMediumLengthShift);
+  ASSERT(kLongStringTag == 0);
+
+  WRITE_INT_FIELD(this,
+                  kLengthOffset,
+                  value << (StringShape(this).size_tag() + kLongLengthShift));
+}
+
+
+uint32_t String::length_field() {
+  return READ_UINT32_FIELD(this, kLengthOffset);
+}
+
+
+void String::set_length_field(uint32_t value) {
+  WRITE_UINT32_FIELD(this, kLengthOffset, value);
+}
+
+
+Object* String::TryFlattenIfNotFlat() {
+  // We don't need to flatten strings that are already flat.  Since this code
+  // is inlined, it can be helpful in the flat case to not call out to Flatten.
+  if (!IsFlat()) {
+    return TryFlatten();
+  }
+  return this;
+}
+
+
+uint16_t String::Get(int index) {
+  ASSERT(index >= 0 && index < length());
+  switch (StringShape(this).full_representation_tag()) {
+    case kSeqStringTag | kAsciiStringTag:
+      return SeqAsciiString::cast(this)->SeqAsciiStringGet(index);
+    case kSeqStringTag | kTwoByteStringTag:
+      return SeqTwoByteString::cast(this)->SeqTwoByteStringGet(index);
+    case kConsStringTag | kAsciiStringTag:
+    case kConsStringTag | kTwoByteStringTag:
+      return ConsString::cast(this)->ConsStringGet(index);
+    case kSlicedStringTag | kAsciiStringTag:
+    case kSlicedStringTag | kTwoByteStringTag:
+      return SlicedString::cast(this)->SlicedStringGet(index);
+    case kExternalStringTag | kAsciiStringTag:
+      return ExternalAsciiString::cast(this)->ExternalAsciiStringGet(index);
+    case kExternalStringTag | kTwoByteStringTag:
+      return ExternalTwoByteString::cast(this)->ExternalTwoByteStringGet(index);
+    default:
+      break;
+  }
+
+  UNREACHABLE();
+  return 0;
+}
+
+
+void String::Set(int index, uint16_t value) {
+  ASSERT(index >= 0 && index < length());
+  ASSERT(StringShape(this).IsSequential());
+
+  return this->IsAsciiRepresentation()
+      ? SeqAsciiString::cast(this)->SeqAsciiStringSet(index, value)
+      : SeqTwoByteString::cast(this)->SeqTwoByteStringSet(index, value);
+}
+
+
+bool String::IsFlat() {
+  switch (StringShape(this).representation_tag()) {
+    case kConsStringTag: {
+      String* second = ConsString::cast(this)->second();
+      // Only flattened strings have second part empty.
+      return second->length() == 0;
+    }
+    case kSlicedStringTag: {
+      StringRepresentationTag tag =
+          StringShape(SlicedString::cast(this)->buffer()).representation_tag();
+      return tag == kSeqStringTag || tag == kExternalStringTag;
+    }
+    default:
+      return true;
+  }
+}
+
+
+uint16_t SeqAsciiString::SeqAsciiStringGet(int index) {
+  ASSERT(index >= 0 && index < length());
+  return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
+}
+
+
+void SeqAsciiString::SeqAsciiStringSet(int index, uint16_t value) {
+  ASSERT(index >= 0 && index < length() && value <= kMaxAsciiCharCode);
+  WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize,
+                   static_cast<byte>(value));
+}
+
+
+Address SeqAsciiString::GetCharsAddress() {
+  return FIELD_ADDR(this, kHeaderSize);
+}
+
+
+char* SeqAsciiString::GetChars() {
+  return reinterpret_cast<char*>(GetCharsAddress());
+}
+
+
+Address SeqTwoByteString::GetCharsAddress() {
+  return FIELD_ADDR(this, kHeaderSize);
+}
+
+
+uc16* SeqTwoByteString::GetChars() {
+  return reinterpret_cast<uc16*>(FIELD_ADDR(this, kHeaderSize));
+}
+
+
+uint16_t SeqTwoByteString::SeqTwoByteStringGet(int index) {
+  ASSERT(index >= 0 && index < length());
+  return READ_SHORT_FIELD(this, kHeaderSize + index * kShortSize);
+}
+
+
+void SeqTwoByteString::SeqTwoByteStringSet(int index, uint16_t value) {
+  ASSERT(index >= 0 && index < length());
+  WRITE_SHORT_FIELD(this, kHeaderSize + index * kShortSize, value);
+}
+
+
+int SeqTwoByteString::SeqTwoByteStringSize(InstanceType instance_type) {
+  uint32_t length = READ_INT_FIELD(this, kLengthOffset);
+
+  ASSERT(kShortStringTag + kLongLengthShift == kShortLengthShift);
+  ASSERT(kMediumStringTag + kLongLengthShift == kMediumLengthShift);
+  ASSERT(kLongStringTag == 0);
+
+  // Use the map (and not 'this') to compute the size tag, since
+  // TwoByteStringSize is called during GC when maps are encoded.
+  length >>= StringShape(instance_type).size_tag() + kLongLengthShift;
+
+  return SizeFor(length);
+}
+
+
+int SeqAsciiString::SeqAsciiStringSize(InstanceType instance_type) {
+  uint32_t length = READ_INT_FIELD(this, kLengthOffset);
+
+  ASSERT(kShortStringTag + kLongLengthShift == kShortLengthShift);
+  ASSERT(kMediumStringTag + kLongLengthShift == kMediumLengthShift);
+  ASSERT(kLongStringTag == 0);
+
+  // Use the map (and not 'this') to compute the size tag, since
+  // AsciiStringSize is called during GC when maps are encoded.
+  length >>= StringShape(instance_type).size_tag() + kLongLengthShift;
+
+  return SizeFor(length);
+}
+
+
+String* ConsString::first() {
+  return String::cast(READ_FIELD(this, kFirstOffset));
+}
+
+
+Object* ConsString::unchecked_first() {
+  return READ_FIELD(this, kFirstOffset);
+}
+
+
+void ConsString::set_first(String* value, WriteBarrierMode mode) {
+  WRITE_FIELD(this, kFirstOffset, value);
+  CONDITIONAL_WRITE_BARRIER(this, kFirstOffset, mode);
+}
+
+
+String* ConsString::second() {
+  return String::cast(READ_FIELD(this, kSecondOffset));
+}
+
+
+Object* ConsString::unchecked_second() {
+  return READ_FIELD(this, kSecondOffset);
+}
+
+
+void ConsString::set_second(String* value, WriteBarrierMode mode) {
+  WRITE_FIELD(this, kSecondOffset, value);
+  CONDITIONAL_WRITE_BARRIER(this, kSecondOffset, mode);
+}
+
+
+String* SlicedString::buffer() {
+  return String::cast(READ_FIELD(this, kBufferOffset));
+}
+
+
+void SlicedString::set_buffer(String* buffer) {
+  WRITE_FIELD(this, kBufferOffset, buffer);
+  WRITE_BARRIER(this, kBufferOffset);
+}
+
+
+int SlicedString::start() {
+  return READ_INT_FIELD(this, kStartOffset);
+}
+
+
+void SlicedString::set_start(int start) {
+  WRITE_INT_FIELD(this, kStartOffset, start);
+}
+
+
+ExternalAsciiString::Resource* ExternalAsciiString::resource() {
+  return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
+}
+
+
+void ExternalAsciiString::set_resource(
+    ExternalAsciiString::Resource* resource) {
+  *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)) = resource;
+}
+
+
+Map* ExternalAsciiString::StringMap(int length) {
+  Map* map;
+  // Number of characters: determines the map.
+  if (length <= String::kMaxShortStringSize) {
+    map = Heap::short_external_ascii_string_map();
+  } else if (length <= String::kMaxMediumStringSize) {
+    map = Heap::medium_external_ascii_string_map();
+  } else {
+    map = Heap::long_external_ascii_string_map();
+  }
+  return map;
+}
+
+
+Map* ExternalAsciiString::SymbolMap(int length) {
+  Map* map;
+  // Number of characters: determines the map.
+  if (length <= String::kMaxShortStringSize) {
+    map = Heap::short_external_ascii_symbol_map();
+  } else if (length <= String::kMaxMediumStringSize) {
+    map = Heap::medium_external_ascii_symbol_map();
+  } else {
+    map = Heap::long_external_ascii_symbol_map();
+  }
+  return map;
+}
+
+
+ExternalTwoByteString::Resource* ExternalTwoByteString::resource() {
+  return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
+}
+
+
+void ExternalTwoByteString::set_resource(
+    ExternalTwoByteString::Resource* resource) {
+  *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)) = resource;
+}
+
+
+Map* ExternalTwoByteString::StringMap(int length) {
+  Map* map;
+  // Number of characters: determines the map.
+  if (length <= String::kMaxShortStringSize) {
+    map = Heap::short_external_string_map();
+  } else if (length <= String::kMaxMediumStringSize) {
+    map = Heap::medium_external_string_map();
+  } else {
+    map = Heap::long_external_string_map();
+  }
+  return map;
+}
+
+
+Map* ExternalTwoByteString::SymbolMap(int length) {
+  Map* map;
+  // Number of characters: determines the map.
+  if (length <= String::kMaxShortStringSize) {
+    map = Heap::short_external_symbol_map();
+  } else if (length <= String::kMaxMediumStringSize) {
+    map = Heap::medium_external_symbol_map();
+  } else {
+    map = Heap::long_external_symbol_map();
+  }
+  return map;
+}
+
+
+byte ByteArray::get(int index) {
+  ASSERT(index >= 0 && index < this->length());
+  return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
+}
+
+
+void ByteArray::set(int index, byte value) {
+  ASSERT(index >= 0 && index < this->length());
+  WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize, value);
+}
+
+
+int ByteArray::get_int(int index) {
+  ASSERT(index >= 0 && (index * kIntSize) < this->length());
+  return READ_INT_FIELD(this, kHeaderSize + index * kIntSize);
+}
+
+
+ByteArray* ByteArray::FromDataStartAddress(Address address) {
+  ASSERT_TAG_ALIGNED(address);
+  return reinterpret_cast<ByteArray*>(address - kHeaderSize + kHeapObjectTag);
+}
+
+
+Address ByteArray::GetDataStartAddress() {
+  return reinterpret_cast<Address>(this) - kHeapObjectTag + kHeaderSize;
+}
+
+
+uint8_t* PixelArray::external_pointer() {
+  intptr_t ptr = READ_INTPTR_FIELD(this, kExternalPointerOffset);
+  return reinterpret_cast<uint8_t*>(ptr);
+}
+
+
+void PixelArray::set_external_pointer(uint8_t* value, WriteBarrierMode mode) {
+  intptr_t ptr = reinterpret_cast<intptr_t>(value);
+  WRITE_INTPTR_FIELD(this, kExternalPointerOffset, ptr);
+}
+
+
+uint8_t PixelArray::get(int index) {
+  ASSERT((index >= 0) && (index < this->length()));
+  uint8_t* ptr = external_pointer();
+  return ptr[index];
+}
+
+
+void PixelArray::set(int index, uint8_t value) {
+  ASSERT((index >= 0) && (index < this->length()));
+  uint8_t* ptr = external_pointer();
+  ptr[index] = value;
+}
+
+
+int Map::instance_size() {
+  return READ_BYTE_FIELD(this, kInstanceSizeOffset) << kPointerSizeLog2;
+}
+
+
+int Map::inobject_properties() {
+  return READ_BYTE_FIELD(this, kInObjectPropertiesOffset);
+}
+
+
+int Map::pre_allocated_property_fields() {
+  return READ_BYTE_FIELD(this, kPreAllocatedPropertyFieldsOffset);
+}
+
+
+int HeapObject::SizeFromMap(Map* map) {
+  InstanceType instance_type = map->instance_type();
+  // Only inline the most frequent cases.
+  if (instance_type == JS_OBJECT_TYPE ||
+      (instance_type & (kIsNotStringMask | kStringRepresentationMask)) ==
+      (kStringTag | kConsStringTag) ||
+      instance_type == JS_ARRAY_TYPE) return map->instance_size();
+  if (instance_type == FIXED_ARRAY_TYPE) {
+    return reinterpret_cast<FixedArray*>(this)->FixedArraySize();
+  }
+  if (instance_type == BYTE_ARRAY_TYPE) {
+    return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
+  }
+  // Otherwise do the general size computation.
+  return SlowSizeFromMap(map);
+}
+
+
+void Map::set_instance_size(int value) {
+  ASSERT_EQ(0, value & (kPointerSize - 1));
+  value >>= kPointerSizeLog2;
+  ASSERT(0 <= value && value < 256);
+  WRITE_BYTE_FIELD(this, kInstanceSizeOffset, static_cast<byte>(value));
+}
+
+
+void Map::set_inobject_properties(int value) {
+  ASSERT(0 <= value && value < 256);
+  WRITE_BYTE_FIELD(this, kInObjectPropertiesOffset, static_cast<byte>(value));
+}
+
+
+void Map::set_pre_allocated_property_fields(int value) {
+  ASSERT(0 <= value && value < 256);
+  WRITE_BYTE_FIELD(this,
+                   kPreAllocatedPropertyFieldsOffset,
+                   static_cast<byte>(value));
+}
+
+
+InstanceType Map::instance_type() {
+  return static_cast<InstanceType>(READ_BYTE_FIELD(this, kInstanceTypeOffset));
+}
+
+
+void Map::set_instance_type(InstanceType value) {
+  ASSERT(0 <= value && value < 256);
+  WRITE_BYTE_FIELD(this, kInstanceTypeOffset, value);
+}
+
+
+int Map::unused_property_fields() {
+  return READ_BYTE_FIELD(this, kUnusedPropertyFieldsOffset);
+}
+
+
+void Map::set_unused_property_fields(int value) {
+  WRITE_BYTE_FIELD(this, kUnusedPropertyFieldsOffset, Min(value, 255));
+}
+
+
+byte Map::bit_field() {
+  return READ_BYTE_FIELD(this, kBitFieldOffset);
+}
+
+
+void Map::set_bit_field(byte value) {
+  WRITE_BYTE_FIELD(this, kBitFieldOffset, value);
+}
+
+
+byte Map::bit_field2() {
+  return READ_BYTE_FIELD(this, kBitField2Offset);
+}
+
+
+void Map::set_bit_field2(byte value) {
+  WRITE_BYTE_FIELD(this, kBitField2Offset, value);
+}
+
+
+void Map::set_non_instance_prototype(bool value) {
+  if (value) {
+    set_bit_field(bit_field() | (1 << kHasNonInstancePrototype));
+  } else {
+    set_bit_field(bit_field() & ~(1 << kHasNonInstancePrototype));
+  }
+}
+
+
+bool Map::has_non_instance_prototype() {
+  return ((1 << kHasNonInstancePrototype) & bit_field()) != 0;
+}
+
+
+void Map::set_is_access_check_needed(bool access_check_needed) {
+  if (access_check_needed) {
+    set_bit_field(bit_field() | (1 << kIsAccessCheckNeeded));
+  } else {
+    set_bit_field(bit_field() & ~(1 << kIsAccessCheckNeeded));
+  }
+}
+
+
+bool Map::is_access_check_needed() {
+  return ((1 << kIsAccessCheckNeeded) & bit_field()) != 0;
+}
+
+
+Code::Flags Code::flags() {
+  return static_cast<Flags>(READ_INT_FIELD(this, kFlagsOffset));
+}
+
+
+void Code::set_flags(Code::Flags flags) {
+  STATIC_ASSERT(Code::NUMBER_OF_KINDS <= (kFlagsKindMask >> kFlagsKindShift)+1);
+  // Make sure that all call stubs have an arguments count.
+  ASSERT(ExtractKindFromFlags(flags) != CALL_IC ||
+         ExtractArgumentsCountFromFlags(flags) >= 0);
+  WRITE_INT_FIELD(this, kFlagsOffset, flags);
+}
+
+
+Code::Kind Code::kind() {
+  return ExtractKindFromFlags(flags());
+}
+
+
+InLoopFlag Code::ic_in_loop() {
+  return ExtractICInLoopFromFlags(flags());
+}
+
+
+InlineCacheState Code::ic_state() {
+  InlineCacheState result = ExtractICStateFromFlags(flags());
+  // Only allow uninitialized or debugger states for non-IC code
+  // objects. This is used in the debugger to determine whether or not
+  // a call to code object has been replaced with a debug break call.
+  ASSERT(is_inline_cache_stub() ||
+         result == UNINITIALIZED ||
+         result == DEBUG_BREAK ||
+         result == DEBUG_PREPARE_STEP_IN);
+  return result;
+}
+
+
+PropertyType Code::type() {
+  ASSERT(ic_state() == MONOMORPHIC);
+  return ExtractTypeFromFlags(flags());
+}
+
+
+int Code::arguments_count() {
+  ASSERT(is_call_stub() || kind() == STUB);
+  return ExtractArgumentsCountFromFlags(flags());
+}
+
+
+CodeStub::Major Code::major_key() {
+  ASSERT(kind() == STUB);
+  return static_cast<CodeStub::Major>(READ_BYTE_FIELD(this,
+                                                      kStubMajorKeyOffset));
+}
+
+
+void Code::set_major_key(CodeStub::Major major) {
+  ASSERT(kind() == STUB);
+  ASSERT(0 <= major && major < 256);
+  WRITE_BYTE_FIELD(this, kStubMajorKeyOffset, major);
+}
+
+
+bool Code::is_inline_cache_stub() {
+  Kind kind = this->kind();
+  return kind >= FIRST_IC_KIND && kind <= LAST_IC_KIND;
+}
+
+
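+// The flags word packs the kind, IC state, in-loop flag, property type and
+// argument count into disjoint bit fields; the asserts below check that the
+// packing round-trips through the extractors.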
+Code::Flags Code::ComputeFlags(Kind kind,
+                               InLoopFlag in_loop,
+                               InlineCacheState ic_state,
+                               PropertyType type,
+                               int argc) {
+  // Compute the bit mask.
+  int bits = kind << kFlagsKindShift;
+  if (in_loop) bits |= kFlagsICInLoopMask;
+  bits |= ic_state << kFlagsICStateShift;
+  bits |= type << kFlagsTypeShift;
+  bits |= argc << kFlagsArgumentsCountShift;
+  // Cast to flags and validate result before returning it.
+  Flags result = static_cast<Flags>(bits);
+  ASSERT(ExtractKindFromFlags(result) == kind);
+  ASSERT(ExtractICStateFromFlags(result) == ic_state);
+  ASSERT(ExtractICInLoopFromFlags(result) == in_loop);
+  ASSERT(ExtractTypeFromFlags(result) == type);
+  ASSERT(ExtractArgumentsCountFromFlags(result) == argc);
+  return result;
+}
+
+
+Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
+                                          PropertyType type,
+                                          InLoopFlag in_loop,
+                                          int argc) {
+  return ComputeFlags(kind, in_loop, MONOMORPHIC, type, argc);
+}
+
+
+Code::Kind Code::ExtractKindFromFlags(Flags flags) {
+  int bits = (flags & kFlagsKindMask) >> kFlagsKindShift;
+  return static_cast<Kind>(bits);
+}
+
+
+InlineCacheState Code::ExtractICStateFromFlags(Flags flags) {
+  int bits = (flags & kFlagsICStateMask) >> kFlagsICStateShift;
+  return static_cast<InlineCacheState>(bits);
+}
+
+
+InLoopFlag Code::ExtractICInLoopFromFlags(Flags flags) {
+  int bits = (flags & kFlagsICInLoopMask);
+  return bits != 0 ? IN_LOOP : NOT_IN_LOOP;
+}
+
+
+PropertyType Code::ExtractTypeFromFlags(Flags flags) {
+  int bits = (flags & kFlagsTypeMask) >> kFlagsTypeShift;
+  return static_cast<PropertyType>(bits);
+}
+
+
+int Code::ExtractArgumentsCountFromFlags(Flags flags) {
+  return (flags & kFlagsArgumentsCountMask) >> kFlagsArgumentsCountShift;
+}
+
+
+Code::Flags Code::RemoveTypeFromFlags(Flags flags) {
+  int bits = flags & ~kFlagsTypeMask;
+  return static_cast<Flags>(bits);
+}
+
+
+Code* Code::GetCodeFromTargetAddress(Address address) {
+  HeapObject* code = HeapObject::FromAddress(address - Code::kHeaderSize);
+  // GetCodeFromTargetAddress might be called when marking objects during
+  // mark-sweep. reinterpret_cast is therefore used instead of the more
+  // appropriate Code::cast. Code::cast does not work when the object's map
+  // is marked.
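+  // The address passed in is expected to be a code entry point, i.e. the
+  // value of instruction_start() (kHeaderSize bytes past the start of the
+  // object), so subtracting kHeaderSize recovers the Code object itself.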
+  Code* result = reinterpret_cast<Code*>(code);
+  return result;
+}
+
+
+Object* Map::prototype() {
+  return READ_FIELD(this, kPrototypeOffset);
+}
+
+
+void Map::set_prototype(Object* value, WriteBarrierMode mode) {
+  ASSERT(value->IsNull() || value->IsJSObject());
+  WRITE_FIELD(this, kPrototypeOffset, value);
+  CONDITIONAL_WRITE_BARRIER(this, kPrototypeOffset, mode);
+}
+
+
+ACCESSORS(Map, instance_descriptors, DescriptorArray,
+          kInstanceDescriptorsOffset)
+ACCESSORS(Map, code_cache, FixedArray, kCodeCacheOffset)
+ACCESSORS(Map, constructor, Object, kConstructorOffset)
+
+ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
+ACCESSORS(JSFunction, literals, FixedArray, kLiteralsOffset)
+
+ACCESSORS(GlobalObject, builtins, JSBuiltinsObject, kBuiltinsOffset)
+ACCESSORS(GlobalObject, global_context, Context, kGlobalContextOffset)
+ACCESSORS(GlobalObject, global_receiver, JSObject, kGlobalReceiverOffset)
+
+ACCESSORS(JSGlobalProxy, context, Object, kContextOffset)
+
+ACCESSORS(AccessorInfo, getter, Object, kGetterOffset)
+ACCESSORS(AccessorInfo, setter, Object, kSetterOffset)
+ACCESSORS(AccessorInfo, data, Object, kDataOffset)
+ACCESSORS(AccessorInfo, name, Object, kNameOffset)
+ACCESSORS(AccessorInfo, flag, Smi, kFlagOffset)
+
+ACCESSORS(AccessCheckInfo, named_callback, Object, kNamedCallbackOffset)
+ACCESSORS(AccessCheckInfo, indexed_callback, Object, kIndexedCallbackOffset)
+ACCESSORS(AccessCheckInfo, data, Object, kDataOffset)
+
+ACCESSORS(InterceptorInfo, getter, Object, kGetterOffset)
+ACCESSORS(InterceptorInfo, setter, Object, kSetterOffset)
+ACCESSORS(InterceptorInfo, query, Object, kQueryOffset)
+ACCESSORS(InterceptorInfo, deleter, Object, kDeleterOffset)
+ACCESSORS(InterceptorInfo, enumerator, Object, kEnumeratorOffset)
+ACCESSORS(InterceptorInfo, data, Object, kDataOffset)
+
+ACCESSORS(CallHandlerInfo, callback, Object, kCallbackOffset)
+ACCESSORS(CallHandlerInfo, data, Object, kDataOffset)
+
+ACCESSORS(TemplateInfo, tag, Object, kTagOffset)
+ACCESSORS(TemplateInfo, property_list, Object, kPropertyListOffset)
+
+ACCESSORS(FunctionTemplateInfo, serial_number, Object, kSerialNumberOffset)
+ACCESSORS(FunctionTemplateInfo, call_code, Object, kCallCodeOffset)
+ACCESSORS(FunctionTemplateInfo, property_accessors, Object,
+          kPropertyAccessorsOffset)
+ACCESSORS(FunctionTemplateInfo, prototype_template, Object,
+          kPrototypeTemplateOffset)
+ACCESSORS(FunctionTemplateInfo, parent_template, Object, kParentTemplateOffset)
+ACCESSORS(FunctionTemplateInfo, named_property_handler, Object,
+          kNamedPropertyHandlerOffset)
+ACCESSORS(FunctionTemplateInfo, indexed_property_handler, Object,
+          kIndexedPropertyHandlerOffset)
+ACCESSORS(FunctionTemplateInfo, instance_template, Object,
+          kInstanceTemplateOffset)
+ACCESSORS(FunctionTemplateInfo, class_name, Object, kClassNameOffset)
+ACCESSORS(FunctionTemplateInfo, signature, Object, kSignatureOffset)
+ACCESSORS(FunctionTemplateInfo, instance_call_handler, Object,
+          kInstanceCallHandlerOffset)
+ACCESSORS(FunctionTemplateInfo, access_check_info, Object,
+          kAccessCheckInfoOffset)
+ACCESSORS(FunctionTemplateInfo, flag, Smi, kFlagOffset)
+
+ACCESSORS(ObjectTemplateInfo, constructor, Object, kConstructorOffset)
+ACCESSORS(ObjectTemplateInfo, internal_field_count, Object,
+          kInternalFieldCountOffset)
+
+ACCESSORS(SignatureInfo, receiver, Object, kReceiverOffset)
+ACCESSORS(SignatureInfo, args, Object, kArgsOffset)
+
+ACCESSORS(TypeSwitchInfo, types, Object, kTypesOffset)
+
+ACCESSORS(Script, source, Object, kSourceOffset)
+ACCESSORS(Script, name, Object, kNameOffset)
+ACCESSORS(Script, id, Object, kIdOffset)
+ACCESSORS(Script, line_offset, Smi, kLineOffsetOffset)
+ACCESSORS(Script, column_offset, Smi, kColumnOffsetOffset)
+ACCESSORS(Script, data, Object, kDataOffset)
+ACCESSORS(Script, context_data, Object, kContextOffset)
+ACCESSORS(Script, wrapper, Proxy, kWrapperOffset)
+ACCESSORS(Script, type, Smi, kTypeOffset)
+ACCESSORS(Script, compilation_type, Smi, kCompilationTypeOffset)
+ACCESSORS(Script, line_ends, Object, kLineEndsOffset)
+ACCESSORS(Script, eval_from_function, Object, kEvalFromFunctionOffset)
+ACCESSORS(Script, eval_from_instructions_offset, Smi,
+          kEvalFrominstructionsOffsetOffset)
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ACCESSORS(DebugInfo, shared, SharedFunctionInfo, kSharedFunctionInfoIndex)
+ACCESSORS(DebugInfo, original_code, Code, kOriginalCodeIndex)
+ACCESSORS(DebugInfo, code, Code, kPatchedCodeIndex)
+ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsStateIndex)
+
+ACCESSORS(BreakPointInfo, code_position, Smi, kCodePositionIndex)
+ACCESSORS(BreakPointInfo, source_position, Smi, kSourcePositionIndex)
+ACCESSORS(BreakPointInfo, statement_position, Smi, kStatementPositionIndex)
+ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
+#endif
+
+ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
+ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
+ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
+          kInstanceClassNameOffset)
+ACCESSORS(SharedFunctionInfo, function_data, Object,
+          kExternalReferenceDataOffset)
+ACCESSORS(SharedFunctionInfo, script, Object, kScriptOffset)
+ACCESSORS(SharedFunctionInfo, debug_info, Object, kDebugInfoOffset)
+ACCESSORS(SharedFunctionInfo, inferred_name, String, kInferredNameOffset)
+ACCESSORS(SharedFunctionInfo, this_property_assignments, Object,
+          kThisPropertyAssignmentsOffset)
+
+BOOL_ACCESSORS(FunctionTemplateInfo, flag, hidden_prototype,
+               kHiddenPrototypeBit)
+BOOL_ACCESSORS(FunctionTemplateInfo, flag, undetectable, kUndetectableBit)
+BOOL_ACCESSORS(FunctionTemplateInfo, flag, needs_access_check,
+               kNeedsAccessCheckBit)
+BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_expression,
+               kIsExpressionBit)
+BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_toplevel,
+               kIsTopLevelBit)
+BOOL_GETTER(SharedFunctionInfo, compiler_hints,
+            has_only_this_property_assignments,
+            kHasOnlyThisPropertyAssignments)
+BOOL_GETTER(SharedFunctionInfo, compiler_hints,
+            has_only_simple_this_property_assignments,
+            kHasOnlySimpleThisPropertyAssignments)
+
+
+INT_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
+INT_ACCESSORS(SharedFunctionInfo, formal_parameter_count,
+              kFormalParameterCountOffset)
+INT_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
+              kExpectedNofPropertiesOffset)
+INT_ACCESSORS(SharedFunctionInfo, start_position_and_type,
+              kStartPositionAndTypeOffset)
+INT_ACCESSORS(SharedFunctionInfo, end_position, kEndPositionOffset)
+INT_ACCESSORS(SharedFunctionInfo, function_token_position,
+              kFunctionTokenPositionOffset)
+INT_ACCESSORS(SharedFunctionInfo, compiler_hints,
+              kCompilerHintsOffset)
+INT_ACCESSORS(SharedFunctionInfo, this_property_assignments_count,
+              kThisPropertyAssignmentsCountOffset)
+
+
+void SharedFunctionInfo::DontAdaptArguments() {
+  ASSERT(code()->kind() == Code::BUILTIN);
+  set_formal_parameter_count(kDontAdaptArgumentsSentinel);
+}
+
+
+int SharedFunctionInfo::start_position() {
+  return start_position_and_type() >> kStartPositionShift;
+}
+
+
+void SharedFunctionInfo::set_start_position(int start_position) {
+  set_start_position_and_type((start_position << kStartPositionShift)
+    | (start_position_and_type() & ~kStartPositionMask));
+}
+
+
+Code* SharedFunctionInfo::code() {
+  return Code::cast(READ_FIELD(this, kCodeOffset));
+}
+
+
+void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
+  WRITE_FIELD(this, kCodeOffset, value);
+  CONDITIONAL_WRITE_BARRIER(this, kCodeOffset, mode);
+}
+
+
+bool SharedFunctionInfo::is_compiled() {
+  // TODO(1242782): Create a code kind for uncompiled code.
+  return code()->kind() != Code::STUB;
+}
+
+
+bool JSFunction::IsBoilerplate() {
+  return map() == Heap::boilerplate_function_map();
+}
+
+
+bool JSFunction::IsBuiltin() {
+  return context()->global()->IsJSBuiltinsObject();
+}
+
+
+bool JSObject::IsLoaded() {
+  return !map()->needs_loading();
+}
+
+
+Code* JSFunction::code() {
+  return shared()->code();
+}
+
+
+void JSFunction::set_code(Code* value) {
+  shared()->set_code(value);
+}
+
+
+Context* JSFunction::context() {
+  return Context::cast(READ_FIELD(this, kContextOffset));
+}
+
+
+Object* JSFunction::unchecked_context() {
+  return READ_FIELD(this, kContextOffset);
+}
+
+
+void JSFunction::set_context(Object* value) {
+  ASSERT(value == Heap::undefined_value() || value->IsContext());
+  WRITE_FIELD(this, kContextOffset, value);
+  WRITE_BARRIER(this, kContextOffset);
+}
+
+ACCESSORS(JSFunction, prototype_or_initial_map, Object,
+          kPrototypeOrInitialMapOffset)
+
+
+Map* JSFunction::initial_map() {
+  return Map::cast(prototype_or_initial_map());
+}
+
+
+void JSFunction::set_initial_map(Map* value) {
+  set_prototype_or_initial_map(value);
+}
+
+
+bool JSFunction::has_initial_map() {
+  return prototype_or_initial_map()->IsMap();
+}
+
+
+bool JSFunction::has_instance_prototype() {
+  return has_initial_map() || !prototype_or_initial_map()->IsTheHole();
+}
+
+
+bool JSFunction::has_prototype() {
+  return map()->has_non_instance_prototype() || has_instance_prototype();
+}
+
+
+Object* JSFunction::instance_prototype() {
+  ASSERT(has_instance_prototype());
+  if (has_initial_map()) return initial_map()->prototype();
+  // When there is no initial map and the prototype is a JSObject, the
+  // initial map field is used for the prototype field.
+  return prototype_or_initial_map();
+}
+
+
+Object* JSFunction::prototype() {
+  ASSERT(has_prototype());
+  // If the function's prototype property has been set to a non-JSObject
+  // value, that value is stored in the constructor field of the map.
+  if (map()->has_non_instance_prototype()) return map()->constructor();
+  return instance_prototype();
+}
+
+
+bool JSFunction::is_compiled() {
+  return shared()->is_compiled();
+}
+
+
+int JSFunction::NumberOfLiterals() {
+  return literals()->length();
+}
+
+
+Object* JSBuiltinsObject::javascript_builtin(Builtins::JavaScript id) {
+  ASSERT(0 <= id && id < kJSBuiltinsCount);
+  return READ_FIELD(this, kJSBuiltinsOffset + (id * kPointerSize));
+}
+
+
+void JSBuiltinsObject::set_javascript_builtin(Builtins::JavaScript id,
+                                              Object* value) {
+  ASSERT(0 <= id && id < kJSBuiltinsCount);
+  WRITE_FIELD(this, kJSBuiltinsOffset + (id * kPointerSize), value);
+  WRITE_BARRIER(this, kJSBuiltinsOffset + (id * kPointerSize));
+}
+
+
+Address Proxy::proxy() {
+  return AddressFrom<Address>(READ_INTPTR_FIELD(this, kProxyOffset));
+}
+
+
+void Proxy::set_proxy(Address value) {
+  WRITE_INTPTR_FIELD(this, kProxyOffset, OffsetFrom(value));
+}
+
+
+void Proxy::ProxyIterateBody(ObjectVisitor* visitor) {
+  visitor->VisitExternalReference(
+      reinterpret_cast<Address *>(FIELD_ADDR(this, kProxyOffset)));
+}
+
+
+ACCESSORS(JSValue, value, Object, kValueOffset)
+
+
+JSValue* JSValue::cast(Object* obj) {
+  ASSERT(obj->IsJSValue());
+  ASSERT(HeapObject::cast(obj)->Size() == JSValue::kSize);
+  return reinterpret_cast<JSValue*>(obj);
+}
+
+
+INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
+INT_ACCESSORS(Code, relocation_size, kRelocationSizeOffset)
+INT_ACCESSORS(Code, sinfo_size, kSInfoSizeOffset)
+
+
+byte* Code::instruction_start() {
+  return FIELD_ADDR(this, kHeaderSize);
+}
+
+
+int Code::body_size() {
+  return RoundUp(instruction_size() + relocation_size(), kObjectAlignment);
+}
+
+
+byte* Code::relocation_start() {
+  return FIELD_ADDR(this, kHeaderSize + instruction_size());
+}
+
+
+byte* Code::entry() {
+  return instruction_start();
+}
+
+
+bool Code::contains(byte* pc) {
+  return (instruction_start() <= pc) &&
+      (pc < instruction_start() + instruction_size());
+}
+
+
+byte* Code::sinfo_start() {
+  return FIELD_ADDR(this, kHeaderSize + body_size());
+}
+
+
+ACCESSORS(JSArray, length, Object, kLengthOffset)
+
+
+ACCESSORS(JSRegExp, data, Object, kDataOffset)
+
+
+JSRegExp::Type JSRegExp::TypeTag() {
+  Object* data = this->data();
+  if (data->IsUndefined()) return JSRegExp::NOT_COMPILED;
+  Smi* smi = Smi::cast(FixedArray::cast(data)->get(kTagIndex));
+  return static_cast<JSRegExp::Type>(smi->value());
+}
+
+
+int JSRegExp::CaptureCount() {
+  switch (TypeTag()) {
+    case ATOM:
+      return 0;
+    case IRREGEXP:
+      return Smi::cast(DataAt(kIrregexpCaptureCountIndex))->value();
+    default:
+      UNREACHABLE();
+      return -1;
+  }
+}
+
+
+JSRegExp::Flags JSRegExp::GetFlags() {
+  ASSERT(this->data()->IsFixedArray());
+  Object* data = this->data();
+  Smi* smi = Smi::cast(FixedArray::cast(data)->get(kFlagsIndex));
+  return Flags(smi->value());
+}
+
+
+String* JSRegExp::Pattern() {
+  ASSERT(this->data()->IsFixedArray());
+  Object* data = this->data();
+  String* pattern = String::cast(FixedArray::cast(data)->get(kSourceIndex));
+  return pattern;
+}
+
+
+Object* JSRegExp::DataAt(int index) {
+  ASSERT(TypeTag() != NOT_COMPILED);
+  return FixedArray::cast(data())->get(index);
+}
+
+
+void JSRegExp::SetDataAt(int index, Object* value) {
+  ASSERT(TypeTag() != NOT_COMPILED);
+  ASSERT(index >= kDataIndex);  // Only implementation data can be set this way.
+  FixedArray::cast(data())->set(index, value);
+}
+
+
+JSObject::ElementsKind JSObject::GetElementsKind() {
+  Array* array = elements();
+  if (array->IsFixedArray()) {
+    // FAST_ELEMENTS or DICTIONARY_ELEMENTS are both stored in a FixedArray.
+    if (array->map() == Heap::fixed_array_map()) {
+      return FAST_ELEMENTS;
+    }
+    ASSERT(array->IsDictionary());
+    return DICTIONARY_ELEMENTS;
+  }
+  ASSERT(array->IsPixelArray());
+  return PIXEL_ELEMENTS;
+}
+
+
+bool JSObject::HasFastElements() {
+  return GetElementsKind() == FAST_ELEMENTS;
+}
+
+
+bool JSObject::HasDictionaryElements() {
+  return GetElementsKind() == DICTIONARY_ELEMENTS;
+}
+
+
+bool JSObject::HasPixelElements() {
+  return GetElementsKind() == PIXEL_ELEMENTS;
+}
+
+
+bool JSObject::HasNamedInterceptor() {
+  return map()->has_named_interceptor();
+}
+
+
+bool JSObject::HasIndexedInterceptor() {
+  return map()->has_indexed_interceptor();
+}
+
+
+StringDictionary* JSObject::property_dictionary() {
+  ASSERT(!HasFastProperties());
+  return StringDictionary::cast(properties());
+}
+
+
+NumberDictionary* JSObject::element_dictionary() {
+  ASSERT(HasDictionaryElements());
+  return NumberDictionary::cast(elements());
+}
+
+
+bool String::HasHashCode() {
+  return (length_field() & kHashComputedMask) != 0;
+}
+
+
+uint32_t String::Hash() {
+  // Fast case: has hash code already been computed?
+  uint32_t field = length_field();
+  if (field & kHashComputedMask) return field >> kHashShift;
+  // Slow case: compute hash code and set it.
+  return ComputeAndSetHash();
+}
+
+
+StringHasher::StringHasher(int length)
+  : length_(length),
+    raw_running_hash_(0),
+    array_index_(0),
+    is_array_index_(0 < length_ && length_ <= String::kMaxArrayIndexSize),
+    is_first_char_(true),
+    is_valid_(true) { }
+
+
+bool StringHasher::has_trivial_hash() {
+  return length_ > String::kMaxMediumStringSize;
+}
+
+
+void StringHasher::AddCharacter(uc32 c) {
+  // Use the Jenkins one-at-a-time hash function to update the hash
+  // for the given character.
+  raw_running_hash_ += c;
+  raw_running_hash_ += (raw_running_hash_ << 10);
+  raw_running_hash_ ^= (raw_running_hash_ >> 6);
+  // Incremental array index computation.
+  if (is_array_index_) {
+    if (c < '0' || c > '9') {
+      is_array_index_ = false;
+    } else {
+      int d = c - '0';
+      if (is_first_char_) {
+        is_first_char_ = false;
+        if (c == '0' && length_ > 1) {
+          is_array_index_ = false;
+          return;
+        }
+      }
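+      // 429496729 is 2^32 / 10 rounded down; subtracting ((d + 2) >> 3)
+      // lowers the limit by one for digits >= 6, so the check fires exactly
+      // when array_index_ * 10 + d would no longer fit in 32 bits.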
+      if (array_index_ > 429496729U - ((d + 2) >> 3)) {
+        is_array_index_ = false;
+      } else {
+        array_index_ = array_index_ * 10 + d;
+      }
+    }
+  }
+}
+
+
+void StringHasher::AddCharacterNoIndex(uc32 c) {
+  ASSERT(!is_array_index());
+  raw_running_hash_ += c;
+  raw_running_hash_ += (raw_running_hash_ << 10);
+  raw_running_hash_ ^= (raw_running_hash_ >> 6);
+}
+
+
+uint32_t StringHasher::GetHash() {
+  // Get the calculated raw hash value and do some more bit ops to distribute
+  // the hash further. Ensure that we never return zero as the hash value.
+  uint32_t result = raw_running_hash_;
+  result += (result << 3);
+  result ^= (result >> 11);
+  result += (result << 15);
+  if (result == 0) {
+    result = 27;
+  }
+  return result;
+}
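+
+
+// Illustrative use of the incremental hasher above: a caller feeds each
+// character through AddCharacter (or AddCharacterNoIndex) and finalizes
+// with GetHash, e.g.
+//
+//   StringHasher hasher(2);
+//   hasher.AddCharacter('a');
+//   hasher.AddCharacter('b');
+//   uint32_t hash = hasher.GetHash();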
+
+
+bool String::AsArrayIndex(uint32_t* index) {
+  uint32_t field = length_field();
+  if ((field & kHashComputedMask) && !(field & kIsArrayIndexMask)) return false;
+  return SlowAsArrayIndex(index);
+}
+
+
+Object* JSObject::GetPrototype() {
+  return JSObject::cast(this)->map()->prototype();
+}
+
+
+PropertyAttributes JSObject::GetPropertyAttribute(String* key) {
+  return GetPropertyAttributeWithReceiver(this, key);
+}
+
+
+bool JSObject::HasElement(uint32_t index) {
+  return HasElementWithReceiver(this, index);
+}
+
+
+bool AccessorInfo::all_can_read() {
+  return BooleanBit::get(flag(), kAllCanReadBit);
+}
+
+
+void AccessorInfo::set_all_can_read(bool value) {
+  set_flag(BooleanBit::set(flag(), kAllCanReadBit, value));
+}
+
+
+bool AccessorInfo::all_can_write() {
+  return BooleanBit::get(flag(), kAllCanWriteBit);
+}
+
+
+void AccessorInfo::set_all_can_write(bool value) {
+  set_flag(BooleanBit::set(flag(), kAllCanWriteBit, value));
+}
+
+
+bool AccessorInfo::prohibits_overwriting() {
+  return BooleanBit::get(flag(), kProhibitsOverwritingBit);
+}
+
+
+void AccessorInfo::set_prohibits_overwriting(bool value) {
+  set_flag(BooleanBit::set(flag(), kProhibitsOverwritingBit, value));
+}
+
+
+PropertyAttributes AccessorInfo::property_attributes() {
+  return AttributesField::decode(static_cast<uint32_t>(flag()->value()));
+}
+
+
+void AccessorInfo::set_property_attributes(PropertyAttributes attributes) {
+  ASSERT(AttributesField::is_valid(attributes));
+  int rest_value = flag()->value() & ~AttributesField::mask();
+  set_flag(Smi::FromInt(rest_value | AttributesField::encode(attributes)));
+}
+
+template<typename Shape, typename Key>
+void Dictionary<Shape, Key>::SetEntry(int entry,
+                                      Object* key,
+                                      Object* value,
+                                      PropertyDetails details) {
+  ASSERT(!key->IsString() || details.IsDeleted() || details.index() > 0);
+  int index = HashTable<Shape, Key>::EntryToIndex(entry);
+  WriteBarrierMode mode = FixedArray::GetWriteBarrierMode();
+  FixedArray::set(index, key, mode);
+  FixedArray::set(index+1, value, mode);
+  FixedArray::fast_set(this, index+2, details.AsSmi());
+}
+
+
+void Map::ClearCodeCache() {
+  // No write barrier is needed since empty_fixed_array is not in new space.
+  // Please note this function is used during marking:
+  //  - MarkCompactCollector::MarkUnmarkedObject
+  ASSERT(!Heap::InNewSpace(Heap::raw_unchecked_empty_fixed_array()));
+  WRITE_FIELD(this, kCodeCacheOffset, Heap::raw_unchecked_empty_fixed_array());
+}
+
+
+void JSArray::EnsureSize(int required_size) {
+  ASSERT(HasFastElements());
+  if (elements()->length() >= required_size) return;
+  Expand(required_size);
+}
+
+
+void JSArray::SetContent(FixedArray* storage) {
+  set_length(Smi::FromInt(storage->length()), SKIP_WRITE_BARRIER);
+  set_elements(storage);
+}
+
+
+Object* FixedArray::Copy() {
+  if (length() == 0) return this;
+  return Heap::CopyFixedArray(this);
+}
+
+
+#undef CAST_ACCESSOR
+#undef INT_ACCESSORS
+#undef SMI_ACCESSORS
+#undef ACCESSORS
+#undef FIELD_ADDR
+#undef READ_FIELD
+#undef WRITE_FIELD
+#undef WRITE_BARRIER
+#undef CONDITIONAL_WRITE_BARRIER
+#undef READ_MEMADDR_FIELD
+#undef WRITE_MEMADDR_FIELD
+#undef READ_DOUBLE_FIELD
+#undef WRITE_DOUBLE_FIELD
+#undef READ_INT_FIELD
+#undef WRITE_INT_FIELD
+#undef READ_SHORT_FIELD
+#undef WRITE_SHORT_FIELD
+#undef READ_BYTE_FIELD
+#undef WRITE_BYTE_FIELD
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_OBJECTS_INL_H_
diff --git a/src/objects.cc b/src/objects.cc
new file mode 100644
index 0000000..e2fa3b5
--- /dev/null
+++ b/src/objects.cc
@@ -0,0 +1,7982 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "api.h"
+#include "arguments.h"
+#include "bootstrapper.h"
+#include "debug.h"
+#include "execution.h"
+#include "objects-inl.h"
+#include "macro-assembler.h"
+#include "scanner.h"
+#include "scopeinfo.h"
+#include "string-stream.h"
+
+#ifdef ENABLE_DISASSEMBLER
+#include "disassembler.h"
+#endif
+
+
+namespace v8 {
+namespace internal {
+
+// Getters and setters are stored in a fixed array property.  These are
+// constants for their indices.
+const int kGetterIndex = 0;
+const int kSetterIndex = 1;
+
+
+static Object* CreateJSValue(JSFunction* constructor, Object* value) {
+  Object* result = Heap::AllocateJSObject(constructor);
+  if (result->IsFailure()) return result;
+  JSValue::cast(result)->set_value(value);
+  return result;
+}
+
+
+Object* Object::ToObject(Context* global_context) {
+  if (IsNumber()) {
+    return CreateJSValue(global_context->number_function(), this);
+  } else if (IsBoolean()) {
+    return CreateJSValue(global_context->boolean_function(), this);
+  } else if (IsString()) {
+    return CreateJSValue(global_context->string_function(), this);
+  }
+  ASSERT(IsJSObject());
+  return this;
+}
+
+
+Object* Object::ToObject() {
+  Context* global_context = Top::context()->global_context();
+  if (IsJSObject()) {
+    return this;
+  } else if (IsNumber()) {
+    return CreateJSValue(global_context->number_function(), this);
+  } else if (IsBoolean()) {
+    return CreateJSValue(global_context->boolean_function(), this);
+  } else if (IsString()) {
+    return CreateJSValue(global_context->string_function(), this);
+  }
+
+  // Throw a type error.
+  return Failure::InternalError();
+}
+
+
+Object* Object::ToBoolean() {
+  if (IsTrue()) return Heap::true_value();
+  if (IsFalse()) return Heap::false_value();
+  if (IsSmi()) {
+    return Heap::ToBoolean(Smi::cast(this)->value() != 0);
+  }
+  if (IsUndefined() || IsNull()) return Heap::false_value();
+  // Undetectable objects are false.
+  if (IsUndetectableObject()) {
+    return Heap::false_value();
+  }
+  if (IsString()) {
+    return Heap::ToBoolean(String::cast(this)->length() != 0);
+  }
+  if (IsHeapNumber()) {
+    return HeapNumber::cast(this)->HeapNumberToBoolean();
+  }
+  return Heap::true_value();
+}
+
+
+void Object::Lookup(String* name, LookupResult* result) {
+  if (IsJSObject()) return JSObject::cast(this)->Lookup(name, result);
+  Object* holder = NULL;
+  Context* global_context = Top::context()->global_context();
+  if (IsString()) {
+    holder = global_context->string_function()->instance_prototype();
+  } else if (IsNumber()) {
+    holder = global_context->number_function()->instance_prototype();
+  } else if (IsBoolean()) {
+    holder = global_context->boolean_function()->instance_prototype();
+  }
+  ASSERT(holder != NULL);  // Cannot handle null or undefined.
+  JSObject::cast(holder)->Lookup(name, result);
+}
+
+
+Object* Object::GetPropertyWithReceiver(Object* receiver,
+                                        String* name,
+                                        PropertyAttributes* attributes) {
+  LookupResult result;
+  Lookup(name, &result);
+  Object* value = GetProperty(receiver, &result, name, attributes);
+  ASSERT(*attributes <= ABSENT);
+  return value;
+}
+
+
+Object* Object::GetPropertyWithCallback(Object* receiver,
+                                        Object* structure,
+                                        String* name,
+                                        Object* holder) {
+  // To accommodate both the old and the new API we switch on the
+  // data structure used to store the callbacks.  Eventually proxy
+  // callbacks should be phased out.
+  if (structure->IsProxy()) {
+    AccessorDescriptor* callback =
+        reinterpret_cast<AccessorDescriptor*>(Proxy::cast(structure)->proxy());
+    Object* value = (callback->getter)(receiver, callback->data);
+    RETURN_IF_SCHEDULED_EXCEPTION();
+    return value;
+  }
+
+  // API-style callbacks.
+  if (structure->IsAccessorInfo()) {
+    AccessorInfo* data = AccessorInfo::cast(structure);
+    Object* fun_obj = data->getter();
+    v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
+    HandleScope scope;
+    JSObject* self = JSObject::cast(receiver);
+    JSObject* holder_handle = JSObject::cast(holder);
+    Handle<String> key(name);
+    LOG(ApiNamedPropertyAccess("load", self, name));
+    CustomArguments args(data->data(), self, holder_handle);
+    v8::AccessorInfo info(args.end());
+    v8::Handle<v8::Value> result;
+    {
+      // Leaving JavaScript.
+      VMState state(EXTERNAL);
+      result = call_fun(v8::Utils::ToLocal(key), info);
+    }
+    RETURN_IF_SCHEDULED_EXCEPTION();
+    if (result.IsEmpty()) return Heap::undefined_value();
+    return *v8::Utils::OpenHandle(*result);
+  }
+
+  // __defineGetter__ callback
+  if (structure->IsFixedArray()) {
+    Object* getter = FixedArray::cast(structure)->get(kGetterIndex);
+    if (getter->IsJSFunction()) {
+      return Object::GetPropertyWithDefinedGetter(receiver,
+                                                  JSFunction::cast(getter));
+    }
+    // Getter is not a function.
+    return Heap::undefined_value();
+  }
+
+  UNREACHABLE();
+  return 0;
+}
+
+
+Object* Object::GetPropertyWithDefinedGetter(Object* receiver,
+                                             JSFunction* getter) {
+  HandleScope scope;
+  Handle<JSFunction> fun(JSFunction::cast(getter));
+  Handle<Object> self(receiver);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Handle stepping into a getter if step into is active.
+  if (Debug::StepInActive()) {
+    Debug::HandleStepIn(fun, Handle<Object>::null(), 0, false);
+  }
+#endif
+  bool has_pending_exception;
+  Handle<Object> result =
+      Execution::Call(fun, self, 0, NULL, &has_pending_exception);
+  // Check for pending exception and return the result.
+  if (has_pending_exception) return Failure::Exception();
+  return *result;
+}
+
+
+// Only deal with CALLBACKS and INTERCEPTOR
+Object* JSObject::GetPropertyWithFailedAccessCheck(
+    Object* receiver,
+    LookupResult* result,
+    String* name,
+    PropertyAttributes* attributes) {
+  if (result->IsValid()) {
+    switch (result->type()) {
+      case CALLBACKS: {
+        // Only allow API accessors.
+        Object* obj = result->GetCallbackObject();
+        if (obj->IsAccessorInfo()) {
+          AccessorInfo* info = AccessorInfo::cast(obj);
+          if (info->all_can_read()) {
+            *attributes = result->GetAttributes();
+            return GetPropertyWithCallback(receiver,
+                                           result->GetCallbackObject(),
+                                           name,
+                                           result->holder());
+          }
+        }
+        break;
+      }
+      case NORMAL:
+      case FIELD:
+      case CONSTANT_FUNCTION: {
+        // Search ALL_CAN_READ accessors in prototype chain.
+        LookupResult r;
+        result->holder()->LookupRealNamedPropertyInPrototypes(name, &r);
+        if (r.IsValid()) {
+          return GetPropertyWithFailedAccessCheck(receiver,
+                                                  &r,
+                                                  name,
+                                                  attributes);
+        }
+        break;
+      }
+      case INTERCEPTOR: {
+        // If the object has an interceptor, try real named properties.
+        // No access check in GetPropertyAttributeWithInterceptor.
+        LookupResult r;
+        result->holder()->LookupRealNamedProperty(name, &r);
+        if (r.IsValid()) {
+          return GetPropertyWithFailedAccessCheck(receiver,
+                                                  &r,
+                                                  name,
+                                                  attributes);
+        }
+      }
+      default: {
+        break;
+      }
+    }
+  }
+
+  // No accessible property found.
+  *attributes = ABSENT;
+  Top::ReportFailedAccessCheck(this, v8::ACCESS_GET);
+  return Heap::undefined_value();
+}
+
+
+PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
+    Object* receiver,
+    LookupResult* result,
+    String* name,
+    bool continue_search) {
+  if (result->IsValid()) {
+    switch (result->type()) {
+      case CALLBACKS: {
+        // Only allow API accessors.
+        Object* obj = result->GetCallbackObject();
+        if (obj->IsAccessorInfo()) {
+          AccessorInfo* info = AccessorInfo::cast(obj);
+          if (info->all_can_read()) {
+            return result->GetAttributes();
+          }
+        }
+        break;
+      }
+
+      case NORMAL:
+      case FIELD:
+      case CONSTANT_FUNCTION: {
+        if (!continue_search) break;
+        // Search ALL_CAN_READ accessors in prototype chain.
+        LookupResult r;
+        result->holder()->LookupRealNamedPropertyInPrototypes(name, &r);
+        if (r.IsValid()) {
+          return GetPropertyAttributeWithFailedAccessCheck(receiver,
+                                                           &r,
+                                                           name,
+                                                           continue_search);
+        }
+        break;
+      }
+
+      case INTERCEPTOR: {
+        // If the object has an interceptor, try real named properties.
+        // No access check in GetPropertyAttributeWithInterceptor.
+        LookupResult r;
+        if (continue_search) {
+          result->holder()->LookupRealNamedProperty(name, &r);
+        } else {
+          result->holder()->LocalLookupRealNamedProperty(name, &r);
+        }
+        if (r.IsValid()) {
+          return GetPropertyAttributeWithFailedAccessCheck(receiver,
+                                                           &r,
+                                                           name,
+                                                           continue_search);
+        }
+        break;
+      }
+
+      default: {
+        break;
+      }
+    }
+  }
+
+  Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+  return ABSENT;
+}
+
+
+Object* JSObject::GetLazyProperty(Object* receiver,
+                                  LookupResult* result,
+                                  String* name,
+                                  PropertyAttributes* attributes) {
+  HandleScope scope;
+  Handle<Object> this_handle(this);
+  Handle<Object> receiver_handle(receiver);
+  Handle<String> name_handle(name);
+  bool pending_exception;
+  LoadLazy(Handle<JSObject>(JSObject::cast(result->GetLazyValue())),
+           &pending_exception);
+  if (pending_exception) return Failure::Exception();
+  return this_handle->GetPropertyWithReceiver(*receiver_handle,
+                                              *name_handle,
+                                              attributes);
+}
+
+
+Object* JSObject::SetLazyProperty(LookupResult* result,
+                                  String* name,
+                                  Object* value,
+                                  PropertyAttributes attributes) {
+  ASSERT(!IsJSGlobalProxy());
+  HandleScope scope;
+  Handle<JSObject> this_handle(this);
+  Handle<String> name_handle(name);
+  Handle<Object> value_handle(value);
+  bool pending_exception;
+  LoadLazy(Handle<JSObject>(JSObject::cast(result->GetLazyValue())),
+           &pending_exception);
+  if (pending_exception) return Failure::Exception();
+  return this_handle->SetProperty(*name_handle, *value_handle, attributes);
+}
+
+
+Object* JSObject::DeleteLazyProperty(LookupResult* result,
+                                     String* name,
+                                     DeleteMode mode) {
+  HandleScope scope;
+  Handle<JSObject> this_handle(this);
+  Handle<String> name_handle(name);
+  bool pending_exception;
+  LoadLazy(Handle<JSObject>(JSObject::cast(result->GetLazyValue())),
+           &pending_exception);
+  if (pending_exception) return Failure::Exception();
+  return this_handle->DeleteProperty(*name_handle, mode);
+}
+
+
+Object* JSObject::GetNormalizedProperty(LookupResult* result) {
+  ASSERT(!HasFastProperties());
+  Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
+  if (IsGlobalObject()) {
+    value = JSGlobalPropertyCell::cast(value)->value();
+  }
+  ASSERT(!value->IsJSGlobalPropertyCell());
+  return value;
+}
+
+
+Object* JSObject::SetNormalizedProperty(LookupResult* result, Object* value) {
+  ASSERT(!HasFastProperties());
+  if (IsGlobalObject()) {
+    JSGlobalPropertyCell* cell =
+        JSGlobalPropertyCell::cast(
+            property_dictionary()->ValueAt(result->GetDictionaryEntry()));
+    cell->set_value(value);
+  } else {
+    property_dictionary()->ValueAtPut(result->GetDictionaryEntry(), value);
+  }
+  return value;
+}
+
+
+Object* JSObject::SetNormalizedProperty(String* name,
+                                        Object* value,
+                                        PropertyDetails details) {
+  ASSERT(!HasFastProperties());
+  int entry = property_dictionary()->FindEntry(name);
+  if (entry == StringDictionary::kNotFound) {
+    Object* store_value = value;
+    if (IsGlobalObject()) {
+      store_value = Heap::AllocateJSGlobalPropertyCell(value);
+      if (store_value->IsFailure()) return store_value;
+    }
+    Object* dict = property_dictionary()->Add(name, store_value, details);
+    if (dict->IsFailure()) return dict;
+    set_properties(StringDictionary::cast(dict));
+    return value;
+  }
+  // Preserve enumeration index.
+  details = PropertyDetails(details.attributes(),
+                            details.type(),
+                            property_dictionary()->DetailsAt(entry).index());
+  if (IsGlobalObject()) {
+    JSGlobalPropertyCell* cell =
+        JSGlobalPropertyCell::cast(property_dictionary()->ValueAt(entry));
+    cell->set_value(value);
+    // Please note we have to update the property details.
+    property_dictionary()->DetailsAtPut(entry, details);
+  } else {
+    property_dictionary()->SetEntry(entry, name, value, details);
+  }
+  return value;
+}
+
+
+Object* JSObject::DeleteNormalizedProperty(String* name, DeleteMode mode) {
+  ASSERT(!HasFastProperties());
+  StringDictionary* dictionary = property_dictionary();
+  int entry = dictionary->FindEntry(name);
+  if (entry != StringDictionary::kNotFound) {
+    // If we have a global object set the cell to the hole.
+    if (IsGlobalObject()) {
+      PropertyDetails details = dictionary->DetailsAt(entry);
+      if (details.IsDontDelete()) {
+        if (mode != FORCE_DELETION) return Heap::false_value();
+        // When forced to delete global properties, we have to make a
+        // map change to invalidate any ICs that think they can load
+        // from the DontDelete cell without checking if it contains
+        // the hole value.
+        Object* new_map = map()->CopyDropDescriptors();
+        if (new_map->IsFailure()) return new_map;
+        set_map(Map::cast(new_map));
+      }
+      JSGlobalPropertyCell* cell =
+          JSGlobalPropertyCell::cast(dictionary->ValueAt(entry));
+      cell->set_value(Heap::the_hole_value());
+      dictionary->DetailsAtPut(entry, details.AsDeleted());
+    } else {
+      return dictionary->DeleteProperty(entry, mode);
+    }
+  }
+  return Heap::true_value();
+}
+
+
+bool JSObject::IsDirty() {
+  Object* cons_obj = map()->constructor();
+  if (!cons_obj->IsJSFunction())
+    return true;
+  JSFunction* fun = JSFunction::cast(cons_obj);
+  if (!fun->shared()->function_data()->IsFunctionTemplateInfo())
+    return true;
+  // If the object is fully fast case and has the same map it was
+  // created with then no changes can have been made to it.
+  return map() != fun->initial_map()
+      || !HasFastElements()
+      || !HasFastProperties();
+}
+
+
+Object* Object::GetProperty(Object* receiver,
+                            LookupResult* result,
+                            String* name,
+                            PropertyAttributes* attributes) {
+  // Make sure that the top context does not change when doing
+  // callbacks or interceptor calls.
+  AssertNoContextChange ncc;
+
+  // Traverse the prototype chain from the current object (this) to
+  // the holder and check for access rights. This avoids traversing the
+  // objects more than once in case of interceptors, because the
+  // holder will always be the interceptor holder and the search may
+  // only continue with a current object just after the interceptor
+  // holder in the prototype chain.
+  Object* last = result->IsValid() ? result->holder() : Heap::null_value();
+  for (Object* current = this; true; current = current->GetPrototype()) {
+    if (current->IsAccessCheckNeeded()) {
+      // Check if we're allowed to read from the current object. Note
+      // that even though we may not actually end up loading the named
+      // property from the current object, we still check that we have
+      // access to it.
+      JSObject* checked = JSObject::cast(current);
+      if (!Top::MayNamedAccess(checked, name, v8::ACCESS_GET)) {
+        return checked->GetPropertyWithFailedAccessCheck(receiver,
+                                                         result,
+                                                         name,
+                                                         attributes);
+      }
+    }
+    // Stop traversing the chain once we reach the last object in the
+    // chain; either the holder of the result or null in case of an
+    // absent property.
+    if (current == last) break;
+  }
+
+  if (!result->IsProperty()) {
+    *attributes = ABSENT;
+    return Heap::undefined_value();
+  }
+  *attributes = result->GetAttributes();
+  if (!result->IsLoaded()) {
+    return JSObject::cast(this)->GetLazyProperty(receiver,
+                                                 result,
+                                                 name,
+                                                 attributes);
+  }
+  Object* value;
+  JSObject* holder = result->holder();
+  switch (result->type()) {
+    case NORMAL:
+      value = holder->GetNormalizedProperty(result);
+      ASSERT(!value->IsTheHole() || result->IsReadOnly());
+      return value->IsTheHole() ? Heap::undefined_value() : value;
+    case FIELD:
+      value = holder->FastPropertyAt(result->GetFieldIndex());
+      ASSERT(!value->IsTheHole() || result->IsReadOnly());
+      return value->IsTheHole() ? Heap::undefined_value() : value;
+    case CONSTANT_FUNCTION:
+      return result->GetConstantFunction();
+    case CALLBACKS:
+      return GetPropertyWithCallback(receiver,
+                                     result->GetCallbackObject(),
+                                     name,
+                                     holder);
+    case INTERCEPTOR: {
+      JSObject* recvr = JSObject::cast(receiver);
+      return holder->GetPropertyWithInterceptor(recvr, name, attributes);
+    }
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+Object* Object::GetElementWithReceiver(Object* receiver, uint32_t index) {
+  // Non-JS objects do not have integer indexed properties.
+  if (!IsJSObject()) return Heap::undefined_value();
+  return JSObject::cast(this)->GetElementWithReceiver(JSObject::cast(receiver),
+                                                      index);
+}
+
+
+Object* Object::GetPrototype() {
+  // The object is either a number, a string, a boolean, or a real JS object.
+  if (IsJSObject()) return JSObject::cast(this)->map()->prototype();
+  Context* context = Top::context()->global_context();
+
+  if (IsNumber()) return context->number_function()->instance_prototype();
+  if (IsString()) return context->string_function()->instance_prototype();
+  if (IsBoolean()) {
+    return context->boolean_function()->instance_prototype();
+  } else {
+    return Heap::null_value();
+  }
+}
+
+
+void Object::ShortPrint() {
+  HeapStringAllocator allocator;
+  StringStream accumulator(&allocator);
+  ShortPrint(&accumulator);
+  accumulator.OutputToStdOut();
+}
+
+
+void Object::ShortPrint(StringStream* accumulator) {
+  if (IsSmi()) {
+    Smi::cast(this)->SmiPrint(accumulator);
+  } else if (IsFailure()) {
+    Failure::cast(this)->FailurePrint(accumulator);
+  } else {
+    HeapObject::cast(this)->HeapObjectShortPrint(accumulator);
+  }
+}
+
+
+void Smi::SmiPrint() {
+  PrintF("%d", value());
+}
+
+
+void Smi::SmiPrint(StringStream* accumulator) {
+  accumulator->Add("%d", value());
+}
+
+
+void Failure::FailurePrint(StringStream* accumulator) {
+  accumulator->Add("Failure(%d)", value());
+}
+
+
+void Failure::FailurePrint() {
+  PrintF("Failure(%d)", value());
+}
+
+
+Failure* Failure::RetryAfterGC(int requested_bytes, AllocationSpace space) {
+  ASSERT((space & ~kSpaceTagMask) == 0);
+  // TODO(X64): Stop using Smi validation for non-smi checks, even if they
+  // happen to be identical at the moment.
+
+  int requested = requested_bytes >> kObjectAlignmentBits;
+  int value = (requested << kSpaceTagSize) | space;
+  // We can't very well allocate a heap number in this situation, and if the
+  // requested memory is so large it seems reasonable to say that this is an
+  // out of memory situation.  This fixes a crash in
+  // js1_5/Regress/regress-303213.js.
+  if (value >> kSpaceTagSize != requested ||
+      !Smi::IsValid(value) ||
+      value != ((value << kFailureTypeTagSize) >> kFailureTypeTagSize) ||
+      !Smi::IsValid(value << kFailureTypeTagSize)) {
+    Top::context()->mark_out_of_memory();
+    return Failure::OutOfMemoryException();
+  }
+  return Construct(RETRY_AFTER_GC, value);
+}
+
+
+// Should a word be prefixed by 'a' or 'an' in order to read naturally in
+// English?  Returns false for non-ASCII or words that don't start with
+// a capital letter.  The a/an rule follows pronunciation in English.
+// We don't use the BBC's overcorrect "an historic occasion" though if
+// you speak a dialect you may well say "an 'istoric occasion".
+static bool AnWord(String* str) {
+  if (str->length() == 0) return false;  // A nothing.
+  int c0 = str->Get(0);
+  int c1 = str->length() > 1 ? str->Get(1) : 0;
+  if (c0 == 'U') {
+    if (c1 > 'Z') {
+      return true;  // An Umpire, but a UTF8String, a U.
+    }
+  } else if (c0 == 'A' || c0 == 'E' || c0 == 'I' || c0 == 'O') {
+    return true;    // An Ape, an ABCBook.
+  } else if ((c1 == 0 || (c1 >= 'A' && c1 <= 'Z')) &&
+           (c0 == 'F' || c0 == 'H' || c0 == 'M' || c0 == 'N' || c0 == 'R' ||
+            c0 == 'S' || c0 == 'X')) {
+    return true;    // An MP3File, an M.
+  }
+  return false;
+}
+
+
+Object* String::TryFlatten() {
+#ifdef DEBUG
+  // Do not attempt to flatten in debug mode when allocation is not
+  // allowed.  This is to avoid an assertion failure when allocating.
+  // Flattening strings is the only case where we always allow
+  // allocation because no GC is performed if the allocation fails.
+  if (!Heap::IsAllocationAllowed()) return this;
+#endif
+
+  switch (StringShape(this).representation_tag()) {
+    case kSlicedStringTag: {
+      SlicedString* ss = SlicedString::cast(this);
+      // The SlicedString constructor should ensure that there are no
+      // SlicedStrings that are constructed directly on top of other
+      // SlicedStrings.
+      String* buf = ss->buffer();
+      ASSERT(!buf->IsSlicedString());
+      Object* ok = buf->TryFlatten();
+      if (ok->IsFailure()) return ok;
+      // Under certain circumstances (TryFlattenIfNotFlat fails in
+      // String::Slice) we can have a cons string under a slice.
+      // In this case we need to get the flat string out of the cons!
+      if (StringShape(String::cast(ok)).IsCons()) {
+        ss->set_buffer(ConsString::cast(ok)->first());
+      }
+      return this;
+    }
+    case kConsStringTag: {
+      ConsString* cs = ConsString::cast(this);
+      if (cs->second()->length() == 0) {
+        return this;
+      }
+      // There's little point in putting the flat string in new space if the
+      // cons string is in old space.  It can never get GCed until there is
+      // an old space GC.
+      PretenureFlag tenure = Heap::InNewSpace(this) ? NOT_TENURED : TENURED;
+      int len = length();
+      Object* object;
+      String* result;
+      if (IsAsciiRepresentation()) {
+        object = Heap::AllocateRawAsciiString(len, tenure);
+        if (object->IsFailure()) return object;
+        result = String::cast(object);
+        String* first = cs->first();
+        int first_length = first->length();
+        char* dest = SeqAsciiString::cast(result)->GetChars();
+        WriteToFlat(first, dest, 0, first_length);
+        String* second = cs->second();
+        WriteToFlat(second,
+                    dest + first_length,
+                    0,
+                    len - first_length);
+      } else {
+        object = Heap::AllocateRawTwoByteString(len, tenure);
+        if (object->IsFailure()) return object;
+        result = String::cast(object);
+        uc16* dest = SeqTwoByteString::cast(result)->GetChars();
+        String* first = cs->first();
+        int first_length = first->length();
+        WriteToFlat(first, dest, 0, first_length);
+        String* second = cs->second();
+        WriteToFlat(second,
+                    dest + first_length,
+                    0,
+                    len - first_length);
+      }
+      cs->set_first(result);
+      cs->set_second(Heap::empty_string());
+      return this;
+    }
+    default:
+      return this;
+  }
+}
+
+
+bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
+#ifdef DEBUG
+  {  // NOLINT (presubmit.py gets confused about if and braces)
+    // Assert that the resource and the string are equivalent.
+    ASSERT(static_cast<size_t>(this->length()) == resource->length());
+    SmartPointer<uc16> smart_chars = this->ToWideCString();
+    ASSERT(memcmp(*smart_chars,
+                  resource->data(),
+                  resource->length() * sizeof(**smart_chars)) == 0);
+  }
+#endif  // DEBUG
+
+  int size = this->Size();  // Byte size of the original string.
+  if (size < ExternalString::kSize) {
+    // The string is too small to fit an external String in its place. This can
+    // only happen for zero length strings.
+    return false;
+  }
+  ASSERT(size >= ExternalString::kSize);
+  bool is_symbol = this->IsSymbol();
+  int length = this->length();
+
+  // Morph the object to an external string by adjusting the map and
+  // reinitializing the fields.
+  this->set_map(ExternalTwoByteString::StringMap(length));
+  ExternalTwoByteString* self = ExternalTwoByteString::cast(this);
+  self->set_length(length);
+  self->set_resource(resource);
+  // Additionally make the object into an external symbol if the original string
+  // was a symbol to start with.
+  if (is_symbol) {
+    self->Hash();  // Force regeneration of the hash value.
+    // Now morph this external string into an external symbol.
+    self->set_map(ExternalTwoByteString::SymbolMap(length));
+  }
+
+  // Fill the remainder of the string with dead wood.
+  int new_size = this->Size();  // Byte size of the external String object.
+  Heap::CreateFillerObjectAt(this->address() + new_size, size - new_size);
+  return true;
+}
+
+
+bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
+#ifdef DEBUG
+  {  // NOLINT (presubmit.py gets confused about if and braces)
+    // Assert that the resource and the string are equivalent.
+    ASSERT(static_cast<size_t>(this->length()) == resource->length());
+    SmartPointer<char> smart_chars = this->ToCString();
+    ASSERT(memcmp(*smart_chars,
+                  resource->data(),
+                  resource->length()*sizeof(**smart_chars)) == 0);
+  }
+#endif  // DEBUG
+
+  int size = this->Size();  // Byte size of the original string.
+  if (size < ExternalString::kSize) {
+    // The string is too small to fit an external String in its place. This can
+    // only happen for zero length strings.
+    return false;
+  }
+  ASSERT(size >= ExternalString::kSize);
+  bool is_symbol = this->IsSymbol();
+  int length = this->length();
+
+  // Morph the object to an external string by adjusting the map and
+  // reinitializing the fields.
+  this->set_map(ExternalAsciiString::StringMap(length));
+  ExternalAsciiString* self = ExternalAsciiString::cast(this);
+  self->set_length(length);
+  self->set_resource(resource);
+  // Additionally make the object into an external symbol if the original string
+  // was a symbol to start with.
+  if (is_symbol) {
+    self->Hash();  // Force regeneration of the hash value.
+    // Now morph this external string into an external symbol.
+    self->set_map(ExternalAsciiString::SymbolMap(length));
+  }
+
+  // Fill the remainder of the string with dead wood.
+  int new_size = this->Size();  // Byte size of the external String object.
+  Heap::CreateFillerObjectAt(this->address() + new_size, size - new_size);
+  return true;
+}
+
+
+void String::StringShortPrint(StringStream* accumulator) {
+  int len = length();
+  if (len > kMaxMediumStringSize) {
+    accumulator->Add("<Very long string[%u]>", len);
+    return;
+  }
+
+  if (!LooksValid()) {
+    accumulator->Add("<Invalid String>");
+    return;
+  }
+
+  StringInputBuffer buf(this);
+
+  bool truncated = false;
+  if (len > kMaxShortPrintLength) {
+    len = kMaxShortPrintLength;
+    truncated = true;
+  }
+  bool ascii = true;
+  for (int i = 0; i < len; i++) {
+    int c = buf.GetNext();
+
+    if (c < 32 || c >= 127) {
+      ascii = false;
+    }
+  }
+  buf.Reset(this);
+  if (ascii) {
+    accumulator->Add("<String[%u]: ", length());
+    for (int i = 0; i < len; i++) {
+      accumulator->Put(buf.GetNext());
+    }
+    accumulator->Put('>');
+  } else {
+    // Backslash indicates that the string contains control
+    // characters and that backslashes are therefore escaped.
+    accumulator->Add("<String[%u]\\: ", length());
+    for (int i = 0; i < len; i++) {
+      int c = buf.GetNext();
+      if (c == '\n') {
+        accumulator->Add("\\n");
+      } else if (c == '\r') {
+        accumulator->Add("\\r");
+      } else if (c == '\\') {
+        accumulator->Add("\\\\");
+      } else if (c < 32 || c > 126) {
+        accumulator->Add("\\x%02x", c);
+      } else {
+        accumulator->Put(c);
+      }
+    }
+    if (truncated) {
+      accumulator->Put('.');
+      accumulator->Put('.');
+      accumulator->Put('.');
+    }
+    accumulator->Put('>');
+  }
+  return;
+}
+
+
+void JSObject::JSObjectShortPrint(StringStream* accumulator) {
+  switch (map()->instance_type()) {
+    case JS_ARRAY_TYPE: {
+      double length = JSArray::cast(this)->length()->Number();
+      accumulator->Add("<JS array[%u]>", static_cast<uint32_t>(length));
+      break;
+    }
+    case JS_REGEXP_TYPE: {
+      accumulator->Add("<JS RegExp>");
+      break;
+    }
+    case JS_FUNCTION_TYPE: {
+      Object* fun_name = JSFunction::cast(this)->shared()->name();
+      bool printed = false;
+      if (fun_name->IsString()) {
+        String* str = String::cast(fun_name);
+        if (str->length() > 0) {
+          accumulator->Add("<JS Function ");
+          accumulator->Put(str);
+          accumulator->Put('>');
+          printed = true;
+        }
+      }
+      if (!printed) {
+        accumulator->Add("<JS Function>");
+      }
+      break;
+    }
+    // All other JSObjects are rather similar to each other (JSObject,
+    // JSGlobalProxy, JSGlobalObject, JSUndetectableObject, JSValue).
+    default: {
+      Object* constructor = map()->constructor();
+      bool printed = false;
+      if (constructor->IsHeapObject() &&
+          !Heap::Contains(HeapObject::cast(constructor))) {
+        accumulator->Add("!!!INVALID CONSTRUCTOR!!!");
+      } else {
+        bool global_object = IsJSGlobalProxy();
+        if (constructor->IsJSFunction()) {
+          if (!Heap::Contains(JSFunction::cast(constructor)->shared())) {
+            accumulator->Add("!!!INVALID SHARED ON CONSTRUCTOR!!!");
+          } else {
+            Object* constructor_name =
+                JSFunction::cast(constructor)->shared()->name();
+            if (constructor_name->IsString()) {
+              String* str = String::cast(constructor_name);
+              if (str->length() > 0) {
+                bool vowel = AnWord(str);
+                accumulator->Add("<%sa%s ",
+                       global_object ? "Global Object: " : "",
+                       vowel ? "n" : "");
+                accumulator->Put(str);
+                accumulator->Put('>');
+                printed = true;
+              }
+            }
+          }
+        }
+        if (!printed) {
+          accumulator->Add("<JS %sObject", global_object ? "Global " : "");
+        }
+      }
+      if (IsJSValue()) {
+        accumulator->Add(" value = ");
+        JSValue::cast(this)->value()->ShortPrint(accumulator);
+      }
+      accumulator->Put('>');
+      break;
+    }
+  }
+}
+
+
+void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
+  // if (!Heap::InNewSpace(this)) PrintF("*", this);
+  if (!Heap::Contains(this)) {
+    accumulator->Add("!!!INVALID POINTER!!!");
+    return;
+  }
+  if (!Heap::Contains(map())) {
+    accumulator->Add("!!!INVALID MAP!!!");
+    return;
+  }
+
+  accumulator->Add("%p ", this);
+
+  if (IsString()) {
+    String::cast(this)->StringShortPrint(accumulator);
+    return;
+  }
+  if (IsJSObject()) {
+    JSObject::cast(this)->JSObjectShortPrint(accumulator);
+    return;
+  }
+  switch (map()->instance_type()) {
+    case MAP_TYPE:
+      accumulator->Add("<Map>");
+      break;
+    case FIXED_ARRAY_TYPE:
+      accumulator->Add("<FixedArray[%u]>", FixedArray::cast(this)->length());
+      break;
+    case BYTE_ARRAY_TYPE:
+      accumulator->Add("<ByteArray[%u]>", ByteArray::cast(this)->length());
+      break;
+    case PIXEL_ARRAY_TYPE:
+      accumulator->Add("<PixelArray[%u]>", PixelArray::cast(this)->length());
+      break;
+    case SHARED_FUNCTION_INFO_TYPE:
+      accumulator->Add("<SharedFunctionInfo>");
+      break;
+#define MAKE_STRUCT_CASE(NAME, Name, name) \
+  case NAME##_TYPE:                        \
+    accumulator->Put('<');                 \
+    accumulator->Add(#Name);               \
+    accumulator->Put('>');                 \
+    break;
+  STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+    case CODE_TYPE:
+      accumulator->Add("<Code>");
+      break;
+    case ODDBALL_TYPE: {
+      if (IsUndefined())
+        accumulator->Add("<undefined>");
+      else if (IsTheHole())
+        accumulator->Add("<the hole>");
+      else if (IsNull())
+        accumulator->Add("<null>");
+      else if (IsTrue())
+        accumulator->Add("<true>");
+      else if (IsFalse())
+        accumulator->Add("<false>");
+      else
+        accumulator->Add("<Odd Oddball>");
+      break;
+    }
+    case HEAP_NUMBER_TYPE:
+      accumulator->Add("<Number: ");
+      HeapNumber::cast(this)->HeapNumberPrint(accumulator);
+      accumulator->Put('>');
+      break;
+    case PROXY_TYPE:
+      accumulator->Add("<Proxy>");
+      break;
+    case JS_GLOBAL_PROPERTY_CELL_TYPE:
+      accumulator->Add("Cell for ");
+      JSGlobalPropertyCell::cast(this)->value()->ShortPrint(accumulator);
+      break;
+    default:
+      accumulator->Add("<Other heap object (%d)>", map()->instance_type());
+      break;
+  }
+}
+
+
+int HeapObject::SlowSizeFromMap(Map* map) {
+  // Avoid calling functions such as FixedArray::cast during GC, which
+  // read map pointer of this object again.
+  InstanceType instance_type = map->instance_type();
+  uint32_t type = static_cast<uint32_t>(instance_type);
+
+  if (instance_type < FIRST_NONSTRING_TYPE
+      && (StringShape(instance_type).IsSequential())) {
+    if ((type & kStringEncodingMask) == kAsciiStringTag) {
+      SeqAsciiString* seq_ascii_this = reinterpret_cast<SeqAsciiString*>(this);
+      return seq_ascii_this->SeqAsciiStringSize(instance_type);
+    } else {
+      SeqTwoByteString* self = reinterpret_cast<SeqTwoByteString*>(this);
+      return self->SeqTwoByteStringSize(instance_type);
+    }
+  }
+
+  switch (instance_type) {
+    case FIXED_ARRAY_TYPE:
+      return reinterpret_cast<FixedArray*>(this)->FixedArraySize();
+    case BYTE_ARRAY_TYPE:
+      return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
+    case CODE_TYPE:
+      return reinterpret_cast<Code*>(this)->CodeSize();
+    case MAP_TYPE:
+      return Map::kSize;
+    default:
+      return map->instance_size();
+  }
+}
+
+
+void HeapObject::Iterate(ObjectVisitor* v) {
+  // Handle header
+  IteratePointer(v, kMapOffset);
+  // Handle object body
+  Map* m = map();
+  IterateBody(m->instance_type(), SizeFromMap(m), v);
+}
+
+
+void HeapObject::IterateBody(InstanceType type, int object_size,
+                             ObjectVisitor* v) {
+  // Avoiding <Type>::cast(this) because it accesses the map pointer field.
+  // During GC, the map pointer field is encoded.
+  if (type < FIRST_NONSTRING_TYPE) {
+    switch (type & kStringRepresentationMask) {
+      case kSeqStringTag:
+        break;
+      case kConsStringTag:
+        reinterpret_cast<ConsString*>(this)->ConsStringIterateBody(v);
+        break;
+      case kSlicedStringTag:
+        reinterpret_cast<SlicedString*>(this)->SlicedStringIterateBody(v);
+        break;
+    }
+    return;
+  }
+
+  switch (type) {
+    case FIXED_ARRAY_TYPE:
+      reinterpret_cast<FixedArray*>(this)->FixedArrayIterateBody(v);
+      break;
+    case JS_OBJECT_TYPE:
+    case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+    case JS_VALUE_TYPE:
+    case JS_ARRAY_TYPE:
+    case JS_REGEXP_TYPE:
+    case JS_FUNCTION_TYPE:
+    case JS_GLOBAL_PROXY_TYPE:
+    case JS_GLOBAL_OBJECT_TYPE:
+    case JS_BUILTINS_OBJECT_TYPE:
+      reinterpret_cast<JSObject*>(this)->JSObjectIterateBody(object_size, v);
+      break;
+    case ODDBALL_TYPE:
+      reinterpret_cast<Oddball*>(this)->OddballIterateBody(v);
+      break;
+    case PROXY_TYPE:
+      reinterpret_cast<Proxy*>(this)->ProxyIterateBody(v);
+      break;
+    case MAP_TYPE:
+      reinterpret_cast<Map*>(this)->MapIterateBody(v);
+      break;
+    case CODE_TYPE:
+      reinterpret_cast<Code*>(this)->CodeIterateBody(v);
+      break;
+    case JS_GLOBAL_PROPERTY_CELL_TYPE:
+      reinterpret_cast<JSGlobalPropertyCell*>(this)
+          ->JSGlobalPropertyCellIterateBody(v);
+      break;
+    case HEAP_NUMBER_TYPE:
+    case FILLER_TYPE:
+    case BYTE_ARRAY_TYPE:
+    case PIXEL_ARRAY_TYPE:
+      break;
+    case SHARED_FUNCTION_INFO_TYPE: {
+      SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(this);
+      shared->SharedFunctionInfoIterateBody(v);
+      break;
+    }
+#define MAKE_STRUCT_CASE(NAME, Name, name) \
+        case NAME##_TYPE:
+      STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+      IterateStructBody(object_size, v);
+      break;
+    default:
+      PrintF("Unknown type: %d\n", type);
+      UNREACHABLE();
+  }
+}
+
+
+void HeapObject::IterateStructBody(int object_size, ObjectVisitor* v) {
+  IteratePointers(v, HeapObject::kHeaderSize, object_size);
+}
+
+
+Object* HeapNumber::HeapNumberToBoolean() {
+  // NaN, +0, and -0 should return the false object
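+  // This matches the ECMAScript ToBoolean conversion for numbers, e.g. in
+  // JavaScript: Boolean(NaN) == false, Boolean(-0) == false,
+  // Boolean(0.5) == true.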
+  switch (fpclassify(value())) {
+    case FP_NAN:  // fall through
+    case FP_ZERO: return Heap::false_value();
+    default: return Heap::true_value();
+  }
+}
+
+
+void HeapNumber::HeapNumberPrint() {
+  PrintF("%.16g", Number());
+}
+
+
+void HeapNumber::HeapNumberPrint(StringStream* accumulator) {
+  // The Windows version of vsnprintf can allocate when printing a %g string
+  // into a buffer that may not be big enough.  We don't want random memory
+  // allocation when producing post-crash stack traces, so we print into a
+  // buffer that is plenty big enough for any floating point number, then
+  // print that using vsnprintf (which may truncate but never allocate if
+  // there is no more space in the buffer).
+  EmbeddedVector<char, 100> buffer;
+  OS::SNPrintF(buffer, "%.16g", Number());
+  accumulator->Add("%s", buffer.start());
+}
+
+
+String* JSObject::class_name() {
+  if (IsJSFunction()) {
+    return Heap::function_class_symbol();
+  }
+  if (map()->constructor()->IsJSFunction()) {
+    JSFunction* constructor = JSFunction::cast(map()->constructor());
+    return String::cast(constructor->shared()->instance_class_name());
+  }
+  // If the constructor is not present, return "Object".
+  return Heap::Object_symbol();
+}
+
+
+String* JSObject::constructor_name() {
+  if (IsJSFunction()) {
+    return Heap::function_class_symbol();
+  }
+  if (map()->constructor()->IsJSFunction()) {
+    JSFunction* constructor = JSFunction::cast(map()->constructor());
+    String* name = String::cast(constructor->shared()->name());
+    return name->length() > 0 ? name : constructor->shared()->inferred_name();
+  }
+  // If the constructor is not present, return "Object".
+  return Heap::Object_symbol();
+}
+
+
+void JSObject::JSObjectIterateBody(int object_size, ObjectVisitor* v) {
+  // Iterate over all fields in the body. Assumes all are Object*.
+  IteratePointers(v, kPropertiesOffset, object_size);
+}
+
+
+Object* JSObject::AddFastPropertyUsingMap(Map* new_map,
+                                          String* name,
+                                          Object* value) {
+  int index = new_map->PropertyIndexFor(name);
+  if (map()->unused_property_fields() == 0) {
+    ASSERT(map()->unused_property_fields() == 0);
+    int new_unused = new_map->unused_property_fields();
+    Object* values =
+        properties()->CopySize(properties()->length() + new_unused + 1);
+    if (values->IsFailure()) return values;
+    set_properties(FixedArray::cast(values));
+  }
+  set_map(new_map);
+  return FastPropertyAtPut(index, value);
+}
+
+
+Object* JSObject::AddFastProperty(String* name,
+                                  Object* value,
+                                  PropertyAttributes attributes) {
+  // Normalize the object if the name is an actual string (not the
+  // hidden symbol) and is not a real identifier.
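+  // For example, a store like obj["not an identifier"] = 1 takes the slow
+  // (dictionary) path below, while obj.foo = 1 can stay in fast mode.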
+  StringInputBuffer buffer(name);
+  if (!Scanner::IsIdentifier(&buffer) && name != Heap::hidden_symbol()) {
+    Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
+    if (obj->IsFailure()) return obj;
+    return AddSlowProperty(name, value, attributes);
+  }
+
+  DescriptorArray* old_descriptors = map()->instance_descriptors();
+  // Compute the new index for new field.
+  int index = map()->NextFreePropertyIndex();
+
+  // Allocate new instance descriptors with (name, index) added
+  FieldDescriptor new_field(name, index, attributes);
+  Object* new_descriptors =
+      old_descriptors->CopyInsert(&new_field, REMOVE_TRANSITIONS);
+  if (new_descriptors->IsFailure()) return new_descriptors;
+
+  // Only allow map transition if the object's map is NOT equal to the
+  // global object_function's map and there is not a transition for name.
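+  // (The map of "new Object()" never receives transitions; see the
+  // corresponding check in AddConstantFunctionProperty below.)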
+  bool allow_map_transition =
+        !old_descriptors->Contains(name) &&
+        (Top::context()->global_context()->object_function()->map() != map());
+
+  ASSERT(index < map()->inobject_properties() ||
+         (index - map()->inobject_properties()) < properties()->length() ||
+         map()->unused_property_fields() == 0);
+  // Allocate a new map for the object.
+  Object* r = map()->CopyDropDescriptors();
+  if (r->IsFailure()) return r;
+  Map* new_map = Map::cast(r);
+  if (allow_map_transition) {
+    // Allocate new instance descriptors for the old map with map transition.
+    MapTransitionDescriptor d(name, Map::cast(new_map), attributes);
+    Object* r = old_descriptors->CopyInsert(&d, KEEP_TRANSITIONS);
+    if (r->IsFailure()) return r;
+    old_descriptors = DescriptorArray::cast(r);
+  }
+
+  if (map()->unused_property_fields() == 0) {
+    if (properties()->length() > kMaxFastProperties) {
+      Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
+      if (obj->IsFailure()) return obj;
+      return AddSlowProperty(name, value, attributes);
+    }
+    // Make room for the new value
+    Object* values =
+        properties()->CopySize(properties()->length() + kFieldsAdded);
+    if (values->IsFailure()) return values;
+    set_properties(FixedArray::cast(values));
+    new_map->set_unused_property_fields(kFieldsAdded - 1);
+  } else {
+    new_map->set_unused_property_fields(map()->unused_property_fields() - 1);
+  }
+  // We have now allocated all the necessary objects.
+  // All the changes can be applied at once, so they are atomic.
+  map()->set_instance_descriptors(old_descriptors);
+  new_map->set_instance_descriptors(DescriptorArray::cast(new_descriptors));
+  set_map(new_map);
+  return FastPropertyAtPut(index, value);
+}
+
+
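+// Adds a property whose value is a JSFunction by recording the function
+// directly in a ConstantFunctionDescriptor instead of allocating a field.
+// Illustrative JavaScript that can reach this path via AddProperty below:
+//   var o = {};
+//   o.m = function () { return 1; };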
+Object* JSObject::AddConstantFunctionProperty(String* name,
+                                              JSFunction* function,
+                                              PropertyAttributes attributes) {
+  // Allocate new instance descriptors with (name, function) added
+  ConstantFunctionDescriptor d(name, function, attributes);
+  Object* new_descriptors =
+      map()->instance_descriptors()->CopyInsert(&d, REMOVE_TRANSITIONS);
+  if (new_descriptors->IsFailure()) return new_descriptors;
+
+  // Allocate a new map for the object.
+  Object* new_map = map()->CopyDropDescriptors();
+  if (new_map->IsFailure()) return new_map;
+
+  DescriptorArray* descriptors = DescriptorArray::cast(new_descriptors);
+  Map::cast(new_map)->set_instance_descriptors(descriptors);
+  Map* old_map = map();
+  set_map(Map::cast(new_map));
+
+  // If the old map is the global object map (from new Object()),
+  // then transitions are not added to it, so we are done.
+  if (old_map == Top::context()->global_context()->object_function()->map()) {
+    return function;
+  }
+
+  // Do not add CONSTANT_TRANSITIONS to global objects
+  if (IsGlobalObject()) {
+    return function;
+  }
+
+  // Add a CONSTANT_TRANSITION descriptor to the old map,
+  // so future assignments to this property on other objects
+  // of the same type will create a normal field, not a constant function.
+  // Don't do this for special properties with non-trivial attributes.
+  if (attributes != NONE) {
+    return function;
+  }
+  ConstTransitionDescriptor mark(name);
+  new_descriptors =
+      old_map->instance_descriptors()->CopyInsert(&mark, KEEP_TRANSITIONS);
+  if (new_descriptors->IsFailure()) {
+    return function;  // We have accomplished the main goal, so return success.
+  }
+  old_map->set_instance_descriptors(DescriptorArray::cast(new_descriptors));
+
+  return function;
+}
+
+
+// Add property in slow mode
+Object* JSObject::AddSlowProperty(String* name,
+                                  Object* value,
+                                  PropertyAttributes attributes) {
+  ASSERT(!HasFastProperties());
+  StringDictionary* dict = property_dictionary();
+  Object* store_value = value;
+  if (IsGlobalObject()) {
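+    // Properties of global objects are stored indirectly: the dictionary
+    // entry holds a JSGlobalPropertyCell and the cell holds the value.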
+    // If the name refers to an orphaned property, reuse its cell.
+    int entry = dict->FindEntry(name);
+    if (entry != StringDictionary::kNotFound) {
+      store_value = dict->ValueAt(entry);
+      JSGlobalPropertyCell::cast(store_value)->set_value(value);
+      // Assign an enumeration index to the property and update
+      // SetNextEnumerationIndex.
+      int index = dict->NextEnumerationIndex();
+      PropertyDetails details = PropertyDetails(attributes, NORMAL, index);
+      dict->SetNextEnumerationIndex(index + 1);
+      dict->SetEntry(entry, name, store_value, details);
+      return value;
+    }
+    store_value = Heap::AllocateJSGlobalPropertyCell(value);
+    if (store_value->IsFailure()) return store_value;
+    JSGlobalPropertyCell::cast(store_value)->set_value(value);
+  }
+  PropertyDetails details = PropertyDetails(attributes, NORMAL);
+  Object* result = dict->Add(name, store_value, details);
+  if (result->IsFailure()) return result;
+  if (dict != result) set_properties(StringDictionary::cast(result));
+  return value;
+}
+
+
+Object* JSObject::AddProperty(String* name,
+                              Object* value,
+                              PropertyAttributes attributes) {
+  ASSERT(!IsJSGlobalProxy());
+  if (HasFastProperties()) {
+    // Ensure the descriptor array does not get too big.
+    if (map()->instance_descriptors()->number_of_descriptors() <
+        DescriptorArray::kMaxNumberOfDescriptors) {
+      if (value->IsJSFunction()) {
+        return AddConstantFunctionProperty(name,
+                                           JSFunction::cast(value),
+                                           attributes);
+      } else {
+        return AddFastProperty(name, value, attributes);
+      }
+    } else {
+      // Normalize the object to prevent very large instance descriptors.
+      // This eliminates unwanted N^2 allocation and lookup behavior.
+      Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
+      if (obj->IsFailure()) return obj;
+    }
+  }
+  return AddSlowProperty(name, value, attributes);
+}
+
+
+Object* JSObject::SetPropertyPostInterceptor(String* name,
+                                             Object* value,
+                                             PropertyAttributes attributes) {
+  // Check local property, ignore interceptor.
+  LookupResult result;
+  LocalLookupRealNamedProperty(name, &result);
+  if (result.IsValid()) return SetProperty(&result, name, value, attributes);
+  // Add real property.
+  return AddProperty(name, value, attributes);
+}
+
+
+Object* JSObject::ReplaceSlowProperty(String* name,
+                                       Object* value,
+                                       PropertyAttributes attributes) {
+  StringDictionary* dictionary = property_dictionary();
+  int old_index = dictionary->FindEntry(name);
+  int new_enumeration_index = 0;  // 0 means "Use the next available index."
+  if (old_index != -1) {
+    // All calls to ReplaceSlowProperty have had all transitions removed.
+    ASSERT(!dictionary->DetailsAt(old_index).IsTransition());
+    new_enumeration_index = dictionary->DetailsAt(old_index).index();
+  }
+
+  PropertyDetails new_details(attributes, NORMAL, new_enumeration_index);
+  return SetNormalizedProperty(name, value, new_details);
+}
+
+Object* JSObject::ConvertDescriptorToFieldAndMapTransition(
+    String* name,
+    Object* new_value,
+    PropertyAttributes attributes) {
+  Map* old_map = map();
+  Object* result = ConvertDescriptorToField(name, new_value, attributes);
+  if (result->IsFailure()) return result;
+  // If we get to this point we have succeeded - do not return failure
+  // after this point.  Later stuff is optional.
+  if (!HasFastProperties()) {
+    return result;
+  }
+  // Do not add transitions to the map of "new Object()".
+  if (map() == Top::context()->global_context()->object_function()->map()) {
+    return result;
+  }
+
+  MapTransitionDescriptor transition(name,
+                                     map(),
+                                     attributes);
+  Object* new_descriptors =
+      old_map->instance_descriptors()->
+          CopyInsert(&transition, KEEP_TRANSITIONS);
+  if (new_descriptors->IsFailure()) return result;  // Yes, return _result_.
+  old_map->set_instance_descriptors(DescriptorArray::cast(new_descriptors));
+  return result;
+}
+
+
+Object* JSObject::ConvertDescriptorToField(String* name,
+                                           Object* new_value,
+                                           PropertyAttributes attributes) {
+  if (map()->unused_property_fields() == 0 &&
+      properties()->length() > kMaxFastProperties) {
+    Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
+    if (obj->IsFailure()) return obj;
+    return ReplaceSlowProperty(name, new_value, attributes);
+  }
+
+  int index = map()->NextFreePropertyIndex();
+  FieldDescriptor new_field(name, index, attributes);
+  // Make a new DescriptorArray replacing an entry with FieldDescriptor.
+  Object* descriptors_unchecked = map()->instance_descriptors()->
+      CopyInsert(&new_field, REMOVE_TRANSITIONS);
+  if (descriptors_unchecked->IsFailure()) return descriptors_unchecked;
+  DescriptorArray* new_descriptors =
+      DescriptorArray::cast(descriptors_unchecked);
+
+  // Make a new map for the object.
+  Object* new_map_unchecked = map()->CopyDropDescriptors();
+  if (new_map_unchecked->IsFailure()) return new_map_unchecked;
+  Map* new_map = Map::cast(new_map_unchecked);
+  new_map->set_instance_descriptors(new_descriptors);
+
+  // Make new properties array if necessary.
+  FixedArray* new_properties = 0;  // Will always be NULL or a valid pointer.
+  int new_unused_property_fields = map()->unused_property_fields() - 1;
+  if (map()->unused_property_fields() == 0) {
+    new_unused_property_fields = kFieldsAdded - 1;
+    Object* new_properties_unchecked =
+        properties()->CopySize(properties()->length() + kFieldsAdded);
+    if (new_properties_unchecked->IsFailure()) return new_properties_unchecked;
+    new_properties = FixedArray::cast(new_properties_unchecked);
+  }
+
+  // Update pointers to commit changes.
+  // Object points to the new map.
+  new_map->set_unused_property_fields(new_unused_property_fields);
+  set_map(new_map);
+  if (new_properties) {
+    set_properties(FixedArray::cast(new_properties));
+  }
+  return FastPropertyAtPut(index, new_value);
+}
+
+
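+// Stores a property on an object that has a named property interceptor
+// (installed through the embedder API, e.g. an ObjectTemplate named
+// property handler).  If the interceptor's setter does not handle the
+// store, fall back to SetPropertyPostInterceptor.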
+Object* JSObject::SetPropertyWithInterceptor(String* name,
+                                             Object* value,
+                                             PropertyAttributes attributes) {
+  HandleScope scope;
+  Handle<JSObject> this_handle(this);
+  Handle<String> name_handle(name);
+  Handle<Object> value_handle(value);
+  Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
+  if (!interceptor->setter()->IsUndefined()) {
+    LOG(ApiNamedPropertyAccess("interceptor-named-set", this, name));
+    CustomArguments args(interceptor->data(), this, this);
+    v8::AccessorInfo info(args.end());
+    v8::NamedPropertySetter setter =
+        v8::ToCData<v8::NamedPropertySetter>(interceptor->setter());
+    v8::Handle<v8::Value> result;
+    {
+      // Leaving JavaScript.
+      VMState state(EXTERNAL);
+      Handle<Object> value_unhole(value->IsTheHole() ?
+                                  Heap::undefined_value() :
+                                  value);
+      result = setter(v8::Utils::ToLocal(name_handle),
+                      v8::Utils::ToLocal(value_unhole),
+                      info);
+    }
+    RETURN_IF_SCHEDULED_EXCEPTION();
+    if (!result.IsEmpty()) return *value_handle;
+  }
+  Object* raw_result = this_handle->SetPropertyPostInterceptor(*name_handle,
+                                                               *value_handle,
+                                                               attributes);
+  RETURN_IF_SCHEDULED_EXCEPTION();
+  return raw_result;
+}
+
+
+Object* JSObject::SetProperty(String* name,
+                              Object* value,
+                              PropertyAttributes attributes) {
+  LookupResult result;
+  LocalLookup(name, &result);
+  return SetProperty(&result, name, value, attributes);
+}
+
+
+Object* JSObject::SetPropertyWithCallback(Object* structure,
+                                          String* name,
+                                          Object* value,
+                                          JSObject* holder) {
+  HandleScope scope;
+
+  // We should never get here to initialize a const with the hole
+  // value since a const declaration would conflict with the setter.
+  ASSERT(!value->IsTheHole());
+  Handle<Object> value_handle(value);
+
+  // To accommodate both the old and the new API, we switch on the
+  // data structure used to store the callbacks.  Eventually proxy
+  // callbacks should be phased out.
+  if (structure->IsProxy()) {
+    AccessorDescriptor* callback =
+        reinterpret_cast<AccessorDescriptor*>(Proxy::cast(structure)->proxy());
+    Object* obj = (callback->setter)(this, value, callback->data);
+    RETURN_IF_SCHEDULED_EXCEPTION();
+    if (obj->IsFailure()) return obj;
+    return *value_handle;
+  }
+
+  if (structure->IsAccessorInfo()) {
+    // API-style callbacks.
+    AccessorInfo* data = AccessorInfo::cast(structure);
+    Object* call_obj = data->setter();
+    v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
+    if (call_fun == NULL) return value;
+    Handle<String> key(name);
+    LOG(ApiNamedPropertyAccess("store", this, name));
+    CustomArguments args(data->data(), this, JSObject::cast(holder));
+    v8::AccessorInfo info(args.end());
+    {
+      // Leaving JavaScript.
+      VMState state(EXTERNAL);
+      call_fun(v8::Utils::ToLocal(key),
+               v8::Utils::ToLocal(value_handle),
+               info);
+    }
+    RETURN_IF_SCHEDULED_EXCEPTION();
+    return *value_handle;
+  }
+
+  if (structure->IsFixedArray()) {
+    Object* setter = FixedArray::cast(structure)->get(kSetterIndex);
+    if (setter->IsJSFunction()) {
+      return SetPropertyWithDefinedSetter(JSFunction::cast(setter), value);
+    } else {
+      Handle<String> key(name);
+      Handle<Object> holder_handle(holder);
+      Handle<Object> args[2] = { key, holder_handle };
+      return Top::Throw(*Factory::NewTypeError("no_setter_in_callback",
+                                               HandleVector(args, 2)));
+    }
+  }
+
+  UNREACHABLE();
+  return 0;
+}
+
+
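+// Invokes a JavaScript setter function defined for the property, e.g. one
+// installed with __defineSetter__:
+//   o.__defineSetter__('x', function (v) { this.backing = v; });
+//   o.x = 42;  // calls the setter with 42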
+Object* JSObject::SetPropertyWithDefinedSetter(JSFunction* setter,
+                                               Object* value) {
+  Handle<Object> value_handle(value);
+  Handle<JSFunction> fun(JSFunction::cast(setter));
+  Handle<JSObject> self(this);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Handle stepping into a setter if step into is active.
+  if (Debug::StepInActive()) {
+    Debug::HandleStepIn(fun, Handle<Object>::null(), 0, false);
+  }
+#endif
+  bool has_pending_exception;
+  Object** argv[] = { value_handle.location() };
+  Execution::Call(fun, self, 1, argv, &has_pending_exception);
+  // Check for pending exception and return the result.
+  if (has_pending_exception) return Failure::Exception();
+  return *value_handle;
+}
+
+
+void JSObject::LookupCallbackSetterInPrototypes(String* name,
+                                                LookupResult* result) {
+  for (Object* pt = GetPrototype();
+       pt != Heap::null_value();
+       pt = pt->GetPrototype()) {
+    JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result);
+    if (result->IsValid()) {
+      if (!result->IsTransitionType() && result->IsReadOnly()) {
+        result->NotFound();
+        return;
+      }
+      if (result->type() == CALLBACKS) {
+        return;
+      }
+    }
+  }
+  result->NotFound();
+}
+
+
+Object* JSObject::LookupCallbackSetterInPrototypes(uint32_t index) {
+  for (Object* pt = GetPrototype();
+       pt != Heap::null_value();
+       pt = pt->GetPrototype()) {
+    if (!JSObject::cast(pt)->HasDictionaryElements()) {
+        continue;
+    }
+    NumberDictionary* dictionary = JSObject::cast(pt)->element_dictionary();
+    int entry = dictionary->FindEntry(index);
+    if (entry != NumberDictionary::kNotFound) {
+      Object* element = dictionary->ValueAt(entry);
+      PropertyDetails details = dictionary->DetailsAt(entry);
+      if (details.type() == CALLBACKS) {
+        // Only accessors allowed as elements.
+        return FixedArray::cast(element)->get(kSetterIndex);
+      }
+    }
+  }
+  return Heap::undefined_value();
+}
+
+
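+// Finds a descriptor for the given name in this object's map, consulting
+// the DescriptorLookupCache first to avoid repeated searches of the same
+// descriptor array.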
+void JSObject::LookupInDescriptor(String* name, LookupResult* result) {
+  DescriptorArray* descriptors = map()->instance_descriptors();
+  int number = DescriptorLookupCache::Lookup(descriptors, name);
+  if (number == DescriptorLookupCache::kAbsent) {
+    number = descriptors->Search(name);
+    DescriptorLookupCache::Update(descriptors, name, number);
+  }
+  if (number != DescriptorArray::kNotFound) {
+    result->DescriptorResult(this, descriptors->GetDetails(number), number);
+  } else {
+    result->NotFound();
+  }
+}
+
+
+void JSObject::LocalLookupRealNamedProperty(String* name,
+                                            LookupResult* result) {
+  if (IsJSGlobalProxy()) {
+    Object* proto = GetPrototype();
+    if (proto->IsNull()) return result->NotFound();
+    ASSERT(proto->IsJSGlobalObject());
+    return JSObject::cast(proto)->LocalLookupRealNamedProperty(name, result);
+  }
+
+  if (HasFastProperties()) {
+    LookupInDescriptor(name, result);
+    if (result->IsValid()) {
+      ASSERT(result->holder() == this && result->type() != NORMAL);
+      // Disallow caching for uninitialized constants. These can only
+      // occur as fields.
+      if (result->IsReadOnly() && result->type() == FIELD &&
+          FastPropertyAt(result->GetFieldIndex())->IsTheHole()) {
+        result->DisallowCaching();
+      }
+      return;
+    }
+  } else {
+    int entry = property_dictionary()->FindEntry(name);
+    if (entry != StringDictionary::kNotFound) {
+      Object* value = property_dictionary()->ValueAt(entry);
+      if (IsGlobalObject()) {
+        PropertyDetails d = property_dictionary()->DetailsAt(entry);
+        if (d.IsDeleted()) {
+          result->NotFound();
+          return;
+        }
+        value = JSGlobalPropertyCell::cast(value)->value();
+        ASSERT(result->IsLoaded());
+      }
+      // Make sure to disallow caching for uninitialized constants
+      // found in the dictionary-mode objects.
+      if (value->IsTheHole()) result->DisallowCaching();
+      result->DictionaryResult(this, entry);
+      return;
+    }
+    // Slow case object skipped during lookup. Do not use inline caching.
+    if (!IsGlobalObject()) result->DisallowCaching();
+  }
+  result->NotFound();
+}
+
+
+void JSObject::LookupRealNamedProperty(String* name, LookupResult* result) {
+  LocalLookupRealNamedProperty(name, result);
+  if (result->IsProperty()) return;
+
+  LookupRealNamedPropertyInPrototypes(name, result);
+}
+
+
+void JSObject::LookupRealNamedPropertyInPrototypes(String* name,
+                                                   LookupResult* result) {
+  for (Object* pt = GetPrototype();
+       pt != Heap::null_value();
+       pt = JSObject::cast(pt)->GetPrototype()) {
+    JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result);
+    if (result->IsValid()) {
+      switch (result->type()) {
+        case NORMAL:
+        case FIELD:
+        case CONSTANT_FUNCTION:
+        case CALLBACKS:
+          return;
+        default: break;
+      }
+    }
+  }
+  result->NotFound();
+}
+
+
+// We only need to deal with CALLBACKS and INTERCEPTORS
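+// This is called when the access check (Top::MayNamedAccess) has failed for
+// a store.  Only callbacks explicitly marked all_can_write may still be
+// written; otherwise the failed access is reported and the value is
+// returned unchanged.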
+Object* JSObject::SetPropertyWithFailedAccessCheck(LookupResult* result,
+                                                   String* name,
+                                                   Object* value) {
+  if (!result->IsProperty()) {
+    LookupCallbackSetterInPrototypes(name, result);
+  }
+
+  if (result->IsProperty()) {
+    if (!result->IsReadOnly()) {
+      switch (result->type()) {
+        case CALLBACKS: {
+          Object* obj = result->GetCallbackObject();
+          if (obj->IsAccessorInfo()) {
+            AccessorInfo* info = AccessorInfo::cast(obj);
+            if (info->all_can_write()) {
+              return SetPropertyWithCallback(result->GetCallbackObject(),
+                                             name,
+                                             value,
+                                             result->holder());
+            }
+          }
+          break;
+        }
+        case INTERCEPTOR: {
+          // Try to look up real named properties. Note that the only
+          // properties that can be set this way are callbacks marked as
+          // ALL_CAN_WRITE on the prototype chain.
+          LookupResult r;
+          LookupRealNamedProperty(name, &r);
+          if (r.IsProperty()) {
+            return SetPropertyWithFailedAccessCheck(&r, name, value);
+          }
+          break;
+        }
+        default: {
+          break;
+        }
+      }
+    }
+  }
+
+  Top::ReportFailedAccessCheck(this, v8::ACCESS_SET);
+  return value;
+}
+
+
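+// Generic named-property store.  The lookup result's type determines how
+// the value is written: NORMAL (dictionary-mode property), FIELD (fast
+// property slot), CONSTANT_FUNCTION (function recorded in the descriptor),
+// CALLBACKS (accessor), INTERCEPTOR (embedder callback), or one of the
+// transition/null descriptor types, for which the property is (re)written
+// as a regular field.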
+Object* JSObject::SetProperty(LookupResult* result,
+                              String* name,
+                              Object* value,
+                              PropertyAttributes attributes) {
+  // Make sure that the top context does not change when doing callbacks or
+  // interceptor calls.
+  AssertNoContextChange ncc;
+
+  // Check access rights if needed.
+  if (IsAccessCheckNeeded()
+      && !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
+    return SetPropertyWithFailedAccessCheck(result, name, value);
+  }
+
+  if (IsJSGlobalProxy()) {
+    Object* proto = GetPrototype();
+    if (proto->IsNull()) return value;
+    ASSERT(proto->IsJSGlobalObject());
+    return JSObject::cast(proto)->SetProperty(result, name, value, attributes);
+  }
+
+  if (!result->IsProperty() && !IsJSContextExtensionObject()) {
+    // We could not find a local property so let's check whether there is an
+    // accessor that wants to handle the property.
+    LookupResult accessor_result;
+    LookupCallbackSetterInPrototypes(name, &accessor_result);
+    if (accessor_result.IsValid()) {
+      return SetPropertyWithCallback(accessor_result.GetCallbackObject(),
+                                     name,
+                                     value,
+                                     accessor_result.holder());
+    }
+  }
+  if (result->IsNotFound()) {
+    return AddProperty(name, value, attributes);
+  }
+  if (!result->IsLoaded()) {
+    return SetLazyProperty(result, name, value, attributes);
+  }
+  if (result->IsReadOnly() && result->IsProperty()) return value;
+  // This is a real property that is not read-only, or it is a
+  // transition or null descriptor and there are no setters in the prototypes.
+  switch (result->type()) {
+    case NORMAL:
+      return SetNormalizedProperty(result, value);
+    case FIELD:
+      return FastPropertyAtPut(result->GetFieldIndex(), value);
+    case MAP_TRANSITION:
+      if (attributes == result->GetAttributes()) {
+        // Only use map transition if the attributes match.
+        return AddFastPropertyUsingMap(result->GetTransitionMap(),
+                                       name,
+                                       value);
+      }
+      return ConvertDescriptorToField(name, value, attributes);
+    case CONSTANT_FUNCTION:
+      // Only replace the function if necessary.
+      if (value == result->GetConstantFunction()) return value;
+      // Preserve the attributes of this existing property.
+      attributes = result->GetAttributes();
+      return ConvertDescriptorToField(name, value, attributes);
+    case CALLBACKS:
+      return SetPropertyWithCallback(result->GetCallbackObject(),
+                                     name,
+                                     value,
+                                     result->holder());
+    case INTERCEPTOR:
+      return SetPropertyWithInterceptor(name, value, attributes);
+    case CONSTANT_TRANSITION:
+      // Replace with a MAP_TRANSITION to a new map with a FIELD, even
+      // if the value is a function.
+      return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
+    case NULL_DESCRIPTOR:
+      return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
+    default:
+      UNREACHABLE();
+  }
+  UNREACHABLE();
+  return value;
+}
+
+
+// Set a real local property, even if it is READ_ONLY.  If the property is not
+// present, add it with attributes NONE.  This code is an exact clone of
+// SetProperty, with the check for IsReadOnly and the check for a
+// callback setter removed.  The two lines looking up the LookupResult
+// result are also added.  If one of the functions is changed, the other
+// should be.
+Object* JSObject::IgnoreAttributesAndSetLocalProperty(
+    String* name,
+    Object* value,
+    PropertyAttributes attributes) {
+  // Make sure that the top context does not change when doing callbacks or
+  // interceptor calls.
+  AssertNoContextChange ncc;
+  // ADDED TO CLONE
+  LookupResult result_struct;
+  LocalLookup(name, &result_struct);
+  LookupResult* result = &result_struct;
+  // END ADDED TO CLONE
+  // Check access rights if needed.
+  if (IsAccessCheckNeeded()
+      && !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
+    return SetPropertyWithFailedAccessCheck(result, name, value);
+  }
+
+  if (IsJSGlobalProxy()) {
+    Object* proto = GetPrototype();
+    if (proto->IsNull()) return value;
+    ASSERT(proto->IsJSGlobalObject());
+    return JSObject::cast(proto)->IgnoreAttributesAndSetLocalProperty(
+        name,
+        value,
+        attributes);
+  }
+
+  // Check for accessor in prototype chain removed here in clone.
+  if (result->IsNotFound()) {
+    return AddProperty(name, value, attributes);
+  }
+  if (!result->IsLoaded()) {
+    return SetLazyProperty(result, name, value, attributes);
+  }
+  // Check of IsReadOnly removed from here in clone.
+  switch (result->type()) {
+    case NORMAL:
+      return SetNormalizedProperty(result, value);
+    case FIELD:
+      return FastPropertyAtPut(result->GetFieldIndex(), value);
+    case MAP_TRANSITION:
+      if (attributes == result->GetAttributes()) {
+        // Only use map transition if the attributes match.
+        return AddFastPropertyUsingMap(result->GetTransitionMap(),
+                                       name,
+                                       value);
+      }
+      return ConvertDescriptorToField(name, value, attributes);
+    case CONSTANT_FUNCTION:
+      // Only replace the function if necessary.
+      if (value == result->GetConstantFunction()) return value;
+      // Preserve the attributes of this existing property.
+      attributes = result->GetAttributes();
+      return ConvertDescriptorToField(name, value, attributes);
+    case CALLBACKS:
+    case INTERCEPTOR:
+      // Override callback in clone
+      return ConvertDescriptorToField(name, value, attributes);
+    case CONSTANT_TRANSITION:
+      // Replace with a MAP_TRANSITION to a new map with a FIELD, even
+      // if the value is a function.
+      return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
+    case NULL_DESCRIPTOR:
+      return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
+    default:
+      UNREACHABLE();
+  }
+  UNREACHABLE();
+  return value;
+}
+
+
+PropertyAttributes JSObject::GetPropertyAttributePostInterceptor(
+      JSObject* receiver,
+      String* name,
+      bool continue_search) {
+  // Check local property, ignore interceptor.
+  LookupResult result;
+  LocalLookupRealNamedProperty(name, &result);
+  if (result.IsProperty()) return result.GetAttributes();
+
+  if (continue_search) {
+    // Continue searching via the prototype chain.
+    Object* pt = GetPrototype();
+    if (pt != Heap::null_value()) {
+      return JSObject::cast(pt)->
+        GetPropertyAttributeWithReceiver(receiver, name);
+    }
+  }
+  return ABSENT;
+}
+
+
+PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
+      JSObject* receiver,
+      String* name,
+      bool continue_search) {
+  // Make sure that the top context does not change when doing
+  // callbacks or interceptor calls.
+  AssertNoContextChange ncc;
+
+  HandleScope scope;
+  Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
+  Handle<JSObject> receiver_handle(receiver);
+  Handle<JSObject> holder_handle(this);
+  Handle<String> name_handle(name);
+  CustomArguments args(interceptor->data(), receiver, this);
+  v8::AccessorInfo info(args.end());
+  if (!interceptor->query()->IsUndefined()) {
+    v8::NamedPropertyQuery query =
+        v8::ToCData<v8::NamedPropertyQuery>(interceptor->query());
+    LOG(ApiNamedPropertyAccess("interceptor-named-has", *holder_handle, name));
+    v8::Handle<v8::Boolean> result;
+    {
+      // Leaving JavaScript.
+      VMState state(EXTERNAL);
+      result = query(v8::Utils::ToLocal(name_handle), info);
+    }
+    if (!result.IsEmpty()) {
+      // Convert the boolean result to a property attribute
+      // specification.
+      return result->IsTrue() ? NONE : ABSENT;
+    }
+  } else if (!interceptor->getter()->IsUndefined()) {
+    v8::NamedPropertyGetter getter =
+        v8::ToCData<v8::NamedPropertyGetter>(interceptor->getter());
+    LOG(ApiNamedPropertyAccess("interceptor-named-get-has", this, name));
+    v8::Handle<v8::Value> result;
+    {
+      // Leaving JavaScript.
+      VMState state(EXTERNAL);
+      result = getter(v8::Utils::ToLocal(name_handle), info);
+    }
+    if (!result.IsEmpty()) return NONE;
+  }
+  return holder_handle->GetPropertyAttributePostInterceptor(*receiver_handle,
+                                                            *name_handle,
+                                                            continue_search);
+}
+
+
+PropertyAttributes JSObject::GetPropertyAttributeWithReceiver(
+      JSObject* receiver,
+      String* key) {
+  uint32_t index = 0;
+  if (key->AsArrayIndex(&index)) {
+    if (HasElementWithReceiver(receiver, index)) return NONE;
+    return ABSENT;
+  }
+  // Named property.
+  LookupResult result;
+  Lookup(key, &result);
+  return GetPropertyAttribute(receiver, &result, key, true);
+}
+
+
+PropertyAttributes JSObject::GetPropertyAttribute(JSObject* receiver,
+                                                  LookupResult* result,
+                                                  String* name,
+                                                  bool continue_search) {
+  // Check access rights if needed.
+  if (IsAccessCheckNeeded() &&
+      !Top::MayNamedAccess(this, name, v8::ACCESS_HAS)) {
+    return GetPropertyAttributeWithFailedAccessCheck(receiver,
+                                                     result,
+                                                     name,
+                                                     continue_search);
+  }
+  if (result->IsValid()) {
+    switch (result->type()) {
+      case NORMAL:  // fall through
+      case FIELD:
+      case CONSTANT_FUNCTION:
+      case CALLBACKS:
+        return result->GetAttributes();
+      case INTERCEPTOR:
+        return result->holder()->
+          GetPropertyAttributeWithInterceptor(receiver, name, continue_search);
+      case MAP_TRANSITION:
+      case CONSTANT_TRANSITION:
+      case NULL_DESCRIPTOR:
+        return ABSENT;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+  return ABSENT;
+}
+
+
+PropertyAttributes JSObject::GetLocalPropertyAttribute(String* name) {
+  // Check whether the name is an array index.
+  uint32_t index = 0;
+  if (name->AsArrayIndex(&index)) {
+    if (HasLocalElement(index)) return NONE;
+    return ABSENT;
+  }
+  // Named property.
+  LookupResult result;
+  LocalLookup(name, &result);
+  return GetPropertyAttribute(this, &result, name, false);
+}
+
+
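+// Converts the object from fast properties (map plus descriptor array and
+// field storage) to dictionary-mode properties backed by a StringDictionary.
+// Callers use this when the fast representation becomes unsuitable, e.g.
+// before deleting a property (see DeleteProperty below) or when an object
+// accumulates too many descriptors (see AddProperty above).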
+Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
+                                      int expected_additional_properties) {
+  if (!HasFastProperties()) return this;
+
+  // The global object is always normalized.
+  ASSERT(!IsGlobalObject());
+
+  // Allocate new content.
+  int property_count = map()->NumberOfDescribedProperties();
+  if (expected_additional_properties > 0) {
+    property_count += expected_additional_properties;
+  } else {
+    property_count += 2;  // Make space for two more properties.
+  }
+  Object* obj = StringDictionary::Allocate(property_count * 2);
+  if (obj->IsFailure()) return obj;
+  StringDictionary* dictionary = StringDictionary::cast(obj);
+
+  DescriptorArray* descs = map()->instance_descriptors();
+  for (int i = 0; i < descs->number_of_descriptors(); i++) {
+    PropertyDetails details = descs->GetDetails(i);
+    switch (details.type()) {
+      case CONSTANT_FUNCTION: {
+        PropertyDetails d =
+            PropertyDetails(details.attributes(), NORMAL, details.index());
+        Object* value = descs->GetConstantFunction(i);
+        Object* result = dictionary->Add(descs->GetKey(i), value, d);
+        if (result->IsFailure()) return result;
+        dictionary = StringDictionary::cast(result);
+        break;
+      }
+      case FIELD: {
+        PropertyDetails d =
+            PropertyDetails(details.attributes(), NORMAL, details.index());
+        Object* value = FastPropertyAt(descs->GetFieldIndex(i));
+        Object* result = dictionary->Add(descs->GetKey(i), value, d);
+        if (result->IsFailure()) return result;
+        dictionary = StringDictionary::cast(result);
+        break;
+      }
+      case CALLBACKS: {
+        PropertyDetails d =
+            PropertyDetails(details.attributes(), CALLBACKS, details.index());
+        Object* value = descs->GetCallbacksObject(i);
+        Object* result = dictionary->Add(descs->GetKey(i), value, d);
+        if (result->IsFailure()) return result;
+        dictionary = StringDictionary::cast(result);
+        break;
+      }
+      case MAP_TRANSITION:
+      case CONSTANT_TRANSITION:
+      case NULL_DESCRIPTOR:
+      case INTERCEPTOR:
+        break;
+      default:
+        UNREACHABLE();
+    }
+  }
+
+  // Copy the next enumeration index from instance descriptor.
+  int index = map()->instance_descriptors()->NextEnumerationIndex();
+  dictionary->SetNextEnumerationIndex(index);
+
+  // Allocate new map.
+  obj = map()->CopyDropDescriptors();
+  if (obj->IsFailure()) return obj;
+  Map* new_map = Map::cast(obj);
+
+  // Clear inobject properties if needed by adjusting the instance size and
+  // putting in a filler object instead of the inobject properties.
+  if (mode == CLEAR_INOBJECT_PROPERTIES && map()->inobject_properties() > 0) {
+    int instance_size_delta = map()->inobject_properties() * kPointerSize;
+    int new_instance_size = map()->instance_size() - instance_size_delta;
+    new_map->set_inobject_properties(0);
+    new_map->set_instance_size(new_instance_size);
+    Heap::CreateFillerObjectAt(this->address() + new_instance_size,
+                               instance_size_delta);
+  }
+  new_map->set_unused_property_fields(0);
+
+  // We have now successfully allocated all the necessary objects.
+  // Changes can now be made with the guarantee that all of them take effect.
+  set_map(new_map);
+  map()->set_instance_descriptors(Heap::empty_descriptor_array());
+
+  set_properties(dictionary);
+
+  Counters::props_to_dictionary.Increment();
+
+#ifdef DEBUG
+  if (FLAG_trace_normalization) {
+    PrintF("Object properties have been normalized:\n");
+    Print();
+  }
+#endif
+  return this;
+}
+
+
+Object* JSObject::TransformToFastProperties(int unused_property_fields) {
+  if (HasFastProperties()) return this;
+  ASSERT(!IsGlobalObject());
+  return property_dictionary()->
+      TransformPropertiesToFastFor(this, unused_property_fields);
+}
+
+
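+// Converts the object's elements from a fast FixedArray backing store to a
+// NumberDictionary, preserving all non-hole entries.  Pixel elements are
+// never normalized.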
+Object* JSObject::NormalizeElements() {
+  ASSERT(!HasPixelElements());
+  if (HasDictionaryElements()) return this;
+
+  // Get number of entries.
+  FixedArray* array = FixedArray::cast(elements());
+
+  // Compute the effective length.
+  int length = IsJSArray() ?
+               Smi::cast(JSArray::cast(this)->length())->value() :
+               array->length();
+  Object* obj = NumberDictionary::Allocate(length);
+  if (obj->IsFailure()) return obj;
+  NumberDictionary* dictionary = NumberDictionary::cast(obj);
+  // Copy entries.
+  for (int i = 0; i < length; i++) {
+    Object* value = array->get(i);
+    if (!value->IsTheHole()) {
+      PropertyDetails details = PropertyDetails(NONE, NORMAL);
+      Object* result = dictionary->AddNumberEntry(i, array->get(i), details);
+      if (result->IsFailure()) return result;
+      dictionary = NumberDictionary::cast(result);
+    }
+  }
+  // Switch to using the dictionary as the backing storage for elements.
+  set_elements(dictionary);
+
+  Counters::elements_to_dictionary.Increment();
+
+#ifdef DEBUG
+  if (FLAG_trace_normalization) {
+    PrintF("Object elements have been normalized:\n");
+    Print();
+  }
+#endif
+
+  return this;
+}
+
+
+Object* JSObject::DeletePropertyPostInterceptor(String* name, DeleteMode mode) {
+  // Check local property, ignore interceptor.
+  LookupResult result;
+  LocalLookupRealNamedProperty(name, &result);
+  if (!result.IsValid()) return Heap::true_value();
+
+  // Normalize object if needed.
+  Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
+  if (obj->IsFailure()) return obj;
+
+  return DeleteNormalizedProperty(name, mode);
+}
+
+
+Object* JSObject::DeletePropertyWithInterceptor(String* name) {
+  HandleScope scope;
+  Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
+  Handle<String> name_handle(name);
+  Handle<JSObject> this_handle(this);
+  if (!interceptor->deleter()->IsUndefined()) {
+    v8::NamedPropertyDeleter deleter =
+        v8::ToCData<v8::NamedPropertyDeleter>(interceptor->deleter());
+    LOG(ApiNamedPropertyAccess("interceptor-named-delete", *this_handle, name));
+    CustomArguments args(interceptor->data(), this, this);
+    v8::AccessorInfo info(args.end());
+    v8::Handle<v8::Boolean> result;
+    {
+      // Leaving JavaScript.
+      VMState state(EXTERNAL);
+      result = deleter(v8::Utils::ToLocal(name_handle), info);
+    }
+    RETURN_IF_SCHEDULED_EXCEPTION();
+    if (!result.IsEmpty()) {
+      ASSERT(result->IsBoolean());
+      return *v8::Utils::OpenHandle(*result);
+    }
+  }
+  Object* raw_result =
+      this_handle->DeletePropertyPostInterceptor(*name_handle, NORMAL_DELETION);
+  RETURN_IF_SCHEDULED_EXCEPTION();
+  return raw_result;
+}
+
+
+Object* JSObject::DeleteElementPostInterceptor(uint32_t index,
+                                               DeleteMode mode) {
+  ASSERT(!HasPixelElements());
+  switch (GetElementsKind()) {
+    case FAST_ELEMENTS: {
+      uint32_t length = IsJSArray() ?
+      static_cast<uint32_t>(Smi::cast(JSArray::cast(this)->length())->value()) :
+      static_cast<uint32_t>(FixedArray::cast(elements())->length());
+      if (index < length) {
+        FixedArray::cast(elements())->set_the_hole(index);
+      }
+      break;
+    }
+    case DICTIONARY_ELEMENTS: {
+      NumberDictionary* dictionary = element_dictionary();
+      int entry = dictionary->FindEntry(index);
+      if (entry != NumberDictionary::kNotFound) {
+        return dictionary->DeleteProperty(entry, mode);
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+  return Heap::true_value();
+}
+
+
+Object* JSObject::DeleteElementWithInterceptor(uint32_t index) {
+  // Make sure that the top context does not change when doing
+  // callbacks or interceptor calls.
+  AssertNoContextChange ncc;
+  HandleScope scope;
+  Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
+  if (interceptor->deleter()->IsUndefined()) return Heap::false_value();
+  v8::IndexedPropertyDeleter deleter =
+      v8::ToCData<v8::IndexedPropertyDeleter>(interceptor->deleter());
+  Handle<JSObject> this_handle(this);
+  LOG(ApiIndexedPropertyAccess("interceptor-indexed-delete", this, index));
+  CustomArguments args(interceptor->data(), this, this);
+  v8::AccessorInfo info(args.end());
+  v8::Handle<v8::Boolean> result;
+  {
+    // Leaving JavaScript.
+    VMState state(EXTERNAL);
+    result = deleter(index, info);
+  }
+  RETURN_IF_SCHEDULED_EXCEPTION();
+  if (!result.IsEmpty()) {
+    ASSERT(result->IsBoolean());
+    return *v8::Utils::OpenHandle(*result);
+  }
+  Object* raw_result =
+      this_handle->DeleteElementPostInterceptor(index, NORMAL_DELETION);
+  RETURN_IF_SCHEDULED_EXCEPTION();
+  return raw_result;
+}
+
+
+Object* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
+  // Check access rights if needed.
+  if (IsAccessCheckNeeded() &&
+      !Top::MayIndexedAccess(this, index, v8::ACCESS_DELETE)) {
+    Top::ReportFailedAccessCheck(this, v8::ACCESS_DELETE);
+    return Heap::false_value();
+  }
+
+  if (IsJSGlobalProxy()) {
+    Object* proto = GetPrototype();
+    if (proto->IsNull()) return Heap::false_value();
+    ASSERT(proto->IsJSGlobalObject());
+    return JSGlobalObject::cast(proto)->DeleteElement(index, mode);
+  }
+
+  if (HasIndexedInterceptor()) {
+    // Skip interceptor if forcing deletion.
+    if (mode == FORCE_DELETION) {
+      return DeleteElementPostInterceptor(index, mode);
+    }
+    return DeleteElementWithInterceptor(index);
+  }
+
+  switch (GetElementsKind()) {
+    case FAST_ELEMENTS: {
+      uint32_t length = IsJSArray() ?
+      static_cast<uint32_t>(Smi::cast(JSArray::cast(this)->length())->value()) :
+      static_cast<uint32_t>(FixedArray::cast(elements())->length());
+      if (index < length) {
+        FixedArray::cast(elements())->set_the_hole(index);
+      }
+      break;
+    }
+    case PIXEL_ELEMENTS: {
+      // Pixel elements cannot be deleted. Just silently ignore here.
+      break;
+    }
+    case DICTIONARY_ELEMENTS: {
+      NumberDictionary* dictionary = element_dictionary();
+      int entry = dictionary->FindEntry(index);
+      if (entry != NumberDictionary::kNotFound) {
+        return dictionary->DeleteProperty(entry, mode);
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+  return Heap::true_value();
+}
+
+
+Object* JSObject::DeleteProperty(String* name, DeleteMode mode) {
+  // ECMA-262, 3rd, 8.6.2.5
+  ASSERT(name->IsString());
+
+  // Check access rights if needed.
+  if (IsAccessCheckNeeded() &&
+      !Top::MayNamedAccess(this, name, v8::ACCESS_DELETE)) {
+    Top::ReportFailedAccessCheck(this, v8::ACCESS_DELETE);
+    return Heap::false_value();
+  }
+
+  if (IsJSGlobalProxy()) {
+    Object* proto = GetPrototype();
+    if (proto->IsNull()) return Heap::false_value();
+    ASSERT(proto->IsJSGlobalObject());
+    return JSGlobalObject::cast(proto)->DeleteProperty(name, mode);
+  }
+
+  uint32_t index = 0;
+  if (name->AsArrayIndex(&index)) {
+    return DeleteElement(index, mode);
+  } else {
+    LookupResult result;
+    LocalLookup(name, &result);
+    if (!result.IsValid()) return Heap::true_value();
+    // Ignore attributes if forcing a deletion.
+    if (result.IsDontDelete() && mode != FORCE_DELETION) {
+      return Heap::false_value();
+    }
+    // Check for interceptor.
+    if (result.type() == INTERCEPTOR) {
+      // Skip interceptor if forcing a deletion.
+      if (mode == FORCE_DELETION) {
+        return DeletePropertyPostInterceptor(name, mode);
+      }
+      return DeletePropertyWithInterceptor(name);
+    }
+    if (!result.IsLoaded()) {
+      return JSObject::cast(this)->DeleteLazyProperty(&result,
+                                                      name,
+                                                      mode);
+    }
+    // Normalize object if needed.
+    Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
+    if (obj->IsFailure()) return obj;
+    // Make sure the properties are normalized before removing the entry.
+    return DeleteNormalizedProperty(name, mode);
+  }
+}
+
+
+// Check whether this object references another object.
+bool JSObject::ReferencesObject(Object* obj) {
+  AssertNoAllocation no_alloc;
+
+  // Is the object the constructor for this object?
+  if (map()->constructor() == obj) {
+    return true;
+  }
+
+  // Is the object the prototype for this object?
+  if (map()->prototype() == obj) {
+    return true;
+  }
+
+  // Check if the object is among the named properties.
+  Object* key = SlowReverseLookup(obj);
+  if (key != Heap::undefined_value()) {
+    return true;
+  }
+
+  // Check if the object is among the indexed properties.
+  switch (GetElementsKind()) {
+    case PIXEL_ELEMENTS:
+      // Raw pixels do not reference other objects.
+      break;
+    case FAST_ELEMENTS: {
+      int length = IsJSArray() ?
+          Smi::cast(JSArray::cast(this)->length())->value() :
+          FixedArray::cast(elements())->length();
+      for (int i = 0; i < length; i++) {
+        Object* element = FixedArray::cast(elements())->get(i);
+        if (!element->IsTheHole() && element == obj) {
+          return true;
+        }
+      }
+      break;
+    }
+    case DICTIONARY_ELEMENTS: {
+      key = element_dictionary()->SlowReverseLookup(obj);
+      if (key != Heap::undefined_value()) {
+        return true;
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+
+  // For functions check the context. Boilerplate functions do
+  // not have to be traversed since they have no real context.
+  if (IsJSFunction() && !JSFunction::cast(this)->IsBoilerplate()) {
+    // Get the constructor function for arguments array.
+    JSObject* arguments_boilerplate =
+        Top::context()->global_context()->arguments_boilerplate();
+    JSFunction* arguments_function =
+        JSFunction::cast(arguments_boilerplate->map()->constructor());
+
+    // Get the context and don't check if it is the global context.
+    JSFunction* f = JSFunction::cast(this);
+    Context* context = f->context();
+    if (context->IsGlobalContext()) {
+      return false;
+    }
+
+    // Check the non-special context slots.
+    for (int i = Context::MIN_CONTEXT_SLOTS; i < context->length(); i++) {
+      // Only check JS objects.
+      if (context->get(i)->IsJSObject()) {
+        JSObject* ctxobj = JSObject::cast(context->get(i));
+        // If it is an arguments array check the content.
+        if (ctxobj->map()->constructor() == arguments_function) {
+          if (ctxobj->ReferencesObject(obj)) {
+            return true;
+          }
+        } else if (ctxobj == obj) {
+          return true;
+        }
+      }
+    }
+
+    // Check the context extension if any.
+    if (context->has_extension()) {
+      return context->extension()->ReferencesObject(obj);
+    }
+  }
+
+  // No references to object.
+  return false;
+}
+
+
+// Tests for the fast common case for property enumeration:
+// - this object has an enum cache
+// - this object has no elements
+// - no prototype has enumerable properties/elements
+// - neither this object nor any prototype has interceptors
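+// A true result lets property enumeration (for example the for-in
+// implementation) take a fast path that reuses the map's enum cache.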
+bool JSObject::IsSimpleEnum() {
+  JSObject* arguments_boilerplate =
+      Top::context()->global_context()->arguments_boilerplate();
+  JSFunction* arguments_function =
+      JSFunction::cast(arguments_boilerplate->map()->constructor());
+  if (IsAccessCheckNeeded()) return false;
+  if (map()->constructor() == arguments_function) return false;
+
+  for (Object* o = this;
+       o != Heap::null_value();
+       o = JSObject::cast(o)->GetPrototype()) {
+    JSObject* curr = JSObject::cast(o);
+    if (!curr->HasFastProperties()) return false;
+    if (!curr->map()->instance_descriptors()->HasEnumCache()) return false;
+    if (curr->NumberOfEnumElements() > 0) return false;
+    if (curr->HasNamedInterceptor()) return false;
+    if (curr->HasIndexedInterceptor()) return false;
+    if (curr != this) {
+      FixedArray* curr_fixed_array =
+          FixedArray::cast(curr->map()->instance_descriptors()->GetEnumCache());
+      if (curr_fixed_array->length() > 0) {
+        return false;
+      }
+    }
+  }
+  return true;
+}
+
+
+int Map::NumberOfDescribedProperties() {
+  int result = 0;
+  DescriptorArray* descs = instance_descriptors();
+  for (int i = 0; i < descs->number_of_descriptors(); i++) {
+    if (descs->IsProperty(i)) result++;
+  }
+  return result;
+}
+
+
+int Map::PropertyIndexFor(String* name) {
+  DescriptorArray* descs = instance_descriptors();
+  for (int i = 0; i < descs->number_of_descriptors(); i++) {
+    if (name->Equals(descs->GetKey(i)) && !descs->IsNullDescriptor(i)) {
+      return descs->GetFieldIndex(i);
+    }
+  }
+  return -1;
+}
+
+
+int Map::NextFreePropertyIndex() {
+  int max_index = -1;
+  DescriptorArray* descs = instance_descriptors();
+  for (int i = 0; i < descs->number_of_descriptors(); i++) {
+    if (descs->GetType(i) == FIELD) {
+      int current_index = descs->GetFieldIndex(i);
+      if (current_index > max_index) max_index = current_index;
+    }
+  }
+  return max_index + 1;
+}
+
+
+AccessorDescriptor* Map::FindAccessor(String* name) {
+  DescriptorArray* descs = instance_descriptors();
+  for (int i = 0; i < descs->number_of_descriptors(); i++) {
+    if (name->Equals(descs->GetKey(i)) && descs->GetType(i) == CALLBACKS) {
+      return descs->GetCallbacks(i);
+    }
+  }
+  return NULL;
+}
+
+
+void JSObject::LocalLookup(String* name, LookupResult* result) {
+  ASSERT(name->IsString());
+
+  if (IsJSGlobalProxy()) {
+    Object* proto = GetPrototype();
+    if (proto->IsNull()) return result->NotFound();
+    ASSERT(proto->IsJSGlobalObject());
+    return JSObject::cast(proto)->LocalLookup(name, result);
+  }
+
+  // Do not use inline caching if the object is a non-global object
+  // that requires access checks.
+  if (!IsJSGlobalProxy() && IsAccessCheckNeeded()) {
+    result->DisallowCaching();
+  }
+
+  // Check __proto__ before interceptor.
+  if (name->Equals(Heap::Proto_symbol()) && !IsJSContextExtensionObject()) {
+    result->ConstantResult(this);
+    return;
+  }
+
+  // Check for lookup interceptor except when bootstrapping.
+  if (HasNamedInterceptor() && !Bootstrapper::IsActive()) {
+    result->InterceptorResult(this);
+    return;
+  }
+
+  LocalLookupRealNamedProperty(name, result);
+}
+
+
+void JSObject::Lookup(String* name, LookupResult* result) {
+  // Ecma-262 3rd 8.6.2.4
+  for (Object* current = this;
+       current != Heap::null_value();
+       current = JSObject::cast(current)->GetPrototype()) {
+    JSObject::cast(current)->LocalLookup(name, result);
+    if (result->IsValid() && !result->IsTransitionType()) return;
+  }
+  result->NotFound();
+}
+
+
+// Search the object and its prototype chain for callback properties.
+void JSObject::LookupCallback(String* name, LookupResult* result) {
+  for (Object* current = this;
+       current != Heap::null_value();
+       current = JSObject::cast(current)->GetPrototype()) {
+    JSObject::cast(current)->LocalLookupRealNamedProperty(name, result);
+    if (result->IsValid() && result->type() == CALLBACKS) return;
+  }
+  result->NotFound();
+}
+
+
+Object* JSObject::DefineGetterSetter(String* name,
+                                     PropertyAttributes attributes) {
+  // Make sure that the top context does not change when doing callbacks or
+  // interceptor calls.
+  AssertNoContextChange ncc;
+
+  // Check access rights if needed.
+  if (IsAccessCheckNeeded() &&
+      !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
+    Top::ReportFailedAccessCheck(this, v8::ACCESS_SET);
+    return Heap::undefined_value();
+  }
+
+  // Try to flatten before operating on the string.
+  name->TryFlattenIfNotFlat();
+
+  // Check if there is an API defined callback object which prohibits
+  // callback overwriting in this object or its prototype chain.
+  // This mechanism is needed for instance in a browser setting, where
+  // certain accessors such as window.location should not be allowed
+  // to be overwritten because allowing overwriting could potentially
+  // cause security problems.
+  LookupResult callback_result;
+  LookupCallback(name, &callback_result);
+  if (callback_result.IsValid()) {
+    Object* obj = callback_result.GetCallbackObject();
+    if (obj->IsAccessorInfo() &&
+        AccessorInfo::cast(obj)->prohibits_overwriting()) {
+      return Heap::undefined_value();
+    }
+  }
+
+  uint32_t index;
+  bool is_element = name->AsArrayIndex(&index);
+  if (is_element && IsJSArray()) return Heap::undefined_value();
+
+  if (is_element) {
+    switch (GetElementsKind()) {
+      case FAST_ELEMENTS:
+        break;
+      case PIXEL_ELEMENTS:
+        // Ignore getters and setters on pixel elements.
+        return Heap::undefined_value();
+      case DICTIONARY_ELEMENTS: {
+        // Lookup the index.
+        NumberDictionary* dictionary = element_dictionary();
+        int entry = dictionary->FindEntry(index);
+        if (entry != NumberDictionary::kNotFound) {
+          Object* result = dictionary->ValueAt(entry);
+          PropertyDetails details = dictionary->DetailsAt(entry);
+          if (details.IsReadOnly()) return Heap::undefined_value();
+          if (details.type() == CALLBACKS) {
+            // Only accessors allowed as elements.
+            ASSERT(result->IsFixedArray());
+            return result;
+          }
+        }
+        break;
+      }
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    // Lookup the name.
+    LookupResult result;
+    LocalLookup(name, &result);
+    if (result.IsValid()) {
+      if (result.IsReadOnly()) return Heap::undefined_value();
+      if (result.type() == CALLBACKS) {
+        Object* obj = result.GetCallbackObject();
+        if (obj->IsFixedArray()) return obj;
+      }
+    }
+  }
+
+  // Allocate the fixed array to hold getter and setter.
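+  // Slot 0 of the array holds the getter and slot 1 the setter
+  // (see DefineAccessor and LookupAccessor below).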
+  Object* structure = Heap::AllocateFixedArray(2, TENURED);
+  if (structure->IsFailure()) return structure;
+  PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
+
+  if (is_element) {
+    // Normalize object to make this operation simple.
+    Object* ok = NormalizeElements();
+    if (ok->IsFailure()) return ok;
+
+    // Update the dictionary with the new CALLBACKS property.
+    Object* dict =
+        element_dictionary()->Set(index, structure, details);
+    if (dict->IsFailure()) return dict;
+
+    // If name is an index we need to stay in the slow case.
+    NumberDictionary* elements = NumberDictionary::cast(dict);
+    elements->set_requires_slow_elements();
+    // Set the potential new dictionary on the object.
+    set_elements(NumberDictionary::cast(dict));
+  } else {
+    // Normalize object to make this operation simple.
+    Object* ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
+    if (ok->IsFailure()) return ok;
+
+    // For the global object allocate a new map to invalidate the global inline
+    // caches which have a global property cell reference directly in the code.
+    if (IsGlobalObject()) {
+      Object* new_map = map()->CopyDropDescriptors();
+      if (new_map->IsFailure()) return new_map;
+      set_map(Map::cast(new_map));
+    }
+
+    // Update the dictionary with the new CALLBACKS property.
+    return SetNormalizedProperty(name, structure, details);
+  }
+
+  return structure;
+}
+
+
+Object* JSObject::DefineAccessor(String* name, bool is_getter, JSFunction* fun,
+                                 PropertyAttributes attributes) {
+  // Check access rights if needed.
+  if (IsAccessCheckNeeded() &&
+      !Top::MayNamedAccess(this, name, v8::ACCESS_HAS)) {
+    Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+    return Heap::undefined_value();
+  }
+
+  if (IsJSGlobalProxy()) {
+    Object* proto = GetPrototype();
+    if (proto->IsNull()) return this;
+    ASSERT(proto->IsJSGlobalObject());
+    return JSObject::cast(proto)->DefineAccessor(name, is_getter,
+                                                 fun, attributes);
+  }
+
+  Object* array = DefineGetterSetter(name, attributes);
+  if (array->IsFailure() || array->IsUndefined()) return array;
+  FixedArray::cast(array)->set(is_getter ? 0 : 1, fun);
+  return this;
+}
+
+
+Object* JSObject::LookupAccessor(String* name, bool is_getter) {
+  // Make sure that the top context does not change when doing callbacks or
+  // interceptor calls.
+  AssertNoContextChange ncc;
+
+  // Check access rights if needed.
+  if (IsAccessCheckNeeded() &&
+      !Top::MayNamedAccess(this, name, v8::ACCESS_HAS)) {
+    Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+    return Heap::undefined_value();
+  }
+
+  // Make the lookup and include prototypes.
+  int accessor_index = is_getter ? kGetterIndex : kSetterIndex;
+  uint32_t index;
+  if (name->AsArrayIndex(&index)) {
+    for (Object* obj = this;
+         obj != Heap::null_value();
+         obj = JSObject::cast(obj)->GetPrototype()) {
+      JSObject* js_object = JSObject::cast(obj);
+      if (js_object->HasDictionaryElements()) {
+        NumberDictionary* dictionary = js_object->element_dictionary();
+        int entry = dictionary->FindEntry(index);
+        if (entry != NumberDictionary::kNotFound) {
+          Object* element = dictionary->ValueAt(entry);
+          PropertyDetails details = dictionary->DetailsAt(entry);
+          if (details.type() == CALLBACKS) {
+            // Only accessors allowed as elements.
+            return FixedArray::cast(element)->get(accessor_index);
+          }
+        }
+      }
+    }
+  } else {
+    for (Object* obj = this;
+         obj != Heap::null_value();
+         obj = JSObject::cast(obj)->GetPrototype()) {
+      LookupResult result;
+      JSObject::cast(obj)->LocalLookup(name, &result);
+      if (result.IsValid()) {
+        if (result.IsReadOnly()) return Heap::undefined_value();
+        if (result.type() == CALLBACKS) {
+          Object* obj = result.GetCallbackObject();
+          if (obj->IsFixedArray()) {
+            return FixedArray::cast(obj)->get(accessor_index);
+          }
+        }
+      }
+    }
+  }
+  return Heap::undefined_value();
+}
+
+
+Object* JSObject::SlowReverseLookup(Object* value) {
+  if (HasFastProperties()) {
+    DescriptorArray* descs = map()->instance_descriptors();
+    for (int i = 0; i < descs->number_of_descriptors(); i++) {
+      if (descs->GetType(i) == FIELD) {
+        if (FastPropertyAt(descs->GetFieldIndex(i)) == value) {
+          return descs->GetKey(i);
+        }
+      } else if (descs->GetType(i) == CONSTANT_FUNCTION) {
+        if (descs->GetConstantFunction(i) == value) {
+          return descs->GetKey(i);
+        }
+      }
+    }
+    return Heap::undefined_value();
+  } else {
+    return property_dictionary()->SlowReverseLookup(value);
+  }
+}
+
+
+Object* Map::CopyDropDescriptors() {
+  Object* result = Heap::AllocateMap(instance_type(), instance_size());
+  if (result->IsFailure()) return result;
+  Map::cast(result)->set_prototype(prototype());
+  Map::cast(result)->set_constructor(constructor());
+  // Don't copy descriptors, so map transitions always remain a forest.
+  // If we retained the same descriptors we would have two maps
+  // pointing to the same transition which is bad because the garbage
+  // collector relies on being able to reverse pointers from transitions
+  // to maps.  If properties need to be retained use CopyDropTransitions.
+  Map::cast(result)->set_instance_descriptors(Heap::empty_descriptor_array());
+  // Please note instance_type and instance_size are set when allocated.
+  Map::cast(result)->set_inobject_properties(inobject_properties());
+  Map::cast(result)->set_unused_property_fields(unused_property_fields());
+
+  // If the map has pre-allocated properties always start out with a descriptor
+  // array describing these properties.
+  if (pre_allocated_property_fields() > 0) {
+    ASSERT(constructor()->IsJSFunction());
+    JSFunction* ctor = JSFunction::cast(constructor());
+    Object* descriptors =
+        ctor->initial_map()->instance_descriptors()->RemoveTransitions();
+    if (descriptors->IsFailure()) return descriptors;
+    Map::cast(result)->set_instance_descriptors(
+        DescriptorArray::cast(descriptors));
+    Map::cast(result)->set_pre_allocated_property_fields(
+        pre_allocated_property_fields());
+  }
+  Map::cast(result)->set_bit_field(bit_field());
+  Map::cast(result)->set_bit_field2(bit_field2());
+  Map::cast(result)->ClearCodeCache();
+  return result;
+}
+
+
+Object* Map::CopyDropTransitions() {
+  Object* new_map = CopyDropDescriptors();
+  if (new_map->IsFailure()) return new_map;
+  Object* descriptors = instance_descriptors()->RemoveTransitions();
+  if (descriptors->IsFailure()) return descriptors;
+  cast(new_map)->set_instance_descriptors(DescriptorArray::cast(descriptors));
+  return cast(new_map);
+}
+
+
+Object* Map::UpdateCodeCache(String* name, Code* code) {
+  ASSERT(code->ic_state() == MONOMORPHIC);
+  FixedArray* cache = code_cache();
+
+  // When updating the code cache we disregard the type encoded in the
+  // flags. This allows call constant stubs to overwrite call field
+  // stubs, etc.
+  Code::Flags flags = Code::RemoveTypeFromFlags(code->flags());
+
+  // First check whether we can update existing code cache without
+  // extending it.
+  int length = cache->length();
+  int deleted_index = -1;
+  for (int i = 0; i < length; i += 2) {
+    Object* key = cache->get(i);
+    if (key->IsNull()) {
+      if (deleted_index < 0) deleted_index = i;
+      continue;
+    }
+    if (key->IsUndefined()) {
+      if (deleted_index >= 0) i = deleted_index;
+      cache->set(i + 0, name);
+      cache->set(i + 1, code);
+      return this;
+    }
+    if (name->Equals(String::cast(key))) {
+      Code::Flags found = Code::cast(cache->get(i + 1))->flags();
+      if (Code::RemoveTypeFromFlags(found) == flags) {
+        cache->set(i + 1, code);
+        return this;
+      }
+    }
+  }
+
+  // Reached the end of the code cache.  If there were deleted
+  // elements, reuse the space for the first of them.
+  if (deleted_index >= 0) {
+    cache->set(deleted_index + 0, name);
+    cache->set(deleted_index + 1, code);
+    return this;
+  }
+
+  // Extend the code cache with some new entries (at least one).
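+  // The new length is the old length plus about half of it (rounded down to
+  // an even number) plus one extra (name, code) pair, so it stays even.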
+  int new_length = length + ((length >> 1) & ~1) + 2;
+  ASSERT((new_length & 1) == 0);  // must be a multiple of two
+  Object* result = cache->CopySize(new_length);
+  if (result->IsFailure()) return result;
+
+  // Add the (name, code) pair to the new cache.
+  cache = FixedArray::cast(result);
+  cache->set(length + 0, name);
+  cache->set(length + 1, code);
+  set_code_cache(cache);
+  return this;
+}
+
+
+Object* Map::FindInCodeCache(String* name, Code::Flags flags) {
+  FixedArray* cache = code_cache();
+  int length = cache->length();
+  for (int i = 0; i < length; i += 2) {
+    Object* key = cache->get(i);
+    // Skip deleted elements.
+    if (key->IsNull()) continue;
+    if (key->IsUndefined()) return key;
+    if (name->Equals(String::cast(key))) {
+      Code* code = Code::cast(cache->get(i + 1));
+      if (code->flags() == flags) return code;
+    }
+  }
+  return Heap::undefined_value();
+}
+
+
+int Map::IndexInCodeCache(Code* code) {
+  FixedArray* array = code_cache();
+  int len = array->length();
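+  // The cache stores (name, code) pairs, so code objects live at odd indices.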
+  for (int i = 0; i < len; i += 2) {
+    if (array->get(i + 1) == code) return i + 1;
+  }
+  return -1;
+}
+
+
+void Map::RemoveFromCodeCache(int index) {
+  FixedArray* array = code_cache();
+  ASSERT(array->length() >= index && array->get(index)->IsCode());
+  // Use null instead of undefined for deleted elements to distinguish
+  // deleted elements from unused elements.  This distinction is used
+  // when looking up in the cache and when updating the cache.
+  array->set_null(index - 1);  // key
+  array->set_null(index);  // code
+}
+
+
+void FixedArray::FixedArrayIterateBody(ObjectVisitor* v) {
+  IteratePointers(v, kHeaderSize, kHeaderSize + length() * kPointerSize);
+}
+
+
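+// Returns true if the fixed array contains key.  Smis are compared by
+// identity and strings by content; other kinds of keys never match.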
+static bool HasKey(FixedArray* array, Object* key) {
+  int len0 = array->length();
+  for (int i = 0; i < len0; i++) {
+    Object* element = array->get(i);
+    if (element->IsSmi() && key->IsSmi() && (element == key)) return true;
+    if (element->IsString() &&
+        key->IsString() && String::cast(element)->Equals(String::cast(key))) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+Object* FixedArray::AddKeysFromJSArray(JSArray* array) {
+  ASSERT(!array->HasPixelElements());
+  switch (array->GetElementsKind()) {
+    case JSObject::FAST_ELEMENTS:
+      return UnionOfKeys(FixedArray::cast(array->elements()));
+    case JSObject::DICTIONARY_ELEMENTS: {
+      NumberDictionary* dict = array->element_dictionary();
+      int size = dict->NumberOfElements();
+
+      // Allocate a temporary fixed array.
+      Object* object = Heap::AllocateFixedArray(size);
+      if (object->IsFailure()) return object;
+      FixedArray* key_array = FixedArray::cast(object);
+
+      int capacity = dict->Capacity();
+      int pos = 0;
+      // Copy the elements from the JSArray to the temporary fixed array.
+      for (int i = 0; i < capacity; i++) {
+        if (dict->IsKey(dict->KeyAt(i))) {
+          key_array->set(pos++, dict->ValueAt(i));
+        }
+      }
+      // Compute the union of this and the temporary fixed array.
+      return UnionOfKeys(key_array);
+    }
+    default:
+      UNREACHABLE();
+  }
+  UNREACHABLE();
+  return Heap::null_value();  // Failure case needs to "return" a value.
+}
+
+
+Object* FixedArray::UnionOfKeys(FixedArray* other) {
+  int len0 = length();
+  int len1 = other->length();
+  // Optimize if either is empty.
+  if (len0 == 0) return other;
+  if (len1 == 0) return this;
+
+  // Compute how many elements are not in this.
+  int extra = 0;
+  for (int y = 0; y < len1; y++) {
+    Object* value = other->get(y);
+    if (!value->IsTheHole() && !HasKey(this, value)) extra++;
+  }
+
+  if (extra == 0) return this;
+
+  // Allocate the result
+  Object* obj = Heap::AllocateFixedArray(len0 + extra);
+  if (obj->IsFailure()) return obj;
+  // Fill in the content
+  FixedArray* result = FixedArray::cast(obj);
+  WriteBarrierMode mode = result->GetWriteBarrierMode();
+  for (int i = 0; i < len0; i++) {
+    result->set(i, get(i), mode);
+  }
+  // Fill in the extra keys.
+  int index = 0;
+  for (int y = 0; y < len1; y++) {
+    Object* value = other->get(y);
+    if (!value->IsTheHole() && !HasKey(this, value)) {
+      result->set(len0 + index, other->get(y), mode);
+      index++;
+    }
+  }
+  ASSERT(extra == index);
+  return result;
+}
+
+
+Object* FixedArray::CopySize(int new_length) {
+  if (new_length == 0) return Heap::empty_fixed_array();
+  Object* obj = Heap::AllocateFixedArray(new_length);
+  if (obj->IsFailure()) return obj;
+  FixedArray* result = FixedArray::cast(obj);
+  // Copy the content
+  int len = length();
+  if (new_length < len) len = new_length;
+  result->set_map(map());
+  WriteBarrierMode mode = result->GetWriteBarrierMode();
+  for (int i = 0; i < len; i++) {
+    result->set(i, get(i), mode);
+  }
+  return result;
+}
+
+
+void FixedArray::CopyTo(int pos, FixedArray* dest, int dest_pos, int len) {
+  WriteBarrierMode mode = dest->GetWriteBarrierMode();
+  for (int index = 0; index < len; index++) {
+    dest->set(dest_pos + index, get(pos + index), mode);
+  }
+}
+
+
+#ifdef DEBUG
+bool FixedArray::IsEqualTo(FixedArray* other) {
+  if (length() != other->length()) return false;
+  for (int i = 0; i < length(); ++i) {
+    if (get(i) != other->get(i)) return false;
+  }
+  return true;
+}
+#endif
+
+
+Object* DescriptorArray::Allocate(int number_of_descriptors) {
+  if (number_of_descriptors == 0) {
+    return Heap::empty_descriptor_array();
+  }
+  // Allocate the array of keys.
+  Object* array = Heap::AllocateFixedArray(ToKeyIndex(number_of_descriptors));
+  if (array->IsFailure()) return array;
+  // Do not use DescriptorArray::cast on incomplete object.
+  FixedArray* result = FixedArray::cast(array);
+
+  // Allocate the content array and set it in the descriptor array.
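+  // The content array has two slots per descriptor: one for the value and
+  // one for the property details.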
+  array = Heap::AllocateFixedArray(number_of_descriptors << 1);
+  if (array->IsFailure()) return array;
+  result->set(kContentArrayIndex, array);
+  result->set(kEnumerationIndexIndex,
+              Smi::FromInt(PropertyDetails::kInitialIndex),
+              SKIP_WRITE_BARRIER);
+  return result;
+}
+
+
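+// The enum cache is attached through a bridge array stored in the
+// enumeration index slot: one bridge slot keeps the original enumeration
+// index and the other holds the cache itself.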
+void DescriptorArray::SetEnumCache(FixedArray* bridge_storage,
+                                   FixedArray* new_cache) {
+  ASSERT(bridge_storage->length() >= kEnumCacheBridgeLength);
+  if (HasEnumCache()) {
+    FixedArray::cast(get(kEnumerationIndexIndex))->
+      set(kEnumCacheBridgeCacheIndex, new_cache);
+  } else {
+    if (IsEmpty()) return;  // Do nothing for empty descriptor array.
+    FixedArray::cast(bridge_storage)->
+      set(kEnumCacheBridgeCacheIndex, new_cache);
+    fast_set(FixedArray::cast(bridge_storage),
+             kEnumCacheBridgeEnumIndex,
+             get(kEnumerationIndexIndex));
+    set(kEnumerationIndexIndex, bridge_storage);
+  }
+}
+
+
+Object* DescriptorArray::CopyInsert(Descriptor* descriptor,
+                                    TransitionFlag transition_flag) {
+  // Transitions are only kept when inserting another transition.
+  // This precondition is not required by this function's implementation, but
+  // is currently required by the semantics of maps, so we check it.
+  // Conversely, we filter after replacing, so replacing a transition and
+  // removing all other transitions is not supported.
+  bool remove_transitions = transition_flag == REMOVE_TRANSITIONS;
+  ASSERT(remove_transitions == !descriptor->GetDetails().IsTransition());
+  ASSERT(descriptor->GetDetails().type() != NULL_DESCRIPTOR);
+
+  // Ensure the key is a symbol.
+  Object* result = descriptor->KeyToSymbol();
+  if (result->IsFailure()) return result;
+
+  int transitions = 0;
+  int null_descriptors = 0;
+  if (remove_transitions) {
+    for (int i = 0; i < number_of_descriptors(); i++) {
+      if (IsTransition(i)) transitions++;
+      if (IsNullDescriptor(i)) null_descriptors++;
+    }
+  } else {
+    for (int i = 0; i < number_of_descriptors(); i++) {
+      if (IsNullDescriptor(i)) null_descriptors++;
+    }
+  }
+  int new_size = number_of_descriptors() - transitions - null_descriptors;
+
+  // If the key is already in the descriptor array, we replace it in-place
+  // when filtering.  Count a null descriptor for the key as inserted, not
+  // replaced.
+  int index = Search(descriptor->GetKey());
+  const bool inserting = (index == kNotFound);
+  const bool replacing = !inserting;
+  bool keep_enumeration_index = false;
+  if (inserting) {
+    ++new_size;
+  }
+  if (replacing) {
+    // We are replacing an existing descriptor.  We keep the enumeration
+    // index of a visible property.
+    PropertyType t = PropertyDetails(GetDetails(index)).type();
+    if (t == CONSTANT_FUNCTION ||
+        t == FIELD ||
+        t == CALLBACKS ||
+        t == INTERCEPTOR) {
+      keep_enumeration_index = true;
+    } else if (remove_transitions) {
+      // The replaced descriptor has been counted as removed if it is
+      // a transition that will be replaced.  Adjust the count in this case.
+      ++new_size;
+    }
+  }
+  result = Allocate(new_size);
+  if (result->IsFailure()) return result;
+  DescriptorArray* new_descriptors = DescriptorArray::cast(result);
+  // Set the enumeration index in the descriptors and set the enumeration index
+  // in the result.
+  int enumeration_index = NextEnumerationIndex();
+  if (!descriptor->GetDetails().IsTransition()) {
+    if (keep_enumeration_index) {
+      descriptor->SetEnumerationIndex(
+          PropertyDetails(GetDetails(index)).index());
+    } else {
+      descriptor->SetEnumerationIndex(enumeration_index);
+      ++enumeration_index;
+    }
+  }
+  new_descriptors->SetNextEnumerationIndex(enumeration_index);
+
+  // Copy the descriptors, filtering out transitions and null descriptors,
+  // and inserting or replacing a descriptor.
+  uint32_t descriptor_hash = descriptor->GetKey()->Hash();
+  int from_index = 0;
+  int to_index = 0;
+
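+  // The descriptor array is sorted by key hash, so copy entries until we
+  // reach the position where the new descriptor belongs.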
+  for (; from_index < number_of_descriptors(); from_index++) {
+    String* key = GetKey(from_index);
+    if (key->Hash() > descriptor_hash || key == descriptor->GetKey()) {
+      break;
+    }
+    if (IsNullDescriptor(from_index)) continue;
+    if (remove_transitions && IsTransition(from_index)) continue;
+    new_descriptors->CopyFrom(to_index++, this, from_index);
+  }
+
+  new_descriptors->Set(to_index++, descriptor);
+  if (replacing) from_index++;
+
+  for (; from_index < number_of_descriptors(); from_index++) {
+    if (IsNullDescriptor(from_index)) continue;
+    if (remove_transitions && IsTransition(from_index)) continue;
+    new_descriptors->CopyFrom(to_index++, this, from_index);
+  }
+
+  ASSERT(to_index == new_descriptors->number_of_descriptors());
+  SLOW_ASSERT(new_descriptors->IsSortedNoDuplicates());
+
+  return new_descriptors;
+}
+
+
+Object* DescriptorArray::RemoveTransitions() {
+  // Remove all transitions and null descriptors. Return a copy of the array
+  // with all transitions removed, or a Failure object if the new array could
+  // not be allocated.
+
+  // Compute the size of the map transition entries to be removed.
+  int num_removed = 0;
+  for (int i = 0; i < number_of_descriptors(); i++) {
+    if (!IsProperty(i)) num_removed++;
+  }
+
+  // Allocate the new descriptor array.
+  Object* result = Allocate(number_of_descriptors() - num_removed);
+  if (result->IsFailure()) return result;
+  DescriptorArray* new_descriptors = DescriptorArray::cast(result);
+
+  // Copy the content.
+  int next_descriptor = 0;
+  for (int i = 0; i < number_of_descriptors(); i++) {
+    if (IsProperty(i)) new_descriptors->CopyFrom(next_descriptor++, this, i);
+  }
+  ASSERT(next_descriptor == new_descriptors->number_of_descriptors());
+
+  return new_descriptors;
+}
+
+
+void DescriptorArray::Sort() {
+  // In-place heap sort.
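+  // Descriptors are ordered by the hash of their key so that they can later
+  // be located with BinarySearch.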
+  int len = number_of_descriptors();
+
+  // Bottom-up max-heap construction.
+  for (int i = 1; i < len; ++i) {
+    int child_index = i;
+    while (child_index > 0) {
+      int parent_index = ((child_index + 1) >> 1) - 1;
+      uint32_t parent_hash = GetKey(parent_index)->Hash();
+      uint32_t child_hash = GetKey(child_index)->Hash();
+      if (parent_hash < child_hash) {
+        Swap(parent_index, child_index);
+      } else {
+        break;
+      }
+      child_index = parent_index;
+    }
+  }
+
+  // Extract elements and create sorted array.
+  for (int i = len - 1; i > 0; --i) {
+    // Put max element at the back of the array.
+    Swap(0, i);
+    // Sift down the new top element.
+    int parent_index = 0;
+    while (true) {
+      int child_index = ((parent_index + 1) << 1) - 1;
+      if (child_index >= i) break;
+      uint32_t child1_hash = GetKey(child_index)->Hash();
+      uint32_t child2_hash = GetKey(child_index + 1)->Hash();
+      uint32_t parent_hash = GetKey(parent_index)->Hash();
+      if (child_index + 1 >= i || child1_hash > child2_hash) {
+        if (parent_hash > child1_hash) break;
+        Swap(parent_index, child_index);
+        parent_index = child_index;
+      } else {
+        if (parent_hash > child2_hash) break;
+        Swap(parent_index, child_index + 1);
+        parent_index = child_index + 1;
+      }
+    }
+  }
+
+  SLOW_ASSERT(IsSortedNoDuplicates());
+}
+
+
+int DescriptorArray::BinarySearch(String* name, int low, int high) {
+  uint32_t hash = name->Hash();
+
+  while (low <= high) {
+    int mid = (low + high) / 2;
+    String* mid_name = GetKey(mid);
+    uint32_t mid_hash = mid_name->Hash();
+
+    if (mid_hash > hash) {
+      high = mid - 1;
+      continue;
+    }
+    if (mid_hash < hash) {
+      low = mid + 1;
+      continue;
+    }
+    // Found an element with the same hash-code.
+    ASSERT(hash == mid_hash);
+    // There might be more, so we find the first one and
+    // check them all to see if we have a match.
+    if (name == mid_name && !is_null_descriptor(mid)) return mid;
+    while ((mid > low) && (GetKey(mid - 1)->Hash() == hash)) mid--;
+    for (; (mid <= high) && (GetKey(mid)->Hash() == hash); mid++) {
+      if (GetKey(mid)->Equals(name) && !is_null_descriptor(mid)) return mid;
+    }
+    break;
+  }
+  return kNotFound;
+}
+
+
+int DescriptorArray::LinearSearch(String* name, int len) {
+  uint32_t hash = name->Hash();
+  for (int number = 0; number < len; number++) {
+    String* entry = GetKey(number);
+    if ((entry->Hash() == hash) &&
+        name->Equals(entry) &&
+        !is_null_descriptor(number)) {
+      return number;
+    }
+  }
+  return kNotFound;
+}
+
+
+#ifdef DEBUG
+bool DescriptorArray::IsEqualTo(DescriptorArray* other) {
+  if (IsEmpty()) return other->IsEmpty();
+  if (other->IsEmpty()) return false;
+  if (length() != other->length()) return false;
+  for (int i = 0; i < length(); ++i) {
+    if (get(i) != other->get(i) && i != kContentArrayIndex) return false;
+  }
+  return GetContentArray()->IsEqualTo(other->GetContentArray());
+}
+#endif
+
+
+static StaticResource<StringInputBuffer> string_input_buffer;
+
+
+bool String::LooksValid() {
+  if (!Heap::Contains(this)) return false;
+  return true;
+}
+
+
+int String::Utf8Length() {
+  if (IsAsciiRepresentation()) return length();
+  // Attempt to flatten before accessing the string.  It probably
+  // doesn't make Utf8Length faster, but it is very likely that
+  // the string will be accessed later (for example by WriteUtf8)
+  // so it's still a good idea.
+  TryFlattenIfNotFlat();
+  Access<StringInputBuffer> buffer(&string_input_buffer);
+  buffer->Reset(0, this);
+  int result = 0;
+  while (buffer->has_more())
+    result += unibrow::Utf8::Length(buffer->GetNext());
+  return result;
+}
+
+
+Vector<const char> String::ToAsciiVector() {
+  ASSERT(IsAsciiRepresentation());
+  ASSERT(IsFlat());
+
+  int offset = 0;
+  int length = this->length();
+  StringRepresentationTag string_tag = StringShape(this).representation_tag();
+  String* string = this;
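+  // Unwrap a slice or a flattened cons to reach the sequential or external
+  // string that actually holds the characters.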
+  if (string_tag == kSlicedStringTag) {
+    SlicedString* sliced = SlicedString::cast(string);
+    offset += sliced->start();
+    string = sliced->buffer();
+    string_tag = StringShape(string).representation_tag();
+  } else if (string_tag == kConsStringTag) {
+    ConsString* cons = ConsString::cast(string);
+    ASSERT(cons->second()->length() == 0);
+    string = cons->first();
+    string_tag = StringShape(string).representation_tag();
+  }
+  if (string_tag == kSeqStringTag) {
+    SeqAsciiString* seq = SeqAsciiString::cast(string);
+    char* start = seq->GetChars();
+    return Vector<const char>(start + offset, length);
+  }
+  ASSERT(string_tag == kExternalStringTag);
+  ExternalAsciiString* ext = ExternalAsciiString::cast(string);
+  const char* start = ext->resource()->data();
+  return Vector<const char>(start + offset, length);
+}
+
+
+Vector<const uc16> String::ToUC16Vector() {
+  ASSERT(IsTwoByteRepresentation());
+  ASSERT(IsFlat());
+
+  int offset = 0;
+  int length = this->length();
+  StringRepresentationTag string_tag = StringShape(this).representation_tag();
+  String* string = this;
+  if (string_tag == kSlicedStringTag) {
+    SlicedString* sliced = SlicedString::cast(string);
+    offset += sliced->start();
+    string = String::cast(sliced->buffer());
+    string_tag = StringShape(string).representation_tag();
+  } else if (string_tag == kConsStringTag) {
+    ConsString* cons = ConsString::cast(string);
+    ASSERT(cons->second()->length() == 0);
+    string = cons->first();
+    string_tag = StringShape(string).representation_tag();
+  }
+  if (string_tag == kSeqStringTag) {
+    SeqTwoByteString* seq = SeqTwoByteString::cast(string);
+    return Vector<const uc16>(seq->GetChars() + offset, length);
+  }
+  ASSERT(string_tag == kExternalStringTag);
+  ExternalTwoByteString* ext = ExternalTwoByteString::cast(string);
+  const uc16* start =
+      reinterpret_cast<const uc16*>(ext->resource()->data());
+  return Vector<const uc16>(start + offset, length);
+}
+
+
+SmartPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
+                                     RobustnessFlag robust_flag,
+                                     int offset,
+                                     int length,
+                                     int* length_return) {
+  ASSERT(NativeAllocationChecker::allocation_allowed());
+  if (robust_flag == ROBUST_STRING_TRAVERSAL && !LooksValid()) {
+    return SmartPointer<char>(NULL);
+  }
+
+  // A negative length means to the end of the string.
+  if (length < 0) length = kMaxInt - offset;
+
+  // Compute the size of the UTF-8 string. Start at the specified offset.
+  Access<StringInputBuffer> buffer(&string_input_buffer);
+  buffer->Reset(offset, this);
+  int character_position = offset;
+  int utf8_bytes = 0;
+  while (buffer->has_more()) {
+    uint16_t character = buffer->GetNext();
+    if (character_position < offset + length) {
+      utf8_bytes += unibrow::Utf8::Length(character);
+    }
+    character_position++;
+  }
+
+  if (length_return) {
+    *length_return = utf8_bytes;
+  }
+
+  char* result = NewArray<char>(utf8_bytes + 1);
+
+  // Convert the UTF-16 string to a UTF-8 buffer. Start at the specified offset.
+  buffer->Rewind();
+  buffer->Seek(offset);
+  character_position = offset;
+  int utf8_byte_position = 0;
+  while (buffer->has_more()) {
+    uint16_t character = buffer->GetNext();
+    if (character_position < offset + length) {
+      if (allow_nulls == DISALLOW_NULLS && character == 0) {
+        character = ' ';
+      }
+      utf8_byte_position +=
+          unibrow::Utf8::Encode(result + utf8_byte_position, character);
+    }
+    character_position++;
+  }
+  result[utf8_byte_position] = 0;
+  return SmartPointer<char>(result);
+}
+
+
+SmartPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
+                                     RobustnessFlag robust_flag,
+                                     int* length_return) {
+  return ToCString(allow_nulls, robust_flag, 0, -1, length_return);
+}
+
+
+const uc16* String::GetTwoByteData() {
+  return GetTwoByteData(0);
+}
+
+
+const uc16* String::GetTwoByteData(unsigned start) {
+  ASSERT(!IsAsciiRepresentation());
+  switch (StringShape(this).representation_tag()) {
+    case kSeqStringTag:
+      return SeqTwoByteString::cast(this)->SeqTwoByteStringGetData(start);
+    case kExternalStringTag:
+      return ExternalTwoByteString::cast(this)->
+        ExternalTwoByteStringGetData(start);
+    case kSlicedStringTag: {
+      SlicedString* sliced_string = SlicedString::cast(this);
+      String* buffer = sliced_string->buffer();
+      if (StringShape(buffer).IsCons()) {
+        ConsString* cs = ConsString::cast(buffer);
+        // Flattened string.
+        ASSERT(cs->second()->length() == 0);
+        buffer = cs->first();
+      }
+      return buffer->GetTwoByteData(start + sliced_string->start());
+    }
+    case kConsStringTag:
+      UNREACHABLE();
+      return NULL;
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+SmartPointer<uc16> String::ToWideCString(RobustnessFlag robust_flag) {
+  ASSERT(NativeAllocationChecker::allocation_allowed());
+
+  if (robust_flag == ROBUST_STRING_TRAVERSAL && !LooksValid()) {
+    return SmartPointer<uc16>();
+  }
+
+  Access<StringInputBuffer> buffer(&string_input_buffer);
+  buffer->Reset(this);
+
+  uc16* result = NewArray<uc16>(length() + 1);
+
+  int i = 0;
+  while (buffer->has_more()) {
+    uint16_t character = buffer->GetNext();
+    result[i++] = character;
+  }
+  result[i] = 0;
+  return SmartPointer<uc16>(result);
+}
+
+
+const uc16* SeqTwoByteString::SeqTwoByteStringGetData(unsigned start) {
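+  // Strip the heap object tag and skip past the header to reach the
+  // character data, then advance to the requested start position.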
+  return reinterpret_cast<uc16*>(
+      reinterpret_cast<char*>(this) - kHeapObjectTag + kHeaderSize) + start;
+}
+
+
+void SeqTwoByteString::SeqTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
+                                                           unsigned* offset_ptr,
+                                                           unsigned max_chars) {
+  unsigned chars_read = 0;
+  unsigned offset = *offset_ptr;
+  while (chars_read < max_chars) {
+    uint16_t c = *reinterpret_cast<uint16_t*>(
+        reinterpret_cast<char*>(this) -
+            kHeapObjectTag + kHeaderSize + offset * kShortSize);
+    if (c <= kMaxAsciiCharCode) {
+      // Fast case for ASCII characters.  Cursor is an input/output argument.
+      if (!unibrow::CharacterStream::EncodeAsciiCharacter(c,
+                                                          rbb->util_buffer,
+                                                          rbb->capacity,
+                                                          rbb->cursor)) {
+        break;
+      }
+    } else {
+      if (!unibrow::CharacterStream::EncodeNonAsciiCharacter(c,
+                                                             rbb->util_buffer,
+                                                             rbb->capacity,
+                                                             rbb->cursor)) {
+        break;
+      }
+    }
+    offset++;
+    chars_read++;
+  }
+  *offset_ptr = offset;
+  rbb->remaining += chars_read;
+}
+
+
+const unibrow::byte* SeqAsciiString::SeqAsciiStringReadBlock(
+    unsigned* remaining,
+    unsigned* offset_ptr,
+    unsigned max_chars) {
+  const unibrow::byte* b = reinterpret_cast<unibrow::byte*>(this) -
+      kHeapObjectTag + kHeaderSize + *offset_ptr * kCharSize;
+  *remaining = max_chars;
+  *offset_ptr += max_chars;
+  return b;
+}
+
+
+// This will iterate unless the block of string data spans two 'halves' of
+// a ConsString, in which case it will recurse.  Since the block of string
+// data to be read has a maximum size this limits the maximum recursion
+// depth to something sane.  Since C++ does not have tail call recursion
+// elimination, the iteration must be explicit. Since this is not an
+// -IntoBuffer method it can delegate to one of the efficient
+// *AsciiStringReadBlock routines.
+const unibrow::byte* ConsString::ConsStringReadBlock(ReadBlockBuffer* rbb,
+                                                     unsigned* offset_ptr,
+                                                     unsigned max_chars) {
+  ConsString* current = this;
+  unsigned offset = *offset_ptr;
+  int offset_correction = 0;
+
+  while (true) {
+    String* left = current->first();
+    unsigned left_length = (unsigned)left->length();
+    if (left_length > offset &&
+        (max_chars <= left_length - offset ||
+         (rbb->capacity <= left_length - offset &&
+          (max_chars = left_length - offset, true)))) {  // comma operator!
+      // Left hand side only - iterate unless we have reached the bottom of
+      // the cons tree.  The assignment on the left of the comma operator is
+      // in order to make use of the fact that the -IntoBuffer routines can
+      // produce at most 'capacity' characters.  This enables us to postpone
+      // the point where we switch to the -IntoBuffer routines (below) in order
+      // to maximize the chances of delegating a big chunk of work to the
+      // efficient *AsciiStringReadBlock routines.
+      if (StringShape(left).IsCons()) {
+        current = ConsString::cast(left);
+        continue;
+      } else {
+        const unibrow::byte* answer =
+            String::ReadBlock(left, rbb, &offset, max_chars);
+        *offset_ptr = offset + offset_correction;
+        return answer;
+      }
+    } else if (left_length <= offset) {
+      // Right hand side only - iterate unless we have reached the bottom of
+      // the cons tree.
+      String* right = current->second();
+      offset -= left_length;
+      offset_correction += left_length;
+      if (StringShape(right).IsCons()) {
+        current = ConsString::cast(right);
+        continue;
+      } else {
+        const unibrow::byte* answer =
+            String::ReadBlock(right, rbb, &offset, max_chars);
+        *offset_ptr = offset + offset_correction;
+        return answer;
+      }
+    } else {
+      // The block to be read spans two sides of the ConsString, so we call the
+      // -IntoBuffer version, which will recurse.  The -IntoBuffer methods
+      // are able to assemble data from several part strings because they use
+      // the util_buffer to store their data and never return direct pointers
+      // to their storage.  We don't try to read more than the buffer capacity
+      // here or we can get too much recursion.
+      ASSERT(rbb->remaining == 0);
+      ASSERT(rbb->cursor == 0);
+      current->ConsStringReadBlockIntoBuffer(
+          rbb,
+          &offset,
+          max_chars > rbb->capacity ? rbb->capacity : max_chars);
+      *offset_ptr = offset + offset_correction;
+      return rbb->util_buffer;
+    }
+  }
+}
+
+
+const unibrow::byte* SlicedString::SlicedStringReadBlock(ReadBlockBuffer* rbb,
+                                                         unsigned* offset_ptr,
+                                                         unsigned max_chars) {
+  String* backing = buffer();
+  unsigned offset = start() + *offset_ptr;
+  unsigned length = backing->length();
+  if (max_chars > length - offset) {
+    max_chars = length - offset;
+  }
+  const unibrow::byte* answer =
+      String::ReadBlock(backing, rbb, &offset, max_chars);
+  *offset_ptr = offset - start();
+  return answer;
+}
+
+
+uint16_t ExternalAsciiString::ExternalAsciiStringGet(int index) {
+  ASSERT(index >= 0 && index < length());
+  return resource()->data()[index];
+}
+
+
+const unibrow::byte* ExternalAsciiString::ExternalAsciiStringReadBlock(
+      unsigned* remaining,
+      unsigned* offset_ptr,
+      unsigned max_chars) {
+  // Cast const char* to unibrow::byte* (signedness difference).
+  const unibrow::byte* b =
+      reinterpret_cast<const unibrow::byte*>(resource()->data()) + *offset_ptr;
+  *remaining = max_chars;
+  *offset_ptr += max_chars;
+  return b;
+}
+
+
+const uc16* ExternalTwoByteString::ExternalTwoByteStringGetData(
+      unsigned start) {
+  return resource()->data() + start;
+}
+
+
+uint16_t ExternalTwoByteString::ExternalTwoByteStringGet(int index) {
+  ASSERT(index >= 0 && index < length());
+  return resource()->data()[index];
+}
+
+
+void ExternalTwoByteString::ExternalTwoByteStringReadBlockIntoBuffer(
+      ReadBlockBuffer* rbb,
+      unsigned* offset_ptr,
+      unsigned max_chars) {
+  unsigned chars_read = 0;
+  unsigned offset = *offset_ptr;
+  const uint16_t* data = resource()->data();
+  while (chars_read < max_chars) {
+    uint16_t c = data[offset];
+    if (c <= kMaxAsciiCharCode) {
+      // Fast case for ASCII characters.  Cursor is an input/output argument.
+      if (!unibrow::CharacterStream::EncodeAsciiCharacter(c,
+                                                          rbb->util_buffer,
+                                                          rbb->capacity,
+                                                          rbb->cursor))
+        break;
+    } else {
+      if (!unibrow::CharacterStream::EncodeNonAsciiCharacter(c,
+                                                             rbb->util_buffer,
+                                                             rbb->capacity,
+                                                             rbb->cursor))
+        break;
+    }
+    offset++;
+    chars_read++;
+  }
+  *offset_ptr = offset;
+  rbb->remaining += chars_read;
+}
+
+
+void SeqAsciiString::SeqAsciiStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
+                                                 unsigned* offset_ptr,
+                                                 unsigned max_chars) {
+  unsigned capacity = rbb->capacity - rbb->cursor;
+  if (max_chars > capacity) max_chars = capacity;
+  memcpy(rbb->util_buffer + rbb->cursor,
+         reinterpret_cast<char*>(this) - kHeapObjectTag + kHeaderSize +
+             *offset_ptr * kCharSize,
+         max_chars);
+  rbb->remaining += max_chars;
+  *offset_ptr += max_chars;
+  rbb->cursor += max_chars;
+}
+
+
+void ExternalAsciiString::ExternalAsciiStringReadBlockIntoBuffer(
+      ReadBlockBuffer* rbb,
+      unsigned* offset_ptr,
+      unsigned max_chars) {
+  unsigned capacity = rbb->capacity - rbb->cursor;
+  if (max_chars > capacity) max_chars = capacity;
+  memcpy(rbb->util_buffer + rbb->cursor,
+         resource()->data() + *offset_ptr,
+         max_chars);
+  rbb->remaining += max_chars;
+  *offset_ptr += max_chars;
+  rbb->cursor += max_chars;
+}
+
+
+// This method determines the type of string involved and then copies
+// a whole chunk of characters into a buffer, or returns a pointer to a buffer
+// where they can be found.  The pointer is not necessarily valid across a GC
+// (see AsciiStringReadBlock).
+const unibrow::byte* String::ReadBlock(String* input,
+                                       ReadBlockBuffer* rbb,
+                                       unsigned* offset_ptr,
+                                       unsigned max_chars) {
+  ASSERT(*offset_ptr <= static_cast<unsigned>(input->length()));
+  if (max_chars == 0) {
+    rbb->remaining = 0;
+    return NULL;
+  }
+  switch (StringShape(input).representation_tag()) {
+    case kSeqStringTag:
+      if (input->IsAsciiRepresentation()) {
+        SeqAsciiString* str = SeqAsciiString::cast(input);
+        return str->SeqAsciiStringReadBlock(&rbb->remaining,
+                                            offset_ptr,
+                                            max_chars);
+      } else {
+        SeqTwoByteString* str = SeqTwoByteString::cast(input);
+        str->SeqTwoByteStringReadBlockIntoBuffer(rbb,
+                                                 offset_ptr,
+                                                 max_chars);
+        return rbb->util_buffer;
+      }
+    case kConsStringTag:
+      return ConsString::cast(input)->ConsStringReadBlock(rbb,
+                                                          offset_ptr,
+                                                          max_chars);
+    case kSlicedStringTag:
+      return SlicedString::cast(input)->SlicedStringReadBlock(rbb,
+                                                              offset_ptr,
+                                                              max_chars);
+    case kExternalStringTag:
+      if (input->IsAsciiRepresentation()) {
+        return ExternalAsciiString::cast(input)->ExternalAsciiStringReadBlock(
+            &rbb->remaining,
+            offset_ptr,
+            max_chars);
+      } else {
+        ExternalTwoByteString::cast(input)->
+            ExternalTwoByteStringReadBlockIntoBuffer(rbb,
+                                                     offset_ptr,
+                                                     max_chars);
+        return rbb->util_buffer;
+      }
+    default:
+      break;
+  }
+
+  UNREACHABLE();
+  return 0;
+}
+
+
+Relocatable* Relocatable::top_ = NULL;
+
+
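+// Walk the list of live Relocatable objects (linked through prev_) and give
+// each of them a chance to recompute pointers after a garbage collection.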
+void Relocatable::PostGarbageCollectionProcessing() {
+  Relocatable* current = top_;
+  while (current != NULL) {
+    current->PostGarbageCollection();
+    current = current->prev_;
+  }
+}
+
+
+// Reserve space for statics needing saving and restoring.
+int Relocatable::ArchiveSpacePerThread() {
+  return sizeof(top_);
+}
+
+
+// Archive statics that are thread local.
+char* Relocatable::ArchiveState(char* to) {
+  *reinterpret_cast<Relocatable**>(to) = top_;
+  top_ = NULL;
+  return to + ArchiveSpacePerThread();
+}
+
+
+// Restore statics that are thread local.
+char* Relocatable::RestoreState(char* from) {
+  top_ = *reinterpret_cast<Relocatable**>(from);
+  return from + ArchiveSpacePerThread();
+}
+
+
+char* Relocatable::Iterate(ObjectVisitor* v, char* thread_storage) {
+  Relocatable* top = *reinterpret_cast<Relocatable**>(thread_storage);
+  Iterate(v, top);
+  return thread_storage + ArchiveSpacePerThread();
+}
+
+
+void Relocatable::Iterate(ObjectVisitor* v) {
+  Iterate(v, top_);
+}
+
+
+void Relocatable::Iterate(ObjectVisitor* v, Relocatable* top) {
+  Relocatable* current = top;
+  while (current != NULL) {
+    current->IterateInstance(v);
+    current = current->prev_;
+  }
+}
+
+
+FlatStringReader::FlatStringReader(Handle<String> str)
+    : str_(str.location()),
+      length_(str->length()) {
+  PostGarbageCollection();
+}
+
+
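+// A reader over plain character data keeps no string handle, so str_ stays
+// NULL and PostGarbageCollection below does nothing for it.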
+FlatStringReader::FlatStringReader(Vector<const char> input)
+    : str_(0),
+      is_ascii_(true),
+      length_(input.length()),
+      start_(input.start()) { }
+
+
+void FlatStringReader::PostGarbageCollection() {
+  if (str_ == NULL) return;
+  Handle<String> str(str_);
+  ASSERT(str->IsFlat());
+  is_ascii_ = str->IsAsciiRepresentation();
+  if (is_ascii_) {
+    start_ = str->ToAsciiVector().start();
+  } else {
+    start_ = str->ToUC16Vector().start();
+  }
+}
+
+
+void StringInputBuffer::Seek(unsigned pos) {
+  Reset(pos, input_);
+}
+
+
+void SafeStringInputBuffer::Seek(unsigned pos) {
+  Reset(pos, input_);
+}
+
+
+// This method determines the type of string involved and then copies
+// a whole chunk of characters into a buffer.  It can be used with strings
+// that have been glued together to form a ConsString and which must cooperate
+// to fill up a buffer.
+void String::ReadBlockIntoBuffer(String* input,
+                                 ReadBlockBuffer* rbb,
+                                 unsigned* offset_ptr,
+                                 unsigned max_chars) {
+  ASSERT(*offset_ptr <= (unsigned)input->length());
+  if (max_chars == 0) return;
+
+  switch (StringShape(input).representation_tag()) {
+    case kSeqStringTag:
+      if (input->IsAsciiRepresentation()) {
+        SeqAsciiString::cast(input)->SeqAsciiStringReadBlockIntoBuffer(rbb,
+                                                                 offset_ptr,
+                                                                 max_chars);
+        return;
+      } else {
+        SeqTwoByteString::cast(input)->SeqTwoByteStringReadBlockIntoBuffer(rbb,
+                                                                     offset_ptr,
+                                                                     max_chars);
+        return;
+      }
+    case kConsStringTag:
+      ConsString::cast(input)->ConsStringReadBlockIntoBuffer(rbb,
+                                                             offset_ptr,
+                                                             max_chars);
+      return;
+    case kSlicedStringTag:
+      SlicedString::cast(input)->SlicedStringReadBlockIntoBuffer(rbb,
+                                                                 offset_ptr,
+                                                                 max_chars);
+      return;
+    case kExternalStringTag:
+      if (input->IsAsciiRepresentation()) {
+        ExternalAsciiString::cast(input)->
+             ExternalAsciiStringReadBlockIntoBuffer(rbb, offset_ptr, max_chars);
+      } else {
+        ExternalTwoByteString::cast(input)->
+             ExternalTwoByteStringReadBlockIntoBuffer(rbb,
+                                                      offset_ptr,
+                                                      max_chars);
+      }
+      return;
+    default:
+      break;
+  }
+
+  UNREACHABLE();
+  return;
+}
+
+
+const unibrow::byte* String::ReadBlock(String* input,
+                                       unibrow::byte* util_buffer,
+                                       unsigned capacity,
+                                       unsigned* remaining,
+                                       unsigned* offset_ptr) {
+  ASSERT(*offset_ptr <= (unsigned)input->length());
+  unsigned chars = input->length() - *offset_ptr;
+  ReadBlockBuffer rbb(util_buffer, 0, capacity, 0);
+  const unibrow::byte* answer = ReadBlock(input, &rbb, offset_ptr, chars);
+  ASSERT(rbb.remaining <= static_cast<unsigned>(input->length()));
+  *remaining = rbb.remaining;
+  return answer;
+}
+
+
+const unibrow::byte* String::ReadBlock(String** raw_input,
+                                       unibrow::byte* util_buffer,
+                                       unsigned capacity,
+                                       unsigned* remaining,
+                                       unsigned* offset_ptr) {
+  Handle<String> input(raw_input);
+  ASSERT(*offset_ptr <= (unsigned)input->length());
+  unsigned chars = input->length() - *offset_ptr;
+  if (chars > capacity) chars = capacity;
+  ReadBlockBuffer rbb(util_buffer, 0, capacity, 0);
+  ReadBlockIntoBuffer(*input, &rbb, offset_ptr, chars);
+  ASSERT(rbb.remaining <= static_cast<unsigned>(input->length()));
+  *remaining = rbb.remaining;
+  return rbb.util_buffer;
+}
+
+
+// This will iterate unless the block of string data spans two 'halves' of
+// a ConsString, in which case it will recurse.  Since the block of string
+// data to be read has a maximum size this limits the maximum recursion
+// depth to something sane.  Since C++ does not have tail call recursion
+// elimination, the iteration must be explicit.
+void ConsString::ConsStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
+                                               unsigned* offset_ptr,
+                                               unsigned max_chars) {
+  ConsString* current = this;
+  unsigned offset = *offset_ptr;
+  int offset_correction = 0;
+
+  while (true) {
+    String* left = current->first();
+    unsigned left_length = (unsigned)left->length();
+    if (left_length > offset &&
+      max_chars <= left_length - offset) {
+      // Left hand side only - iterate unless we have reached the bottom of
+      // the cons tree.
+      if (StringShape(left).IsCons()) {
+        current = ConsString::cast(left);
+        continue;
+      } else {
+        String::ReadBlockIntoBuffer(left, rbb, &offset, max_chars);
+        *offset_ptr = offset + offset_correction;
+        return;
+      }
+    } else if (left_length <= offset) {
+      // Right hand side only - iterate unless we have reached the bottom of
+      // the cons tree.
+      offset -= left_length;
+      offset_correction += left_length;
+      String* right = current->second();
+      if (StringShape(right).IsCons()) {
+        current = ConsString::cast(right);
+        continue;
+      } else {
+        String::ReadBlockIntoBuffer(right, rbb, &offset, max_chars);
+        *offset_ptr = offset + offset_correction;
+        return;
+      }
+    } else {
+      // The block to be read spans two sides of the ConsString, so we recurse.
+      // First recurse on the left.
+      max_chars -= left_length - offset;
+      String::ReadBlockIntoBuffer(left, rbb, &offset, left_length - offset);
+      // We may have reached the max or there may not have been enough space
+      // in the buffer for the characters in the left hand side.
+      if (offset == left_length) {
+        // Recurse on the right.
+        String* right = String::cast(current->second());
+        offset -= left_length;
+        offset_correction += left_length;
+        String::ReadBlockIntoBuffer(right, rbb, &offset, max_chars);
+      }
+      *offset_ptr = offset + offset_correction;
+      return;
+    }
+  }
+}
+
+
+void SlicedString::SlicedStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
+                                                   unsigned* offset_ptr,
+                                                   unsigned max_chars) {
+  String* backing = buffer();
+  unsigned offset = start() + *offset_ptr;
+  unsigned length = backing->length();
+  if (max_chars > length - offset) {
+    max_chars = length - offset;
+  }
+  String::ReadBlockIntoBuffer(backing, rbb, &offset, max_chars);
+  *offset_ptr = offset - start();
+}
+
+
+void ConsString::ConsStringIterateBody(ObjectVisitor* v) {
+  IteratePointers(v, kFirstOffset, kSecondOffset + kPointerSize);
+}
+
+
+void JSGlobalPropertyCell::JSGlobalPropertyCellIterateBody(ObjectVisitor* v) {
+  IteratePointers(v, kValueOffset, kValueOffset + kPointerSize);
+}
+
+
+uint16_t ConsString::ConsStringGet(int index) {
+  ASSERT(index >= 0 && index < this->length());
+
+  // Check for a flattened cons string
+  if (second()->length() == 0) {
+    String* left = first();
+    return left->Get(index);
+  }
+
+  String* string = String::cast(this);
+
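+  // Descend the cons tree: go left while the index falls in the first part,
+  // otherwise subtract the length of the first part and go right.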
+  while (true) {
+    if (StringShape(string).IsCons()) {
+      ConsString* cons_string = ConsString::cast(string);
+      String* left = cons_string->first();
+      if (left->length() > index) {
+        string = left;
+      } else {
+        index -= left->length();
+        string = cons_string->second();
+      }
+    } else {
+      return string->Get(index);
+    }
+  }
+
+  UNREACHABLE();
+  return 0;
+}
+
+
+template <typename sinkchar>
+void String::WriteToFlat(String* src,
+                         sinkchar* sink,
+                         int f,
+                         int t) {
+  String* source = src;
+  int from = f;
+  int to = t;
+  while (true) {
+    ASSERT(0 <= from && from <= to && to <= source->length());
+    switch (StringShape(source).full_representation_tag()) {
+      case kAsciiStringTag | kExternalStringTag: {
+        CopyChars(sink,
+                  ExternalAsciiString::cast(source)->resource()->data() + from,
+                  to - from);
+        return;
+      }
+      case kTwoByteStringTag | kExternalStringTag: {
+        const uc16* data =
+            ExternalTwoByteString::cast(source)->resource()->data();
+        CopyChars(sink,
+                  data + from,
+                  to - from);
+        return;
+      }
+      case kAsciiStringTag | kSeqStringTag: {
+        CopyChars(sink,
+                  SeqAsciiString::cast(source)->GetChars() + from,
+                  to - from);
+        return;
+      }
+      case kTwoByteStringTag | kSeqStringTag: {
+        CopyChars(sink,
+                  SeqTwoByteString::cast(source)->GetChars() + from,
+                  to - from);
+        return;
+      }
+      case kAsciiStringTag | kSlicedStringTag:
+      case kTwoByteStringTag | kSlicedStringTag: {
+        SlicedString* sliced_string = SlicedString::cast(source);
+        int start = sliced_string->start();
+        from += start;
+        to += start;
+        source = String::cast(sliced_string->buffer());
+        break;
+      }
+      case kAsciiStringTag | kConsStringTag:
+      case kTwoByteStringTag | kConsStringTag: {
+        ConsString* cons_string = ConsString::cast(source);
+        String* first = cons_string->first();
+        int boundary = first->length();
+        if (to - boundary >= boundary - from) {
+          // Right hand side is longer.  Recurse over left.
+          if (from < boundary) {
+            WriteToFlat(first, sink, from, boundary);
+            sink += boundary - from;
+            from = 0;
+          } else {
+            from -= boundary;
+          }
+          to -= boundary;
+          source = cons_string->second();
+        } else {
+          // Left hand side is longer.  Recurse over right.
+          if (to > boundary) {
+            String* second = cons_string->second();
+            WriteToFlat(second,
+                        sink + boundary - from,
+                        0,
+                        to - boundary);
+            to = boundary;
+          }
+          source = first;
+        }
+        break;
+      }
+    }
+  }
+}
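+
+// Note on the cons-string cases above: the loop continues iteratively on the
+// longer half and only recurses on the shorter one, so the recursion depth
+// stays roughly logarithmic in the number of characters being flattened.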
+
+
+void SlicedString::SlicedStringIterateBody(ObjectVisitor* v) {
+  IteratePointer(v, kBufferOffset);
+}
+
+
+uint16_t SlicedString::SlicedStringGet(int index) {
+  ASSERT(index >= 0 && index < this->length());
+  // Delegate to the buffer string.
+  String* underlying = buffer();
+  return underlying->Get(start() + index);
+}
+
+
+template <typename IteratorA, typename IteratorB>
+static inline bool CompareStringContents(IteratorA* ia, IteratorB* ib) {
+  // General slow case check.  We know that the ia and ib iterators
+  // have the same length.
+  while (ia->has_more()) {
+    uc32 ca = ia->GetNext();
+    uc32 cb = ib->GetNext();
+    if (ca != cb)
+      return false;
+  }
+  return true;
+}
+
+
+// Compares the contents of two strings by reading and comparing
+// int-sized blocks of characters.
+template <typename Char>
+static inline bool CompareRawStringContents(Vector<Char> a, Vector<Char> b) {
+  int length = a.length();
+  ASSERT_EQ(length, b.length());
+  const Char* pa = a.start();
+  const Char* pb = b.start();
+  int i = 0;
+#ifndef V8_HOST_CAN_READ_UNALIGNED
+  // If this architecture isn't comfortable reading unaligned ints
+  // then we have to check that the strings are aligned before
+  // comparing them blockwise.
+  const int kAlignmentMask = sizeof(uint32_t) - 1;  // NOLINT
+  uint32_t pa_addr = reinterpret_cast<uint32_t>(pa);
+  uint32_t pb_addr = reinterpret_cast<uint32_t>(pb);
+  if (((pa_addr & kAlignmentMask) | (pb_addr & kAlignmentMask)) == 0) {
+#endif
+    const int kStepSize = sizeof(int) / sizeof(Char);  // NOLINT
+    int endpoint = length - kStepSize;
+    // Compare blocks until we reach near the end of the string.
+    for (; i <= endpoint; i += kStepSize) {
+      uint32_t wa = *reinterpret_cast<const uint32_t*>(pa + i);
+      uint32_t wb = *reinterpret_cast<const uint32_t*>(pb + i);
+      if (wa != wb) {
+        return false;
+      }
+    }
+#ifndef V8_HOST_CAN_READ_UNALIGNED
+  }
+#endif
+  // Compare the remaining characters that didn't fit into a block.
+  for (; i < length; i++) {
+    if (a[i] != b[i]) {
+      return false;
+    }
+  }
+  return true;
+}
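+
+// Here kStepSize is sizeof(int) / sizeof(Char): with a 32-bit int each block
+// compare covers 4 ASCII characters or 2 two-byte characters, and the final
+// loop picks up whatever falls outside the blockwise comparison.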
+
+
+static StringInputBuffer string_compare_buffer_b;
+
+
+template <typename IteratorA>
+static inline bool CompareStringContentsPartial(IteratorA* ia, String* b) {
+  if (b->IsFlat()) {
+    if (b->IsAsciiRepresentation()) {
+      VectorIterator<char> ib(b->ToAsciiVector());
+      return CompareStringContents(ia, &ib);
+    } else {
+      VectorIterator<uc16> ib(b->ToUC16Vector());
+      return CompareStringContents(ia, &ib);
+    }
+  } else {
+    string_compare_buffer_b.Reset(0, b);
+    return CompareStringContents(ia, &string_compare_buffer_b);
+  }
+}
+
+
+static StringInputBuffer string_compare_buffer_a;
+
+
+bool String::SlowEquals(String* other) {
+  // Fast check: negative check with lengths.
+  int len = length();
+  if (len != other->length()) return false;
+  if (len == 0) return true;
+
+  // Fast check: if hash code is computed for both strings
+  // a fast negative check can be performed.
+  if (HasHashCode() && other->HasHashCode()) {
+    if (Hash() != other->Hash()) return false;
+  }
+
+  if (StringShape(this).IsSequentialAscii() &&
+      StringShape(other).IsSequentialAscii()) {
+    const char* str1 = SeqAsciiString::cast(this)->GetChars();
+    const char* str2 = SeqAsciiString::cast(other)->GetChars();
+    return CompareRawStringContents(Vector<const char>(str1, len),
+                                    Vector<const char>(str2, len));
+  }
+
+  if (this->IsFlat()) {
+    if (IsAsciiRepresentation()) {
+      Vector<const char> vec1 = this->ToAsciiVector();
+      if (other->IsFlat()) {
+        if (other->IsAsciiRepresentation()) {
+          Vector<const char> vec2 = other->ToAsciiVector();
+          return CompareRawStringContents(vec1, vec2);
+        } else {
+          VectorIterator<char> buf1(vec1);
+          VectorIterator<uc16> ib(other->ToUC16Vector());
+          return CompareStringContents(&buf1, &ib);
+        }
+      } else {
+        VectorIterator<char> buf1(vec1);
+        string_compare_buffer_b.Reset(0, other);
+        return CompareStringContents(&buf1, &string_compare_buffer_b);
+      }
+    } else {
+      Vector<const uc16> vec1 = this->ToUC16Vector();
+      if (other->IsFlat()) {
+        if (other->IsAsciiRepresentation()) {
+          VectorIterator<uc16> buf1(vec1);
+          VectorIterator<char> ib(other->ToAsciiVector());
+          return CompareStringContents(&buf1, &ib);
+        } else {
+          Vector<const uc16> vec2(other->ToUC16Vector());
+          return CompareRawStringContents(vec1, vec2);
+        }
+      } else {
+        VectorIterator<uc16> buf1(vec1);
+        string_compare_buffer_b.Reset(0, other);
+        return CompareStringContents(&buf1, &string_compare_buffer_b);
+      }
+    }
+  } else {
+    string_compare_buffer_a.Reset(0, this);
+    return CompareStringContentsPartial(&string_compare_buffer_a, other);
+  }
+}
+
+
+bool String::MarkAsUndetectable() {
+  if (StringShape(this).IsSymbol()) return false;
+
+  Map* map = this->map();
+  if (map == Heap::short_string_map()) {
+    this->set_map(Heap::undetectable_short_string_map());
+    return true;
+  } else if (map == Heap::medium_string_map()) {
+    this->set_map(Heap::undetectable_medium_string_map());
+    return true;
+  } else if (map == Heap::long_string_map()) {
+    this->set_map(Heap::undetectable_long_string_map());
+    return true;
+  } else if (map == Heap::short_ascii_string_map()) {
+    this->set_map(Heap::undetectable_short_ascii_string_map());
+    return true;
+  } else if (map == Heap::medium_ascii_string_map()) {
+    this->set_map(Heap::undetectable_medium_ascii_string_map());
+    return true;
+  } else if (map == Heap::long_ascii_string_map()) {
+    this->set_map(Heap::undetectable_long_ascii_string_map());
+    return true;
+  }
+  // The rest cannot be marked as undetectable.
+  return false;
+}
+
+
+bool String::IsEqualTo(Vector<const char> str) {
+  int slen = length();
+  Access<Scanner::Utf8Decoder> decoder(Scanner::utf8_decoder());
+  decoder->Reset(str.start(), str.length());
+  int i;
+  for (i = 0; i < slen && decoder->has_more(); i++) {
+    uc32 r = decoder->GetNext();
+    if (Get(i) != r) return false;
+  }
+  return i == slen && !decoder->has_more();
+}
+
+
+uint32_t String::ComputeAndSetHash() {
+  // Should only be called if hash code has not yet been computed.
+  ASSERT(!(length_field() & kHashComputedMask));
+
+  // Compute the hash code.
+  StringInputBuffer buffer(this);
+  uint32_t field = ComputeLengthAndHashField(&buffer, length());
+
+  // Store the hash code in the object.
+  set_length_field(field);
+
+  // Check the hash code is there.
+  ASSERT(length_field() & kHashComputedMask);
+  uint32_t result = field >> kHashShift;
+  ASSERT(result != 0);  // Ensure that the hash value of 0 is never computed.
+  return result;
+}
+
+
+bool String::ComputeArrayIndex(unibrow::CharacterStream* buffer,
+                               uint32_t* index,
+                               int length) {
+  if (length == 0 || length > kMaxArrayIndexSize) return false;
+  uc32 ch = buffer->GetNext();
+
+  // If the string begins with a '0' character, it must only consist
+  // of it to be a legal array index.
+  if (ch == '0') {
+    *index = 0;
+    return length == 1;
+  }
+
+  // Convert string to uint32 array index; character by character.
+  int d = ch - '0';
+  if (d < 0 || d > 9) return false;
+  uint32_t result = d;
+  while (buffer->has_more()) {
+    d = buffer->GetNext() - '0';
+    if (d < 0 || d > 9) return false;
+    // Check that the new result is below the 32 bit limit.
+    if (result > 429496729U - ((d > 5) ? 1 : 0)) return false;
+    result = (result * 10) + d;
+  }
+
+  *index = result;
+  return true;
+}
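+
+// Worked example of the overflow guard above: parsing "4294967295" ends with
+// result == 429496729 and d == 5, which passes the check, while "4294967296"
+// is rejected because d == 6 > 5 tightens the threshold to 429496728.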
+
+
+bool String::SlowAsArrayIndex(uint32_t* index) {
+  if (length() <= kMaxCachedArrayIndexLength) {
+    Hash();  // force computation of hash code
+    uint32_t field = length_field();
+    if ((field & kIsArrayIndexMask) == 0) return false;
+    *index = (field & ((1 << kShortLengthShift) - 1)) >> kLongLengthShift;
+    return true;
+  } else {
+    StringInputBuffer buffer(this);
+    return ComputeArrayIndex(&buffer, index, length());
+  }
+}
+
+
+static inline uint32_t HashField(uint32_t hash, bool is_array_index) {
+  uint32_t result =
+      (hash << String::kLongLengthShift) | String::kHashComputedMask;
+  if (is_array_index) result |= String::kIsArrayIndexMask;
+  return result;
+}
+
+
+uint32_t StringHasher::GetHashField() {
+  ASSERT(is_valid());
+  if (length_ <= String::kMaxShortStringSize) {
+    uint32_t payload;
+    if (is_array_index()) {
+      payload = v8::internal::HashField(array_index(), true);
+    } else {
+      payload = v8::internal::HashField(GetHash(), false);
+    }
+    return (payload & ((1 << String::kShortLengthShift) - 1)) |
+           (length_ << String::kShortLengthShift);
+  } else if (length_ <= String::kMaxMediumStringSize) {
+    uint32_t payload = v8::internal::HashField(GetHash(), false);
+    return (payload & ((1 << String::kMediumLengthShift) - 1)) |
+           (length_ << String::kMediumLengthShift);
+  } else {
+    return v8::internal::HashField(length_, false);
+  }
+}
+
+
+uint32_t String::ComputeLengthAndHashField(unibrow::CharacterStream* buffer,
+                                           int length) {
+  StringHasher hasher(length);
+
+  // Very long strings have a trivial hash that doesn't inspect the
+  // string contents.
+  if (hasher.has_trivial_hash()) {
+    return hasher.GetHashField();
+  }
+
+  // Do the iterative array index computation as long as there is a
+  // chance this is an array index.
+  while (buffer->has_more() && hasher.is_array_index()) {
+    hasher.AddCharacter(buffer->GetNext());
+  }
+
+  // Process the remaining characters without updating the array
+  // index.
+  while (buffer->has_more()) {
+    hasher.AddCharacterNoIndex(buffer->GetNext());
+  }
+
+  return hasher.GetHashField();
+}
+
+
+Object* String::Slice(int start, int end) {
+  if (start == 0 && end == length()) return this;
+  if (StringShape(this).representation_tag() == kSlicedStringTag) {
+    // Translate slices of a SlicedString into slices of the
+    // underlying string buffer.
+    SlicedString* str = SlicedString::cast(this);
+    String* buf = str->buffer();
+    return Heap::AllocateSlicedString(buf,
+                                      str->start() + start,
+                                      str->start() + end);
+  }
+  Object* result = Heap::AllocateSlicedString(this, start, end);
+  if (result->IsFailure()) {
+    return result;
+  }
+  // Due to the way we retry after GC on allocation failure we are not allowed
+  // to fail on allocation after this point.  This is the one-allocation rule.
+
+  // Try to flatten a cons string that is under the sliced string.
+  // This is to avoid memory leaks and possible stack overflows caused by
+  // building 'towers' of sliced strings on cons strings.
+  // This may fail due to an allocation failure (when a GC is needed), but it
+  // will succeed often enough to avoid the problem.  We only have to do this
+  // if Heap::AllocateSlicedString actually returned a SlicedString.  It will
+  // return flat strings for small slices for efficiency reasons.
+  String* answer = String::cast(result);
+  if (StringShape(answer).IsSliced() &&
+      StringShape(this).representation_tag() == kConsStringTag) {
+    TryFlatten();
+    // If the flatten succeeded we might as well make the sliced string point
+    // to the flat string rather than the cons string.
+    String* second = ConsString::cast(this)->second();
+    if (second->length() == 0) {
+      SlicedString::cast(answer)->set_buffer(ConsString::cast(this)->first());
+    }
+  }
+  return answer;
+}
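+
+// Example of the slice-of-slice translation above: Slice(2, 5) applied to a
+// SlicedString with start() == 10 produces a slice covering [12, 15) of the
+// underlying buffer, so a slice of a slice refers to the backing string
+// directly rather than nesting.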
+
+
+void String::PrintOn(FILE* file) {
+  int length = this->length();
+  for (int i = 0; i < length; i++) {
+    fprintf(file, "%c", Get(i));
+  }
+}
+
+
+void Map::CreateBackPointers() {
+  DescriptorArray* descriptors = instance_descriptors();
+  for (int i = 0; i < descriptors->number_of_descriptors(); i++) {
+    if (descriptors->GetType(i) == MAP_TRANSITION) {
+      // Get target.
+      Map* target = Map::cast(descriptors->GetValue(i));
+#ifdef DEBUG
+      // Verify target.
+      Object* source_prototype = prototype();
+      Object* target_prototype = target->prototype();
+      ASSERT(source_prototype->IsJSObject() ||
+             source_prototype->IsMap() ||
+             source_prototype->IsNull());
+      ASSERT(target_prototype->IsJSObject() ||
+             target_prototype->IsNull());
+      ASSERT(source_prototype->IsMap() ||
+             source_prototype == target_prototype);
+#endif
+      // Point target back to source.  set_prototype() will not let us set
+      // the prototype to a map, as we do here.
+      *RawField(target, kPrototypeOffset) = this;
+    }
+  }
+}
+
+
+void Map::ClearNonLiveTransitions(Object* real_prototype) {
+  // Live DescriptorArray objects will be marked, so we must use
+  // low-level accessors to get and modify their data.
+  DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
+      *RawField(this, Map::kInstanceDescriptorsOffset));
+  if (d == Heap::raw_unchecked_empty_descriptor_array()) return;
+  Smi* NullDescriptorDetails =
+      PropertyDetails(NONE, NULL_DESCRIPTOR).AsSmi();
+  FixedArray* contents = reinterpret_cast<FixedArray*>(
+      d->get(DescriptorArray::kContentArrayIndex));
+  ASSERT(contents->length() >= 2);
+  for (int i = 0; i < contents->length(); i += 2) {
+    // If the pair (value, details) is a map transition,
+    // check if the target is live.  If not, null the descriptor.
+    // Also drop the back pointer for that map transition, so that this
+    // map is not reached again by following a back pointer from a
+    // non-live object.
+    PropertyDetails details(Smi::cast(contents->get(i + 1)));
+    if (details.type() == MAP_TRANSITION) {
+      Map* target = reinterpret_cast<Map*>(contents->get(i));
+      ASSERT(target->IsHeapObject());
+      if (!target->IsMarked()) {
+        ASSERT(target->IsMap());
+        contents->set(i + 1, NullDescriptorDetails, SKIP_WRITE_BARRIER);
+        contents->set(i, Heap::null_value(), SKIP_WRITE_BARRIER);
+        ASSERT(target->prototype() == this ||
+               target->prototype() == real_prototype);
+        // Getter prototype() is read-only, set_prototype() has side effects.
+        *RawField(target, Map::kPrototypeOffset) = real_prototype;
+      }
+    }
+  }
+}
+
+
+void Map::MapIterateBody(ObjectVisitor* v) {
+  // Assumes all Object* members are contiguously allocated!
+  IteratePointers(v, kPrototypeOffset, kCodeCacheOffset + kPointerSize);
+}
+
+
+Object* JSFunction::SetInstancePrototype(Object* value) {
+  ASSERT(value->IsJSObject());
+
+  if (has_initial_map()) {
+    initial_map()->set_prototype(value);
+  } else {
+    // Put the value in the initial map field until an initial map is
+    // needed.  At that point, a new initial map is created and the
+    // prototype is put into the initial map where it belongs.
+    set_prototype_or_initial_map(value);
+  }
+  return value;
+}
+
+
+Object* JSFunction::SetPrototype(Object* value) {
+  Object* construct_prototype = value;
+
+  // If the value is not a JSObject, store the value in the map's
+  // constructor field so it can be accessed.  Also, set the prototype
+  // used for constructing objects to the original object prototype.
+  // See ECMA-262 13.2.2.
+  if (!value->IsJSObject()) {
+    // Copy the map so this does not affect unrelated functions.
+    // Remove map transitions because they point to maps with a
+    // different prototype.
+    Object* new_map = map()->CopyDropTransitions();
+    if (new_map->IsFailure()) return new_map;
+    set_map(Map::cast(new_map));
+    map()->set_constructor(value);
+    map()->set_non_instance_prototype(true);
+    construct_prototype =
+        Top::context()->global_context()->initial_object_prototype();
+  } else {
+    map()->set_non_instance_prototype(false);
+  }
+
+  return SetInstancePrototype(construct_prototype);
+}
+
+
+Object* JSFunction::SetInstanceClassName(String* name) {
+  shared()->set_instance_class_name(name);
+  return this;
+}
+
+
+Context* JSFunction::GlobalContextFromLiterals(FixedArray* literals) {
+  return Context::cast(literals->get(JSFunction::kLiteralGlobalContextIndex));
+}
+
+
+void Oddball::OddballIterateBody(ObjectVisitor* v) {
+  // Assumes all Object* members are contiguously allocated!
+  IteratePointers(v, kToStringOffset, kToNumberOffset + kPointerSize);
+}
+
+
+Object* Oddball::Initialize(const char* to_string, Object* to_number) {
+  Object* symbol = Heap::LookupAsciiSymbol(to_string);
+  if (symbol->IsFailure()) return symbol;
+  set_to_string(String::cast(symbol));
+  set_to_number(to_number);
+  return this;
+}
+
+
+bool SharedFunctionInfo::HasSourceCode() {
+  return !script()->IsUndefined() &&
+         !Script::cast(script())->source()->IsUndefined();
+}
+
+
+Object* SharedFunctionInfo::GetSourceCode() {
+  HandleScope scope;
+  if (script()->IsUndefined()) return Heap::undefined_value();
+  Object* source = Script::cast(script())->source();
+  if (source->IsUndefined()) return Heap::undefined_value();
+  return *SubString(Handle<String>(String::cast(source)),
+                    start_position(), end_position());
+}
+
+
+int SharedFunctionInfo::CalculateInstanceSize() {
+  int instance_size =
+      JSObject::kHeaderSize +
+      expected_nof_properties() * kPointerSize;
+  if (instance_size > JSObject::kMaxInstanceSize) {
+    instance_size = JSObject::kMaxInstanceSize;
+  }
+  return instance_size;
+}
+
+
+int SharedFunctionInfo::CalculateInObjectProperties() {
+  return (CalculateInstanceSize() - JSObject::kHeaderSize) / kPointerSize;
+}
+
+
+void SharedFunctionInfo::SetThisPropertyAssignmentsInfo(
+    bool only_this_property_assignments,
+    bool only_simple_this_property_assignments,
+    FixedArray* assignments) {
+  set_compiler_hints(BooleanBit::set(compiler_hints(),
+                                     kHasOnlyThisPropertyAssignments,
+                                     only_this_property_assignments));
+  set_compiler_hints(BooleanBit::set(compiler_hints(),
+                                     kHasOnlySimpleThisPropertyAssignments,
+                                     only_simple_this_property_assignments));
+  set_this_property_assignments(assignments);
+  set_this_property_assignments_count(assignments->length() / 3);
+}
+
+
+void SharedFunctionInfo::ClearThisPropertyAssignmentsInfo() {
+  set_compiler_hints(BooleanBit::set(compiler_hints(),
+                                     kHasOnlyThisPropertyAssignments,
+                                     false));
+  set_compiler_hints(BooleanBit::set(compiler_hints(),
+                                     kHasOnlySimpleThisPropertyAssignments,
+                                     false));
+  set_this_property_assignments(Heap::undefined_value());
+  set_this_property_assignments_count(0);
+}
+
+
+String* SharedFunctionInfo::GetThisPropertyAssignmentName(int index) {
+  Object* obj = this_property_assignments();
+  ASSERT(obj->IsFixedArray());
+  ASSERT(index < this_property_assignments_count());
+  obj = FixedArray::cast(obj)->get(index * 3);
+  ASSERT(obj->IsString());
+  return String::cast(obj);
+}
+
+
+bool SharedFunctionInfo::IsThisPropertyAssignmentArgument(int index) {
+  Object* obj = this_property_assignments();
+  ASSERT(obj->IsFixedArray());
+  ASSERT(index < this_property_assignments_count());
+  obj = FixedArray::cast(obj)->get(index * 3 + 1);
+  return Smi::cast(obj)->value() != -1;
+}
+
+
+int SharedFunctionInfo::GetThisPropertyAssignmentArgument(int index) {
+  ASSERT(IsThisPropertyAssignmentArgument(index));
+  Object* obj =
+      FixedArray::cast(this_property_assignments())->get(index * 3 + 1);
+  return Smi::cast(obj)->value();
+}
+
+
+Object* SharedFunctionInfo::GetThisPropertyAssignmentConstant(int index) {
+  ASSERT(!IsThisPropertyAssignmentArgument(index));
+  Object* obj =
+      FixedArray::cast(this_property_assignments())->get(index * 3 + 2);
+  return obj;
+}
+
+
+// Support function for printing the source code to a StringStream
+// without any allocation in the heap.
+void SharedFunctionInfo::SourceCodePrint(StringStream* accumulator,
+                                         int max_length) {
+  // For some native functions there is no source.
+  if (script()->IsUndefined() ||
+      Script::cast(script())->source()->IsUndefined()) {
+    accumulator->Add("<No Source>");
+    return;
+  }
+
+  // Get the slice of the source for this function.
+  // Don't use String::cast because we don't want more assertion errors while
+  // we are already creating a stack dump.
+  String* script_source =
+      reinterpret_cast<String*>(Script::cast(script())->source());
+
+  if (!script_source->LooksValid()) {
+    accumulator->Add("<Invalid Source>");
+    return;
+  }
+
+  if (!is_toplevel()) {
+    accumulator->Add("function ");
+    Object* name = this->name();
+    if (name->IsString() && String::cast(name)->length() > 0) {
+      accumulator->PrintName(name);
+    }
+  }
+
+  int len = end_position() - start_position();
+  if (len > max_length) {
+    accumulator->Put(script_source,
+                     start_position(),
+                     start_position() + max_length);
+    accumulator->Add("...\n");
+  } else {
+    accumulator->Put(script_source, start_position(), end_position());
+  }
+}
+
+
+void SharedFunctionInfo::SharedFunctionInfoIterateBody(ObjectVisitor* v) {
+  IteratePointers(v, kNameOffset, kConstructStubOffset + kPointerSize);
+  IteratePointers(v, kInstanceClassNameOffset, kScriptOffset + kPointerSize);
+  IteratePointers(v, kDebugInfoOffset, kInferredNameOffset + kPointerSize);
+  IteratePointers(v, kThisPropertyAssignmentsOffset,
+      kThisPropertyAssignmentsOffset + kPointerSize);
+}
+
+
+void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) {
+  ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+  Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+  Object* old_target = target;
+  VisitPointer(&target);
+  CHECK_EQ(target, old_target);  // VisitPointer doesn't change Code* *target.
+}
+
+
+void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
+  ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) && rinfo->IsCallInstruction());
+  Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+  Object* old_target = target;
+  VisitPointer(&target);
+  CHECK_EQ(target, old_target);  // VisitPointer doesn't change Code* *target.
+}
+
+
+void Code::CodeIterateBody(ObjectVisitor* v) {
+  int mode_mask = RelocInfo::kCodeTargetMask |
+                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+                  RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
+                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+
+  for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
+    RelocInfo::Mode rmode = it.rinfo()->rmode();
+    if (rmode == RelocInfo::EMBEDDED_OBJECT) {
+      v->VisitPointer(it.rinfo()->target_object_address());
+    } else if (RelocInfo::IsCodeTarget(rmode)) {
+      v->VisitCodeTarget(it.rinfo());
+    } else if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+      v->VisitExternalReference(it.rinfo()->target_reference_address());
+#ifdef ENABLE_DEBUGGER_SUPPORT
+    } else if (Debug::has_break_points() &&
+               RelocInfo::IsJSReturn(rmode) &&
+               it.rinfo()->IsCallInstruction()) {
+      v->VisitDebugTarget(it.rinfo());
+#endif
+    } else if (rmode == RelocInfo::RUNTIME_ENTRY) {
+      v->VisitRuntimeEntry(it.rinfo());
+    }
+  }
+
+  ScopeInfo<>::IterateScopeInfo(this, v);
+}
+
+
+void Code::Relocate(int delta) {
+  for (RelocIterator it(this, RelocInfo::kApplyMask); !it.done(); it.next()) {
+    it.rinfo()->apply(delta);
+  }
+  CPU::FlushICache(instruction_start(), instruction_size());
+}
+
+
+void Code::CopyFrom(const CodeDesc& desc) {
+  // copy code
+  memmove(instruction_start(), desc.buffer, desc.instr_size);
+
+  // fill gap with zero bytes
+  { byte* p = instruction_start() + desc.instr_size;
+    byte* q = relocation_start();
+    while (p < q) {
+      *p++ = 0;
+    }
+  }
+
+  // copy reloc info
+  memmove(relocation_start(),
+          desc.buffer + desc.buffer_size - desc.reloc_size,
+          desc.reloc_size);
+
+  // unbox handles and relocate
+  int delta = instruction_start() - desc.buffer;
+  int mode_mask = RelocInfo::kCodeTargetMask |
+                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+                  RelocInfo::kApplyMask;
+  for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
+    RelocInfo::Mode mode = it.rinfo()->rmode();
+    if (mode == RelocInfo::EMBEDDED_OBJECT) {
+      Object** p = reinterpret_cast<Object**>(it.rinfo()->target_object());
+      it.rinfo()->set_target_object(*p);
+    } else if (RelocInfo::IsCodeTarget(mode)) {
+      // rewrite code handles in inline cache targets to direct
+      // pointers to the first instruction in the code object
+      Object** p = reinterpret_cast<Object**>(it.rinfo()->target_object());
+      Code* code = Code::cast(*p);
+      it.rinfo()->set_target_address(code->instruction_start());
+    } else {
+      it.rinfo()->apply(delta);
+    }
+  }
+  CPU::FlushICache(instruction_start(), instruction_size());
+}
+
+
+// Locate the source position which is closest to the address in the code. This
+// is using the source position information embedded in the relocation info.
+// The position returned is relative to the beginning of the script where the
+// source for this function is found.
+int Code::SourcePosition(Address pc) {
+  int distance = kMaxInt;
+  int position = RelocInfo::kNoPosition;  // Initially no position found.
+  // Run through all the relocation info to find the best matching source
+  // position. All the code needs to be considered as the sequence of the
+  // instructions in the code does not necessarily follow the same order as the
+  // source.
+  RelocIterator it(this, RelocInfo::kPositionMask);
+  while (!it.done()) {
+    // Only consider position entries that precede the given pc.
+    if (it.rinfo()->pc() < pc) {
+      // Get position and distance.
+      int dist = pc - it.rinfo()->pc();
+      int pos = it.rinfo()->data();
+      // If this position is closer than the current candidate, or if it has
+      // the same distance as the current candidate and the position is
+      // higher, then this position is the new candidate.
+      if ((dist < distance) ||
+          (dist == distance && pos > position)) {
+        position = pos;
+        distance = dist;
+      }
+    }
+    it.next();
+  }
+  return position;
+}
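+
+// For example, with position entries at pc offsets 10 (position 7), 20
+// (position 3) and 40 (position 9), a query for pc 35 only considers the
+// first two entries and returns position 3, the entry closest before the
+// given pc.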
+
+
+// Same as Code::SourcePosition above except it only looks for statement
+// positions.
+int Code::SourceStatementPosition(Address pc) {
+  // First find the position as close as possible using all position
+  // information.
+  int position = SourcePosition(pc);
+  // Now find the closest statement position before the position.
+  int statement_position = 0;
+  RelocIterator it(this, RelocInfo::kPositionMask);
+  while (!it.done()) {
+    if (RelocInfo::IsStatementPosition(it.rinfo()->rmode())) {
+      int p = it.rinfo()->data();
+      if (statement_position < p && p <= position) {
+        statement_position = p;
+      }
+    }
+    it.next();
+  }
+  return statement_position;
+}
+
+
+#ifdef ENABLE_DISASSEMBLER
+// Identify kind of code.
+const char* Code::Kind2String(Kind kind) {
+  switch (kind) {
+    case FUNCTION: return "FUNCTION";
+    case STUB: return "STUB";
+    case BUILTIN: return "BUILTIN";
+    case LOAD_IC: return "LOAD_IC";
+    case KEYED_LOAD_IC: return "KEYED_LOAD_IC";
+    case STORE_IC: return "STORE_IC";
+    case KEYED_STORE_IC: return "KEYED_STORE_IC";
+    case CALL_IC: return "CALL_IC";
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+const char* Code::ICState2String(InlineCacheState state) {
+  switch (state) {
+    case UNINITIALIZED: return "UNINITIALIZED";
+    case PREMONOMORPHIC: return "PREMONOMORPHIC";
+    case MONOMORPHIC: return "MONOMORPHIC";
+    case MONOMORPHIC_PROTOTYPE_FAILURE: return "MONOMORPHIC_PROTOTYPE_FAILURE";
+    case MEGAMORPHIC: return "MEGAMORPHIC";
+    case DEBUG_BREAK: return "DEBUG_BREAK";
+    case DEBUG_PREPARE_STEP_IN: return "DEBUG_PREPARE_STEP_IN";
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+const char* Code::PropertyType2String(PropertyType type) {
+  switch (type) {
+    case NORMAL: return "NORMAL";
+    case FIELD: return "FIELD";
+    case CONSTANT_FUNCTION: return "CONSTANT_FUNCTION";
+    case CALLBACKS: return "CALLBACKS";
+    case INTERCEPTOR: return "INTERCEPTOR";
+    case MAP_TRANSITION: return "MAP_TRANSITION";
+    case CONSTANT_TRANSITION: return "CONSTANT_TRANSITION";
+    case NULL_DESCRIPTOR: return "NULL_DESCRIPTOR";
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+void Code::Disassemble(const char* name) {
+  PrintF("kind = %s\n", Kind2String(kind()));
+  if (is_inline_cache_stub()) {
+    PrintF("ic_state = %s\n", ICState2String(ic_state()));
+    PrintF("ic_in_loop = %d\n", ic_in_loop() == IN_LOOP);
+    if (ic_state() == MONOMORPHIC) {
+      PrintF("type = %s\n", PropertyType2String(type()));
+    }
+  }
+  if ((name != NULL) && (name[0] != '\0')) {
+    PrintF("name = %s\n", name);
+  }
+
+  PrintF("Instructions (size = %d)\n", instruction_size());
+  Disassembler::Decode(NULL, this);
+  PrintF("\n");
+
+  PrintF("RelocInfo (size = %d)\n", relocation_size());
+  for (RelocIterator it(this); !it.done(); it.next())
+    it.rinfo()->Print();
+  PrintF("\n");
+}
+#endif  // ENABLE_DISASSEMBLER
+
+
+void JSObject::SetFastElements(FixedArray* elems) {
+  // We should never end in here with a pixel array.
+  ASSERT(!HasPixelElements());
+#ifdef DEBUG
+  // Check the provided array is filled with the_hole.
+  uint32_t len = static_cast<uint32_t>(elems->length());
+  for (uint32_t i = 0; i < len; i++) ASSERT(elems->get(i)->IsTheHole());
+#endif
+  WriteBarrierMode mode = elems->GetWriteBarrierMode();
+  switch (GetElementsKind()) {
+    case FAST_ELEMENTS: {
+      FixedArray* old_elements = FixedArray::cast(elements());
+      uint32_t old_length = static_cast<uint32_t>(old_elements->length());
+      // Fill out the new array with this content and array holes.
+      for (uint32_t i = 0; i < old_length; i++) {
+        elems->set(i, old_elements->get(i), mode);
+      }
+      break;
+    }
+    case DICTIONARY_ELEMENTS: {
+      NumberDictionary* dictionary = NumberDictionary::cast(elements());
+      for (int i = 0; i < dictionary->Capacity(); i++) {
+        Object* key = dictionary->KeyAt(i);
+        if (key->IsNumber()) {
+          uint32_t entry = static_cast<uint32_t>(key->Number());
+          elems->set(entry, dictionary->ValueAt(i), mode);
+        }
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+  set_elements(elems);
+}
+
+
+Object* JSObject::SetSlowElements(Object* len) {
+  // We should never end in here with a pixel array.
+  ASSERT(!HasPixelElements());
+
+  uint32_t new_length = static_cast<uint32_t>(len->Number());
+
+  switch (GetElementsKind()) {
+    case FAST_ELEMENTS: {
+      // Make sure we never try to shrink dense arrays into sparse arrays.
+      ASSERT(static_cast<uint32_t>(FixedArray::cast(elements())->length()) <=
+                                   new_length);
+      Object* obj = NormalizeElements();
+      if (obj->IsFailure()) return obj;
+
+      // Update length for JSArrays.
+      if (IsJSArray()) JSArray::cast(this)->set_length(len);
+      break;
+    }
+    case DICTIONARY_ELEMENTS: {
+      if (IsJSArray()) {
+        uint32_t old_length =
+            static_cast<uint32_t>(JSArray::cast(this)->length()->Number());
+        element_dictionary()->RemoveNumberEntries(new_length, old_length);
+        JSArray::cast(this)->set_length(len);
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+  return this;
+}
+
+
+Object* JSArray::Initialize(int capacity) {
+  ASSERT(capacity >= 0);
+  set_length(Smi::FromInt(0), SKIP_WRITE_BARRIER);
+  FixedArray* new_elements;
+  if (capacity == 0) {
+    new_elements = Heap::empty_fixed_array();
+  } else {
+    Object* obj = Heap::AllocateFixedArrayWithHoles(capacity);
+    if (obj->IsFailure()) return obj;
+    new_elements = FixedArray::cast(obj);
+  }
+  set_elements(new_elements);
+  return this;
+}
+
+
+void JSArray::Expand(int required_size) {
+  Handle<JSArray> self(this);
+  Handle<FixedArray> old_backing(FixedArray::cast(elements()));
+  int old_size = old_backing->length();
+  // Doubling in size would be overkill, but leave some slack to avoid
+  // constantly growing.
+  int new_size = required_size + (required_size >> 3);
+  Handle<FixedArray> new_backing = Factory::NewFixedArray(new_size);
+  // Can't use this any more now because we may have had a GC!
+  for (int i = 0; i < old_size; i++) new_backing->set(i, old_backing->get(i));
+  self->SetContent(*new_backing);
+}
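+
+// E.g. a required_size of 64 yields a new backing store of 64 + 8 = 72
+// elements, leaving 12.5% slack for further growth.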
+
+
+// Computes the new capacity when expanding the elements of a JSObject.
+static int NewElementsCapacity(int old_capacity) {
+  // (old_capacity + 50%) + 16
+  return old_capacity + (old_capacity >> 1) + 16;
+}
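+
+// E.g. an old capacity of 8 grows to 8 + 4 + 16 = 28, and an old capacity of
+// 100 grows to 100 + 50 + 16 = 166: small arrays grow by a fixed chunk and
+// large ones by roughly 50%.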
+
+
+static Object* ArrayLengthRangeError() {
+  HandleScope scope;
+  return Top::Throw(*Factory::NewRangeError("invalid_array_length",
+                                            HandleVector<Object>(NULL, 0)));
+}
+
+
+Object* JSObject::SetElementsLength(Object* len) {
+  // We should never end in here with a pixel array.
+  ASSERT(!HasPixelElements());
+
+  Object* smi_length = len->ToSmi();
+  if (smi_length->IsSmi()) {
+    int value = Smi::cast(smi_length)->value();
+    if (value < 0) return ArrayLengthRangeError();
+    switch (GetElementsKind()) {
+      case FAST_ELEMENTS: {
+        int old_capacity = FixedArray::cast(elements())->length();
+        if (value <= old_capacity) {
+          if (IsJSArray()) {
+            int old_length = FastD2I(JSArray::cast(this)->length()->Number());
+            // NOTE: We may be able to optimize this by removing the
+            // last part of the elements backing storage array and
+            // setting the capacity to the new size.
+            for (int i = value; i < old_length; i++) {
+              FixedArray::cast(elements())->set_the_hole(i);
+            }
+            JSArray::cast(this)->set_length(smi_length, SKIP_WRITE_BARRIER);
+          }
+          return this;
+        }
+        int min = NewElementsCapacity(old_capacity);
+        int new_capacity = value > min ? value : min;
+        if (new_capacity <= kMaxFastElementsLength ||
+            !ShouldConvertToSlowElements(new_capacity)) {
+          Object* obj = Heap::AllocateFixedArrayWithHoles(new_capacity);
+          if (obj->IsFailure()) return obj;
+          if (IsJSArray()) JSArray::cast(this)->set_length(smi_length,
+                                                           SKIP_WRITE_BARRIER);
+          SetFastElements(FixedArray::cast(obj));
+          return this;
+        }
+        break;
+      }
+      case DICTIONARY_ELEMENTS: {
+        if (IsJSArray()) {
+          if (value == 0) {
+            // If the length of a slow array is reset to zero, we clear
+            // the array and flush backing storage. This has the added
+            // benefit that the array returns to fast mode.
+            initialize_elements();
+          } else {
+            // Remove deleted elements.
+            uint32_t old_length =
+                static_cast<uint32_t>(JSArray::cast(this)->length()->Number());
+            element_dictionary()->RemoveNumberEntries(value, old_length);
+          }
+          JSArray::cast(this)->set_length(smi_length, SKIP_WRITE_BARRIER);
+        }
+        return this;
+      }
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+
+  // General slow case.
+  if (len->IsNumber()) {
+    uint32_t length;
+    if (Array::IndexFromObject(len, &length)) {
+      return SetSlowElements(len);
+    } else {
+      return ArrayLengthRangeError();
+    }
+  }
+
+  // len is not a number so make the array size one and
+  // set only element to len.
+  Object* obj = Heap::AllocateFixedArray(1);
+  if (obj->IsFailure()) return obj;
+  FixedArray::cast(obj)->set(0, len);
+  if (IsJSArray()) JSArray::cast(this)->set_length(Smi::FromInt(1),
+                                                   SKIP_WRITE_BARRIER);
+  set_elements(FixedArray::cast(obj));
+  return this;
+}
+
+
+bool JSObject::HasElementPostInterceptor(JSObject* receiver, uint32_t index) {
+  switch (GetElementsKind()) {
+    case FAST_ELEMENTS: {
+      uint32_t length = IsJSArray() ?
+          static_cast<uint32_t>
+              (Smi::cast(JSArray::cast(this)->length())->value()) :
+          static_cast<uint32_t>(FixedArray::cast(elements())->length());
+      if ((index < length) &&
+          !FixedArray::cast(elements())->get(index)->IsTheHole()) {
+        return true;
+      }
+      break;
+    }
+    case PIXEL_ELEMENTS: {
+      // TODO(iposva): Add testcase.
+      PixelArray* pixels = PixelArray::cast(elements());
+      if (index < static_cast<uint32_t>(pixels->length())) {
+        return true;
+      }
+      break;
+    }
+    case DICTIONARY_ELEMENTS: {
+      if (element_dictionary()->FindEntry(index)
+          != NumberDictionary::kNotFound) {
+        return true;
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+
+  // Handle [] on String objects.
+  if (this->IsStringObjectWithCharacterAt(index)) return true;
+
+  Object* pt = GetPrototype();
+  if (pt == Heap::null_value()) return false;
+  return JSObject::cast(pt)->HasElementWithReceiver(receiver, index);
+}
+
+
+bool JSObject::HasElementWithInterceptor(JSObject* receiver, uint32_t index) {
+  // Make sure that the top context does not change when doing
+  // callbacks or interceptor calls.
+  AssertNoContextChange ncc;
+  HandleScope scope;
+  Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
+  Handle<JSObject> receiver_handle(receiver);
+  Handle<JSObject> holder_handle(this);
+  CustomArguments args(interceptor->data(), receiver, this);
+  v8::AccessorInfo info(args.end());
+  if (!interceptor->query()->IsUndefined()) {
+    v8::IndexedPropertyQuery query =
+        v8::ToCData<v8::IndexedPropertyQuery>(interceptor->query());
+    LOG(ApiIndexedPropertyAccess("interceptor-indexed-has", this, index));
+    v8::Handle<v8::Boolean> result;
+    {
+      // Leaving JavaScript.
+      VMState state(EXTERNAL);
+      result = query(index, info);
+    }
+    if (!result.IsEmpty()) return result->IsTrue();
+  } else if (!interceptor->getter()->IsUndefined()) {
+    v8::IndexedPropertyGetter getter =
+        v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
+    LOG(ApiIndexedPropertyAccess("interceptor-indexed-has-get", this, index));
+    v8::Handle<v8::Value> result;
+    {
+      // Leaving JavaScript.
+      VMState state(EXTERNAL);
+      result = getter(index, info);
+    }
+    if (!result.IsEmpty()) return true;
+  }
+  return holder_handle->HasElementPostInterceptor(*receiver_handle, index);
+}
+
+
+bool JSObject::HasLocalElement(uint32_t index) {
+  // Check access rights if needed.
+  if (IsAccessCheckNeeded() &&
+      !Top::MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
+    Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+    return false;
+  }
+
+  // Check for lookup interceptor
+  if (HasIndexedInterceptor()) {
+    return HasElementWithInterceptor(this, index);
+  }
+
+  // Handle [] on String objects.
+  if (this->IsStringObjectWithCharacterAt(index)) return true;
+
+  switch (GetElementsKind()) {
+    case FAST_ELEMENTS: {
+      uint32_t length = IsJSArray() ?
+          static_cast<uint32_t>
+              (Smi::cast(JSArray::cast(this)->length())->value()) :
+          static_cast<uint32_t>(FixedArray::cast(elements())->length());
+      return (index < length) &&
+          !FixedArray::cast(elements())->get(index)->IsTheHole();
+    }
+    case PIXEL_ELEMENTS: {
+      PixelArray* pixels = PixelArray::cast(elements());
+      return (index < static_cast<uint32_t>(pixels->length()));
+    }
+    case DICTIONARY_ELEMENTS: {
+      return element_dictionary()->FindEntry(index)
+          != NumberDictionary::kNotFound;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+  UNREACHABLE();
+  return false;
+}
+
+
+bool JSObject::HasElementWithReceiver(JSObject* receiver, uint32_t index) {
+  // Check access rights if needed.
+  if (IsAccessCheckNeeded() &&
+      !Top::MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
+    Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+    return false;
+  }
+
+  // Check for lookup interceptor
+  if (HasIndexedInterceptor()) {
+    return HasElementWithInterceptor(receiver, index);
+  }
+
+  switch (GetElementsKind()) {
+    case FAST_ELEMENTS: {
+      uint32_t length = IsJSArray() ?
+          static_cast<uint32_t>
+              (Smi::cast(JSArray::cast(this)->length())->value()) :
+          static_cast<uint32_t>(FixedArray::cast(elements())->length());
+      if ((index < length) &&
+          !FixedArray::cast(elements())->get(index)->IsTheHole()) return true;
+      break;
+    }
+    case PIXEL_ELEMENTS: {
+      PixelArray* pixels = PixelArray::cast(elements());
+      if (index < static_cast<uint32_t>(pixels->length())) {
+        return true;
+      }
+      break;
+    }
+    case DICTIONARY_ELEMENTS: {
+      if (element_dictionary()->FindEntry(index)
+          != NumberDictionary::kNotFound) {
+        return true;
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+
+  // Handle [] on String objects.
+  if (this->IsStringObjectWithCharacterAt(index)) return true;
+
+  Object* pt = GetPrototype();
+  if (pt == Heap::null_value()) return false;
+  return JSObject::cast(pt)->HasElementWithReceiver(receiver, index);
+}
+
+
+Object* JSObject::SetElementWithInterceptor(uint32_t index, Object* value) {
+  // Make sure that the top context does not change when doing
+  // callbacks or interceptor calls.
+  AssertNoContextChange ncc;
+  HandleScope scope;
+  Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
+  Handle<JSObject> this_handle(this);
+  Handle<Object> value_handle(value);
+  if (!interceptor->setter()->IsUndefined()) {
+    v8::IndexedPropertySetter setter =
+        v8::ToCData<v8::IndexedPropertySetter>(interceptor->setter());
+    LOG(ApiIndexedPropertyAccess("interceptor-indexed-set", this, index));
+    CustomArguments args(interceptor->data(), this, this);
+    v8::AccessorInfo info(args.end());
+    v8::Handle<v8::Value> result;
+    {
+      // Leaving JavaScript.
+      VMState state(EXTERNAL);
+      result = setter(index, v8::Utils::ToLocal(value_handle), info);
+    }
+    RETURN_IF_SCHEDULED_EXCEPTION();
+    if (!result.IsEmpty()) return *value_handle;
+  }
+  Object* raw_result =
+      this_handle->SetElementWithoutInterceptor(index, *value_handle);
+  RETURN_IF_SCHEDULED_EXCEPTION();
+  return raw_result;
+}
+
+
+// Adding n elements in fast case is O(n*n).
+// Note: revisit design to have dual undefined values to capture absent
+// elements.
+Object* JSObject::SetFastElement(uint32_t index, Object* value) {
+  ASSERT(HasFastElements());
+
+  FixedArray* elms = FixedArray::cast(elements());
+  uint32_t elms_length = static_cast<uint32_t>(elms->length());
+
+  if (!IsJSArray() && (index >= elms_length || elms->get(index)->IsTheHole())) {
+    Object* setter = LookupCallbackSetterInPrototypes(index);
+    if (setter->IsJSFunction()) {
+      return SetPropertyWithDefinedSetter(JSFunction::cast(setter), value);
+    }
+  }
+
+  // Check whether there is extra space in the fixed array.
+  if (index < elms_length) {
+    elms->set(index, value);
+    if (IsJSArray()) {
+      // Update the length of the array if needed.
+      uint32_t array_length = 0;
+      CHECK(Array::IndexFromObject(JSArray::cast(this)->length(),
+                                   &array_length));
+      if (index >= array_length) {
+        JSArray::cast(this)->set_length(Smi::FromInt(index + 1),
+                                        SKIP_WRITE_BARRIER);
+      }
+    }
+    return value;
+  }
+
+  // Allow gap in fast case.
+  if ((index - elms_length) < kMaxGap) {
+    // Try allocating extra space.
+    int new_capacity = NewElementsCapacity(index+1);
+    if (new_capacity <= kMaxFastElementsLength ||
+        !ShouldConvertToSlowElements(new_capacity)) {
+      ASSERT(static_cast<uint32_t>(new_capacity) > index);
+      Object* obj = Heap::AllocateFixedArrayWithHoles(new_capacity);
+      if (obj->IsFailure()) return obj;
+      SetFastElements(FixedArray::cast(obj));
+      if (IsJSArray()) JSArray::cast(this)->set_length(Smi::FromInt(index + 1),
+                                                       SKIP_WRITE_BARRIER);
+      FixedArray::cast(elements())->set(index, value);
+      return value;
+    }
+  }
+
+  // Otherwise default to slow case.
+  Object* obj = NormalizeElements();
+  if (obj->IsFailure()) return obj;
+  ASSERT(HasDictionaryElements());
+  return SetElement(index, value);
+}
+
+
+Object* JSObject::SetElement(uint32_t index, Object* value) {
+  // Check access rights if needed.
+  if (IsAccessCheckNeeded() &&
+      !Top::MayIndexedAccess(this, index, v8::ACCESS_SET)) {
+    Top::ReportFailedAccessCheck(this, v8::ACCESS_SET);
+    return value;
+  }
+
+  if (IsJSGlobalProxy()) {
+    Object* proto = GetPrototype();
+    if (proto->IsNull()) return value;
+    ASSERT(proto->IsJSGlobalObject());
+    return JSObject::cast(proto)->SetElement(index, value);
+  }
+
+  // Check for lookup interceptor
+  if (HasIndexedInterceptor()) {
+    return SetElementWithInterceptor(index, value);
+  }
+
+  return SetElementWithoutInterceptor(index, value);
+}
+
+
+Object* JSObject::SetElementWithoutInterceptor(uint32_t index, Object* value) {
+  switch (GetElementsKind()) {
+    case FAST_ELEMENTS:
+      // Fast case.
+      return SetFastElement(index, value);
+    case PIXEL_ELEMENTS: {
+      PixelArray* pixels = PixelArray::cast(elements());
+      return pixels->SetValue(index, value);
+    }
+    case DICTIONARY_ELEMENTS: {
+      // Insert element in the dictionary.
+      FixedArray* elms = FixedArray::cast(elements());
+      NumberDictionary* dictionary = NumberDictionary::cast(elms);
+
+      int entry = dictionary->FindEntry(index);
+      if (entry != NumberDictionary::kNotFound) {
+        Object* element = dictionary->ValueAt(entry);
+        PropertyDetails details = dictionary->DetailsAt(entry);
+        if (details.type() == CALLBACKS) {
+          // Only accessors allowed as elements.
+          FixedArray* structure = FixedArray::cast(element);
+          if (structure->get(kSetterIndex)->IsJSFunction()) {
+            JSFunction* setter = JSFunction::cast(structure->get(kSetterIndex));
+            return SetPropertyWithDefinedSetter(setter, value);
+          } else {
+            Handle<Object> self(this);
+            Handle<Object> key(Factory::NewNumberFromUint(index));
+            Handle<Object> args[2] = { key, self };
+            return Top::Throw(*Factory::NewTypeError("no_setter_in_callback",
+                                                     HandleVector(args, 2)));
+          }
+        } else {
+          dictionary->UpdateMaxNumberKey(index);
+          dictionary->ValueAtPut(entry, value);
+        }
+      } else {
+        // Index not already used. Look for an accessor in the prototype chain.
+        if (!IsJSArray()) {
+          Object* setter = LookupCallbackSetterInPrototypes(index);
+          if (setter->IsJSFunction()) {
+            return SetPropertyWithDefinedSetter(JSFunction::cast(setter),
+                                                value);
+          }
+        }
+        Object* result = dictionary->AtNumberPut(index, value);
+        if (result->IsFailure()) return result;
+        if (elms != FixedArray::cast(result)) {
+          set_elements(FixedArray::cast(result));
+        }
+      }
+
+      // Update the array length if this JSObject is an array.
+      if (IsJSArray()) {
+        JSArray* array = JSArray::cast(this);
+        Object* return_value = array->JSArrayUpdateLengthFromIndex(index,
+                                                                   value);
+        if (return_value->IsFailure()) return return_value;
+      }
+
+      // Attempt to put this object back in fast case.
+      if (ShouldConvertToFastElements()) {
+        uint32_t new_length = 0;
+        if (IsJSArray()) {
+          CHECK(Array::IndexFromObject(JSArray::cast(this)->length(),
+                                       &new_length));
+          JSArray::cast(this)->set_length(Smi::FromInt(new_length));
+        } else {
+          new_length = NumberDictionary::cast(elements())->max_number_key() + 1;
+        }
+        Object* obj = Heap::AllocateFixedArrayWithHoles(new_length);
+        if (obj->IsFailure()) return obj;
+        SetFastElements(FixedArray::cast(obj));
+#ifdef DEBUG
+        if (FLAG_trace_normalization) {
+          PrintF("Object elements are fast case again:\n");
+          Print();
+        }
+#endif
+      }
+
+      return value;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+  // All possible cases have been handled above. Add a return to avoid the
+  // complaints from the compiler.
+  UNREACHABLE();
+  return Heap::null_value();
+}
+
+
+Object* JSArray::JSArrayUpdateLengthFromIndex(uint32_t index, Object* value) {
+  uint32_t old_len = 0;
+  CHECK(Array::IndexFromObject(length(), &old_len));
+  // Check to see if we need to update the length. For now, we make
+  // sure that the length stays within 32-bits (unsigned).
+  if (index >= old_len && index != 0xffffffff) {
+    Object* len =
+        Heap::NumberFromDouble(static_cast<double>(index) + 1);
+    if (len->IsFailure()) return len;
+    set_length(len);
+  }
+  return value;
+}
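+
+// For example, a store to index 7 of an array whose length is 3 bumps the
+// length to 8, while a store to index 0xffffffff leaves the length alone,
+// since index + 1 would no longer fit in 32 bits.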
+
+
+Object* JSObject::GetElementPostInterceptor(JSObject* receiver,
+                                            uint32_t index) {
+  // Get element works for both JSObject and JSArray since
+  // JSArray::length cannot change.
+  switch (GetElementsKind()) {
+    case FAST_ELEMENTS: {
+      FixedArray* elms = FixedArray::cast(elements());
+      if (index < static_cast<uint32_t>(elms->length())) {
+        Object* value = elms->get(index);
+        if (!value->IsTheHole()) return value;
+      }
+      break;
+    }
+    case PIXEL_ELEMENTS: {
+      // TODO(iposva): Add testcase and implement.
+      UNIMPLEMENTED();
+      break;
+    }
+    case DICTIONARY_ELEMENTS: {
+      NumberDictionary* dictionary = element_dictionary();
+      int entry = dictionary->FindEntry(index);
+      if (entry != NumberDictionary::kNotFound) {
+        Object* element = dictionary->ValueAt(entry);
+        PropertyDetails details = dictionary->DetailsAt(entry);
+        if (details.type() == CALLBACKS) {
+          // Only accessors allowed as elements.
+          FixedArray* structure = FixedArray::cast(element);
+          Object* getter = structure->get(kGetterIndex);
+          if (getter->IsJSFunction()) {
+            return GetPropertyWithDefinedGetter(receiver,
+                                                JSFunction::cast(getter));
+          } else {
+            // Getter is not a function.
+            return Heap::undefined_value();
+          }
+        }
+        return element;
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+
+  // Continue searching via the prototype chain.
+  Object* pt = GetPrototype();
+  if (pt == Heap::null_value()) return Heap::undefined_value();
+  return pt->GetElementWithReceiver(receiver, index);
+}
+
+
+Object* JSObject::GetElementWithInterceptor(JSObject* receiver,
+                                            uint32_t index) {
+  // Make sure that the top context does not change when doing
+  // callbacks or interceptor calls.
+  AssertNoContextChange ncc;
+  HandleScope scope;
+  Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
+  Handle<JSObject> this_handle(receiver);
+  Handle<JSObject> holder_handle(this);
+
+  if (!interceptor->getter()->IsUndefined()) {
+    v8::IndexedPropertyGetter getter =
+        v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
+    LOG(ApiIndexedPropertyAccess("interceptor-indexed-get", this, index));
+    CustomArguments args(interceptor->data(), receiver, this);
+    v8::AccessorInfo info(args.end());
+    v8::Handle<v8::Value> result;
+    {
+      // Leaving JavaScript.
+      VMState state(EXTERNAL);
+      result = getter(index, info);
+    }
+    RETURN_IF_SCHEDULED_EXCEPTION();
+    if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
+  }
+
+  Object* raw_result =
+      holder_handle->GetElementPostInterceptor(*this_handle, index);
+  RETURN_IF_SCHEDULED_EXCEPTION();
+  return raw_result;
+}
+
+
+Object* JSObject::GetElementWithReceiver(JSObject* receiver, uint32_t index) {
+  // Check access rights if needed.
+  if (IsAccessCheckNeeded() &&
+      !Top::MayIndexedAccess(this, index, v8::ACCESS_GET)) {
+    Top::ReportFailedAccessCheck(this, v8::ACCESS_GET);
+    return Heap::undefined_value();
+  }
+
+  if (HasIndexedInterceptor()) {
+    return GetElementWithInterceptor(receiver, index);
+  }
+
+  // Get element works for both JSObject and JSArray since
+  // JSArray::length cannot change.
+  switch (GetElementsKind()) {
+    case FAST_ELEMENTS: {
+      FixedArray* elms = FixedArray::cast(elements());
+      if (index < static_cast<uint32_t>(elms->length())) {
+        Object* value = elms->get(index);
+        if (!value->IsTheHole()) return value;
+      }
+      break;
+    }
+    case PIXEL_ELEMENTS: {
+      PixelArray* pixels = PixelArray::cast(elements());
+      if (index < static_cast<uint32_t>(pixels->length())) {
+        uint8_t value = pixels->get(index);
+        return Smi::FromInt(value);
+      }
+      break;
+    }
+    case DICTIONARY_ELEMENTS: {
+      NumberDictionary* dictionary = element_dictionary();
+      int entry = dictionary->FindEntry(index);
+      if (entry != NumberDictionary::kNotFound) {
+        Object* element = dictionary->ValueAt(entry);
+        PropertyDetails details = dictionary->DetailsAt(entry);
+        if (details.type() == CALLBACKS) {
+          // Only accessors allowed as elements.
+          FixedArray* structure = FixedArray::cast(element);
+          Object* getter = structure->get(kGetterIndex);
+          if (getter->IsJSFunction()) {
+            return GetPropertyWithDefinedGetter(receiver,
+                                                JSFunction::cast(getter));
+          } else {
+            // Getter is not a function.
+            return Heap::undefined_value();
+          }
+        }
+        return element;
+      }
+      break;
+    }
+  }
+
+  Object* pt = GetPrototype();
+  if (pt == Heap::null_value()) return Heap::undefined_value();
+  return pt->GetElementWithReceiver(receiver, index);
+}
+
+
+bool JSObject::HasDenseElements() {
+  int capacity = 0;
+  int number_of_elements = 0;
+
+  switch (GetElementsKind()) {
+    case FAST_ELEMENTS: {
+      FixedArray* elms = FixedArray::cast(elements());
+      capacity = elms->length();
+      for (int i = 0; i < capacity; i++) {
+        if (!elms->get(i)->IsTheHole()) number_of_elements++;
+      }
+      break;
+    }
+    case PIXEL_ELEMENTS: {
+      return true;
+    }
+    case DICTIONARY_ELEMENTS: {
+      NumberDictionary* dictionary = NumberDictionary::cast(elements());
+      capacity = dictionary->Capacity();
+      number_of_elements = dictionary->NumberOfElements();
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+
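+  // An empty backing store counts as dense; otherwise require more than
+  // half of the capacity to be occupied.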
+  if (capacity == 0) return true;
+  return (number_of_elements > (capacity / 2));
+}
+
+
+bool JSObject::ShouldConvertToSlowElements(int new_capacity) {
+  ASSERT(HasFastElements());
+  // Keep the array in fast case if the current backing storage is
+  // almost filled and if the new capacity is no more than twice the
+  // old capacity.
+  int elements_length = FixedArray::cast(elements())->length();
+  return !HasDenseElements() || ((new_capacity / 2) > elements_length);
+}
+
+
+bool JSObject::ShouldConvertToFastElements() {
+  ASSERT(HasDictionaryElements());
+  NumberDictionary* dictionary = NumberDictionary::cast(elements());
+  // If the elements are sparse, we should not go back to fast case.
+  if (!HasDenseElements()) return false;
+  // If an element has been added at a very high index in the elements
+  // dictionary, we cannot go back to fast case.
+  if (dictionary->requires_slow_elements()) return false;
+  // An object requiring access checks is never allowed to have fast
+  // elements.  If it had fast elements we would skip security checks.
+  if (IsAccessCheckNeeded()) return false;
+  // If the dictionary backing storage takes up roughly half as much
+  // space as a fast-case backing storage would the array should have
+  // fast elements.
+  uint32_t length = 0;
+  if (IsJSArray()) {
+    CHECK(Array::IndexFromObject(JSArray::cast(this)->length(), &length));
+  } else {
+    length = dictionary->max_number_key();
+  }
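+  // Equivalently, Capacity() * kEntrySize >= length / 2: the dictionary
+  // already occupies at least half the words that a fast backing store of
+  // 'length' elements would.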
+  return static_cast<uint32_t>(dictionary->Capacity()) >=
+      (length / (2 * NumberDictionary::kEntrySize));
+}
+
+
+// Certain compilers instantiate function templates once they see the
+// definitions of the other template functions in the class. This
+// requires the template functions to be kept together, so even though
+// this function belongs in objects-debug.cc, we keep it here instead
+// to satisfy those compilers.
+#ifdef DEBUG
+template<typename Shape, typename Key>
+void Dictionary<Shape, Key>::Print() {
+  int capacity = HashTable<Shape, Key>::Capacity();
+  for (int i = 0; i < capacity; i++) {
+    Object* k = HashTable<Shape, Key>::KeyAt(i);
+    if (HashTable<Shape, Key>::IsKey(k)) {
+      PrintF(" ");
+      if (k->IsString()) {
+        String::cast(k)->StringPrint();
+      } else {
+        k->ShortPrint();
+      }
+      PrintF(": ");
+      ValueAt(i)->ShortPrint();
+      PrintF("\n");
+    }
+  }
+}
+#endif
+
+
+template<typename Shape, typename Key>
+void Dictionary<Shape, Key>::CopyValuesTo(FixedArray* elements) {
+  int pos = 0;
+  int capacity = HashTable<Shape, Key>::Capacity();
+  WriteBarrierMode mode = elements->GetWriteBarrierMode();
+  for (int i = 0; i < capacity; i++) {
+    Object* k = Dictionary<Shape, Key>::KeyAt(i);
+    if (Dictionary<Shape, Key>::IsKey(k)) {
+      elements->set(pos++, ValueAt(i), mode);
+    }
+  }
+  ASSERT(pos == elements->length());
+}
+
+
+InterceptorInfo* JSObject::GetNamedInterceptor() {
+  ASSERT(map()->has_named_interceptor());
+  JSFunction* constructor = JSFunction::cast(map()->constructor());
+  Object* template_info = constructor->shared()->function_data();
+  Object* result =
+      FunctionTemplateInfo::cast(template_info)->named_property_handler();
+  return InterceptorInfo::cast(result);
+}
+
+
+InterceptorInfo* JSObject::GetIndexedInterceptor() {
+  ASSERT(map()->has_indexed_interceptor());
+  JSFunction* constructor = JSFunction::cast(map()->constructor());
+  Object* template_info = constructor->shared()->function_data();
+  Object* result =
+      FunctionTemplateInfo::cast(template_info)->indexed_property_handler();
+  return InterceptorInfo::cast(result);
+}
+
+
+Object* JSObject::GetPropertyPostInterceptor(JSObject* receiver,
+                                             String* name,
+                                             PropertyAttributes* attributes) {
+  // Check local property in holder, ignore interceptor.
+  LookupResult result;
+  LocalLookupRealNamedProperty(name, &result);
+  if (result.IsValid()) return GetProperty(receiver, &result, name, attributes);
+  // Continue searching via the prototype chain.
+  Object* pt = GetPrototype();
+  *attributes = ABSENT;
+  if (pt == Heap::null_value()) return Heap::undefined_value();
+  return pt->GetPropertyWithReceiver(receiver, name, attributes);
+}
+
+
+Object* JSObject::GetPropertyWithInterceptor(
+    JSObject* receiver,
+    String* name,
+    PropertyAttributes* attributes) {
+  InterceptorInfo* interceptor = GetNamedInterceptor();
+  HandleScope scope;
+  Handle<JSObject> receiver_handle(receiver);
+  Handle<JSObject> holder_handle(this);
+  Handle<String> name_handle(name);
+
+  if (!interceptor->getter()->IsUndefined()) {
+    v8::NamedPropertyGetter getter =
+        v8::ToCData<v8::NamedPropertyGetter>(interceptor->getter());
+    LOG(ApiNamedPropertyAccess("interceptor-named-get", *holder_handle, name));
+    CustomArguments args(interceptor->data(), receiver, this);
+    v8::AccessorInfo info(args.end());
+    v8::Handle<v8::Value> result;
+    {
+      // Leaving JavaScript.
+      VMState state(EXTERNAL);
+      result = getter(v8::Utils::ToLocal(name_handle), info);
+    }
+    RETURN_IF_SCHEDULED_EXCEPTION();
+    if (!result.IsEmpty()) {
+      *attributes = NONE;
+      return *v8::Utils::OpenHandle(*result);
+    }
+  }
+
+  Object* result = holder_handle->GetPropertyPostInterceptor(
+      *receiver_handle,
+      *name_handle,
+      attributes);
+  RETURN_IF_SCHEDULED_EXCEPTION();
+  return result;
+}
+
+
+bool JSObject::HasRealNamedProperty(String* key) {
+  // Check access rights if needed.
+  if (IsAccessCheckNeeded() &&
+      !Top::MayNamedAccess(this, key, v8::ACCESS_HAS)) {
+    Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+    return false;
+  }
+
+  LookupResult result;
+  LocalLookupRealNamedProperty(key, &result);
+  if (result.IsValid()) {
+    switch (result.type()) {
+      case NORMAL:    // fall through.
+      case FIELD:     // fall through.
+      case CALLBACKS:  // fall through.
+      case CONSTANT_FUNCTION:
+        return true;
+      case INTERCEPTOR:
+      case MAP_TRANSITION:
+      case CONSTANT_TRANSITION:
+      case NULL_DESCRIPTOR:
+        return false;
+      default:
+        UNREACHABLE();
+    }
+  }
+
+  return false;
+}
+
+
+bool JSObject::HasRealElementProperty(uint32_t index) {
+  // Check access rights if needed.
+  if (IsAccessCheckNeeded() &&
+      !Top::MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
+    Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+    return false;
+  }
+
+  // Handle [] on String objects.
+  if (this->IsStringObjectWithCharacterAt(index)) return true;
+
+  switch (GetElementsKind()) {
+    case FAST_ELEMENTS: {
+      uint32_t length = IsJSArray() ?
+          static_cast<uint32_t>(
+              Smi::cast(JSArray::cast(this)->length())->value()) :
+          static_cast<uint32_t>(FixedArray::cast(elements())->length());
+      return (index < length) &&
+          !FixedArray::cast(elements())->get(index)->IsTheHole();
+    }
+    case PIXEL_ELEMENTS: {
+      PixelArray* pixels = PixelArray::cast(elements());
+      return index < static_cast<uint32_t>(pixels->length());
+    }
+    case DICTIONARY_ELEMENTS: {
+      return element_dictionary()->FindEntry(index)
+          != NumberDictionary::kNotFound;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+  // All possibilities have been handled above already.
+  UNREACHABLE();
+  return false;
+}
+
+
+bool JSObject::HasRealNamedCallbackProperty(String* key) {
+  // Check access rights if needed.
+  if (IsAccessCheckNeeded() &&
+      !Top::MayNamedAccess(this, key, v8::ACCESS_HAS)) {
+    Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+    return false;
+  }
+
+  LookupResult result;
+  LocalLookupRealNamedProperty(key, &result);
+  return result.IsValid() && (result.type() == CALLBACKS);
+}
+
+
+int JSObject::NumberOfLocalProperties(PropertyAttributes filter) {
+  if (HasFastProperties()) {
+    DescriptorArray* descs = map()->instance_descriptors();
+    int result = 0;
+    for (int i = 0; i < descs->number_of_descriptors(); i++) {
+      PropertyDetails details = descs->GetDetails(i);
+      if (details.IsProperty() && (details.attributes() & filter) == 0) {
+        result++;
+      }
+    }
+    return result;
+  } else {
+    return property_dictionary()->NumberOfElementsFilterAttributes(filter);
+  }
+}
+
+
+int JSObject::NumberOfEnumProperties() {
+  return NumberOfLocalProperties(static_cast<PropertyAttributes>(DONT_ENUM));
+}
+
+
+void FixedArray::SwapPairs(FixedArray* numbers, int i, int j) {
+  Object* temp = get(i);
+  set(i, get(j));
+  set(j, temp);
+  if (this != numbers) {
+    temp = numbers->get(i);
+    numbers->set(i, numbers->get(j));
+    numbers->set(j, temp);
+  }
+}
+
+
+static void InsertionSortPairs(FixedArray* content,
+                               FixedArray* numbers,
+                               int len) {
+  for (int i = 1; i < len; i++) {
+    int j = i;
+    while (j > 0 &&
+           (NumberToUint32(numbers->get(j - 1)) >
+            NumberToUint32(numbers->get(j)))) {
+      content->SwapPairs(numbers, j - 1, j);
+      j--;
+    }
+  }
+}
+
+
+void HeapSortPairs(FixedArray* content, FixedArray* numbers, int len) {
+  // In-place heap sort.
+  ASSERT(content->length() == numbers->length());
+
+  // Bottom-up max-heap construction.
+  for (int i = 1; i < len; ++i) {
+    int child_index = i;
+    while (child_index > 0) {
+      int parent_index = ((child_index + 1) >> 1) - 1;
+      uint32_t parent_value = NumberToUint32(numbers->get(parent_index));
+      uint32_t child_value = NumberToUint32(numbers->get(child_index));
+      if (parent_value < child_value) {
+        content->SwapPairs(numbers, parent_index, child_index);
+      } else {
+        break;
+      }
+      child_index = parent_index;
+    }
+  }
+
+  // Extract elements and create sorted array.
+  for (int i = len - 1; i > 0; --i) {
+    // Put max element at the back of the array.
+    content->SwapPairs(numbers, 0, i);
+    // Sift down the new top element.
+    int parent_index = 0;
+    while (true) {
+      int child_index = ((parent_index + 1) << 1) - 1;
+      if (child_index >= i) break;
+      uint32_t child1_value = NumberToUint32(numbers->get(child_index));
+      uint32_t child2_value = NumberToUint32(numbers->get(child_index + 1));
+      uint32_t parent_value = NumberToUint32(numbers->get(parent_index));
+      if (child_index + 1 >= i || child1_value > child2_value) {
+        if (parent_value > child1_value) break;
+        content->SwapPairs(numbers, parent_index, child_index);
+        parent_index = child_index;
+      } else {
+        if (parent_value > child2_value) break;
+        content->SwapPairs(numbers, parent_index, child_index + 1);
+        parent_index = child_index + 1;
+      }
+    }
+  }
+}
+
+
+// Sort this array and the numbers as pairs wrt. the (distinct) numbers.
+void FixedArray::SortPairs(FixedArray* numbers, uint32_t len) {
+  ASSERT(this->length() == numbers->length());
+  // For small arrays, simply use insertion sort.
+  if (len <= 10) {
+    InsertionSortPairs(this, numbers, len);
+    return;
+  }
+  // Check the range of indices.
+  uint32_t min_index = NumberToUint32(numbers->get(0));
+  uint32_t max_index = min_index;
+  uint32_t i;
+  for (i = 1; i < len; i++) {
+    if (NumberToUint32(numbers->get(i)) < min_index) {
+      min_index = NumberToUint32(numbers->get(i));
+    } else if (NumberToUint32(numbers->get(i)) > max_index) {
+      max_index = NumberToUint32(numbers->get(i));
+    }
+  }
+  if (max_index - min_index + 1 == len) {
+    // Indices form a contiguous range, unless there are duplicates.
+    // Do an in-place linear time sort assuming distinct numbers, but
+    // avoid hanging in case they are not.
+    for (i = 0; i < len; i++) {
+      uint32_t p;
+      uint32_t j = 0;
+      // While the current element at i is not at its correct position p,
+      // swap the elements at these two positions.
+      while ((p = NumberToUint32(numbers->get(i)) - min_index) != i &&
+             j++ < len) {
+        SwapPairs(numbers, i, p);
+      }
+    }
+  } else {
+    HeapSortPairs(this, numbers, len);
+    return;
+  }
+}
+
+
+// Fill in the names of local properties into the supplied storage. The main
+// purpose of this function is to provide reflection information for the object
+// mirrors.
+void JSObject::GetLocalPropertyNames(FixedArray* storage, int index) {
+  ASSERT(storage->length() >= (NumberOfLocalProperties(NONE) - index));
+  if (HasFastProperties()) {
+    DescriptorArray* descs = map()->instance_descriptors();
+    for (int i = 0; i < descs->number_of_descriptors(); i++) {
+      if (descs->IsProperty(i)) storage->set(index++, descs->GetKey(i));
+    }
+    ASSERT(storage->length() >= index);
+  } else {
+    property_dictionary()->CopyKeysTo(storage);
+  }
+}
+
+
+int JSObject::NumberOfLocalElements(PropertyAttributes filter) {
+  return GetLocalElementKeys(NULL, filter);
+}
+
+
+int JSObject::NumberOfEnumElements() {
+  return NumberOfLocalElements(static_cast<PropertyAttributes>(DONT_ENUM));
+}
+
+
+int JSObject::GetLocalElementKeys(FixedArray* storage,
+                                  PropertyAttributes filter) {
+  int counter = 0;
+  switch (GetElementsKind()) {
+    case FAST_ELEMENTS: {
+      int length = IsJSArray() ?
+          Smi::cast(JSArray::cast(this)->length())->value() :
+          FixedArray::cast(elements())->length();
+      for (int i = 0; i < length; i++) {
+        if (!FixedArray::cast(elements())->get(i)->IsTheHole()) {
+          if (storage != NULL) {
+            storage->set(counter, Smi::FromInt(i), SKIP_WRITE_BARRIER);
+          }
+          counter++;
+        }
+      }
+      ASSERT(!storage || storage->length() >= counter);
+      break;
+    }
+    case PIXEL_ELEMENTS: {
+      int length = PixelArray::cast(elements())->length();
+      while (counter < length) {
+        if (storage != NULL) {
+          storage->set(counter, Smi::FromInt(counter), SKIP_WRITE_BARRIER);
+        }
+        counter++;
+      }
+      ASSERT(!storage || storage->length() >= counter);
+      break;
+    }
+    case DICTIONARY_ELEMENTS: {
+      if (storage != NULL) {
+        element_dictionary()->CopyKeysTo(storage, filter);
+      }
+      counter = element_dictionary()->NumberOfElementsFilterAttributes(filter);
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+
+  if (this->IsJSValue()) {
+    Object* val = JSValue::cast(this)->value();
+    if (val->IsString()) {
+      String* str = String::cast(val);
+      if (storage) {
+        for (int i = 0; i < str->length(); i++) {
+          storage->set(counter + i, Smi::FromInt(i), SKIP_WRITE_BARRIER);
+        }
+      }
+      counter += str->length();
+    }
+  }
+  ASSERT(!storage || storage->length() == counter);
+  return counter;
+}
+
+
+int JSObject::GetEnumElementKeys(FixedArray* storage) {
+  return GetLocalElementKeys(storage,
+                             static_cast<PropertyAttributes>(DONT_ENUM));
+}
+
+
+bool NumberDictionaryShape::IsMatch(uint32_t key, Object* other) {
+  ASSERT(other->IsNumber());
+  return key == static_cast<uint32_t>(other->Number());
+}
+
+
+uint32_t NumberDictionaryShape::Hash(uint32_t key) {
+  return ComputeIntegerHash(key);
+}
+
+
+uint32_t NumberDictionaryShape::HashForObject(uint32_t key, Object* other) {
+  ASSERT(other->IsNumber());
+  return ComputeIntegerHash(static_cast<uint32_t>(other->Number()));
+}
+
+
+Object* NumberDictionaryShape::AsObject(uint32_t key) {
+  return Heap::NumberFromUint32(key);
+}
+
+
+bool StringDictionaryShape::IsMatch(String* key, Object* other) {
+  // We know that the hash of every entry in a hash table has already been
+  // computed.  Use that knowledge for a fast negative check.
+  if (key->Hash() != String::cast(other)->Hash()) return false;
+  return key->Equals(String::cast(other));
+}
+
+
+uint32_t StringDictionaryShape::Hash(String* key) {
+  return key->Hash();
+}
+
+
+uint32_t StringDictionaryShape::HashForObject(String* key, Object* other) {
+  return String::cast(other)->Hash();
+}
+
+
+Object* StringDictionaryShape::AsObject(String* key) {
+  return key;
+}
+
+
+// StringKey simply carries a string object as key.
+class StringKey : public HashTableKey {
+ public:
+  explicit StringKey(String* string) :
+      string_(string),
+      hash_(HashForObject(string)) { }
+
+  bool IsMatch(Object* string) {
+    // We know that the hash of every entry in a hash table has already been
+    // computed.  Use that knowledge for a fast negative check.
+    if (hash_ != HashForObject(string)) {
+      return false;
+    }
+    return string_->Equals(String::cast(string));
+  }
+
+  uint32_t Hash() { return hash_; }
+
+  uint32_t HashForObject(Object* other) { return String::cast(other)->Hash(); }
+
+  Object* AsObject() { return string_; }
+
+  String* string_;
+  uint32_t hash_;
+};
+
+
+// StringSharedKeys are used as keys in the eval cache.
+class StringSharedKey : public HashTableKey {
+ public:
+  StringSharedKey(String* source, SharedFunctionInfo* shared)
+      : source_(source), shared_(shared) { }
+
+  bool IsMatch(Object* other) {
+    if (!other->IsFixedArray()) return false;
+    FixedArray* pair = FixedArray::cast(other);
+    SharedFunctionInfo* shared = SharedFunctionInfo::cast(pair->get(0));
+    if (shared != shared_) return false;
+    String* source = String::cast(pair->get(1));
+    return source->Equals(source_);
+  }
+
+  static uint32_t StringSharedHashHelper(String* source,
+                                         SharedFunctionInfo* shared) {
+    uint32_t hash = source->Hash();
+    if (shared->HasSourceCode()) {
+      // Instead of using the SharedFunctionInfo pointer in the hash
+      // code computation, we use a combination of the hash of the
+      // script source code and the start position.  We do this to
+      // ensure that the cache entries can survive garbage collection.
+      Script* script = Script::cast(shared->script());
+      hash ^= String::cast(script->source())->Hash();
+      hash += shared->start_position();
+    }
+    return hash;
+  }
+
+  uint32_t Hash() {
+    return StringSharedHashHelper(source_, shared_);
+  }
+
+  uint32_t HashForObject(Object* obj) {
+    FixedArray* pair = FixedArray::cast(obj);
+    SharedFunctionInfo* shared = SharedFunctionInfo::cast(pair->get(0));
+    String* source = String::cast(pair->get(1));
+    return StringSharedHashHelper(source, shared);
+  }
+
+  Object* AsObject() {
+    Object* obj = Heap::AllocateFixedArray(2);
+    if (obj->IsFailure()) return obj;
+    FixedArray* pair = FixedArray::cast(obj);
+    pair->set(0, shared_);
+    pair->set(1, source_);
+    return pair;
+  }
+
+ private:
+  String* source_;
+  SharedFunctionInfo* shared_;
+};
+
+
+// RegExpKey carries the source and flags of a regular expression as key.
+class RegExpKey : public HashTableKey {
+ public:
+  RegExpKey(String* string, JSRegExp::Flags flags)
+      : string_(string),
+        flags_(Smi::FromInt(flags.value())) { }
+
+  bool IsMatch(Object* obj) {
+    FixedArray* val = FixedArray::cast(obj);
+    return string_->Equals(String::cast(val->get(JSRegExp::kSourceIndex)))
+        && (flags_ == val->get(JSRegExp::kFlagsIndex));
+  }
+
+  uint32_t Hash() { return RegExpHash(string_, flags_); }
+
+  Object* AsObject() {
+    // Plain hash maps, which is where regexp keys are used, don't
+    // use this function.
+    UNREACHABLE();
+    return NULL;
+  }
+
+  uint32_t HashForObject(Object* obj) {
+    FixedArray* val = FixedArray::cast(obj);
+    return RegExpHash(String::cast(val->get(JSRegExp::kSourceIndex)),
+                      Smi::cast(val->get(JSRegExp::kFlagsIndex)));
+  }
+
+  static uint32_t RegExpHash(String* string, Smi* flags) {
+    return string->Hash() + flags->value();
+  }
+
+  String* string_;
+  Smi* flags_;
+};
+
+
+// Utf8SymbolKey carries a vector of chars as key.
+class Utf8SymbolKey : public HashTableKey {
+ public:
+  explicit Utf8SymbolKey(Vector<const char> string)
+      : string_(string), length_field_(0) { }
+
+  bool IsMatch(Object* string) {
+    return String::cast(string)->IsEqualTo(string_);
+  }
+
+  uint32_t Hash() {
+    if (length_field_ != 0) return length_field_ >> String::kHashShift;
+    unibrow::Utf8InputBuffer<> buffer(string_.start(),
+                                      static_cast<unsigned>(string_.length()));
+    chars_ = buffer.Length();
+    length_field_ = String::ComputeLengthAndHashField(&buffer, chars_);
+    uint32_t result = length_field_ >> String::kHashShift;
+    ASSERT(result != 0);  // Ensure that the hash value of 0 is never computed.
+    return result;
+  }
+
+  uint32_t HashForObject(Object* other) {
+    return String::cast(other)->Hash();
+  }
+
+  Object* AsObject() {
+    if (length_field_ == 0) Hash();
+    return Heap::AllocateSymbol(string_, chars_, length_field_);
+  }
+
+  Vector<const char> string_;
+  uint32_t length_field_;
+  int chars_;  // Caches the number of characters when computing the hash code.
+};
+
+
+// SymbolKey carries a string/symbol object as key.
+class SymbolKey : public HashTableKey {
+ public:
+  explicit SymbolKey(String* string) : string_(string) { }
+
+  bool IsMatch(Object* string) {
+    return String::cast(string)->Equals(string_);
+  }
+
+  uint32_t Hash() { return string_->Hash(); }
+
+  uint32_t HashForObject(Object* other) {
+    return String::cast(other)->Hash();
+  }
+
+  Object* AsObject() {
+    // If the string is a cons string, attempt to flatten it so that
+    // symbols will most often be flat strings.
+    if (StringShape(string_).IsCons()) {
+      ConsString* cons_string = ConsString::cast(string_);
+      cons_string->TryFlatten();
+      if (cons_string->second()->length() == 0) {
+        string_ = cons_string->first();
+      }
+    }
+    // Transform string to symbol if possible.
+    Map* map = Heap::SymbolMapForString(string_);
+    if (map != NULL) {
+      string_->set_map(map);
+      ASSERT(string_->IsSymbol());
+      return string_;
+    }
+    // Otherwise allocate a new symbol.
+    StringInputBuffer buffer(string_);
+    return Heap::AllocateInternalSymbol(&buffer,
+                                        string_->length(),
+                                        string_->length_field());
+  }
+
+  static uint32_t StringHash(Object* obj) {
+    return String::cast(obj)->Hash();
+  }
+
+  String* string_;
+};
+
+
+template<typename Shape, typename Key>
+void HashTable<Shape, Key>::IteratePrefix(ObjectVisitor* v) {
+  IteratePointers(v, 0, kElementsStartOffset);
+}
+
+
+template<typename Shape, typename Key>
+void HashTable<Shape, Key>::IterateElements(ObjectVisitor* v) {
+  IteratePointers(v,
+                  kElementsStartOffset,
+                  kHeaderSize + length() * kPointerSize);
+}
+
+
+template<typename Shape, typename Key>
+Object* HashTable<Shape, Key>::Allocate(
+    int at_least_space_for) {
+  int capacity = RoundUpToPowerOf2(at_least_space_for);
+  if (capacity < 4) capacity = 4;  // Guarantee min capacity.
+  Object* obj = Heap::AllocateHashTable(EntryToIndex(capacity));
+  if (!obj->IsFailure()) {
+    HashTable::cast(obj)->SetNumberOfElements(0);
+    HashTable::cast(obj)->SetCapacity(capacity);
+  }
+  return obj;
+}
+
+
+// Find entry for key; otherwise return kNotFound.
+template<typename Shape, typename Key>
+int HashTable<Shape, Key>::FindEntry(Key key) {
+  uint32_t nof = NumberOfElements();
+  if (nof == 0) return kNotFound;  // Bail out if empty.
+
+  uint32_t capacity = Capacity();
+  uint32_t hash = Shape::Hash(key);
+  uint32_t entry = GetProbe(hash, 0, capacity);
+
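+  // Probe until a never-used (undefined) slot is reached.  Deleted entries
+  // are marked with null and skipped; counting the occupied slots visited
+  // lets the scan bail out once all live elements have been passed.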
+  Object* element = KeyAt(entry);
+  uint32_t passed_elements = 0;
+  if (!element->IsNull()) {
+    if (!element->IsUndefined() && Shape::IsMatch(key, element)) return entry;
+    if (++passed_elements == nof) return kNotFound;
+  }
+  for (uint32_t i = 1; !element->IsUndefined(); i++) {
+    entry = GetProbe(hash, i, capacity);
+    element = KeyAt(entry);
+    if (!element->IsNull()) {
+      if (!element->IsUndefined() && Shape::IsMatch(key, element)) return entry;
+      if (++passed_elements == nof) return kNotFound;
+    }
+  }
+  return kNotFound;
+}
+
+
+template<typename Shape, typename Key>
+Object* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
+  int capacity = Capacity();
+  int nof = NumberOfElements() + n;
+  // Make sure 50% is free
+  if (nof + (nof >> 1) <= capacity) return this;
+
+  Object* obj = Allocate(nof * 2);
+  if (obj->IsFailure()) return obj;
+  HashTable* table = HashTable::cast(obj);
+  WriteBarrierMode mode = table->GetWriteBarrierMode();
+
+  // Copy prefix to new array.
+  for (int i = kPrefixStartIndex;
+       i < kPrefixStartIndex + Shape::kPrefixSize;
+       i++) {
+    table->set(i, get(i), mode);
+  }
+  // Rehash the elements.
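+  // Note that Shape::HashForObject hashes the stored key objects rather
+  // than the lookup key, which need not match any existing entry.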
+  for (int i = 0; i < capacity; i++) {
+    uint32_t from_index = EntryToIndex(i);
+    Object* k = get(from_index);
+    if (IsKey(k)) {
+      uint32_t hash = Shape::HashForObject(key, k);
+      uint32_t insertion_index =
+          EntryToIndex(table->FindInsertionEntry(hash));
+      for (int j = 0; j < Shape::kEntrySize; j++) {
+        table->set(insertion_index + j, get(from_index + j), mode);
+      }
+    }
+  }
+  table->SetNumberOfElements(NumberOfElements());
+  return table;
+}
+
+
+template<typename Shape, typename Key>
+uint32_t HashTable<Shape, Key>::FindInsertionEntry(uint32_t hash) {
+  uint32_t capacity = Capacity();
+  uint32_t entry = GetProbe(hash, 0, capacity);
+  Object* element = KeyAt(entry);
+
+  for (uint32_t i = 1; !(element->IsUndefined() || element->IsNull()); i++) {
+    entry = GetProbe(hash, i, capacity);
+    element = KeyAt(entry);
+  }
+
+  return entry;
+}
+
+
+// Force instantiation of the template instance classes.
+// Please note this list is compiler dependent.
+
+template class HashTable<SymbolTableShape, HashTableKey*>;
+
+template class HashTable<CompilationCacheShape, HashTableKey*>;
+
+template class HashTable<MapCacheShape, HashTableKey*>;
+
+template class Dictionary<StringDictionaryShape, String*>;
+
+template class Dictionary<NumberDictionaryShape, uint32_t>;
+
+template Object* Dictionary<NumberDictionaryShape, uint32_t>::Allocate(
+    int);
+
+template Object* Dictionary<StringDictionaryShape, String*>::Allocate(
+    int);
+
+template Object* Dictionary<NumberDictionaryShape, uint32_t>::AtPut(
+    uint32_t, Object*);
+
+template Object* Dictionary<NumberDictionaryShape, uint32_t>::SlowReverseLookup(
+    Object*);
+
+template Object* Dictionary<StringDictionaryShape, String*>::SlowReverseLookup(
+    Object*);
+
+template void Dictionary<NumberDictionaryShape, uint32_t>::CopyKeysTo(
+    FixedArray*, PropertyAttributes);
+
+template Object* Dictionary<StringDictionaryShape, String*>::DeleteProperty(
+    int, JSObject::DeleteMode);
+
+template Object* Dictionary<NumberDictionaryShape, uint32_t>::DeleteProperty(
+    int, JSObject::DeleteMode);
+
+template void Dictionary<StringDictionaryShape, String*>::CopyKeysTo(
+    FixedArray*);
+
+template int
+Dictionary<StringDictionaryShape, String*>::NumberOfElementsFilterAttributes(
+    PropertyAttributes);
+
+template Object* Dictionary<StringDictionaryShape, String*>::Add(
+    String*, Object*, PropertyDetails);
+
+template Object*
+Dictionary<StringDictionaryShape, String*>::GenerateNewEnumerationIndices();
+
+template int
+Dictionary<NumberDictionaryShape, uint32_t>::NumberOfElementsFilterAttributes(
+    PropertyAttributes);
+
+template Object* Dictionary<NumberDictionaryShape, uint32_t>::Add(
+    uint32_t, Object*, PropertyDetails);
+
+template Object* Dictionary<NumberDictionaryShape, uint32_t>::EnsureCapacity(
+    int, uint32_t);
+
+template Object* Dictionary<StringDictionaryShape, String*>::EnsureCapacity(
+    int, String*);
+
+template Object* Dictionary<NumberDictionaryShape, uint32_t>::AddEntry(
+    uint32_t, Object*, PropertyDetails, uint32_t);
+
+template Object* Dictionary<StringDictionaryShape, String*>::AddEntry(
+    String*, Object*, PropertyDetails, uint32_t);
+
+template
+int Dictionary<NumberDictionaryShape, uint32_t>::NumberOfEnumElements();
+
+template
+int Dictionary<StringDictionaryShape, String*>::NumberOfEnumElements();
+
+// Collates undefined and nonexistent elements below limit from position
+// zero of the elements. The object stays in Dictionary mode.
+Object* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
+  ASSERT(HasDictionaryElements());
+  // Must stay in dictionary mode, either because of requires_slow_elements,
+  // or because we are not going to sort (and therefore compact) all of the
+  // elements.
+  NumberDictionary* dict = element_dictionary();
+  HeapNumber* result_double = NULL;
+  if (limit > static_cast<uint32_t>(Smi::kMaxValue)) {
+    // Allocate space for result before we start mutating the object.
+    Object* new_double = Heap::AllocateHeapNumber(0.0);
+    if (new_double->IsFailure()) return new_double;
+    result_double = HeapNumber::cast(new_double);
+  }
+
+  int capacity = dict->Capacity();
+  Object* obj = NumberDictionary::Allocate(dict->Capacity());
+  if (obj->IsFailure()) return obj;
+  NumberDictionary* new_dict = NumberDictionary::cast(obj);
+
+  AssertNoAllocation no_alloc;
+
+  uint32_t pos = 0;
+  uint32_t undefs = 0;
+  for (int i = 0; i < capacity; i++) {
+    Object* k = dict->KeyAt(i);
+    if (dict->IsKey(k)) {
+      ASSERT(k->IsNumber());
+      ASSERT(!k->IsSmi() || Smi::cast(k)->value() >= 0);
+      ASSERT(!k->IsHeapNumber() || HeapNumber::cast(k)->value() >= 0);
+      ASSERT(!k->IsHeapNumber() || HeapNumber::cast(k)->value() <= kMaxUInt32);
+      Object* value = dict->ValueAt(i);
+      PropertyDetails details = dict->DetailsAt(i);
+      if (details.type() == CALLBACKS) {
+        // Bail out and do the sorting of undefineds and array holes in JS.
+        return Smi::FromInt(-1);
+      }
+      uint32_t key = NumberToUint32(k);
+      if (key < limit) {
+        if (value->IsUndefined()) {
+          undefs++;
+        } else {
+          new_dict->AddNumberEntry(pos, value, details);
+          pos++;
+        }
+      } else {
+        new_dict->AddNumberEntry(key, value, details);
+      }
+    }
+  }
+
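+  // pos is now the number of defined, non-undefined values below limit;
+  // append the collected undefined values right after them.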
+  uint32_t result = pos;
+  PropertyDetails no_details = PropertyDetails(NONE, NORMAL);
+  while (undefs > 0) {
+    new_dict->AddNumberEntry(pos, Heap::undefined_value(), no_details);
+    pos++;
+    undefs--;
+  }
+
+  set_elements(new_dict);
+
+  if (result <= static_cast<uint32_t>(Smi::kMaxValue)) {
+    return Smi::FromInt(static_cast<int>(result));
+  }
+
+  ASSERT_NE(NULL, result_double);
+  result_double->set_value(static_cast<double>(result));
+  return result_double;
+}
+
+
+// Collects all defined (non-hole), non-undefined array elements at the
+// start of the elements array.
+// If the object is in dictionary mode, it is converted to fast elements
+// mode.
+Object* JSObject::PrepareElementsForSort(uint32_t limit) {
+  ASSERT(!HasPixelElements());
+
+  if (HasDictionaryElements()) {
+    // Convert to fast elements containing only the existing properties.
+    // Ordering is irrelevant, since we are going to sort anyway.
+    NumberDictionary* dict = element_dictionary();
+    if (IsJSArray() || dict->requires_slow_elements() ||
+        dict->max_number_key() >= limit) {
+      return PrepareSlowElementsForSort(limit);
+    }
+    // Convert to fast elements.
+
+    PretenureFlag tenure = Heap::InNewSpace(this) ? NOT_TENURED : TENURED;
+    Object* new_array =
+        Heap::AllocateFixedArray(dict->NumberOfElements(), tenure);
+    if (new_array->IsFailure()) {
+      return new_array;
+    }
+    FixedArray* fast_elements = FixedArray::cast(new_array);
+    dict->CopyValuesTo(fast_elements);
+    set_elements(fast_elements);
+  }
+  ASSERT(HasFastElements());
+
+  // Collect holes at the end, undefined before that and the rest at the
+  // start, and return the number of non-hole, non-undefined values.
+
+  FixedArray* elements = FixedArray::cast(this->elements());
+  uint32_t elements_length = static_cast<uint32_t>(elements->length());
+  if (limit > elements_length) {
+    limit = elements_length;
+  }
+  if (limit == 0) {
+    return Smi::FromInt(0);
+  }
+
+  HeapNumber* result_double = NULL;
+  if (limit > static_cast<uint32_t>(Smi::kMaxValue)) {
+    // Pessimistically allocate space for return value before
+    // we start mutating the array.
+    Object* new_double = Heap::AllocateHeapNumber(0.0);
+    if (new_double->IsFailure()) return new_double;
+    result_double = HeapNumber::cast(new_double);
+  }
+
+  AssertNoAllocation no_alloc;
+
+  // Split elements into defined, undefined and the_hole, in that order.
+  // Only count locations for undefined and the hole, and fill them afterwards.
+  WriteBarrierMode write_barrier = elements->GetWriteBarrierMode();
+  unsigned int undefs = limit;
+  unsigned int holes = limit;
+  // Assume most arrays contain no holes and undefined values, so minimize the
+  // number of stores of non-undefined, non-the-hole values.
+  for (unsigned int i = 0; i < undefs; i++) {
+    Object* current = elements->get(i);
+    if (current->IsTheHole()) {
+      holes--;
+      undefs--;
+    } else if (current->IsUndefined()) {
+      undefs--;
+    } else {
+      continue;
+    }
+    // Position i needs to be filled.
+    while (undefs > i) {
+      current = elements->get(undefs);
+      if (current->IsTheHole()) {
+        holes--;
+        undefs--;
+      } else if (current->IsUndefined()) {
+        undefs--;
+      } else {
+        elements->set(i, current, write_barrier);
+        break;
+      }
+    }
+  }
+  uint32_t result = undefs;
+  while (undefs < holes) {
+    elements->set_undefined(undefs);
+    undefs++;
+  }
+  while (holes < limit) {
+    elements->set_the_hole(holes);
+    holes++;
+  }
+
+  if (result <= static_cast<uint32_t>(Smi::kMaxValue)) {
+    return Smi::FromInt(static_cast<int>(result));
+  }
+  ASSERT_NE(NULL, result_double);
+  result_double->set_value(static_cast<double>(result));
+  return result_double;
+}
+
+
+Object* PixelArray::SetValue(uint32_t index, Object* value) {
+  uint8_t clamped_value = 0;
+  if (index < static_cast<uint32_t>(length())) {
+    if (value->IsSmi()) {
+      int int_value = Smi::cast(value)->value();
+      if (int_value < 0) {
+        clamped_value = 0;
+      } else if (int_value > 255) {
+        clamped_value = 255;
+      } else {
+        clamped_value = static_cast<uint8_t>(int_value);
+      }
+    } else if (value->IsHeapNumber()) {
+      double double_value = HeapNumber::cast(value)->value();
+      if (!(double_value > 0)) {
+        // NaN and less than zero clamp to zero.
+        clamped_value = 0;
+      } else if (double_value > 255) {
+        // Greater than 255 clamp to 255.
+        clamped_value = 255;
+      } else {
+        // Other doubles are rounded to the nearest integer.
+        clamped_value = static_cast<uint8_t>(double_value + 0.5);
+      }
+    } else {
+      // Clamp undefined to zero (default). All other types have been
+      // converted to a number type further up in the call chain.
+      ASSERT(value->IsUndefined());
+    }
+    set(index, clamped_value);
+  }
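+  // Out-of-range indices are ignored; the clamped value (still zero in
+  // that case) is returned either way.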
+  return Smi::FromInt(clamped_value);
+}
+
+
+Object* GlobalObject::GetPropertyCell(LookupResult* result) {
+  ASSERT(!HasFastProperties());
+  Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
+  ASSERT(value->IsJSGlobalPropertyCell());
+  return value;
+}
+
+
+Object* GlobalObject::EnsurePropertyCell(String* name) {
+  ASSERT(!HasFastProperties());
+  int entry = property_dictionary()->FindEntry(name);
+  if (entry == StringDictionary::kNotFound) {
+    Object* cell = Heap::AllocateJSGlobalPropertyCell(Heap::the_hole_value());
+    if (cell->IsFailure()) return cell;
+    PropertyDetails details(NONE, NORMAL);
+    details = details.AsDeleted();
+    Object* dictionary = property_dictionary()->Add(name, cell, details);
+    if (dictionary->IsFailure()) return dictionary;
+    set_properties(StringDictionary::cast(dictionary));
+    return cell;
+  } else {
+    Object* value = property_dictionary()->ValueAt(entry);
+    ASSERT(value->IsJSGlobalPropertyCell());
+    return value;
+  }
+}
+
+
+Object* SymbolTable::LookupString(String* string, Object** s) {
+  SymbolKey key(string);
+  return LookupKey(&key, s);
+}
+
+
+bool SymbolTable::LookupSymbolIfExists(String* string, String** symbol) {
+  SymbolKey key(string);
+  int entry = FindEntry(&key);
+  if (entry == kNotFound) {
+    return false;
+  } else {
+    String* result = String::cast(KeyAt(entry));
+    ASSERT(StringShape(result).IsSymbol());
+    *symbol = result;
+    return true;
+  }
+}
+
+
+Object* SymbolTable::LookupSymbol(Vector<const char> str, Object** s) {
+  Utf8SymbolKey key(str);
+  return LookupKey(&key, s);
+}
+
+
+Object* SymbolTable::LookupKey(HashTableKey* key, Object** s) {
+  int entry = FindEntry(key);
+
+  // Symbol already in table.
+  if (entry != kNotFound) {
+    *s = KeyAt(entry);
+    return this;
+  }
+
+  // Adding new symbol. Grow table if needed.
+  Object* obj = EnsureCapacity(1, key);
+  if (obj->IsFailure()) return obj;
+
+  // Create symbol object.
+  Object* symbol = key->AsObject();
+  if (symbol->IsFailure()) return symbol;
+
+  // If the symbol table grew as part of EnsureCapacity, obj is not
+  // the current symbol table and therefore we cannot use
+  // SymbolTable::cast here.
+  SymbolTable* table = reinterpret_cast<SymbolTable*>(obj);
+
+  // Add the new symbol and return it along with the symbol table.
+  entry = table->FindInsertionEntry(key->Hash());
+  table->set(EntryToIndex(entry), symbol);
+  table->ElementAdded();
+  *s = symbol;
+  return table;
+}
+
+
+Object* CompilationCacheTable::Lookup(String* src) {
+  StringKey key(src);
+  int entry = FindEntry(&key);
+  if (entry == kNotFound) return Heap::undefined_value();
+  return get(EntryToIndex(entry) + 1);
+}
+
+
+Object* CompilationCacheTable::LookupEval(String* src, Context* context) {
+  StringSharedKey key(src, context->closure()->shared());
+  int entry = FindEntry(&key);
+  if (entry == kNotFound) return Heap::undefined_value();
+  return get(EntryToIndex(entry) + 1);
+}
+
+
+Object* CompilationCacheTable::LookupRegExp(String* src,
+                                            JSRegExp::Flags flags) {
+  RegExpKey key(src, flags);
+  int entry = FindEntry(&key);
+  if (entry == kNotFound) return Heap::undefined_value();
+  return get(EntryToIndex(entry) + 1);
+}
+
+
+Object* CompilationCacheTable::Put(String* src, Object* value) {
+  StringKey key(src);
+  Object* obj = EnsureCapacity(1, &key);
+  if (obj->IsFailure()) return obj;
+
+  CompilationCacheTable* cache =
+      reinterpret_cast<CompilationCacheTable*>(obj);
+  int entry = cache->FindInsertionEntry(key.Hash());
+  cache->set(EntryToIndex(entry), src);
+  cache->set(EntryToIndex(entry) + 1, value);
+  cache->ElementAdded();
+  return cache;
+}
+
+
+Object* CompilationCacheTable::PutEval(String* src,
+                                       Context* context,
+                                       Object* value) {
+  StringSharedKey key(src, context->closure()->shared());
+  Object* obj = EnsureCapacity(1, &key);
+  if (obj->IsFailure()) return obj;
+
+  CompilationCacheTable* cache =
+      reinterpret_cast<CompilationCacheTable*>(obj);
+  int entry = cache->FindInsertionEntry(key.Hash());
+
+  Object* k = key.AsObject();
+  if (k->IsFailure()) return k;
+
+  cache->set(EntryToIndex(entry), k);
+  cache->set(EntryToIndex(entry) + 1, value);
+  cache->ElementAdded();
+  return cache;
+}
+
+
+Object* CompilationCacheTable::PutRegExp(String* src,
+                                         JSRegExp::Flags flags,
+                                         FixedArray* value) {
+  RegExpKey key(src, flags);
+  Object* obj = EnsureCapacity(1, &key);
+  if (obj->IsFailure()) return obj;
+
+  CompilationCacheTable* cache =
+      reinterpret_cast<CompilationCacheTable*>(obj);
+  int entry = cache->FindInsertionEntry(key.Hash());
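+  // Store the value in the key slot as well: RegExpKey::IsMatch and
+  // HashForObject read the source and flags directly out of that
+  // FixedArray, so no separate key object is ever allocated (see
+  // RegExpKey::AsObject).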
+  cache->set(EntryToIndex(entry), value);
+  cache->set(EntryToIndex(entry) + 1, value);
+  cache->ElementAdded();
+  return cache;
+}
+
+
+// SymbolsKey used for HashTable where key is array of symbols.
+class SymbolsKey : public HashTableKey {
+ public:
+  explicit SymbolsKey(FixedArray* symbols) : symbols_(symbols) { }
+
+  bool IsMatch(Object* symbols) {
+    FixedArray* o = FixedArray::cast(symbols);
+    int len = symbols_->length();
+    if (o->length() != len) return false;
+    for (int i = 0; i < len; i++) {
+      if (o->get(i) != symbols_->get(i)) return false;
+    }
+    return true;
+  }
+
+  uint32_t Hash() { return HashForObject(symbols_); }
+
+  uint32_t HashForObject(Object* obj) {
+    FixedArray* symbols = FixedArray::cast(obj);
+    int len = symbols->length();
+    uint32_t hash = 0;
+    for (int i = 0; i < len; i++) {
+      hash ^= String::cast(symbols->get(i))->Hash();
+    }
+    return hash;
+  }
+
+  Object* AsObject() { return symbols_; }
+
+ private:
+  FixedArray* symbols_;
+};
+
+
+Object* MapCache::Lookup(FixedArray* array) {
+  SymbolsKey key(array);
+  int entry = FindEntry(&key);
+  if (entry == kNotFound) return Heap::undefined_value();
+  return get(EntryToIndex(entry) + 1);
+}
+
+
+Object* MapCache::Put(FixedArray* array, Map* value) {
+  SymbolsKey key(array);
+  Object* obj = EnsureCapacity(1, &key);
+  if (obj->IsFailure()) return obj;
+
+  MapCache* cache = reinterpret_cast<MapCache*>(obj);
+  int entry = cache->FindInsertionEntry(key.Hash());
+  cache->set(EntryToIndex(entry), array);
+  cache->set(EntryToIndex(entry) + 1, value);
+  cache->ElementAdded();
+  return cache;
+}
+
+
+template<typename Shape, typename Key>
+Object* Dictionary<Shape, Key>::Allocate(int at_least_space_for) {
+  Object* obj = HashTable<Shape, Key>::Allocate(at_least_space_for);
+  // Initialize the next enumeration index.
+  if (!obj->IsFailure()) {
+    Dictionary<Shape, Key>::cast(obj)->
+        SetNextEnumerationIndex(PropertyDetails::kInitialIndex);
+  }
+  return obj;
+}
+
+
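+// Reassigns dense enumeration indices, starting at
+// PropertyDetails::kInitialIndex, while preserving the current relative
+// enumeration order of the entries.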
+template<typename Shape, typename Key>
+Object* Dictionary<Shape, Key>::GenerateNewEnumerationIndices() {
+  int length = HashTable<Shape, Key>::NumberOfElements();
+
+  // Allocate and initialize iteration order array.
+  Object* obj = Heap::AllocateFixedArray(length);
+  if (obj->IsFailure()) return obj;
+  FixedArray* iteration_order = FixedArray::cast(obj);
+  for (int i = 0; i < length; i++) {
+    iteration_order->set(i, Smi::FromInt(i), SKIP_WRITE_BARRIER);
+  }
+
+  // Allocate array with enumeration order.
+  obj = Heap::AllocateFixedArray(length);
+  if (obj->IsFailure()) return obj;
+  FixedArray* enumeration_order = FixedArray::cast(obj);
+
+  // Fill the enumeration order array with property details.
+  int capacity = HashTable<Shape, Key>::Capacity();
+  int pos = 0;
+  for (int i = 0; i < capacity; i++) {
+    if (Dictionary<Shape, Key>::IsKey(Dictionary<Shape, Key>::KeyAt(i))) {
+      enumeration_order->set(pos++,
+                             Smi::FromInt(DetailsAt(i).index()),
+                             SKIP_WRITE_BARRIER);
+    }
+  }
+
+  // Sort the arrays wrt. enumeration order.
+  iteration_order->SortPairs(enumeration_order, enumeration_order->length());
+
+  // Overwrite the enumeration_order with the enumeration indices.
+  for (int i = 0; i < length; i++) {
+    int index = Smi::cast(iteration_order->get(i))->value();
+    int enum_index = PropertyDetails::kInitialIndex + i;
+    enumeration_order->set(index,
+                           Smi::FromInt(enum_index),
+                           SKIP_WRITE_BARRIER);
+  }
+
+  // Update the dictionary with new indices.
+  capacity = HashTable<Shape, Key>::Capacity();
+  pos = 0;
+  for (int i = 0; i < capacity; i++) {
+    if (Dictionary<Shape, Key>::IsKey(Dictionary<Shape, Key>::KeyAt(i))) {
+      int enum_index = Smi::cast(enumeration_order->get(pos++))->value();
+      PropertyDetails details = DetailsAt(i);
+      PropertyDetails new_details =
+          PropertyDetails(details.attributes(), details.type(), enum_index);
+      DetailsAtPut(i, new_details);
+    }
+  }
+
+  // Set the next enumeration index.
+  SetNextEnumerationIndex(PropertyDetails::kInitialIndex + length);
+  return this;
+}
+
+
+template<typename Shape, typename Key>
+Object* Dictionary<Shape, Key>::EnsureCapacity(int n, Key key) {
+  // Check whether there are enough enumeration indices to add n elements.
+  if (Shape::kIsEnumerable &&
+      !PropertyDetails::IsValidIndex(NextEnumerationIndex() + n)) {
+    // If not, we generate new indices for the properties.
+    Object* result = GenerateNewEnumerationIndices();
+    if (result->IsFailure()) return result;
+  }
+  return HashTable<Shape, Key>::EnsureCapacity(n, key);
+}
+
+
+void NumberDictionary::RemoveNumberEntries(uint32_t from, uint32_t to) {
+  // Do nothing if the interval [from, to) is empty.
+  if (from >= to) return;
+
+  int removed_entries = 0;
+  Object* sentinel = Heap::null_value();
+  int capacity = Capacity();
+  for (int i = 0; i < capacity; i++) {
+    Object* key = KeyAt(i);
+    if (key->IsNumber()) {
+      uint32_t number = static_cast<uint32_t>(key->Number());
+      if (from <= number && number < to) {
+        SetEntry(i, sentinel, sentinel, Smi::FromInt(0));
+        removed_entries++;
+      }
+    }
+  }
+
+  // Update the number of elements.
+  SetNumberOfElements(NumberOfElements() - removed_entries);
+}
+
+
+template<typename Shape, typename Key>
+Object* Dictionary<Shape, Key>::DeleteProperty(int entry,
+                                               JSObject::DeleteMode mode) {
+  PropertyDetails details = DetailsAt(entry);
+  // Ignore attributes if forcing a deletion.
+  if (details.IsDontDelete() && mode == JSObject::NORMAL_DELETION) {
+    return Heap::false_value();
+  }
+  SetEntry(entry, Heap::null_value(), Heap::null_value(), Smi::FromInt(0));
+  HashTable<Shape, Key>::ElementRemoved();
+  return Heap::true_value();
+}
+
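+  // A JSValue wrapping a string also exposes one element per character.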
+
+template<typename Shape, typename Key>
+Object* Dictionary<Shape, Key>::AtPut(Key key, Object* value) {
+  int entry = FindEntry(key);
+
+  // If the entry is present, set the value.
+  if (entry != Dictionary<Shape, Key>::kNotFound) {
+    ValueAtPut(entry, value);
+    return this;
+  }
+
+  // Check whether the dictionary should be extended.
+  Object* obj = EnsureCapacity(1, key);
+  if (obj->IsFailure()) return obj;
+
+  Object* k = Shape::AsObject(key);
+  if (k->IsFailure()) return k;
+  PropertyDetails details = PropertyDetails(NONE, NORMAL);
+  return Dictionary<Shape, Key>::cast(obj)->
+      AddEntry(key, value, details, Shape::Hash(key));
+}
+
+
+template<typename Shape, typename Key>
+Object* Dictionary<Shape, Key>::Add(Key key,
+                                    Object* value,
+                                    PropertyDetails details) {
+  // Validate that the key is absent.
+  SLOW_ASSERT((FindEntry(key) == Dictionary<Shape, Key>::kNotFound));
+  // Check whether the dictionary should be extended.
+  Object* obj = EnsureCapacity(1, key);
+  if (obj->IsFailure()) return obj;
+  return Dictionary<Shape, Key>::cast(obj)->
+      AddEntry(key, value, details, Shape::Hash(key));
+}
+
+
+// Add a key, value pair to the dictionary.
+template<typename Shape, typename Key>
+Object* Dictionary<Shape, Key>::AddEntry(Key key,
+                                         Object* value,
+                                         PropertyDetails details,
+                                         uint32_t hash) {
+  // Compute the key object.
+  Object* k = Shape::AsObject(key);
+  if (k->IsFailure()) return k;
+
+  uint32_t entry = Dictionary<Shape, Key>::FindInsertionEntry(hash);
+  // Insert element at empty or deleted entry
+  if (!details.IsDeleted() && details.index() == 0 && Shape::kIsEnumerable) {
+    // Assign an enumeration index to the property and update
+    // SetNextEnumerationIndex.
+    int index = NextEnumerationIndex();
+    details = PropertyDetails(details.attributes(), details.type(), index);
+    SetNextEnumerationIndex(index + 1);
+  }
+  SetEntry(entry, k, value, details);
+  ASSERT((Dictionary<Shape, Key>::KeyAt(entry)->IsNumber()
+          || Dictionary<Shape, Key>::KeyAt(entry)->IsString()));
+  HashTable<Shape, Key>::ElementAdded();
+  return this;
+}
+
+
+void NumberDictionary::UpdateMaxNumberKey(uint32_t key) {
+  // If the dictionary requires slow elements an element has already
+  // been added at a high index.
+  if (requires_slow_elements()) return;
+  // Check if this index is high enough that we should require slow
+  // elements.
+  if (key > kRequiresSlowElementsLimit) {
+    set_requires_slow_elements();
+    return;
+  }
+  // Update max key value.
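+  // The key is stored shifted left by kRequiresSlowElementsTagSize so that
+  // the low bits remain available for the requires-slow-elements tag.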
+  Object* max_index_object = get(kMaxNumberKeyIndex);
+  if (!max_index_object->IsSmi() || max_number_key() < key) {
+    FixedArray::set(kMaxNumberKeyIndex,
+                    Smi::FromInt(key << kRequiresSlowElementsTagSize),
+                    SKIP_WRITE_BARRIER);
+  }
+}
+
+
+Object* NumberDictionary::AddNumberEntry(uint32_t key,
+                                         Object* value,
+                                         PropertyDetails details) {
+  UpdateMaxNumberKey(key);
+  SLOW_ASSERT(FindEntry(key) == kNotFound);
+  return Add(key, value, details);
+}
+
+
+Object* NumberDictionary::AtNumberPut(uint32_t key, Object* value) {
+  UpdateMaxNumberKey(key);
+  return AtPut(key, value);
+}
+
+
+Object* NumberDictionary::Set(uint32_t key,
+                              Object* value,
+                              PropertyDetails details) {
+  int entry = FindEntry(key);
+  if (entry == kNotFound) return AddNumberEntry(key, value, details);
+  // Preserve enumeration index.
+  details = PropertyDetails(details.attributes(),
+                            details.type(),
+                            DetailsAt(entry).index());
+  SetEntry(entry, NumberDictionaryShape::AsObject(key), value, details);
+  return this;
+}
+
+
+template<typename Shape, typename Key>
+int Dictionary<Shape, Key>::NumberOfElementsFilterAttributes(
+    PropertyAttributes filter) {
+  int capacity = HashTable<Shape, Key>::Capacity();
+  int result = 0;
+  for (int i = 0; i < capacity; i++) {
+    Object* k = HashTable<Shape, Key>::KeyAt(i);
+    if (HashTable<Shape, Key>::IsKey(k)) {
+      PropertyDetails details = DetailsAt(i);
+      if (details.IsDeleted()) continue;
+      PropertyAttributes attr = details.attributes();
+      if ((attr & filter) == 0) result++;
+    }
+  }
+  return result;
+}
+
+
+template<typename Shape, typename Key>
+int Dictionary<Shape, Key>::NumberOfEnumElements() {
+  return NumberOfElementsFilterAttributes(
+      static_cast<PropertyAttributes>(DONT_ENUM));
+}
+
+
+template<typename Shape, typename Key>
+void Dictionary<Shape, Key>::CopyKeysTo(FixedArray* storage,
+                                        PropertyAttributes filter) {
+  ASSERT(storage->length() >= NumberOfEnumElements());
+  int capacity = HashTable<Shape, Key>::Capacity();
+  int index = 0;
+  for (int i = 0; i < capacity; i++) {
+    Object* k = HashTable<Shape, Key>::KeyAt(i);
+    if (HashTable<Shape, Key>::IsKey(k)) {
+      PropertyDetails details = DetailsAt(i);
+      if (details.IsDeleted()) continue;
+      PropertyAttributes attr = details.attributes();
+      if ((attr & filter) == 0) storage->set(index++, k);
+    }
+  }
+  storage->SortPairs(storage, index);
+  ASSERT(storage->length() >= index);
+}
+
+
+void StringDictionary::CopyEnumKeysTo(FixedArray* storage,
+                                      FixedArray* sort_array) {
+  ASSERT(storage->length() >= NumberOfEnumElements());
+  int capacity = Capacity();
+  int index = 0;
+  for (int i = 0; i < capacity; i++) {
+    Object* k = KeyAt(i);
+    if (IsKey(k)) {
+      PropertyDetails details = DetailsAt(i);
+      if (details.IsDeleted() || details.IsDontEnum()) continue;
+      storage->set(index, k);
+      sort_array->set(index,
+                      Smi::FromInt(details.index()),
+                      SKIP_WRITE_BARRIER);
+      index++;
+    }
+  }
+  storage->SortPairs(sort_array, sort_array->length());
+  ASSERT(storage->length() >= index);
+}
+
+
+template<typename Shape, typename Key>
+void Dictionary<Shape, Key>::CopyKeysTo(FixedArray* storage) {
+  ASSERT(storage->length() >= NumberOfElementsFilterAttributes(
+      static_cast<PropertyAttributes>(NONE)));
+  int capacity = HashTable<Shape, Key>::Capacity();
+  int index = 0;
+  for (int i = 0; i < capacity; i++) {
+    Object* k = HashTable<Shape, Key>::KeyAt(i);
+    if (HashTable<Shape, Key>::IsKey(k)) {
+      PropertyDetails details = DetailsAt(i);
+      if (details.IsDeleted()) continue;
+      storage->set(index++, k);
+    }
+  }
+  ASSERT(storage->length() >= index);
+}
+
+
+// Backwards lookup (slow).
+template<typename Shape, typename Key>
+Object* Dictionary<Shape, Key>::SlowReverseLookup(Object* value) {
+  int capacity = HashTable<Shape, Key>::Capacity();
+  for (int i = 0; i < capacity; i++) {
+    Object* k = HashTable<Shape, Key>::KeyAt(i);
+    if (Dictionary<Shape, Key>::IsKey(k)) {
+      Object* e = ValueAt(i);
+      if (e->IsJSGlobalPropertyCell()) {
+        e = JSGlobalPropertyCell::cast(e)->value();
+      }
+      if (e == value) return k;
+    }
+  }
+  return Heap::undefined_value();
+}
+
+
+Object* StringDictionary::TransformPropertiesToFastFor(
+    JSObject* obj, int unused_property_fields) {
+  // Make sure we preserve dictionary representation if there are too many
+  // descriptors.
+  if (NumberOfElements() > DescriptorArray::kMaxNumberOfDescriptors) return obj;
+
+  // Figure out if it is necessary to generate new enumeration indices.
+  int max_enumeration_index =
+      NextEnumerationIndex() +
+          (DescriptorArray::kMaxNumberOfDescriptors -
+           NumberOfElements());
+  if (!PropertyDetails::IsValidIndex(max_enumeration_index)) {
+    Object* result = GenerateNewEnumerationIndices();
+    if (result->IsFailure()) return result;
+  }
+
+  int instance_descriptor_length = 0;
+  int number_of_fields = 0;
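+  // JSFunction values become constant-function descriptors and need no
+  // backing field; every other NORMAL value is stored in a field.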
+
+  // Compute the length of the instance descriptor.
+  int capacity = Capacity();
+  for (int i = 0; i < capacity; i++) {
+    Object* k = KeyAt(i);
+    if (IsKey(k)) {
+      Object* value = ValueAt(i);
+      PropertyType type = DetailsAt(i).type();
+      ASSERT(type != FIELD);
+      instance_descriptor_length++;
+      if (type == NORMAL && !value->IsJSFunction()) number_of_fields += 1;
+    }
+  }
+
+  // Allocate the instance descriptor.
+  Object* descriptors_unchecked =
+      DescriptorArray::Allocate(instance_descriptor_length);
+  if (descriptors_unchecked->IsFailure()) return descriptors_unchecked;
+  DescriptorArray* descriptors = DescriptorArray::cast(descriptors_unchecked);
+
+  int inobject_props = obj->map()->inobject_properties();
+  int number_of_allocated_fields =
+      number_of_fields + unused_property_fields - inobject_props;
+
+  // Allocate the fixed array for the fields.
+  Object* fields = Heap::AllocateFixedArray(number_of_allocated_fields);
+  if (fields->IsFailure()) return fields;
+
+  // Fill in the instance descriptor and the fields.
+  int next_descriptor = 0;
+  int current_offset = 0;
+  for (int i = 0; i < capacity; i++) {
+    Object* k = KeyAt(i);
+    if (IsKey(k)) {
+      Object* value = ValueAt(i);
+      // Ensure the key is a symbol before writing into the instance descriptor.
+      Object* key = Heap::LookupSymbol(String::cast(k));
+      if (key->IsFailure()) return key;
+      PropertyDetails details = DetailsAt(i);
+      PropertyType type = details.type();
+
+      if (value->IsJSFunction()) {
+        ConstantFunctionDescriptor d(String::cast(key),
+                                     JSFunction::cast(value),
+                                     details.attributes(),
+                                     details.index());
+        descriptors->Set(next_descriptor++, &d);
+      } else if (type == NORMAL) {
+        if (current_offset < inobject_props) {
+          obj->InObjectPropertyAtPut(current_offset,
+                                     value,
+                                     UPDATE_WRITE_BARRIER);
+        } else {
+          int offset = current_offset - inobject_props;
+          FixedArray::cast(fields)->set(offset, value);
+        }
+        FieldDescriptor d(String::cast(key),
+                          current_offset++,
+                          details.attributes(),
+                          details.index());
+        descriptors->Set(next_descriptor++, &d);
+      } else if (type == CALLBACKS) {
+        CallbacksDescriptor d(String::cast(key),
+                              value,
+                              details.attributes(),
+                              details.index());
+        descriptors->Set(next_descriptor++, &d);
+      } else {
+        UNREACHABLE();
+      }
+    }
+  }
+  ASSERT(current_offset == number_of_fields);
+
+  descriptors->Sort();
+  // Allocate new map.
+  Object* new_map = obj->map()->CopyDropDescriptors();
+  if (new_map->IsFailure()) return new_map;
+
+  // Transform the object.
+  obj->set_map(Map::cast(new_map));
+  obj->map()->set_instance_descriptors(descriptors);
+  obj->map()->set_unused_property_fields(unused_property_fields);
+
+  obj->set_properties(FixedArray::cast(fields));
+  ASSERT(obj->IsJSObject());
+
+  descriptors->SetNextEnumerationIndex(NextEnumerationIndex());
+  // Check that it really works.
+  ASSERT(obj->HasFastProperties());
+
+  return obj;
+}
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+// Check if there is a break point at this code position.
+bool DebugInfo::HasBreakPoint(int code_position) {
+  // Get the break point info object for this code position.
+  Object* break_point_info = GetBreakPointInfo(code_position);
+
+  // If there is no break point info object or no break points in the break
+  // point info object there is no break point at this code position.
+  if (break_point_info->IsUndefined()) return false;
+  return BreakPointInfo::cast(break_point_info)->GetBreakPointCount() > 0;
+}
+
+
+// Get the break point info object for this code position.
+Object* DebugInfo::GetBreakPointInfo(int code_position) {
+  // Find the index of the break point info object for this code position.
+  int index = GetBreakPointInfoIndex(code_position);
+
+  // Return the break point info object if any.
+  if (index == kNoBreakPointInfo) return Heap::undefined_value();
+  return BreakPointInfo::cast(break_points()->get(index));
+}
+
+
+// Clear a break point at the specified code position.
+void DebugInfo::ClearBreakPoint(Handle<DebugInfo> debug_info,
+                                int code_position,
+                                Handle<Object> break_point_object) {
+  Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position));
+  if (break_point_info->IsUndefined()) return;
+  BreakPointInfo::ClearBreakPoint(
+      Handle<BreakPointInfo>::cast(break_point_info),
+      break_point_object);
+}
+
+
+void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info,
+                              int code_position,
+                              int source_position,
+                              int statement_position,
+                              Handle<Object> break_point_object) {
+  Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position));
+  if (!break_point_info->IsUndefined()) {
+    BreakPointInfo::SetBreakPoint(
+        Handle<BreakPointInfo>::cast(break_point_info),
+        break_point_object);
+    return;
+  }
+
+  // Adding a new break point for a code position which did not have any
+  // break points before. Try to find a free slot.
+  int index = kNoBreakPointInfo;
+  for (int i = 0; i < debug_info->break_points()->length(); i++) {
+    if (debug_info->break_points()->get(i)->IsUndefined()) {
+      index = i;
+      break;
+    }
+  }
+  if (index == kNoBreakPointInfo) {
+    // No free slot - extend break point info array.
+    Handle<FixedArray> old_break_points =
+        Handle<FixedArray>(FixedArray::cast(debug_info->break_points()));
+    debug_info->set_break_points(*Factory::NewFixedArray(
+        old_break_points->length() +
+            Debug::kEstimatedNofBreakPointsInFunction));
+    Handle<FixedArray> new_break_points =
+        Handle<FixedArray>(FixedArray::cast(debug_info->break_points()));
+    for (int i = 0; i < old_break_points->length(); i++) {
+      new_break_points->set(i, old_break_points->get(i));
+    }
+    index = old_break_points->length();
+  }
+  ASSERT(index != kNoBreakPointInfo);
+
+  // Allocate new BreakPointInfo object and set the break point.
+  Handle<BreakPointInfo> new_break_point_info =
+      Handle<BreakPointInfo>::cast(Factory::NewStruct(BREAK_POINT_INFO_TYPE));
+  new_break_point_info->set_code_position(Smi::FromInt(code_position));
+  new_break_point_info->set_source_position(Smi::FromInt(source_position));
+  new_break_point_info->
+      set_statement_position(Smi::FromInt(statement_position));
+  new_break_point_info->set_break_point_objects(Heap::undefined_value());
+  BreakPointInfo::SetBreakPoint(new_break_point_info, break_point_object);
+  debug_info->break_points()->set(index, *new_break_point_info);
+}
+
+
+// Get the break point objects for a code position.
+Object* DebugInfo::GetBreakPointObjects(int code_position) {
+  Object* break_point_info = GetBreakPointInfo(code_position);
+  if (break_point_info->IsUndefined()) {
+    return Heap::undefined_value();
+  }
+  return BreakPointInfo::cast(break_point_info)->break_point_objects();
+}
+
+
+// Get the total number of break points.
+int DebugInfo::GetBreakPointCount() {
+  if (break_points()->IsUndefined()) return 0;
+  int count = 0;
+  for (int i = 0; i < break_points()->length(); i++) {
+    if (!break_points()->get(i)->IsUndefined()) {
+      BreakPointInfo* break_point_info =
+          BreakPointInfo::cast(break_points()->get(i));
+      count += break_point_info->GetBreakPointCount();
+    }
+  }
+  return count;
+}
+
+
+Object* DebugInfo::FindBreakPointInfo(Handle<DebugInfo> debug_info,
+                                      Handle<Object> break_point_object) {
+  if (debug_info->break_points()->IsUndefined()) return Heap::undefined_value();
+  for (int i = 0; i < debug_info->break_points()->length(); i++) {
+    if (!debug_info->break_points()->get(i)->IsUndefined()) {
+      Handle<BreakPointInfo> break_point_info =
+          Handle<BreakPointInfo>(BreakPointInfo::cast(
+              debug_info->break_points()->get(i)));
+      if (BreakPointInfo::HasBreakPointObject(break_point_info,
+                                              break_point_object)) {
+        return *break_point_info;
+      }
+    }
+  }
+  return Heap::undefined_value();
+}
+
+
+// Find the index of the break point info object for the specified code
+// position.
+int DebugInfo::GetBreakPointInfoIndex(int code_position) {
+  if (break_points()->IsUndefined()) return kNoBreakPointInfo;
+  for (int i = 0; i < break_points()->length(); i++) {
+    if (!break_points()->get(i)->IsUndefined()) {
+      BreakPointInfo* break_point_info =
+          BreakPointInfo::cast(break_points()->get(i));
+      if (break_point_info->code_position()->value() == code_position) {
+        return i;
+      }
+    }
+  }
+  return kNoBreakPointInfo;
+}
+
+
+// Remove the specified break point object.
+void BreakPointInfo::ClearBreakPoint(Handle<BreakPointInfo> break_point_info,
+                                     Handle<Object> break_point_object) {
+  // If there are no break points, just ignore.
+  if (break_point_info->break_point_objects()->IsUndefined()) return;
+  // If there is a single break point, clear it if it is the same.
+  if (!break_point_info->break_point_objects()->IsFixedArray()) {
+    if (break_point_info->break_point_objects() == *break_point_object) {
+      break_point_info->set_break_point_objects(Heap::undefined_value());
+    }
+    return;
+  }
+  // If there are multiple break points, shrink the array.
+  ASSERT(break_point_info->break_point_objects()->IsFixedArray());
+  Handle<FixedArray> old_array =
+      Handle<FixedArray>(
+          FixedArray::cast(break_point_info->break_point_objects()));
+  Handle<FixedArray> new_array =
+      Factory::NewFixedArray(old_array->length() - 1);
+  int found_count = 0;
+  for (int i = 0; i < old_array->length(); i++) {
+    if (old_array->get(i) == *break_point_object) {
+      ASSERT(found_count == 0);
+      found_count++;
+    } else {
+      new_array->set(i - found_count, old_array->get(i));
+    }
+  }
+  // If the break point was found in the list, replace it with the new array.
+  if (found_count > 0) break_point_info->set_break_point_objects(*new_array);
+}
+
+
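+// break_point_objects() uses one of three representations: undefined (no
+// break points), a single break point object, or a FixedArray of break
+// point objects.  SetBreakPoint below transitions between these
+// representations as break points are added.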
+// Add the specified break point object.
+void BreakPointInfo::SetBreakPoint(Handle<BreakPointInfo> break_point_info,
+                                   Handle<Object> break_point_object) {
+  // If there were no break point objects before, just set it.
+  if (break_point_info->break_point_objects()->IsUndefined()) {
+    break_point_info->set_break_point_objects(*break_point_object);
+    return;
+  }
+  // If the break point object is the same as before, just ignore.
+  if (break_point_info->break_point_objects() == *break_point_object) return;
+  // If there was one break point object before, replace it with an array.
+  if (!break_point_info->break_point_objects()->IsFixedArray()) {
+    Handle<FixedArray> array = Factory::NewFixedArray(2);
+    array->set(0, break_point_info->break_point_objects());
+    array->set(1, *break_point_object);
+    break_point_info->set_break_point_objects(*array);
+    return;
+  }
+  // If there was more than one break point before, extend the array.
+  Handle<FixedArray> old_array =
+      Handle<FixedArray>(
+          FixedArray::cast(break_point_info->break_point_objects()));
+  Handle<FixedArray> new_array =
+      Factory::NewFixedArray(old_array->length() + 1);
+  for (int i = 0; i < old_array->length(); i++) {
+    // If the break point was there before, just ignore.
+    if (old_array->get(i) == *break_point_object) return;
+    new_array->set(i, old_array->get(i));
+  }
+  // Add the new break point.
+  new_array->set(old_array->length(), *break_point_object);
+  break_point_info->set_break_point_objects(*new_array);
+}
+
+
+bool BreakPointInfo::HasBreakPointObject(
+    Handle<BreakPointInfo> break_point_info,
+    Handle<Object> break_point_object) {
+  // No break point.
+  if (break_point_info->break_point_objects()->IsUndefined()) return false;
+  // Single break point.
+  if (!break_point_info->break_point_objects()->IsFixedArray()) {
+    return break_point_info->break_point_objects() == *break_point_object;
+  }
+  // Multiple break points.
+  FixedArray* array = FixedArray::cast(break_point_info->break_point_objects());
+  for (int i = 0; i < array->length(); i++) {
+    if (array->get(i) == *break_point_object) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+// Get the number of break points.
+int BreakPointInfo::GetBreakPointCount() {
+  // No break point.
+  if (break_point_objects()->IsUndefined()) return 0;
+  // Single break point.
+  if (!break_point_objects()->IsFixedArray()) return 1;
+  // Multiple break points.
+  return FixedArray::cast(break_point_objects())->length();
+}
+#endif
+
+
+} }  // namespace v8::internal
diff --git a/src/objects.h b/src/objects.h
new file mode 100644
index 0000000..e9430f5
--- /dev/null
+++ b/src/objects.h
@@ -0,0 +1,4889 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_OBJECTS_H_
+#define V8_OBJECTS_H_
+
+#include "builtins.h"
+#include "code-stubs.h"
+#include "smart-pointer.h"
+#include "unicode-inl.h"
+
+//
+// All object types in the V8 JavaScript heap are described in this file.
+//
+// Inheritance hierarchy:
+//   - Object
+//     - Smi          (immediate small integer)
+//     - Failure      (immediate for marking failed operation)
+//     - HeapObject   (superclass for everything allocated in the heap)
+//       - JSObject
+//         - JSArray
+//         - JSRegExp
+//         - JSFunction
+//         - GlobalObject
+//           - JSGlobalObject
+//           - JSBuiltinsObject
+//         - JSGlobalProxy
+//         - JSValue
+//       - Array
+//         - ByteArray
+//         - PixelArray
+//         - FixedArray
+//           - DescriptorArray
+//           - HashTable
+//             - Dictionary
+//             - SymbolTable
+//             - CompilationCacheTable
+//             - MapCache
+//           - Context
+//           - GlobalContext
+//       - String
+//         - SeqString
+//           - SeqAsciiString
+//           - SeqTwoByteString
+//         - ConsString
+//         - SlicedString
+//         - ExternalString
+//           - ExternalAsciiString
+//           - ExternalTwoByteString
+//       - HeapNumber
+//       - Code
+//       - Map
+//       - Oddball
+//       - Proxy
+//       - SharedFunctionInfo
+//       - Struct
+//         - AccessorInfo
+//         - AccessCheckInfo
+//         - InterceptorInfo
+//         - CallHandlerInfo
+//         - TemplateInfo
+//           - FunctionTemplateInfo
+//           - ObjectTemplateInfo
+//         - Script
+//         - SignatureInfo
+//         - TypeSwitchInfo
+//         - DebugInfo
+//         - BreakPointInfo
+//
+// Formats of Object*:
+//  Smi:        [31 bit signed int] 0
+//  HeapObject: [32 bit direct pointer] (4 byte aligned) | 01
+//  Failure:    [30 bit signed int] 11
+
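+// As an illustrative sketch (not part of the original layout description):
+// the integer 3 stored as a Smi is the word (3 << 1) == 0x6 (low bit 0),
+// a HeapObject at the 4-byte-aligned address 0x2a84 is represented by the
+// word 0x2a85 (low bits 01), and every Failure word has its low two bits
+// set to 11.
+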
+// Ecma-262 3rd 8.6.1
+enum PropertyAttributes {
+  NONE              = v8::None,
+  READ_ONLY         = v8::ReadOnly,
+  DONT_ENUM         = v8::DontEnum,
+  DONT_DELETE       = v8::DontDelete,
+  ABSENT            = 16  // Used in runtime to indicate a property is absent.
+  // ABSENT can never be stored in or returned from a descriptor's attributes
+  // bitfield.  It is only used as a return value meaning the attributes of
+  // a non-existent property.
+};
+
+namespace v8 {
+namespace internal {
+
+
+// PropertyDetails captures type and attributes for a property.
+// They are used both in property dictionaries and instance descriptors.
+class PropertyDetails BASE_EMBEDDED {
+ public:
+
+  PropertyDetails(PropertyAttributes attributes,
+                  PropertyType type,
+                  int index = 0) {
+    ASSERT(TypeField::is_valid(type));
+    ASSERT(AttributesField::is_valid(attributes));
+    ASSERT(IndexField::is_valid(index));
+
+    value_ = TypeField::encode(type)
+        | AttributesField::encode(attributes)
+        | IndexField::encode(index);
+
+    ASSERT(type == this->type());
+    ASSERT(attributes == this->attributes());
+    ASSERT(index == this->index());
+  }
+
+  // Conversion for storing details as Object*.
+  inline PropertyDetails(Smi* smi);
+  inline Smi* AsSmi();
+
+  PropertyType type() { return TypeField::decode(value_); }
+
+  bool IsTransition() {
+    PropertyType t = type();
+    ASSERT(t != INTERCEPTOR);
+    return t == MAP_TRANSITION || t == CONSTANT_TRANSITION;
+  }
+
+  bool IsProperty() {
+    return type() < FIRST_PHANTOM_PROPERTY_TYPE;
+  }
+
+  PropertyAttributes attributes() { return AttributesField::decode(value_); }
+
+  int index() { return IndexField::decode(value_); }
+
+  inline PropertyDetails AsDeleted();
+
+  static bool IsValidIndex(int index) { return IndexField::is_valid(index); }
+
+  bool IsReadOnly() { return (attributes() & READ_ONLY) != 0; }
+  bool IsDontDelete() { return (attributes() & DONT_DELETE) != 0; }
+  bool IsDontEnum() { return (attributes() & DONT_ENUM) != 0; }
+  bool IsDeleted() { return DeletedField::decode(value_) != 0;}
+
+  // Bit fields in value_ (type, shift, size). Must be public so the
+  // constants can be embedded in generated code.
+  class TypeField:       public BitField<PropertyType,       0, 3> {};
+  class AttributesField: public BitField<PropertyAttributes, 3, 3> {};
+  class DeletedField:    public BitField<uint32_t,           6, 1> {};
+  class IndexField:      public BitField<uint32_t,           7, 31-7> {};
+
+  static const int kInitialIndex = 1;
+ private:
+  uint32_t value_;
+};
+
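+// For illustration (a sketch, not part of the original interface): a
+// property with attributes DONT_ENUM, type NORMAL and enumeration index 5
+// packs its details word as
+//   value_ = TypeField::encode(NORMAL)            // bits 0..2
+//          | AttributesField::encode(DONT_ENUM)   // bits 3..5
+//          | IndexField::encode(5);               // 5 << 7, bits 7..30
+// and the result can be round-tripped through a Smi with AsSmi() and the
+// PropertyDetails(Smi*) constructor.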
+
+// Setter that skips the write barrier if mode is SKIP_WRITE_BARRIER.
+enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };
+
+
+// PropertyNormalizationMode is used to specify whether to keep
+// inobject properties when normalizing properties of a JSObject.
+enum PropertyNormalizationMode {
+  CLEAR_INOBJECT_PROPERTIES,
+  KEEP_INOBJECT_PROPERTIES
+};
+
+
+// All Maps have a field instance_type containing an InstanceType.
+// It describes the type of the instances.
+//
+// As an example, a JavaScript object is a heap object and its map
+// instance_type is JS_OBJECT_TYPE.
+//
+// The names of the string instance types are intended to systematically
+// mirror their encoding in the instance_type field of the map.  The length
+// (SHORT, MEDIUM, or LONG) is always mentioned.  The default encoding is
+// considered TWO_BYTE.  It is not mentioned in the name.  ASCII encoding is
+// mentioned explicitly in the name.  Likewise, the default representation is
+// considered sequential.  It is not mentioned in the name.  The other
+// representations (e.g., CONS, SLICED, EXTERNAL) are explicitly mentioned.
+// Finally, the string is either a SYMBOL_TYPE (if it is a symbol) or a
+// STRING_TYPE (if it is not a symbol).
+//
+// NOTE: The following depend on the string types having instance_types
+// that are less than those of all other types:
+// HeapObject::Size, HeapObject::IterateBody, the typeof operator, and
+// Object::IsString.
+//
+// NOTE: Everything following JS_VALUE_TYPE is considered a
+// JSObject for GC purposes. The first four entries here have typeof
+// 'object', whereas JS_FUNCTION_TYPE has typeof 'function'.
+#define INSTANCE_TYPE_LIST_ALL(V)               \
+  V(SHORT_SYMBOL_TYPE)                          \
+  V(MEDIUM_SYMBOL_TYPE)                         \
+  V(LONG_SYMBOL_TYPE)                           \
+  V(SHORT_ASCII_SYMBOL_TYPE)                    \
+  V(MEDIUM_ASCII_SYMBOL_TYPE)                   \
+  V(LONG_ASCII_SYMBOL_TYPE)                     \
+  V(SHORT_CONS_SYMBOL_TYPE)                     \
+  V(MEDIUM_CONS_SYMBOL_TYPE)                    \
+  V(LONG_CONS_SYMBOL_TYPE)                      \
+  V(SHORT_CONS_ASCII_SYMBOL_TYPE)               \
+  V(MEDIUM_CONS_ASCII_SYMBOL_TYPE)              \
+  V(LONG_CONS_ASCII_SYMBOL_TYPE)                \
+  V(SHORT_SLICED_SYMBOL_TYPE)                   \
+  V(MEDIUM_SLICED_SYMBOL_TYPE)                  \
+  V(LONG_SLICED_SYMBOL_TYPE)                    \
+  V(SHORT_SLICED_ASCII_SYMBOL_TYPE)             \
+  V(MEDIUM_SLICED_ASCII_SYMBOL_TYPE)            \
+  V(LONG_SLICED_ASCII_SYMBOL_TYPE)              \
+  V(SHORT_EXTERNAL_SYMBOL_TYPE)                 \
+  V(MEDIUM_EXTERNAL_SYMBOL_TYPE)                \
+  V(LONG_EXTERNAL_SYMBOL_TYPE)                  \
+  V(SHORT_EXTERNAL_ASCII_SYMBOL_TYPE)           \
+  V(MEDIUM_EXTERNAL_ASCII_SYMBOL_TYPE)          \
+  V(LONG_EXTERNAL_ASCII_SYMBOL_TYPE)            \
+  V(SHORT_STRING_TYPE)                          \
+  V(MEDIUM_STRING_TYPE)                         \
+  V(LONG_STRING_TYPE)                           \
+  V(SHORT_ASCII_STRING_TYPE)                    \
+  V(MEDIUM_ASCII_STRING_TYPE)                   \
+  V(LONG_ASCII_STRING_TYPE)                     \
+  V(SHORT_CONS_STRING_TYPE)                     \
+  V(MEDIUM_CONS_STRING_TYPE)                    \
+  V(LONG_CONS_STRING_TYPE)                      \
+  V(SHORT_CONS_ASCII_STRING_TYPE)               \
+  V(MEDIUM_CONS_ASCII_STRING_TYPE)              \
+  V(LONG_CONS_ASCII_STRING_TYPE)                \
+  V(SHORT_SLICED_STRING_TYPE)                   \
+  V(MEDIUM_SLICED_STRING_TYPE)                  \
+  V(LONG_SLICED_STRING_TYPE)                    \
+  V(SHORT_SLICED_ASCII_STRING_TYPE)             \
+  V(MEDIUM_SLICED_ASCII_STRING_TYPE)            \
+  V(LONG_SLICED_ASCII_STRING_TYPE)              \
+  V(SHORT_EXTERNAL_STRING_TYPE)                 \
+  V(MEDIUM_EXTERNAL_STRING_TYPE)                \
+  V(LONG_EXTERNAL_STRING_TYPE)                  \
+  V(SHORT_EXTERNAL_ASCII_STRING_TYPE)           \
+  V(MEDIUM_EXTERNAL_ASCII_STRING_TYPE)          \
+  V(LONG_EXTERNAL_ASCII_STRING_TYPE)            \
+  V(LONG_PRIVATE_EXTERNAL_ASCII_STRING_TYPE)    \
+                                                \
+  V(MAP_TYPE)                                   \
+  V(HEAP_NUMBER_TYPE)                           \
+  V(FIXED_ARRAY_TYPE)                           \
+  V(CODE_TYPE)                                  \
+  V(JS_GLOBAL_PROPERTY_CELL_TYPE)               \
+  V(ODDBALL_TYPE)                               \
+  V(PROXY_TYPE)                                 \
+  V(BYTE_ARRAY_TYPE)                            \
+  V(PIXEL_ARRAY_TYPE)                           \
+  V(FILLER_TYPE)                                \
+                                                \
+  V(ACCESSOR_INFO_TYPE)                         \
+  V(ACCESS_CHECK_INFO_TYPE)                     \
+  V(INTERCEPTOR_INFO_TYPE)                      \
+  V(SHARED_FUNCTION_INFO_TYPE)                  \
+  V(CALL_HANDLER_INFO_TYPE)                     \
+  V(FUNCTION_TEMPLATE_INFO_TYPE)                \
+  V(OBJECT_TEMPLATE_INFO_TYPE)                  \
+  V(SIGNATURE_INFO_TYPE)                        \
+  V(TYPE_SWITCH_INFO_TYPE)                      \
+  V(SCRIPT_TYPE)                                \
+                                                \
+  V(JS_VALUE_TYPE)                              \
+  V(JS_OBJECT_TYPE)                             \
+  V(JS_CONTEXT_EXTENSION_OBJECT_TYPE)           \
+  V(JS_GLOBAL_OBJECT_TYPE)                      \
+  V(JS_BUILTINS_OBJECT_TYPE)                    \
+  V(JS_GLOBAL_PROXY_TYPE)                       \
+  V(JS_ARRAY_TYPE)                              \
+  V(JS_REGEXP_TYPE)                             \
+                                                \
+  V(JS_FUNCTION_TYPE)                           \
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+#define INSTANCE_TYPE_LIST_DEBUGGER(V)          \
+  V(DEBUG_INFO_TYPE)                            \
+  V(BREAK_POINT_INFO_TYPE)
+#else
+#define INSTANCE_TYPE_LIST_DEBUGGER(V)
+#endif
+
+#define INSTANCE_TYPE_LIST(V)                   \
+  INSTANCE_TYPE_LIST_ALL(V)                     \
+  INSTANCE_TYPE_LIST_DEBUGGER(V)
+
+
+// Since string types are not consecutive, this macro is used to
+// iterate over them.
+#define STRING_TYPE_LIST(V)                                                    \
+  V(SHORT_SYMBOL_TYPE,                                                         \
+    SeqTwoByteString::kAlignedSize,                                            \
+    short_symbol,                                                              \
+    ShortSymbol)                                                               \
+  V(MEDIUM_SYMBOL_TYPE,                                                        \
+    SeqTwoByteString::kAlignedSize,                                            \
+    medium_symbol,                                                             \
+    MediumSymbol)                                                              \
+  V(LONG_SYMBOL_TYPE,                                                          \
+    SeqTwoByteString::kAlignedSize,                                            \
+    long_symbol,                                                               \
+    LongSymbol)                                                                \
+  V(SHORT_ASCII_SYMBOL_TYPE,                                                   \
+    SeqAsciiString::kAlignedSize,                                              \
+    short_ascii_symbol,                                                        \
+    ShortAsciiSymbol)                                                          \
+  V(MEDIUM_ASCII_SYMBOL_TYPE,                                                  \
+    SeqAsciiString::kAlignedSize,                                              \
+    medium_ascii_symbol,                                                       \
+    MediumAsciiSymbol)                                                         \
+  V(LONG_ASCII_SYMBOL_TYPE,                                                    \
+    SeqAsciiString::kAlignedSize,                                              \
+    long_ascii_symbol,                                                         \
+    LongAsciiSymbol)                                                           \
+  V(SHORT_CONS_SYMBOL_TYPE,                                                    \
+    ConsString::kSize,                                                         \
+    short_cons_symbol,                                                         \
+    ShortConsSymbol)                                                           \
+  V(MEDIUM_CONS_SYMBOL_TYPE,                                                   \
+    ConsString::kSize,                                                         \
+    medium_cons_symbol,                                                        \
+    MediumConsSymbol)                                                          \
+  V(LONG_CONS_SYMBOL_TYPE,                                                     \
+    ConsString::kSize,                                                         \
+    long_cons_symbol,                                                          \
+    LongConsSymbol)                                                            \
+  V(SHORT_CONS_ASCII_SYMBOL_TYPE,                                              \
+    ConsString::kSize,                                                         \
+    short_cons_ascii_symbol,                                                   \
+    ShortConsAsciiSymbol)                                                      \
+  V(MEDIUM_CONS_ASCII_SYMBOL_TYPE,                                             \
+    ConsString::kSize,                                                         \
+    medium_cons_ascii_symbol,                                                  \
+    MediumConsAsciiSymbol)                                                     \
+  V(LONG_CONS_ASCII_SYMBOL_TYPE,                                               \
+    ConsString::kSize,                                                         \
+    long_cons_ascii_symbol,                                                    \
+    LongConsAsciiSymbol)                                                       \
+  V(SHORT_SLICED_SYMBOL_TYPE,                                                  \
+    SlicedString::kSize,                                                       \
+    short_sliced_symbol,                                                       \
+    ShortSlicedSymbol)                                                         \
+  V(MEDIUM_SLICED_SYMBOL_TYPE,                                                 \
+    SlicedString::kSize,                                                       \
+    medium_sliced_symbol,                                                      \
+    MediumSlicedSymbol)                                                        \
+  V(LONG_SLICED_SYMBOL_TYPE,                                                   \
+    SlicedString::kSize,                                                       \
+    long_sliced_symbol,                                                        \
+    LongSlicedSymbol)                                                          \
+  V(SHORT_SLICED_ASCII_SYMBOL_TYPE,                                            \
+    SlicedString::kSize,                                                       \
+    short_sliced_ascii_symbol,                                                 \
+    ShortSlicedAsciiSymbol)                                                    \
+  V(MEDIUM_SLICED_ASCII_SYMBOL_TYPE,                                           \
+    SlicedString::kSize,                                                       \
+    medium_sliced_ascii_symbol,                                                \
+    MediumSlicedAsciiSymbol)                                                   \
+  V(LONG_SLICED_ASCII_SYMBOL_TYPE,                                             \
+    SlicedString::kSize,                                                       \
+    long_sliced_ascii_symbol,                                                  \
+    LongSlicedAsciiSymbol)                                                     \
+  V(SHORT_EXTERNAL_SYMBOL_TYPE,                                                \
+    ExternalTwoByteString::kSize,                                              \
+    short_external_symbol,                                                     \
+    ShortExternalSymbol)                                                       \
+  V(MEDIUM_EXTERNAL_SYMBOL_TYPE,                                               \
+    ExternalTwoByteString::kSize,                                              \
+    medium_external_symbol,                                                    \
+    MediumExternalSymbol)                                                      \
+  V(LONG_EXTERNAL_SYMBOL_TYPE,                                                 \
+    ExternalTwoByteString::kSize,                                              \
+    long_external_symbol,                                                      \
+    LongExternalSymbol)                                                        \
+  V(SHORT_EXTERNAL_ASCII_SYMBOL_TYPE,                                          \
+    ExternalAsciiString::kSize,                                                \
+    short_external_ascii_symbol,                                               \
+    ShortExternalAsciiSymbol)                                                  \
+  V(MEDIUM_EXTERNAL_ASCII_SYMBOL_TYPE,                                         \
+    ExternalAsciiString::kSize,                                                \
+    medium_external_ascii_symbol,                                              \
+    MediumExternalAsciiSymbol)                                                 \
+  V(LONG_EXTERNAL_ASCII_SYMBOL_TYPE,                                           \
+    ExternalAsciiString::kSize,                                                \
+    long_external_ascii_symbol,                                                \
+    LongExternalAsciiSymbol)                                                   \
+  V(SHORT_STRING_TYPE,                                                         \
+    SeqTwoByteString::kAlignedSize,                                            \
+    short_string,                                                              \
+    ShortString)                                                               \
+  V(MEDIUM_STRING_TYPE,                                                        \
+    SeqTwoByteString::kAlignedSize,                                            \
+    medium_string,                                                             \
+    MediumString)                                                              \
+  V(LONG_STRING_TYPE,                                                          \
+    SeqTwoByteString::kAlignedSize,                                            \
+    long_string,                                                               \
+    LongString)                                                                \
+  V(SHORT_ASCII_STRING_TYPE,                                                   \
+    SeqAsciiString::kAlignedSize,                                              \
+    short_ascii_string,                                                        \
+    ShortAsciiString)                                                          \
+  V(MEDIUM_ASCII_STRING_TYPE,                                                  \
+    SeqAsciiString::kAlignedSize,                                              \
+    medium_ascii_string,                                                       \
+    MediumAsciiString)                                                         \
+  V(LONG_ASCII_STRING_TYPE,                                                    \
+    SeqAsciiString::kAlignedSize,                                              \
+    long_ascii_string,                                                         \
+    LongAsciiString)                                                           \
+  V(SHORT_CONS_STRING_TYPE,                                                    \
+    ConsString::kSize,                                                         \
+    short_cons_string,                                                         \
+    ShortConsString)                                                           \
+  V(MEDIUM_CONS_STRING_TYPE,                                                   \
+    ConsString::kSize,                                                         \
+    medium_cons_string,                                                        \
+    MediumConsString)                                                          \
+  V(LONG_CONS_STRING_TYPE,                                                     \
+    ConsString::kSize,                                                         \
+    long_cons_string,                                                          \
+    LongConsString)                                                            \
+  V(SHORT_CONS_ASCII_STRING_TYPE,                                              \
+    ConsString::kSize,                                                         \
+    short_cons_ascii_string,                                                   \
+    ShortConsAsciiString)                                                      \
+  V(MEDIUM_CONS_ASCII_STRING_TYPE,                                             \
+    ConsString::kSize,                                                         \
+    medium_cons_ascii_string,                                                  \
+    MediumConsAsciiString)                                                     \
+  V(LONG_CONS_ASCII_STRING_TYPE,                                               \
+    ConsString::kSize,                                                         \
+    long_cons_ascii_string,                                                    \
+    LongConsAsciiString)                                                       \
+  V(SHORT_SLICED_STRING_TYPE,                                                  \
+    SlicedString::kSize,                                                       \
+    short_sliced_string,                                                       \
+    ShortSlicedString)                                                         \
+  V(MEDIUM_SLICED_STRING_TYPE,                                                 \
+    SlicedString::kSize,                                                       \
+    medium_sliced_string,                                                      \
+    MediumSlicedString)                                                        \
+  V(LONG_SLICED_STRING_TYPE,                                                   \
+    SlicedString::kSize,                                                       \
+    long_sliced_string,                                                        \
+    LongSlicedString)                                                          \
+  V(SHORT_SLICED_ASCII_STRING_TYPE,                                            \
+    SlicedString::kSize,                                                       \
+    short_sliced_ascii_string,                                                 \
+    ShortSlicedAsciiString)                                                    \
+  V(MEDIUM_SLICED_ASCII_STRING_TYPE,                                           \
+    SlicedString::kSize,                                                       \
+    medium_sliced_ascii_string,                                                \
+    MediumSlicedAsciiString)                                                   \
+  V(LONG_SLICED_ASCII_STRING_TYPE,                                             \
+    SlicedString::kSize,                                                       \
+    long_sliced_ascii_string,                                                  \
+    LongSlicedAsciiString)                                                     \
+  V(SHORT_EXTERNAL_STRING_TYPE,                                                \
+    ExternalTwoByteString::kSize,                                              \
+    short_external_string,                                                     \
+    ShortExternalString)                                                       \
+  V(MEDIUM_EXTERNAL_STRING_TYPE,                                               \
+    ExternalTwoByteString::kSize,                                              \
+    medium_external_string,                                                    \
+    MediumExternalString)                                                      \
+  V(LONG_EXTERNAL_STRING_TYPE,                                                 \
+    ExternalTwoByteString::kSize,                                              \
+    long_external_string,                                                      \
+    LongExternalString)                                                        \
+  V(SHORT_EXTERNAL_ASCII_STRING_TYPE,                                          \
+    ExternalAsciiString::kSize,                                                \
+    short_external_ascii_string,                                               \
+    ShortExternalAsciiString)                                                  \
+  V(MEDIUM_EXTERNAL_ASCII_STRING_TYPE,                                         \
+    ExternalAsciiString::kSize,                                                \
+    medium_external_ascii_string,                                              \
+    MediumExternalAsciiString)                                                 \
+  V(LONG_EXTERNAL_ASCII_STRING_TYPE,                                           \
+    ExternalAsciiString::kSize,                                                \
+    long_external_ascii_string,                                                \
+    LongExternalAsciiString)
+
+// A struct is a simple object with a set of object-valued fields.  Including
+// an object type in this list causes the compiler to generate most of the
+// boilerplate code for the class, including allocation and garbage collection
+// routines, casts and predicates.  All you need to define is the class,
+// methods and object verification routines.  Easy, no?
+//
+// Note that for subtle reasons related to the ordering or numerical values of
+// type tags, elements in this list have to be added to the INSTANCE_TYPE_LIST
+// manually.
+#define STRUCT_LIST_ALL(V)                                                \
+  V(ACCESSOR_INFO, AccessorInfo, accessor_info)                           \
+  V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info)                \
+  V(INTERCEPTOR_INFO, InterceptorInfo, interceptor_info)                  \
+  V(CALL_HANDLER_INFO, CallHandlerInfo, call_handler_info)                \
+  V(FUNCTION_TEMPLATE_INFO, FunctionTemplateInfo, function_template_info) \
+  V(OBJECT_TEMPLATE_INFO, ObjectTemplateInfo, object_template_info)       \
+  V(SIGNATURE_INFO, SignatureInfo, signature_info)                        \
+  V(TYPE_SWITCH_INFO, TypeSwitchInfo, type_switch_info)                   \
+  V(SCRIPT, Script, script)
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+#define STRUCT_LIST_DEBUGGER(V)                                           \
+  V(DEBUG_INFO, DebugInfo, debug_info)                                    \
+  V(BREAK_POINT_INFO, BreakPointInfo, break_point_info)
+#else
+#define STRUCT_LIST_DEBUGGER(V)
+#endif
+
+#define STRUCT_LIST(V)                                                    \
+  STRUCT_LIST_ALL(V)                                                      \
+  STRUCT_LIST_DEBUGGER(V)
+
+// We use the full 8 bits of the instance_type field to encode heap object
+// instance types.  The high-order bit (bit 7) is set if the object is not a
+// string, and cleared if it is a string.
+const uint32_t kIsNotStringMask = 0x80;
+const uint32_t kStringTag = 0x0;
+const uint32_t kNotStringTag = 0x80;
+
+// If bit 7 is clear, bit 5 indicates that the string is a symbol (if set) or
+// not (if cleared).
+const uint32_t kIsSymbolMask = 0x20;
+const uint32_t kNotSymbolTag = 0x0;
+const uint32_t kSymbolTag = 0x20;
+
+// If bit 7 is clear, bits 3 and 4 are the string's size (short, medium or
+// long).  These values are very special in that they are also used to shift
+// the length field to get the length, removing the hash value.  This avoids
+// using if or switch when getting the length of a string.
+const uint32_t kStringSizeMask = 0x18;
+const uint32_t kShortStringTag = 0x18;
+const uint32_t kMediumStringTag = 0x10;
+const uint32_t kLongStringTag = 0x00;
+
+// If bit 7 is clear then bit 2 indicates whether the string consists of
+// two-byte characters or one-byte characters.
+const uint32_t kStringEncodingMask = 0x4;
+const uint32_t kTwoByteStringTag = 0x0;
+const uint32_t kAsciiStringTag = 0x4;
+
+// If bit 7 is clear, the low-order 2 bits indicate the representation
+// of the string.
+const uint32_t kStringRepresentationMask = 0x03;
+enum StringRepresentationTag {
+  kSeqStringTag = 0x0,
+  kConsStringTag = 0x1,
+  kSlicedStringTag = 0x2,
+  kExternalStringTag = 0x3
+};
+
+
+// A ConsString with an empty string as the right side is a candidate
+// for being shortcut by the garbage collector unless it is a
+// symbol. It's not common to have non-flat symbols, so we do not
+// shortcut them thereby avoiding turning symbols into strings. See
+// heap.cc and mark-compact.cc.
+const uint32_t kShortcutTypeMask =
+    kIsNotStringMask |
+    kIsSymbolMask |
+    kStringRepresentationMask;
+const uint32_t kShortcutTypeTag = kConsStringTag;
+
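+// For illustration (not part of the original header): LONG_CONS_STRING_TYPE
+// below is kLongStringTag | kConsStringTag == 0x01, so it satisfies
+//   (type & kIsNotStringMask) == kStringTag          // it is a string,
+//   (type & kIsSymbolMask) == kNotSymbolTag          // it is not a symbol,
+//   (type & kShortcutTypeMask) == kShortcutTypeTag   // cons representation,
+// which is exactly the combination of bits the shortcut test above describes.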
+
+enum InstanceType {
+  SHORT_SYMBOL_TYPE = kShortStringTag | kSymbolTag | kSeqStringTag,
+  MEDIUM_SYMBOL_TYPE = kMediumStringTag | kSymbolTag | kSeqStringTag,
+  LONG_SYMBOL_TYPE = kLongStringTag | kSymbolTag | kSeqStringTag,
+  SHORT_ASCII_SYMBOL_TYPE =
+      kShortStringTag | kAsciiStringTag | kSymbolTag | kSeqStringTag,
+  MEDIUM_ASCII_SYMBOL_TYPE =
+      kMediumStringTag | kAsciiStringTag | kSymbolTag | kSeqStringTag,
+  LONG_ASCII_SYMBOL_TYPE =
+      kLongStringTag | kAsciiStringTag | kSymbolTag | kSeqStringTag,
+  SHORT_CONS_SYMBOL_TYPE = kShortStringTag | kSymbolTag | kConsStringTag,
+  MEDIUM_CONS_SYMBOL_TYPE = kMediumStringTag | kSymbolTag | kConsStringTag,
+  LONG_CONS_SYMBOL_TYPE = kLongStringTag | kSymbolTag | kConsStringTag,
+  SHORT_CONS_ASCII_SYMBOL_TYPE =
+      kShortStringTag | kAsciiStringTag | kSymbolTag | kConsStringTag,
+  MEDIUM_CONS_ASCII_SYMBOL_TYPE =
+      kMediumStringTag | kAsciiStringTag | kSymbolTag | kConsStringTag,
+  LONG_CONS_ASCII_SYMBOL_TYPE =
+      kLongStringTag | kAsciiStringTag | kSymbolTag | kConsStringTag,
+  SHORT_SLICED_SYMBOL_TYPE = kShortStringTag | kSymbolTag | kSlicedStringTag,
+  MEDIUM_SLICED_SYMBOL_TYPE = kMediumStringTag | kSymbolTag | kSlicedStringTag,
+  LONG_SLICED_SYMBOL_TYPE = kLongStringTag | kSymbolTag | kSlicedStringTag,
+  SHORT_SLICED_ASCII_SYMBOL_TYPE =
+      kShortStringTag | kAsciiStringTag | kSymbolTag | kSlicedStringTag,
+  MEDIUM_SLICED_ASCII_SYMBOL_TYPE =
+      kMediumStringTag | kAsciiStringTag | kSymbolTag | kSlicedStringTag,
+  LONG_SLICED_ASCII_SYMBOL_TYPE =
+      kLongStringTag | kAsciiStringTag | kSymbolTag | kSlicedStringTag,
+  SHORT_EXTERNAL_SYMBOL_TYPE =
+      kShortStringTag | kSymbolTag | kExternalStringTag,
+  MEDIUM_EXTERNAL_SYMBOL_TYPE =
+      kMediumStringTag | kSymbolTag | kExternalStringTag,
+  LONG_EXTERNAL_SYMBOL_TYPE = kLongStringTag | kSymbolTag | kExternalStringTag,
+  SHORT_EXTERNAL_ASCII_SYMBOL_TYPE =
+      kShortStringTag | kAsciiStringTag | kSymbolTag | kExternalStringTag,
+  MEDIUM_EXTERNAL_ASCII_SYMBOL_TYPE =
+      kMediumStringTag | kAsciiStringTag | kSymbolTag | kExternalStringTag,
+  LONG_EXTERNAL_ASCII_SYMBOL_TYPE =
+      kLongStringTag | kAsciiStringTag | kSymbolTag | kExternalStringTag,
+  SHORT_STRING_TYPE = kShortStringTag | kSeqStringTag,
+  MEDIUM_STRING_TYPE = kMediumStringTag | kSeqStringTag,
+  LONG_STRING_TYPE = kLongStringTag | kSeqStringTag,
+  SHORT_ASCII_STRING_TYPE = kShortStringTag | kAsciiStringTag | kSeqStringTag,
+  MEDIUM_ASCII_STRING_TYPE = kMediumStringTag | kAsciiStringTag | kSeqStringTag,
+  LONG_ASCII_STRING_TYPE = kLongStringTag | kAsciiStringTag | kSeqStringTag,
+  SHORT_CONS_STRING_TYPE = kShortStringTag | kConsStringTag,
+  MEDIUM_CONS_STRING_TYPE = kMediumStringTag | kConsStringTag,
+  LONG_CONS_STRING_TYPE = kLongStringTag | kConsStringTag,
+  SHORT_CONS_ASCII_STRING_TYPE =
+      kShortStringTag | kAsciiStringTag | kConsStringTag,
+  MEDIUM_CONS_ASCII_STRING_TYPE =
+      kMediumStringTag | kAsciiStringTag | kConsStringTag,
+  LONG_CONS_ASCII_STRING_TYPE =
+      kLongStringTag | kAsciiStringTag | kConsStringTag,
+  SHORT_SLICED_STRING_TYPE = kShortStringTag | kSlicedStringTag,
+  MEDIUM_SLICED_STRING_TYPE = kMediumStringTag | kSlicedStringTag,
+  LONG_SLICED_STRING_TYPE = kLongStringTag | kSlicedStringTag,
+  SHORT_SLICED_ASCII_STRING_TYPE =
+      kShortStringTag | kAsciiStringTag | kSlicedStringTag,
+  MEDIUM_SLICED_ASCII_STRING_TYPE =
+      kMediumStringTag | kAsciiStringTag | kSlicedStringTag,
+  LONG_SLICED_ASCII_STRING_TYPE =
+      kLongStringTag | kAsciiStringTag | kSlicedStringTag,
+  SHORT_EXTERNAL_STRING_TYPE = kShortStringTag | kExternalStringTag,
+  MEDIUM_EXTERNAL_STRING_TYPE = kMediumStringTag | kExternalStringTag,
+  LONG_EXTERNAL_STRING_TYPE = kLongStringTag | kExternalStringTag,
+  SHORT_EXTERNAL_ASCII_STRING_TYPE =
+      kShortStringTag | kAsciiStringTag | kExternalStringTag,
+  MEDIUM_EXTERNAL_ASCII_STRING_TYPE =
+      kMediumStringTag | kAsciiStringTag | kExternalStringTag,
+  LONG_EXTERNAL_ASCII_STRING_TYPE =
+      kLongStringTag | kAsciiStringTag | kExternalStringTag,
+  LONG_PRIVATE_EXTERNAL_ASCII_STRING_TYPE = LONG_EXTERNAL_ASCII_STRING_TYPE,
+
+  MAP_TYPE = kNotStringTag,
+  HEAP_NUMBER_TYPE,
+  FIXED_ARRAY_TYPE,
+  CODE_TYPE,
+  ODDBALL_TYPE,
+  JS_GLOBAL_PROPERTY_CELL_TYPE,
+  PROXY_TYPE,
+  BYTE_ARRAY_TYPE,
+  PIXEL_ARRAY_TYPE,
+  FILLER_TYPE,
+  SMI_TYPE,
+
+  ACCESSOR_INFO_TYPE,
+  ACCESS_CHECK_INFO_TYPE,
+  INTERCEPTOR_INFO_TYPE,
+  SHARED_FUNCTION_INFO_TYPE,
+  CALL_HANDLER_INFO_TYPE,
+  FUNCTION_TEMPLATE_INFO_TYPE,
+  OBJECT_TEMPLATE_INFO_TYPE,
+  SIGNATURE_INFO_TYPE,
+  TYPE_SWITCH_INFO_TYPE,
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  DEBUG_INFO_TYPE,
+  BREAK_POINT_INFO_TYPE,
+#endif
+  SCRIPT_TYPE,
+
+  JS_VALUE_TYPE,
+  JS_OBJECT_TYPE,
+  JS_CONTEXT_EXTENSION_OBJECT_TYPE,
+  JS_GLOBAL_OBJECT_TYPE,
+  JS_BUILTINS_OBJECT_TYPE,
+  JS_GLOBAL_PROXY_TYPE,
+  JS_ARRAY_TYPE,
+  JS_REGEXP_TYPE,
+
+  JS_FUNCTION_TYPE,
+
+  // Pseudo-types
+  FIRST_NONSTRING_TYPE = MAP_TYPE,
+  FIRST_TYPE = 0x0,
+  INVALID_TYPE = FIRST_TYPE - 1,
+  LAST_TYPE = JS_FUNCTION_TYPE,
+  // Boundaries for testing the type is a JavaScript "object".  Note that
+  // function objects are not counted as objects, even though they are
+  // implemented as such; only values whose typeof is "object" are included.
+  FIRST_JS_OBJECT_TYPE = JS_VALUE_TYPE,
+  LAST_JS_OBJECT_TYPE = JS_REGEXP_TYPE
+};
+
+
+enum CompareResult {
+  LESS      = -1,
+  EQUAL     =  0,
+  GREATER   =  1,
+
+  NOT_EQUAL = GREATER
+};
+
+
+#define DECL_BOOLEAN_ACCESSORS(name)   \
+  inline bool name();                  \
+  inline void set_##name(bool value);  \
+
+
+#define DECL_ACCESSORS(name, type)                                      \
+  inline type* name();                                                  \
+  inline void set_##name(type* value,                                   \
+                         WriteBarrierMode mode = UPDATE_WRITE_BARRIER); \
+
+
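+// For illustration (foo is a hypothetical name, not a field in this header):
+//   DECL_ACCESSORS(foo, Object)
+// expands to
+//   inline Object* foo();
+//   inline void set_foo(Object* value,
+//                       WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+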
+class StringStream;
+class ObjectVisitor;
+
+struct ValueInfo : public Malloced {
+  ValueInfo() : type(FIRST_TYPE), ptr(NULL), str(NULL), number(0) { }
+  InstanceType type;
+  Object* ptr;
+  const char* str;
+  double number;
+};
+
+
+// A template-ized version of the IsXXX functions.
+template <class C> static inline bool Is(Object* obj);
+
+
+// Object is the abstract superclass for all classes in the
+// object hierarchy.
+// Object does not use any virtual functions to avoid the
+// allocation of the C++ vtable.
+// Since Smi and Failure are subclasses of Object, no
+// data members can be present in Object.
+class Object BASE_EMBEDDED {
+ public:
+  // Type testing.
+  inline bool IsSmi();
+  inline bool IsHeapObject();
+  inline bool IsHeapNumber();
+  inline bool IsString();
+  inline bool IsSymbol();
+#ifdef DEBUG
+  // See objects-inl.h for more details
+  inline bool IsSeqString();
+  inline bool IsSlicedString();
+  inline bool IsExternalString();
+  inline bool IsExternalTwoByteString();
+  inline bool IsExternalAsciiString();
+  inline bool IsSeqTwoByteString();
+  inline bool IsSeqAsciiString();
+#endif  // DEBUG
+  inline bool IsConsString();
+
+  inline bool IsNumber();
+  inline bool IsByteArray();
+  inline bool IsPixelArray();
+  inline bool IsFailure();
+  inline bool IsRetryAfterGC();
+  inline bool IsOutOfMemoryFailure();
+  inline bool IsException();
+  inline bool IsJSObject();
+  inline bool IsJSContextExtensionObject();
+  inline bool IsMap();
+  inline bool IsFixedArray();
+  inline bool IsDescriptorArray();
+  inline bool IsContext();
+  inline bool IsCatchContext();
+  inline bool IsGlobalContext();
+  inline bool IsJSFunction();
+  inline bool IsCode();
+  inline bool IsOddball();
+  inline bool IsSharedFunctionInfo();
+  inline bool IsJSValue();
+  inline bool IsStringWrapper();
+  inline bool IsProxy();
+  inline bool IsBoolean();
+  inline bool IsJSArray();
+  inline bool IsJSRegExp();
+  inline bool IsHashTable();
+  inline bool IsDictionary();
+  inline bool IsSymbolTable();
+  inline bool IsCompilationCacheTable();
+  inline bool IsMapCache();
+  inline bool IsPrimitive();
+  inline bool IsGlobalObject();
+  inline bool IsJSGlobalObject();
+  inline bool IsJSBuiltinsObject();
+  inline bool IsJSGlobalProxy();
+  inline bool IsUndetectableObject();
+  inline bool IsAccessCheckNeeded();
+  inline bool IsJSGlobalPropertyCell();
+
+  // Returns true if this object is an instance of the specified
+  // function template.
+  inline bool IsInstanceOf(FunctionTemplateInfo* type);
+
+  inline bool IsStruct();
+#define DECLARE_STRUCT_PREDICATE(NAME, Name, name) inline bool Is##Name();
+  STRUCT_LIST(DECLARE_STRUCT_PREDICATE)
+#undef DECLARE_STRUCT_PREDICATE
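+  // For illustration (not spelled out in the original): applied to the
+  // SCRIPT entry of STRUCT_LIST, the predicate macro above declares
+  //   inline bool IsScript();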
+
+  // Oddball testing.
+  INLINE(bool IsUndefined());
+  INLINE(bool IsTheHole());
+  INLINE(bool IsNull());
+  INLINE(bool IsTrue());
+  INLINE(bool IsFalse());
+
+  // Extract the number.
+  inline double Number();
+
+  inline bool HasSpecificClassOf(String* name);
+
+  Object* ToObject();             // ECMA-262 9.9.
+  Object* ToBoolean();            // ECMA-262 9.2.
+
+  // Convert to a JSObject if needed.
+  // global_context is used when creating wrapper object.
+  Object* ToObject(Context* global_context);
+
+  // Converts this to a Smi if possible.
+  // Failure is returned otherwise.
+  inline Object* ToSmi();
+
+  void Lookup(String* name, LookupResult* result);
+
+  // Property access.
+  inline Object* GetProperty(String* key);
+  inline Object* GetProperty(String* key, PropertyAttributes* attributes);
+  Object* GetPropertyWithReceiver(Object* receiver,
+                                  String* key,
+                                  PropertyAttributes* attributes);
+  Object* GetProperty(Object* receiver,
+                      LookupResult* result,
+                      String* key,
+                      PropertyAttributes* attributes);
+  Object* GetPropertyWithCallback(Object* receiver,
+                                  Object* structure,
+                                  String* name,
+                                  Object* holder);
+  Object* GetPropertyWithDefinedGetter(Object* receiver,
+                                       JSFunction* getter);
+
+  inline Object* GetElement(uint32_t index);
+  Object* GetElementWithReceiver(Object* receiver, uint32_t index);
+
+  // Return the object's prototype (might be Heap::null_value()).
+  Object* GetPrototype();
+
+  // Returns true if this is a JSValue containing a string and the index is
+  // < the length of the string.  Used to implement [] on strings.
+  inline bool IsStringObjectWithCharacterAt(uint32_t index);
+
+#ifdef DEBUG
+  // Prints this object with details.
+  void Print();
+  void PrintLn();
+  // Verifies the object.
+  void Verify();
+
+  // Verify a pointer is a valid object pointer.
+  static void VerifyPointer(Object* p);
+#endif
+
+  // Prints this object without details.
+  void ShortPrint();
+
+  // Prints this object without details to a message accumulator.
+  void ShortPrint(StringStream* accumulator);
+
+  // Casting: This cast is only needed to satisfy macros in objects-inl.h.
+  static Object* cast(Object* value) { return value; }
+
+  // Layout description.
+  static const int kHeaderSize = 0;  // Object does not take up any space.
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Object);
+};
+
+
+// Smi represents integer Numbers that can be stored in 31 bits.
+// Smis are immediate, which means they are NOT allocated in the heap.
+// Smi stands for small integer.
+// The this pointer has the following format: [31 bit signed int] 0
+// On 64-bit, the top 32 bits of the pointer are allowed to have any
+// value.
+class Smi: public Object {
+ public:
+  // Returns the integer value.
+  inline int value();
+
+  // Convert a value to a Smi object.
+  static inline Smi* FromInt(int value);
+
+  static inline Smi* FromIntptr(intptr_t value);
+
+  // Returns whether value can be represented in a Smi.
+  static inline bool IsValid(intptr_t value);
+
+  static inline bool IsIntptrValid(intptr_t);
+
+  // Casting.
+  static inline Smi* cast(Object* object);
+
+  // Dispatched behavior.
+  void SmiPrint();
+  void SmiPrint(StringStream* accumulator);
+#ifdef DEBUG
+  void SmiVerify();
+#endif
+
+  static const int kSmiNumBits = 31;
+  // Min and max limits for Smi values.
+  static const int kMinValue = -(1 << (kSmiNumBits - 1));
+  static const int kMaxValue = (1 << (kSmiNumBits - 1)) - 1;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Smi);
+};
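+
+// Illustrative sketch (not part of the original interface): with the tagging
+// scheme above, a Smi keeps its payload in the upper 31 bits of the word and
+// leaves the low bit clear, so on a 32-bit platform:
+//
+//   Smi* three = Smi::FromInt(3);       // word value is 3 << 1
+//   int v = three->value();             // 3 again
+//   Smi::IsValid(Smi::kMaxValue);       // true
+//   Smi::IsValid(Smi::kMaxValue + 1);   // false: does not fit in 31 bits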
+
+
+// Failure is used for reporting out of memory situations and
+// propagating exceptions through the runtime system.  Failure objects
+// are transient and cannot occur as part of the object graph.
+//
+// Failures are a single word, encoded as follows:
+// +-------------------------+---+--+--+
+// |rrrrrrrrrrrrrrrrrrrrrrrrr|sss|tt|11|
+// +-------------------------+---+--+--+
+//  3                       7 6 4 32 10
+//  1
+//
+// The low two bits, 0-1, are the failure tag, 11.  The next two bits,
+// 2-3, are a failure type tag 'tt' with possible values:
+//   00 RETRY_AFTER_GC
+//   01 EXCEPTION
+//   10 INTERNAL_ERROR
+//   11 OUT_OF_MEMORY_EXCEPTION
+//
+// The next three bits, 4-6, are an allocation space tag 'sss'.  The
+// allocation space tag is 000 for all failure types except
+// RETRY_AFTER_GC.  For RETRY_AFTER_GC, the possible values are the
+// allocation spaces (the encoding is found in globals.h).
+//
+// The remaining bits 'rrr...' give the size of the allocation request in
+// units of the pointer size, and are zeroed except for RETRY_AFTER_GC
+// failures.  The 25 bits (on a 32 bit platform) give a representable
+// range of 2^27 bytes (128MB).
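+//
+// For example (illustrative only), a RETRY_AFTER_GC failure requesting 64
+// bytes from the new space would be encoded roughly as
+//
+//   requested size in words (64 / kPointerSize)  -> the 'rrr...' bits
+//   new space tag (assumed 000 here)             -> the 'sss' bits
+//   00 (RETRY_AFTER_GC)                          -> the 'tt' bits
+//   11 (failure tag)                             -> the low two bits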
+
+// Failure type tag info.
+const int kFailureTypeTagSize = 2;
+const int kFailureTypeTagMask = (1 << kFailureTypeTagSize) - 1;
+
+class Failure: public Object {
+ public:
+  // RuntimeStubs assumes EXCEPTION = 1 in the compiler-generated code.
+  enum Type {
+    RETRY_AFTER_GC = 0,
+    EXCEPTION = 1,       // Returning this marker tells that the real
+                         // exception is in Top::pending_exception.
+    INTERNAL_ERROR = 2,
+    OUT_OF_MEMORY_EXCEPTION = 3
+  };
+
+  inline Type type() const;
+
+  // Returns the space that needs to be collected for RetryAfterGC failures.
+  inline AllocationSpace allocation_space() const;
+
+  // Returns the number of bytes requested (up to the representable maximum)
+  // for RetryAfterGC failures.
+  inline int requested() const;
+
+  inline bool IsInternalError() const;
+  inline bool IsOutOfMemoryException() const;
+
+  static Failure* RetryAfterGC(int requested_bytes, AllocationSpace space);
+  static inline Failure* RetryAfterGC(int requested_bytes);  // NEW_SPACE
+  static inline Failure* Exception();
+  static inline Failure* InternalError();
+  static inline Failure* OutOfMemoryException();
+  // Casting.
+  static inline Failure* cast(Object* object);
+
+  // Dispatched behavior.
+  void FailurePrint();
+  void FailurePrint(StringStream* accumulator);
+#ifdef DEBUG
+  void FailureVerify();
+#endif
+
+ private:
+  inline int value() const;
+  static inline Failure* Construct(Type type, int value = 0);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Failure);
+};
+
+
+// Heap objects typically have a map pointer in their first word.  However,
+// during GC other data (eg, mark bits, forwarding addresses) is sometimes
+// encoded in the first word.  The class MapWord is an abstraction of the
+// value in a heap object's first word.
+class MapWord BASE_EMBEDDED {
+ public:
+  // Normal state: the map word contains a map pointer.
+
+  // Create a map word from a map pointer.
+  static inline MapWord FromMap(Map* map);
+
+  // View this map word as a map pointer.
+  inline Map* ToMap();
+
+
+  // Scavenge collection: the map word of live objects in the from space
+  // contains a forwarding address (a heap object pointer in the to space).
+
+  // True if this map word is a forwarding address for a scavenge
+  // collection.  Only valid during a scavenge collection (specifically,
+  // when all map words are heap object pointers, ie. not during a full GC).
+  inline bool IsForwardingAddress();
+
+  // Create a map word from a forwarding address.
+  static inline MapWord FromForwardingAddress(HeapObject* object);
+
+  // View this map word as a forwarding address.
+  inline HeapObject* ToForwardingAddress();
+
+
+  // Marking phase of full collection: the map word of live objects is
+  // marked, and may be marked as overflowed (eg, the object is live, its
+  // children have not been visited, and it does not fit in the marking
+  // stack).
+
+  // True if this map word's mark bit is set.
+  inline bool IsMarked();
+
+  // Mark this map word.
+  inline void SetMark();
+
+  // Remove the mark from this map word.
+  inline void ClearMark();
+
+  // True if this map word's overflow bit is set.
+  inline bool IsOverflowed();
+
+  // Mark this map word as overflowed.
+  inline void SetOverflow();
+
+  // Remove the overflow marking from this map word.
+  inline void ClearOverflow();
+
+
+  // Compacting phase of a full compacting collection: the map word of live
+  // objects contains an encoding of the original map address along with the
+  // forwarding address (represented as an offset from the first live object
+  // in the same page as the (old) object address).
+
+  // Create a map word from a map address and a forwarding address offset.
+  static inline MapWord EncodeAddress(Address map_address, int offset);
+
+  // Return the map address encoded in this map word.
+  inline Address DecodeMapAddress(MapSpace* map_space);
+
+  // Return the forwarding offset encoded in this map word.
+  inline int DecodeOffset();
+
+
+  // During serialization: the map word is used to hold an encoded
+  // address, and possibly a mark bit (set and cleared with SetMark
+  // and ClearMark).
+
+  // Create a map word from an encoded address.
+  static inline MapWord FromEncodedAddress(Address address);
+
+  inline Address ToEncodedAddress();
+
+  // Bits used by the marking phase of the garbage collector.
+  //
+  // The first word of a heap object is normally a map pointer. The last two
+  // bits are tagged as '01' (kHeapObjectTag). We reuse the last two bits to
+  // mark an object as live and/or overflowed:
+  //   last bit = 0, marked as alive
+  //   second bit = 1, overflowed
+  // An object is only marked as overflowed when it is marked as live while
+  // the marking stack is overflowed.
+  static const int kMarkingBit = 0;  // marking bit
+  static const int kMarkingMask = (1 << kMarkingBit);  // marking mask
+  static const int kOverflowBit = 1;  // overflow bit
+  static const int kOverflowMask = (1 << kOverflowBit);  // overflow mask
+
+  // Forwarding pointers and map pointer encoding
+  //  31             21 20              10 9               0
+  // +-----------------+------------------+-----------------+
+  // |forwarding offset|page offset of map|page index of map|
+  // +-----------------+------------------+-----------------+
+  //  11 bits           11 bits            10 bits
+  static const int kMapPageIndexBits = 10;
+  static const int kMapPageOffsetBits = 11;
+  static const int kForwardingOffsetBits = 11;
+
+  static const int kMapPageIndexShift = 0;
+  static const int kMapPageOffsetShift =
+      kMapPageIndexShift + kMapPageIndexBits;
+  static const int kForwardingOffsetShift =
+      kMapPageOffsetShift + kMapPageOffsetBits;
+
+  // 0x000003FF
+  static const uint32_t kMapPageIndexMask =
+      (1 << kMapPageOffsetShift) - 1;
+
+  // 0x001FFC00
+  static const uint32_t kMapPageOffsetMask =
+      ((1 << kForwardingOffsetShift) - 1) & ~kMapPageIndexMask;
+
+  // 0xFFE00000
+  static const uint32_t kForwardingOffsetMask =
+      ~(kMapPageIndexMask | kMapPageOffsetMask);
+
+ private:
+  // HeapObject calls the private constructor and directly reads the value.
+  friend class HeapObject;
+
+  explicit MapWord(uintptr_t value) : value_(value) {}
+
+  uintptr_t value_;
+};
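+
+// Illustrative sketch (not part of the original interface): during compaction
+// a map at page index 3 with page offset 0x120, forwarded by an offset of 40,
+// would be packed according to the shifts above as
+//
+//   word = (40 << kForwardingOffsetShift)
+//        | (0x120 << kMapPageOffsetShift)
+//        | (3 << kMapPageIndexShift);
+//
+// and taken apart again by DecodeMapAddress() and DecodeOffset().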
+
+
+// HeapObject is the superclass for all classes describing heap allocated
+// objects.
+class HeapObject: public Object {
+ public:
+  // [map]: Contains a map which contains the object's reflective
+  // information.
+  inline Map* map();
+  inline void set_map(Map* value);
+
+  // During garbage collection, the map word of a heap object does not
+  // necessarily contain a map pointer.
+  inline MapWord map_word();
+  inline void set_map_word(MapWord map_word);
+
+  // Converts an address to a HeapObject pointer.
+  static inline HeapObject* FromAddress(Address address);
+
+  // Returns the address of this HeapObject.
+  inline Address address();
+
+  // Iterates over pointers contained in the object (including the Map)
+  void Iterate(ObjectVisitor* v);
+
+  // Iterates over all pointers contained in the object except the
+  // first map pointer.  The object type is given in the first
+  // parameter. This function does not access the map pointer in the
+  // object, and so is safe to call while the map pointer is modified.
+  void IterateBody(InstanceType type, int object_size, ObjectVisitor* v);
+
+  // This method only applies to struct objects.  Iterates over all the fields
+  // of this struct.
+  void IterateStructBody(int object_size, ObjectVisitor* v);
+
+  // Returns the heap object's size in bytes
+  inline int Size();
+
+  // Given a heap object's map pointer, returns the object's size in bytes.
+  // Useful when the map pointer field is used for other purposes.
+  // GC internal.
+  inline int SizeFromMap(Map* map);
+
+  // Support for the marking heap objects during the marking phase of GC.
+  // True if the object is marked live.
+  inline bool IsMarked();
+
+  // Mutate this object's map pointer to indicate that the object is live.
+  inline void SetMark();
+
+  // Mutate this object's map pointer to remove the indication that the
+  // object is live (ie, partially restore the map pointer).
+  inline void ClearMark();
+
+  // True if this object is marked as overflowed.  Overflowed objects have
+  // been reached and marked during marking of the heap, but their children
+  // have not necessarily been marked and they have not been pushed on the
+  // marking stack.
+  inline bool IsOverflowed();
+
+  // Mutate this object's map pointer to indicate that the object is
+  // overflowed.
+  inline void SetOverflow();
+
+  // Mutate this object's map pointer to remove the indication that the
+  // object is overflowed (ie, partially restore the map pointer).
+  inline void ClearOverflow();
+
+  // Returns the field at offset in obj, as a read/write Object* reference.
+  // Does no checking, and is safe to use during GC, while maps are invalid.
+  // Does not update remembered sets, so should only be assigned to
+  // during marking GC.
+  static inline Object** RawField(HeapObject* obj, int offset);
+
+  // Casting.
+  static inline HeapObject* cast(Object* obj);
+
+  // Return the write barrier mode for this.
+  inline WriteBarrierMode GetWriteBarrierMode();
+
+  // Dispatched behavior.
+  void HeapObjectShortPrint(StringStream* accumulator);
+#ifdef DEBUG
+  void HeapObjectPrint();
+  void HeapObjectVerify();
+  inline void VerifyObjectField(int offset);
+
+  void PrintHeader(const char* id);
+
+  // Verify a pointer is a valid HeapObject pointer that points to object
+  // areas in the heap.
+  static void VerifyHeapPointer(Object* p);
+#endif
+
+  // Layout description.
+  // First field in a heap object is map.
+  static const int kMapOffset = Object::kHeaderSize;
+  static const int kHeaderSize = kMapOffset + kPointerSize;
+
+  STATIC_CHECK(kMapOffset == Internals::kHeapObjectMapOffset);
+
+ protected:
+  // Helpers for calling an ObjectVisitor to iterate over pointers in the
+  // half-open range [start, end) specified as integer offsets.
+  inline void IteratePointers(ObjectVisitor* v, int start, int end);
+  // As above, for the single element at "offset".
+  inline void IteratePointer(ObjectVisitor* v, int offset);
+
+  // Computes the object size from the map.
+  // Should only be used from SizeFromMap.
+  int SlowSizeFromMap(Map* map);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(HeapObject);
+};
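+
+// Illustrative sketch (not part of the original interface): HeapObject
+// pointers carry the '01' tag described above, and FromAddress()/address()
+// convert between the tagged pointer and the untagged start address:
+//
+//   HeapObject* obj = HeapObject::FromAddress(addr);   // addr is untagged
+//   ASSERT(obj->address() == addr);
+//   ASSERT(obj->Size() == obj->SizeFromMap(obj->map()));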
+
+
+// The HeapNumber class describes heap allocated numbers that cannot be
+// represented in a Smi (small integer)
+class HeapNumber: public HeapObject {
+ public:
+  // [value]: number value.
+  inline double value();
+  inline void set_value(double value);
+
+  // Casting.
+  static inline HeapNumber* cast(Object* obj);
+
+  // Dispatched behavior.
+  Object* HeapNumberToBoolean();
+  void HeapNumberPrint();
+  void HeapNumberPrint(StringStream* accumulator);
+#ifdef DEBUG
+  void HeapNumberVerify();
+#endif
+
+  // Layout description.
+  static const int kValueOffset = HeapObject::kHeaderSize;
+  // IEEE doubles are two 32 bit words.  The first holds the low mantissa
+  // bits; the second is a mixture of sign, exponent and the high mantissa
+  // bits.  Our current platforms are all little endian apart from non-EABI
+  // ARM, which is little endian with big endian floating point word ordering!
+#if !defined(V8_HOST_ARCH_ARM) || __ARM_EABI__
+  static const int kMantissaOffset = kValueOffset;
+  static const int kExponentOffset = kValueOffset + 4;
+#else
+  static const int kMantissaOffset = kValueOffset + 4;
+  static const int kExponentOffset = kValueOffset;
+# define BIG_ENDIAN_FLOATING_POINT 1
+#endif
+  static const int kSize = kValueOffset + kDoubleSize;
+
+  static const uint32_t kSignMask = 0x80000000u;
+  static const uint32_t kExponentMask = 0x7ff00000u;
+  static const uint32_t kMantissaMask = 0xfffffu;
+  static const int kExponentBias = 1023;
+  static const int kExponentShift = 20;
+  static const int kMantissaBitsInTopWord = 20;
+  static const int kNonMantissaBitsInTopWord = 12;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(HeapNumber);
+};
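+
+// Illustrative sketch (not part of the original interface), assuming a
+// little-endian platform: the 32-bit word at kExponentOffset holds the sign,
+// the exponent and the top mantissa bits, so
+//
+//   bool negative = (top_word & kSignMask) != 0;
+//   int exponent =
+//       ((top_word & kExponentMask) >> kExponentShift) - kExponentBias;
+//
+// where top_word is the word read at kExponentOffset.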
+
+
+// The JSObject describes real heap allocated JavaScript objects with
+// properties.
+// Note that the map of JSObject changes during execution to enable inline
+// caching.
+class JSObject: public HeapObject {
+ public:
+  enum DeleteMode { NORMAL_DELETION, FORCE_DELETION };
+  enum ElementsKind {
+    FAST_ELEMENTS,
+    DICTIONARY_ELEMENTS,
+    PIXEL_ELEMENTS
+  };
+
+  // [properties]: Backing storage for properties.
+  // properties is a FixedArray in the fast case, and a Dictionary in the
+  // slow case.
+  DECL_ACCESSORS(properties, FixedArray)  // Get and set fast properties.
+  inline void initialize_properties();
+  inline bool HasFastProperties();
+  inline StringDictionary* property_dictionary();  // Gets slow properties.
+
+  // [elements]: The elements (properties with names that are integers).
+  // elements is a FixedArray in the fast case, a Dictionary in the slow
+  // case, or a PixelArray in a special case.
+  DECL_ACCESSORS(elements, Array)  // Get and set fast elements.
+  inline void initialize_elements();
+  inline ElementsKind GetElementsKind();
+  inline bool HasFastElements();
+  inline bool HasDictionaryElements();
+  inline bool HasPixelElements();
+  inline NumberDictionary* element_dictionary();  // Gets slow elements.
+
+  // Collects elements starting at index 0.
+  // Undefined values are placed after non-undefined values.
+  // Returns the number of non-undefined values.
+  Object* PrepareElementsForSort(uint32_t limit);
+  // As PrepareElementsForSort, but only on objects where elements is
+  // a dictionary, and it will stay a dictionary.
+  Object* PrepareSlowElementsForSort(uint32_t limit);
+
+  Object* SetProperty(String* key,
+                      Object* value,
+                      PropertyAttributes attributes);
+  Object* SetProperty(LookupResult* result,
+                      String* key,
+                      Object* value,
+                      PropertyAttributes attributes);
+  Object* SetPropertyWithFailedAccessCheck(LookupResult* result,
+                                           String* name,
+                                           Object* value);
+  Object* SetPropertyWithCallback(Object* structure,
+                                  String* name,
+                                  Object* value,
+                                  JSObject* holder);
+  Object* SetPropertyWithDefinedSetter(JSFunction* setter,
+                                       Object* value);
+  Object* SetPropertyWithInterceptor(String* name,
+                                     Object* value,
+                                     PropertyAttributes attributes);
+  Object* SetPropertyPostInterceptor(String* name,
+                                     Object* value,
+                                     PropertyAttributes attributes);
+  Object* IgnoreAttributesAndSetLocalProperty(String* key,
+                                              Object* value,
+                                              PropertyAttributes attributes);
+
+  // Retrieve a value in a normalized object given a lookup result.
+  // Handles the special representation of JS global objects.
+  Object* GetNormalizedProperty(LookupResult* result);
+
+  // Sets the property value in a normalized object given a lookup result.
+  // Handles the special representation of JS global objects.
+  Object* SetNormalizedProperty(LookupResult* result, Object* value);
+
+  // Sets the property value in a normalized object given (key, value, details).
+  // Handles the special representation of JS global objects.
+  Object* SetNormalizedProperty(String* name,
+                                Object* value,
+                                PropertyDetails details);
+
+  // Deletes the named property in a normalized object.
+  Object* DeleteNormalizedProperty(String* name, DeleteMode mode);
+
+  // Sets a property that currently has lazy loading.
+  Object* SetLazyProperty(LookupResult* result,
+                          String* name,
+                          Object* value,
+                          PropertyAttributes attributes);
+
+  // Returns the class name ([[Class]] property in the specification).
+  String* class_name();
+
+  // Returns the constructor name (the name (possibly, inferred name) of the
+  // function that was used to instantiate the object).
+  String* constructor_name();
+
+  // Retrieve interceptors.
+  InterceptorInfo* GetNamedInterceptor();
+  InterceptorInfo* GetIndexedInterceptor();
+
+  inline PropertyAttributes GetPropertyAttribute(String* name);
+  PropertyAttributes GetPropertyAttributeWithReceiver(JSObject* receiver,
+                                                      String* name);
+  PropertyAttributes GetLocalPropertyAttribute(String* name);
+
+  Object* DefineAccessor(String* name, bool is_getter, JSFunction* fun,
+                         PropertyAttributes attributes);
+  Object* LookupAccessor(String* name, bool is_getter);
+
+  // Used from Object::GetProperty().
+  Object* GetPropertyWithFailedAccessCheck(Object* receiver,
+                                           LookupResult* result,
+                                           String* name,
+                                           PropertyAttributes* attributes);
+  Object* GetPropertyWithInterceptor(JSObject* receiver,
+                                     String* name,
+                                     PropertyAttributes* attributes);
+  Object* GetPropertyPostInterceptor(JSObject* receiver,
+                                     String* name,
+                                     PropertyAttributes* attributes);
+  Object* GetLazyProperty(Object* receiver,
+                          LookupResult* result,
+                          String* name,
+                          PropertyAttributes* attributes);
+
+  // Tells whether this object needs to be loaded.
+  inline bool IsLoaded();
+
+  // Returns true if this is an instance of an api function and has
+  // been modified since it was created.  May give false positives.
+  bool IsDirty();
+
+  bool HasProperty(String* name) {
+    return GetPropertyAttribute(name) != ABSENT;
+  }
+
+  // Can cause a GC if it hits an interceptor.
+  bool HasLocalProperty(String* name) {
+    return GetLocalPropertyAttribute(name) != ABSENT;
+  }
+
+  Object* DeleteProperty(String* name, DeleteMode mode);
+  Object* DeleteElement(uint32_t index, DeleteMode mode);
+  Object* DeleteLazyProperty(LookupResult* result,
+                             String* name,
+                             DeleteMode mode);
+
+  // Tests for the fast common case for property enumeration.
+  bool IsSimpleEnum();
+
+  // Do we want to keep the elements in fast case when increasing the
+  // capacity?
+  bool ShouldConvertToSlowElements(int new_capacity);
+  // Returns true if the backing storage for the slow-case elements of
+  // this object takes up nearly as much space as a fast-case backing
+  // storage would.  In that case the JSObject should have fast
+  // elements.
+  bool ShouldConvertToFastElements();
+
+  // Return the object's prototype (might be Heap::null_value()).
+  inline Object* GetPrototype();
+
+  // Tells whether the index'th element is present.
+  inline bool HasElement(uint32_t index);
+  bool HasElementWithReceiver(JSObject* receiver, uint32_t index);
+  bool HasLocalElement(uint32_t index);
+
+  bool HasElementWithInterceptor(JSObject* receiver, uint32_t index);
+  bool HasElementPostInterceptor(JSObject* receiver, uint32_t index);
+
+  Object* SetFastElement(uint32_t index, Object* value);
+
+  // Set the index'th array element.
+  // A Failure object is returned if GC is needed.
+  Object* SetElement(uint32_t index, Object* value);
+
+  // Returns the index'th element, or the undefined object if index is out
+  // of bounds.
+  Object* GetElementWithReceiver(JSObject* receiver, uint32_t index);
+
+  void SetFastElements(FixedArray* elements);
+  Object* SetSlowElements(Object* length);
+
+  // Lookup interceptors are used for handling properties controlled by host
+  // objects.
+  inline bool HasNamedInterceptor();
+  inline bool HasIndexedInterceptor();
+
+  // Support functions for v8 api (needed for correct interceptor behavior).
+  bool HasRealNamedProperty(String* key);
+  bool HasRealElementProperty(uint32_t index);
+  bool HasRealNamedCallbackProperty(String* key);
+
+  // Initializes the array to a certain length
+  Object* SetElementsLength(Object* length);
+
+  // Get the header size for a JSObject.  Used to compute the index of
+  // internal fields as well as the number of internal fields.
+  inline int GetHeaderSize();
+
+  inline int GetInternalFieldCount();
+  inline Object* GetInternalField(int index);
+  inline void SetInternalField(int index, Object* value);
+
+  // Lookup a property.  If found, the result is valid and has
+  // detailed information.
+  void LocalLookup(String* name, LookupResult* result);
+  void Lookup(String* name, LookupResult* result);
+
+  // The following lookup functions skip interceptors.
+  void LocalLookupRealNamedProperty(String* name, LookupResult* result);
+  void LookupRealNamedProperty(String* name, LookupResult* result);
+  void LookupRealNamedPropertyInPrototypes(String* name, LookupResult* result);
+  void LookupCallbackSetterInPrototypes(String* name, LookupResult* result);
+  Object* LookupCallbackSetterInPrototypes(uint32_t index);
+  void LookupCallback(String* name, LookupResult* result);
+
+  // Returns the number of properties on this object filtering out properties
+  // with the specified attributes (ignoring interceptors).
+  int NumberOfLocalProperties(PropertyAttributes filter);
+  // Returns the number of enumerable properties (ignoring interceptors).
+  int NumberOfEnumProperties();
+  // Fill in details for properties into storage starting at the specified
+  // index.
+  void GetLocalPropertyNames(FixedArray* storage, int index);
+
+  // Returns the number of elements on this object filtering out elements
+  // with the specified attributes (ignoring interceptors).
+  int NumberOfLocalElements(PropertyAttributes filter);
+  // Returns the number of enumerable elements (ignoring interceptors).
+  int NumberOfEnumElements();
+  // Returns the number of elements on this object filtering out elements
+  // with the specified attributes (ignoring interceptors).
+  int GetLocalElementKeys(FixedArray* storage, PropertyAttributes filter);
+  // Count and fill in the enumerable elements into storage.
+  // (storage->length() == NumberOfEnumElements()).
+  // If storage is NULL, will count the elements without adding
+  // them to any storage.
+  // Returns the number of enumerable elements.
+  int GetEnumElementKeys(FixedArray* storage);
+
+  // Add a property to a fast-case object using a map transition to
+  // new_map.
+  Object* AddFastPropertyUsingMap(Map* new_map,
+                                  String* name,
+                                  Object* value);
+
+  // Add a constant function property to a fast-case object.
+  // This leaves a CONSTANT_TRANSITION in the old map, and
+  // if it is called on a second object with this map, a
+  // normal property is added instead, with a map transition.
+  // This avoids the creation of many maps with the same constant
+  // function, all orphaned.
+  Object* AddConstantFunctionProperty(String* name,
+                                      JSFunction* function,
+                                      PropertyAttributes attributes);
+
+  Object* ReplaceSlowProperty(String* name,
+                              Object* value,
+                              PropertyAttributes attributes);
+
+  // Converts a descriptor of any other type to a real field,
+  // backed by the properties array.  Descriptors of visible
+  // types, such as CONSTANT_FUNCTION, keep their enumeration order.
+  // Converts the descriptor on the original object's map to a
+  // map transition, and the new field is on the object's new map.
+  Object* ConvertDescriptorToFieldAndMapTransition(
+      String* name,
+      Object* new_value,
+      PropertyAttributes attributes);
+
+  // Converts a descriptor of any other type to a real field,
+  // backed by the properties array.  Descriptors of visible
+  // types, such as CONSTANT_FUNCTION, keep their enumeration order.
+  Object* ConvertDescriptorToField(String* name,
+                                   Object* new_value,
+                                   PropertyAttributes attributes);
+
+  // Add a property to a fast-case object.
+  Object* AddFastProperty(String* name,
+                          Object* value,
+                          PropertyAttributes attributes);
+
+  // Add a property to a slow-case object.
+  Object* AddSlowProperty(String* name,
+                          Object* value,
+                          PropertyAttributes attributes);
+
+  // Add a property to an object.
+  Object* AddProperty(String* name,
+                      Object* value,
+                      PropertyAttributes attributes);
+
+  // Convert the object to use the canonical dictionary
+  // representation. If the object is expected to have additional properties
+  // added, their number can be indicated so that the backing store is
+  // allocated with an initial capacity large enough to hold them.
+  Object* NormalizeProperties(PropertyNormalizationMode mode,
+                              int expected_additional_properties);
+  Object* NormalizeElements();
+
+  // Transform slow named properties to fast variants.
+  // Returns failure if allocation failed.
+  Object* TransformToFastProperties(int unused_property_fields);
+
+  // Access fast-case object properties at index.
+  inline Object* FastPropertyAt(int index);
+  inline Object* FastPropertyAtPut(int index, Object* value);
+
+  // Access to in object properties.
+  inline Object* InObjectPropertyAt(int index);
+  inline Object* InObjectPropertyAtPut(int index,
+                                       Object* value,
+                                       WriteBarrierMode mode
+                                       = UPDATE_WRITE_BARRIER);
+
+  // Initializes the body after the properties slot; the properties slot
+  // itself is initialized by set_properties.
+  // Note: this call does not update the write barrier; it is the caller's
+  // responsibility to ensure that *v* can be collected without a write
+  // barrier here.
+  inline void InitializeBody(int object_size);
+
+  // Check whether this object references another object
+  bool ReferencesObject(Object* obj);
+
+  // Casting.
+  static inline JSObject* cast(Object* obj);
+
+  // Dispatched behavior.
+  void JSObjectIterateBody(int object_size, ObjectVisitor* v);
+  void JSObjectShortPrint(StringStream* accumulator);
+#ifdef DEBUG
+  void JSObjectPrint();
+  void JSObjectVerify();
+  void PrintProperties();
+  void PrintElements();
+
+  // Structure for collecting spill information about JSObjects.
+  class SpillInformation {
+   public:
+    void Clear();
+    void Print();
+    int number_of_objects_;
+    int number_of_objects_with_fast_properties_;
+    int number_of_objects_with_fast_elements_;
+    int number_of_fast_used_fields_;
+    int number_of_fast_unused_fields_;
+    int number_of_slow_used_properties_;
+    int number_of_slow_unused_properties_;
+    int number_of_fast_used_elements_;
+    int number_of_fast_unused_elements_;
+    int number_of_slow_used_elements_;
+    int number_of_slow_unused_elements_;
+  };
+
+  void IncrementSpillStatistics(SpillInformation* info);
+#endif
+  Object* SlowReverseLookup(Object* value);
+
+  static const uint32_t kMaxGap = 1024;
+  static const int kMaxFastElementsLength = 5000;
+  static const int kInitialMaxFastElementArray = 100000;
+  static const int kMaxFastProperties = 8;
+  static const int kMaxInstanceSize = 255 * kPointerSize;
+  // When extending the backing storage for property values, we increase
+  // its size by more than the 1 entry necessary, so sequentially adding fields
+  // to the same object requires fewer allocations and copies.
+  static const int kFieldsAdded = 3;
+
+  // Layout description.
+  static const int kPropertiesOffset = HeapObject::kHeaderSize;
+  static const int kElementsOffset = kPropertiesOffset + kPointerSize;
+  static const int kHeaderSize = kElementsOffset + kPointerSize;
+
+  STATIC_CHECK(kHeaderSize == Internals::kJSObjectHeaderSize);
+
+  Object* GetElementWithInterceptor(JSObject* receiver, uint32_t index);
+
+ private:
+  Object* SetElementWithInterceptor(uint32_t index, Object* value);
+  Object* SetElementWithoutInterceptor(uint32_t index, Object* value);
+
+  Object* GetElementPostInterceptor(JSObject* receiver, uint32_t index);
+
+  Object* DeletePropertyPostInterceptor(String* name, DeleteMode mode);
+  Object* DeletePropertyWithInterceptor(String* name);
+
+  Object* DeleteElementPostInterceptor(uint32_t index, DeleteMode mode);
+  Object* DeleteElementWithInterceptor(uint32_t index);
+
+  PropertyAttributes GetPropertyAttributePostInterceptor(JSObject* receiver,
+                                                         String* name,
+                                                         bool continue_search);
+  PropertyAttributes GetPropertyAttributeWithInterceptor(JSObject* receiver,
+                                                         String* name,
+                                                         bool continue_search);
+  PropertyAttributes GetPropertyAttributeWithFailedAccessCheck(
+      Object* receiver,
+      LookupResult* result,
+      String* name,
+      bool continue_search);
+  PropertyAttributes GetPropertyAttribute(JSObject* receiver,
+                                          LookupResult* result,
+                                          String* name,
+                                          bool continue_search);
+
+  // Returns true if most of the elements backing storage is used.
+  bool HasDenseElements();
+
+  Object* DefineGetterSetter(String* name, PropertyAttributes attributes);
+
+  void LookupInDescriptor(String* name, LookupResult* result);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
+};
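+
+// Illustrative usage sketch (not part of the original interface):
+//
+//   if (object->HasProperty(name)) {
+//     Object* value = object->GetProperty(name);   // inherited from Object
+//   }
+//   // Setters may return a Failure (e.g. to retry after GC), which the
+//   // caller must check before using the result.
+//   Object* result = object->SetProperty(name, value, NONE);
+//   if (result->IsFailure()) { /* handle allocation failure */ }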
+
+
+// Abstract superclass for arrays.  It provides length behavior.
+class Array: public HeapObject {
+ public:
+  // [length]: length of the array.
+  inline int length();
+  inline void set_length(int value);
+
+  // Convert an object to an array index.
+  // Returns true if the conversion succeeded.
+  static inline bool IndexFromObject(Object* object, uint32_t* index);
+
+  // Layout descriptor.
+  static const int kLengthOffset = HeapObject::kHeaderSize;
+
+ protected:
+  // No code should use the Array class directly, only its subclasses.
+  // Use the kHeaderSize of the appropriate subclass, which may be aligned.
+  static const int kHeaderSize = kLengthOffset + kIntSize;
+  static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Array);
+};
+
+
+// FixedArray describes fixed-sized arrays whose element
+// type is Object*.
+
+class FixedArray: public Array {
+ public:
+
+  // Setter and getter for elements.
+  inline Object* get(int index);
+  // Setter that uses write barrier.
+  inline void set(int index, Object* value);
+
+  // Setter that doesn't need a write barrier.
+  inline void set(int index, Smi* value);
+  // Setter with explicit barrier mode.
+  inline void set(int index, Object* value, WriteBarrierMode mode);
+
+  // Setters for frequently used oddballs located in old space.
+  inline void set_undefined(int index);
+  inline void set_null(int index);
+  inline void set_the_hole(int index);
+
+  // Copy operations.
+  inline Object* Copy();
+  Object* CopySize(int new_length);
+
+  // Add the elements of a JSArray to this FixedArray.
+  Object* AddKeysFromJSArray(JSArray* array);
+
+  // Compute the union of this and other.
+  Object* UnionOfKeys(FixedArray* other);
+
+  // Copy a sub array from the receiver to dest.
+  void CopyTo(int pos, FixedArray* dest, int dest_pos, int len);
+
+  // Garbage collection support.
+  static int SizeFor(int length) {
+    return kHeaderSize + length * kPointerSize;
+  }
+
+  // Code Generation support.
+  static int OffsetOfElementAt(int index) { return SizeFor(index); }
+
+  // Casting.
+  static inline FixedArray* cast(Object* obj);
+
+  // Align data at kPointerSize, even if Array.kHeaderSize isn't aligned.
+  static const int kHeaderSize = POINTER_SIZE_ALIGN(Array::kHeaderSize);
+
+  // Dispatched behavior.
+  int FixedArraySize() { return SizeFor(length()); }
+  void FixedArrayIterateBody(ObjectVisitor* v);
+#ifdef DEBUG
+  void FixedArrayPrint();
+  void FixedArrayVerify();
+  // Checks if two FixedArrays have identical contents.
+  bool IsEqualTo(FixedArray* other);
+#endif
+
+  // Swap two elements in a pair of arrays.  If this array and the
+  // numbers array are the same object, the elements are only swapped
+  // once.
+  void SwapPairs(FixedArray* numbers, int i, int j);
+
+  // Sort a prefix of this array and the numbers array as pairs with respect
+  // to the numbers.  If the numbers array and this array are the same
+  // object, the prefix of this array is sorted.
+  void SortPairs(FixedArray* numbers, uint32_t len);
+
+ protected:
+  // Set operation on FixedArray without using write barriers.
+  static inline void fast_set(FixedArray* array, int index, Object* value);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArray);
+};
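+
+// Illustrative sketch (not part of the original interface): element offsets
+// follow directly from SizeFor(), so with 4-byte pointers
+//
+//   FixedArray::SizeFor(0) == FixedArray::kHeaderSize
+//   FixedArray::OffsetOfElementAt(2) == FixedArray::kHeaderSize + 8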
+
+
+// DescriptorArrays are fixed arrays used to hold instance descriptors.
+// The format of these objects is:
+//   [0]: pointer to a fixed array with (value, details) pairs.
+//   [1]: next enumeration index (Smi), or pointer to small fixed array:
+//          [0]: next enumeration index (Smi)
+//          [1]: pointer to fixed array with enum cache
+//   [2]: first key
+//   [length() - 1]: last key
+//
+class DescriptorArray: public FixedArray {
+ public:
+  // Is this the singleton empty_descriptor_array?
+  inline bool IsEmpty();
+  // Returns the number of descriptors in the array.
+  int number_of_descriptors() {
+    return IsEmpty() ? 0 : length() - kFirstIndex;
+  }
+
+  int NextEnumerationIndex() {
+    if (IsEmpty()) return PropertyDetails::kInitialIndex;
+    Object* obj = get(kEnumerationIndexIndex);
+    if (obj->IsSmi()) {
+      return Smi::cast(obj)->value();
+    } else {
+      Object* index = FixedArray::cast(obj)->get(kEnumCacheBridgeEnumIndex);
+      return Smi::cast(index)->value();
+    }
+  }
+
+  // Set next enumeration index and flush any enum cache.
+  void SetNextEnumerationIndex(int value) {
+    if (!IsEmpty()) {
+      fast_set(this, kEnumerationIndexIndex, Smi::FromInt(value));
+    }
+  }
+  bool HasEnumCache() {
+    return !IsEmpty() && !get(kEnumerationIndexIndex)->IsSmi();
+  }
+
+  Object* GetEnumCache() {
+    ASSERT(HasEnumCache());
+    FixedArray* bridge = FixedArray::cast(get(kEnumerationIndexIndex));
+    return bridge->get(kEnumCacheBridgeCacheIndex);
+  }
+
+  // Initialize or change the enum cache,
+  // using the supplied storage for the small "bridge".
+  void SetEnumCache(FixedArray* bridge_storage, FixedArray* new_cache);
+
+  // Accessors for fetching instance descriptor at descriptor number.
+  inline String* GetKey(int descriptor_number);
+  inline Object* GetValue(int descriptor_number);
+  inline Smi* GetDetails(int descriptor_number);
+  inline PropertyType GetType(int descriptor_number);
+  inline int GetFieldIndex(int descriptor_number);
+  inline JSFunction* GetConstantFunction(int descriptor_number);
+  inline Object* GetCallbacksObject(int descriptor_number);
+  inline AccessorDescriptor* GetCallbacks(int descriptor_number);
+  inline bool IsProperty(int descriptor_number);
+  inline bool IsTransition(int descriptor_number);
+  inline bool IsNullDescriptor(int descriptor_number);
+  inline bool IsDontEnum(int descriptor_number);
+
+  // Accessor for complete descriptor.
+  inline void Get(int descriptor_number, Descriptor* desc);
+  inline void Set(int descriptor_number, Descriptor* desc);
+
+  // Transfer complete descriptor from another descriptor array to
+  // this one.
+  inline void CopyFrom(int index, DescriptorArray* src, int src_index);
+
+  // Copy the descriptor array, insert a new descriptor and optionally
+  // remove map transitions.  If the descriptor is already present, it is
+  // replaced.  If a replaced descriptor is a real property (not a transition
+  // or null), its enumeration index is kept as is.
+  // If adding a real property, map transitions must be removed.  If adding
+  // a transition, they must not be removed.  All null descriptors are removed.
+  Object* CopyInsert(Descriptor* descriptor, TransitionFlag transition_flag);
+
+  // Remove all transitions.  Return a copy of the array with all transitions
+  // removed, or a Failure object if the new array could not be allocated.
+  Object* RemoveTransitions();
+
+  // Sort the instance descriptors by the hash codes of their keys.
+  void Sort();
+
+  // Search the instance descriptors for given name.
+  inline int Search(String* name);
+
+  // Tells whether the name is present in the array.
+  bool Contains(String* name) { return kNotFound != Search(name); }
+
+  // Perform a binary search in the instance descriptors represented
+  // by this fixed array.  low and high are descriptor indices.  If there
+  // are three instance descriptors in this array it should be called
+  // with low=0 and high=2.
+  int BinarySearch(String* name, int low, int high);
+
+  // Perform a linear search in the instance descriptors represented
+  // by this fixed array.  len is the number of descriptor indices that are
+  // valid.  Does not require the descriptors to be sorted.
+  int LinearSearch(String* name, int len);
+
+  // Allocates a DescriptorArray, but returns the singleton
+  // empty descriptor array object if number_of_descriptors is 0.
+  static Object* Allocate(int number_of_descriptors);
+
+  // Casting.
+  static inline DescriptorArray* cast(Object* obj);
+
+  // Constant for denoting key was not found.
+  static const int kNotFound = -1;
+
+  static const int kContentArrayIndex = 0;
+  static const int kEnumerationIndexIndex = 1;
+  static const int kFirstIndex = 2;
+
+  // The length of the "bridge" to the enum cache.
+  static const int kEnumCacheBridgeLength = 2;
+  static const int kEnumCacheBridgeEnumIndex = 0;
+  static const int kEnumCacheBridgeCacheIndex = 1;
+
+  // Layout description.
+  static const int kContentArrayOffset = FixedArray::kHeaderSize;
+  static const int kEnumerationIndexOffset = kContentArrayOffset + kPointerSize;
+  static const int kFirstOffset = kEnumerationIndexOffset + kPointerSize;
+
+  // Layout description for the bridge array.
+  static const int kEnumCacheBridgeEnumOffset = FixedArray::kHeaderSize;
+  static const int kEnumCacheBridgeCacheOffset =
+    kEnumCacheBridgeEnumOffset + kPointerSize;
+
+#ifdef DEBUG
+  // Print all the descriptors.
+  void PrintDescriptors();
+
+  // Is the descriptor array sorted and without duplicates?
+  bool IsSortedNoDuplicates();
+
+  // Are two DescriptorArrays equal?
+  bool IsEqualTo(DescriptorArray* other);
+#endif
+
+  // The maximum number of descriptors we want in a descriptor array (should
+  // fit in a page).
+  static const int kMaxNumberOfDescriptors = 1024 + 512;
+
+ private:
+  // Conversion from descriptor number to array indices.
+  static int ToKeyIndex(int descriptor_number) {
+    return descriptor_number + kFirstIndex;
+  }
+  static int ToValueIndex(int descriptor_number) {
+    return descriptor_number << 1;
+  }
+  static int ToDetailsIndex(int descriptor_number) {
+    return (descriptor_number << 1) + 1;
+  }
+
+  bool is_null_descriptor(int descriptor_number) {
+    return PropertyDetails(GetDetails(descriptor_number)).type() ==
+        NULL_DESCRIPTOR;
+  }
+  // Swap operation on FixedArray without using write barriers.
+  static inline void fast_swap(FixedArray* array, int first, int second);
+
+  // Swap descriptor first and second.
+  inline void Swap(int first, int second);
+
+  FixedArray* GetContentArray() {
+    return FixedArray::cast(get(kContentArrayIndex));
+  }
+  DISALLOW_IMPLICIT_CONSTRUCTORS(DescriptorArray);
+};
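+
+// Illustrative sketch of the layout above (not part of the original
+// interface): descriptor n keeps its key at index kFirstIndex + n of the
+// descriptor array itself, while its value and details live at indices
+// 2 * n and 2 * n + 1 of the content array stored at kContentArrayIndex:
+//
+//   String* key   = descriptors->GetKey(n);
+//   Object* value = descriptors->GetValue(n);
+//   Smi* details  = descriptors->GetDetails(n);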
+
+
+// HashTable is a subclass of FixedArray that implements a hash table
+// that uses open addressing and quadratic probing.
+//
+// In order for the quadratic probing to work, elements that have not
+// yet been used and elements that have been deleted are
+// distinguished.  Probing continues when deleted elements are
+// encountered and stops when unused elements are encountered.
+//
+// - Elements with key == undefined have not been used yet.
+// - Elements with key == null have been deleted.
+//
+// The hash table class is parameterized with a Shape and a Key.
+// Shape must be a class with the following interface:
+//   class ExampleShape {
+//    public:
+//      // Tells whether key matches other.
+//     static bool IsMatch(Key key, Object* other);
+//     // Returns the hash value for key.
+//     static uint32_t Hash(Key key);
+//     // Returns the hash value for object.
+//     static uint32_t HashForObject(Key key, Object* object);
+//     // Convert key to an object.
+//     static inline Object* AsObject(Key key);
+//     // The prefix size indicates number of elements in the beginning
+//     // of the backing storage.
+//     static const int kPrefixSize = ..;
+//     // The Element size indicates number of elements per entry.
+//     static const int kEntrySize = ..;
+//   };
+// The prefix size indicates an amount of memory in the beginning of the
+// backing storage that can be used for non-element information by
+// subclasses.
+
+template<typename Shape, typename Key>
+class HashTable: public FixedArray {
+ public:
+  // Returns the number of elements in the dictionary.
+  int NumberOfElements() {
+    return Smi::cast(get(kNumberOfElementsIndex))->value();
+  }
+
+  // Returns the capacity of the dictionary.
+  int Capacity() {
+    return Smi::cast(get(kCapacityIndex))->value();
+  }
+
+  // ElementAdded should be called whenever an element is added to a
+  // dictionary.
+  void ElementAdded() { SetNumberOfElements(NumberOfElements() + 1); }
+
+  // ElementRemoved should be called whenever an element is removed from
+  // a dictionary.
+  void ElementRemoved() { SetNumberOfElements(NumberOfElements() - 1); }
+  void ElementsRemoved(int n) { SetNumberOfElements(NumberOfElements() - n); }
+
+  // Returns a new array for dictionary usage. Might return Failure.
+  static Object* Allocate(int at_least_space_for);
+
+  // Returns the key at entry.
+  Object* KeyAt(int entry) { return get(EntryToIndex(entry)); }
+
+  // Tells whether k is a real key.  Null and undefined are not allowed
+  // as keys and can be used to indicate missing or deleted elements.
+  bool IsKey(Object* k) {
+    return !k->IsNull() && !k->IsUndefined();
+  }
+
+  // Garbage collection support.
+  void IteratePrefix(ObjectVisitor* visitor);
+  void IterateElements(ObjectVisitor* visitor);
+
+  // Casting.
+  static inline HashTable* cast(Object* obj);
+
+  // Compute the probe offset (quadratic probing).
+  INLINE(static uint32_t GetProbeOffset(uint32_t n)) {
+    return (n + n * n) >> 1;
+  }
+
+  static const int kNumberOfElementsIndex = 0;
+  static const int kCapacityIndex         = 1;
+  static const int kPrefixStartIndex      = 2;
+  static const int kElementsStartIndex    =
+      kPrefixStartIndex + Shape::kPrefixSize;
+  static const int kEntrySize             = Shape::kEntrySize;
+  static const int kElementsStartOffset   =
+      kHeaderSize + kElementsStartIndex * kPointerSize;
+
+  // Constant used for denoting an absent entry.
+  static const int kNotFound = -1;
+
+  // Find entry for key otherwise return -1.
+  int FindEntry(Key key);
+
+ protected:
+
+  // Find the entry at which to insert element with the given key that
+  // has the given hash value.
+  uint32_t FindInsertionEntry(uint32_t hash);
+
+  // Returns the index for an entry (of the key)
+  static inline int EntryToIndex(int entry) {
+    return (entry * kEntrySize) + kElementsStartIndex;
+  }
+
+  // Update the number of elements in the dictionary.
+  void SetNumberOfElements(int nof) {
+    fast_set(this, kNumberOfElementsIndex, Smi::FromInt(nof));
+  }
+
+  // Sets the capacity of the hash table.
+  void SetCapacity(int capacity) {
+    // To scale a computed hash code to fit within the hash table, we
+    // use bit-wise AND with a mask, so the capacity must be positive
+    // and non-zero.
+    ASSERT(capacity > 0);
+    fast_set(this, kCapacityIndex, Smi::FromInt(capacity));
+  }
+
+
+  // Returns probe entry.
+  static uint32_t GetProbe(uint32_t hash, uint32_t number, uint32_t size) {
+    ASSERT(IsPowerOf2(size));
+    return (hash + GetProbeOffset(number)) & (size - 1);
+  }
+
+  // Ensure enough space for n additional elements.
+  Object* EnsureCapacity(int n, Key key);
+};
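+
+// Illustrative sketch (not part of the original interface): with
+// GetProbeOffset(n) = (n + n*n) / 2, a key hashing to h in a table of
+// capacity 8 probes the entries
+//
+//   h & 7, (h + 1) & 7, (h + 3) & 7, (h + 6) & 7, ...
+//
+// skipping deleted (null-key) entries and stopping at an unused
+// (undefined-key) entry, as described above.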
+
+
+
+// HashTableKey is an abstract superclass for virtual key behavior.
+class HashTableKey {
+ public:
+  // Returns whether the other object matches this key.
+  virtual bool IsMatch(Object* other) = 0;
+  // Returns the hash value for this key.
+  virtual uint32_t Hash() = 0;
+  // Returns the hash value for object.
+  virtual uint32_t HashForObject(Object* key) = 0;
+  // Returns the key object for storing into the dictionary.
+  // If allocation fails, a Failure object is returned.
+  virtual Object* AsObject() = 0;
+  // Required.
+  virtual ~HashTableKey() {}
+};
+
+class SymbolTableShape {
+ public:
+  static bool IsMatch(HashTableKey* key, Object* value) {
+    return key->IsMatch(value);
+  }
+  static uint32_t Hash(HashTableKey* key) {
+    return key->Hash();
+  }
+  static uint32_t HashForObject(HashTableKey* key, Object* object) {
+    return key->HashForObject(object);
+  }
+  static Object* AsObject(HashTableKey* key) {
+    return key->AsObject();
+  }
+
+  static const int kPrefixSize = 0;
+  static const int kEntrySize = 1;
+};
+
+// SymbolTable.
+//
+// No special elements in the prefix and the element size is 1
+// because only the symbol itself (the key) needs to be stored.
+class SymbolTable: public HashTable<SymbolTableShape, HashTableKey*> {
+ public:
+  // Find symbol in the symbol table.  If it is not there yet, it is
+  // added.  The return value is the symbol table which might have
+  // been enlarged.  If the return value is not a failure, the symbol
+  // pointer *s is set to the symbol found.
+  Object* LookupSymbol(Vector<const char> str, Object** s);
+  Object* LookupString(String* key, Object** s);
+
+  // Looks up a symbol that is equal to the given string and returns
+  // true if it is found, assigning the symbol to the given output
+  // parameter.
+  bool LookupSymbolIfExists(String* str, String** symbol);
+
+  // Casting.
+  static inline SymbolTable* cast(Object* obj);
+
+ private:
+  Object* LookupKey(HashTableKey* key, Object** s);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(SymbolTable);
+};
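+
+// Illustrative usage sketch (not part of the original interface): the table
+// returned by LookupSymbol() may be a new, larger table and must replace the
+// old reference; a Failure return must be propagated:
+//
+//   // string_chars is a Vector<const char> holding the characters to find.
+//   Object* symbol = NULL;
+//   Object* obj = table->LookupSymbol(string_chars, &symbol);
+//   if (obj->IsFailure()) return obj;
+//   table = SymbolTable::cast(obj);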
+
+
+class MapCacheShape {
+ public:
+  static bool IsMatch(HashTableKey* key, Object* value) {
+    return key->IsMatch(value);
+  }
+  static uint32_t Hash(HashTableKey* key) {
+    return key->Hash();
+  }
+
+  static uint32_t HashForObject(HashTableKey* key, Object* object) {
+    return key->HashForObject(object);
+  }
+
+  static Object* AsObject(HashTableKey* key) {
+    return key->AsObject();
+  }
+
+  static const int kPrefixSize = 0;
+  static const int kEntrySize = 2;
+};
+
+
+// MapCache.
+//
+// Maps keys that are a fixed array of symbols to a map.
+// Used to canonicalize maps for object literals.
+class MapCache: public HashTable<MapCacheShape, HashTableKey*> {
+ public:
+  // Find cached value for a string key, otherwise return null.
+  Object* Lookup(FixedArray* key);
+  Object* Put(FixedArray* key, Map* value);
+  static inline MapCache* cast(Object* obj);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(MapCache);
+};
+
+
+template <typename Shape, typename Key>
+class Dictionary: public HashTable<Shape, Key> {
+ public:
+
+  static inline Dictionary<Shape, Key>* cast(Object* obj) {
+    return reinterpret_cast<Dictionary<Shape, Key>*>(obj);
+  }
+
+  // Returns the value at entry.
+  Object* ValueAt(int entry) {
+    return get(HashTable<Shape, Key>::EntryToIndex(entry)+1);
+  }
+
+  // Set the value for entry.
+  void ValueAtPut(int entry, Object* value) {
+    set(HashTable<Shape, Key>::EntryToIndex(entry)+1, value);
+  }
+
+  // Returns the property details for the property at entry.
+  PropertyDetails DetailsAt(int entry) {
+    ASSERT(entry >= 0);  // Not found is -1, which is not caught by get().
+    return PropertyDetails(
+        Smi::cast(get(HashTable<Shape, Key>::EntryToIndex(entry) + 2)));
+  }
+
+  // Set the details for entry.
+  void DetailsAtPut(int entry, PropertyDetails value) {
+    set(HashTable<Shape, Key>::EntryToIndex(entry) + 2, value.AsSmi());
+  }
+
+  // Sorting support
+  void CopyValuesTo(FixedArray* elements);
+
+  // Delete a property from the dictionary.
+  Object* DeleteProperty(int entry, JSObject::DeleteMode mode);
+
+  // Returns the number of elements in the dictionary filtering out properties
+  // with the specified attributes.
+  int NumberOfElementsFilterAttributes(PropertyAttributes filter);
+
+  // Returns the number of enumerable elements in the dictionary.
+  int NumberOfEnumElements();
+
+  // Copies keys to preallocated fixed array.
+  void CopyKeysTo(FixedArray* storage, PropertyAttributes filter);
+  // Fill in details for properties into storage.
+  void CopyKeysTo(FixedArray* storage);
+
+  // Accessors for next enumeration index.
+  void SetNextEnumerationIndex(int index) {
+    fast_set(this, kNextEnumerationIndexIndex, Smi::FromInt(index));
+  }
+
+  int NextEnumerationIndex() {
+    return Smi::cast(FixedArray::get(kNextEnumerationIndexIndex))->value();
+  }
+
+  // Returns a new array for dictionary usage. Might return Failure.
+  static Object* Allocate(int at_least_space_for);
+
+  // Ensure enough space for n additional elements.
+  Object* EnsureCapacity(int n, Key key);
+
+#ifdef DEBUG
+  void Print();
+#endif
+  // Returns the key (slow).
+  Object* SlowReverseLookup(Object* value);
+
+  // Sets the entry to (key, value) pair.
+  inline void SetEntry(int entry,
+                       Object* key,
+                       Object* value,
+                       PropertyDetails details);
+
+  Object* Add(Key key, Object* value, PropertyDetails details);
+
+ protected:
+  // Generic at put operation.
+  Object* AtPut(Key key, Object* value);
+
+  // Add entry to dictionary.
+  Object* AddEntry(Key key,
+                   Object* value,
+                   PropertyDetails details,
+                   uint32_t hash);
+
+  // Generate new enumeration indices to avoid enumeration index overflow.
+  Object* GenerateNewEnumerationIndices();
+  static const int kMaxNumberKeyIndex =
+      HashTable<Shape, Key>::kPrefixStartIndex;
+  static const int kNextEnumerationIndexIndex = kMaxNumberKeyIndex + 1;
+};
+
+
+class StringDictionaryShape {
+ public:
+  static inline bool IsMatch(String* key, Object* other);
+  static inline uint32_t Hash(String* key);
+  static inline uint32_t HashForObject(String* key, Object* object);
+  static inline Object* AsObject(String* key);
+  static const int kPrefixSize = 2;
+  static const int kEntrySize = 3;
+  static const bool kIsEnumerable = true;
+};
+
+
+class StringDictionary: public Dictionary<StringDictionaryShape, String*> {
+ public:
+  static inline StringDictionary* cast(Object* obj) {
+    ASSERT(obj->IsDictionary());
+    return reinterpret_cast<StringDictionary*>(obj);
+  }
+
+  // Copies enumerable keys to preallocated fixed array.
+  void CopyEnumKeysTo(FixedArray* storage, FixedArray* sort_array);
+
+  // For transforming properties of a JSObject.
+  Object* TransformPropertiesToFastFor(JSObject* obj,
+                                       int unused_property_fields);
+};
+
+
+class NumberDictionaryShape {
+ public:
+  static inline bool IsMatch(uint32_t key, Object* other);
+  static inline uint32_t Hash(uint32_t key);
+  static inline uint32_t HashForObject(uint32_t key, Object* object);
+  static inline Object* AsObject(uint32_t key);
+  static const int kPrefixSize = 2;
+  static const int kEntrySize = 3;
+  static const bool kIsEnumerable = false;
+};
+
+
+class NumberDictionary: public Dictionary<NumberDictionaryShape, uint32_t> {
+ public:
+  static NumberDictionary* cast(Object* obj) {
+    ASSERT(obj->IsDictionary());
+    return reinterpret_cast<NumberDictionary*>(obj);
+  }
+
+  // Type specific at put (default NONE attributes are used when adding).
+  Object* AtNumberPut(uint32_t key, Object* value);
+  Object* AddNumberEntry(uint32_t key,
+                         Object* value,
+                         PropertyDetails details);
+
+  // Set an existing entry or add a new one if needed.
+  Object* Set(uint32_t key, Object* value, PropertyDetails details);
+
+  void UpdateMaxNumberKey(uint32_t key);
+
+  // If slow elements are required we will never go back to fast-case
+  // for the elements kept in this dictionary.  We require slow
+  // elements if an element has been added at an index larger than
+  // kRequiresSlowElementsLimit or set_requires_slow_elements() has been called
+  // when defining a getter or setter with a number key.
+  inline bool requires_slow_elements();
+  inline void set_requires_slow_elements();
+
+  // Get the value of the max number key that has been added to this
+  // dictionary.  max_number_key can only be called if
+  // requires_slow_elements returns false.
+  inline uint32_t max_number_key();
+
+  // Remove all entries where key is a number and (from <= key && key < to).
+  void RemoveNumberEntries(uint32_t from, uint32_t to);
+
+  // Bit masks.
+  static const int kRequiresSlowElementsMask = 1;
+  static const int kRequiresSlowElementsTagSize = 1;
+  static const uint32_t kRequiresSlowElementsLimit = (1 << 29) - 1;
+};
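+
+// Illustrative sketch (not part of the original interface): per the comment
+// above, adding an element at an index above kRequiresSlowElementsLimit pins
+// the dictionary to slow mode:
+//
+//   dictionary->UpdateMaxNumberKey(kRequiresSlowElementsLimit + 1);
+//   dictionary->requires_slow_elements();   // true from now on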
+
+
+// ByteArray represents fixed sized byte arrays.  Used by the outside world,
+// such as PCRE, and also by the memory allocator and garbage collector to
+// fill in free blocks in the heap.
+class ByteArray: public Array {
+ public:
+  // Setter and getter.
+  inline byte get(int index);
+  inline void set(int index, byte value);
+
+  // Treat contents as an int array.
+  inline int get_int(int index);
+
+  static int SizeFor(int length) {
+    return OBJECT_SIZE_ALIGN(kHeaderSize + length);
+  }
+  // We use byte arrays for free blocks in the heap.  Given a desired size in
+  // bytes that is a multiple of the word size and big enough to hold a byte
+  // array, this function returns the number of elements a byte array should
+  // have.
+  static int LengthFor(int size_in_bytes) {
+    ASSERT(IsAligned(size_in_bytes, kPointerSize));
+    ASSERT(size_in_bytes >= kHeaderSize);
+    return size_in_bytes - kHeaderSize;
+  }
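+
+  // Illustrative arithmetic (assuming, for the example only, a kHeaderSize of
+  // 8 bytes on a 32-bit target): LengthFor(24) == 16 and SizeFor(16) == 24,
+  // so LengthFor is the inverse of SizeFor for word-aligned sizes.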
+
+  // Returns data start address.
+  inline Address GetDataStartAddress();
+
+  // Returns a pointer to the ByteArray object for a given data start address.
+  static inline ByteArray* FromDataStartAddress(Address address);
+
+  // Casting.
+  static inline ByteArray* cast(Object* obj);
+
+  // Dispatched behavior.
+  int ByteArraySize() { return SizeFor(length()); }
+#ifdef DEBUG
+  void ByteArrayPrint();
+  void ByteArrayVerify();
+#endif
+
+  // ByteArray headers are not quadword aligned.
+  static const int kHeaderSize = Array::kHeaderSize;
+  static const int kAlignedSize = Array::kAlignedSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ByteArray);
+};
+
+
+// A PixelArray represents a fixed-size byte array with special semantics
+// used for implementing the CanvasPixelArray object. Please see the
+// specification at:
+// http://www.whatwg.org/specs/web-apps/current-work/
+//                      multipage/the-canvas-element.html#canvaspixelarray
+// In particular, write access clamps the value written to 0 or 255 if the
+// value written is outside this range.
+class PixelArray: public Array {
+ public:
+  // [external_pointer]: The pointer to the external memory area backing this
+  // pixel array.
+  DECL_ACCESSORS(external_pointer, uint8_t)  // Pointer to the data store.
+
+  // Setter and getter.
+  inline uint8_t get(int index);
+  inline void set(int index, uint8_t value);
+
+  // This accessor applies the correct conversion from Smi, HeapNumber and
+  // undefined and clamps the converted value between 0 and 255.
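+  // For example (illustrative of this clamping): storing a HeapNumber value
+  // of 300 yields 255 and storing -5 yields 0; in-range Smis are stored
+  // unchanged.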
+  Object* SetValue(uint32_t index, Object* value);
+
+  // Casting.
+  static inline PixelArray* cast(Object* obj);
+
+#ifdef DEBUG
+  void PixelArrayPrint();
+  void PixelArrayVerify();
+#endif  // DEBUG
+
+  // PixelArray headers are not quadword aligned.
+  static const int kExternalPointerOffset = Array::kAlignedSize;
+  static const int kHeaderSize = kExternalPointerOffset + kPointerSize;
+  static const int kAlignedSize = OBJECT_SIZE_ALIGN(kHeaderSize);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(PixelArray);
+};
+
+
+// Code describes objects with on-the-fly generated machine code.
+class Code: public HeapObject {
+ public:
+  // Opaque data type for encapsulating code flags like kind, inline
+  // cache state, and arguments count.
+  enum Flags { };
+
+  enum Kind {
+    FUNCTION,
+    STUB,
+    BUILTIN,
+    LOAD_IC,
+    KEYED_LOAD_IC,
+    CALL_IC,
+    STORE_IC,
+    KEYED_STORE_IC,
+    // No more than eight kinds. The value is currently encoded in three bits
+    // in Flags.
+
+    // Pseudo-kinds.
+    REGEXP = BUILTIN,
+    FIRST_IC_KIND = LOAD_IC,
+    LAST_IC_KIND = KEYED_STORE_IC
+  };
+
+  enum {
+    NUMBER_OF_KINDS = KEYED_STORE_IC + 1
+  };
+
+#ifdef ENABLE_DISASSEMBLER
+  // Printing
+  static const char* Kind2String(Kind kind);
+  static const char* ICState2String(InlineCacheState state);
+  static const char* PropertyType2String(PropertyType type);
+  void Disassemble(const char* name);
+#endif  // ENABLE_DISASSEMBLER
+
+  // [instruction_size]: Size of the native instructions
+  inline int instruction_size();
+  inline void set_instruction_size(int value);
+
+  // [relocation_size]: Size of relocation information.
+  inline int relocation_size();
+  inline void set_relocation_size(int value);
+
+  // [sinfo_size]: Size of scope information.
+  inline int sinfo_size();
+  inline void set_sinfo_size(int value);
+
+  // [flags]: Various code flags.
+  inline Flags flags();
+  inline void set_flags(Flags flags);
+
+  // [flags]: Access to specific code flags.
+  inline Kind kind();
+  inline InlineCacheState ic_state();  // Only valid for IC stubs.
+  inline InLoopFlag ic_in_loop();  // Only valid for IC stubs.
+  inline PropertyType type();  // Only valid for monomorphic IC stubs.
+  inline int arguments_count();  // Only valid for call IC stubs.
+
+  // Testers for IC stub kinds.
+  inline bool is_inline_cache_stub();
+  inline bool is_load_stub() { return kind() == LOAD_IC; }
+  inline bool is_keyed_load_stub() { return kind() == KEYED_LOAD_IC; }
+  inline bool is_store_stub() { return kind() == STORE_IC; }
+  inline bool is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
+  inline bool is_call_stub() { return kind() == CALL_IC; }
+
+  // [major_key]: For kind STUB, the major key.
+  inline CodeStub::Major major_key();
+  inline void set_major_key(CodeStub::Major major);
+
+  // Flags operations.
+  static inline Flags ComputeFlags(Kind kind,
+                                   InLoopFlag in_loop = NOT_IN_LOOP,
+                                   InlineCacheState ic_state = UNINITIALIZED,
+                                   PropertyType type = NORMAL,
+                                   int argc = -1);
+
+  static inline Flags ComputeMonomorphicFlags(
+      Kind kind,
+      PropertyType type,
+      InLoopFlag in_loop = NOT_IN_LOOP,
+      int argc = -1);
+
+  static inline Kind ExtractKindFromFlags(Flags flags);
+  static inline InlineCacheState ExtractICStateFromFlags(Flags flags);
+  static inline InLoopFlag ExtractICInLoopFromFlags(Flags flags);
+  static inline PropertyType ExtractTypeFromFlags(Flags flags);
+  static inline int ExtractArgumentsCountFromFlags(Flags flags);
+  static inline Flags RemoveTypeFromFlags(Flags flags);
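+
+  // A minimal sketch of how these helpers compose (illustrative only,
+  // assuming FIELD is a valid PropertyType in this context):
+  //   Flags flags = ComputeMonomorphicFlags(LOAD_IC, FIELD);
+  //   ASSERT(ExtractKindFromFlags(flags) == LOAD_IC);
+  //   ASSERT(ExtractTypeFromFlags(flags) == FIELD);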
+
+  // Convert a target address into a code object.
+  static inline Code* GetCodeFromTargetAddress(Address address);
+
+  // Returns the address of the first instruction.
+  inline byte* instruction_start();
+
+  // Returns the size of the instructions, padding, and relocation information.
+  inline int body_size();
+
+  // Returns the address of the first relocation info (read backwards!).
+  inline byte* relocation_start();
+
+  // Code entry point.
+  inline byte* entry();
+
+  // Returns true if pc is inside this object's instructions.
+  inline bool contains(byte* pc);
+
+  // Returns the address of the scope information.
+  inline byte* sinfo_start();
+
+  // Relocate the code by delta bytes. Called to signal that this code
+  // object has been moved by delta bytes.
+  void Relocate(int delta);
+
+  // Migrate code described by desc.
+  void CopyFrom(const CodeDesc& desc);
+
+  // Returns the object size for a given body and sinfo size (Used for
+  // allocation).
+  static int SizeFor(int body_size, int sinfo_size) {
+    ASSERT_SIZE_TAG_ALIGNED(body_size);
+    ASSERT_SIZE_TAG_ALIGNED(sinfo_size);
+    return RoundUp(kHeaderSize + body_size + sinfo_size, kCodeAlignment);
+  }
+
+  // Calculate the size of the code object to report for log events. This takes
+  // the layout of the code object into account.
+  int ExecutableSize() {
+    // Check that the assumptions about the layout of the code object hold.
+    ASSERT_EQ(static_cast<int>(instruction_start() - address()),
+              Code::kHeaderSize);
+    return instruction_size() + Code::kHeaderSize;
+  }
+
+  // Locating source position.
+  int SourcePosition(Address pc);
+  int SourceStatementPosition(Address pc);
+
+  // Casting.
+  static inline Code* cast(Object* obj);
+
+  // Dispatched behavior.
+  int CodeSize() { return SizeFor(body_size(), sinfo_size()); }
+  void CodeIterateBody(ObjectVisitor* v);
+#ifdef DEBUG
+  void CodePrint();
+  void CodeVerify();
+#endif
+  // Code entry points are aligned to 32 bytes.
+  static const int kCodeAlignment = 32;
+  static const int kCodeAlignmentMask = kCodeAlignment - 1;
+
+  // Layout description.
+  static const int kInstructionSizeOffset = HeapObject::kHeaderSize;
+  static const int kRelocationSizeOffset = kInstructionSizeOffset + kIntSize;
+  static const int kSInfoSizeOffset = kRelocationSizeOffset + kIntSize;
+  static const int kFlagsOffset = kSInfoSizeOffset + kIntSize;
+  static const int kKindSpecificFlagsOffset  = kFlagsOffset + kIntSize;
+  // Add padding so that the instruction start, which follows right after the
+  // Code object header, is aligned.
+  static const int kHeaderSize =
+      (kKindSpecificFlagsOffset + kIntSize + kCodeAlignmentMask) &
+          ~kCodeAlignmentMask;
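+  // E.g. (illustrative): if kKindSpecificFlagsOffset + kIntSize evaluated to
+  // 24, the expression above would round it up to 32, the next multiple of
+  // kCodeAlignment, so instruction_start() lands on a 32-byte boundary.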
+
+  // Byte offsets within kKindSpecificFlagsOffset.
+  static const int kStubMajorKeyOffset = kKindSpecificFlagsOffset + 1;
+
+  // Flags layout.
+  static const int kFlagsICStateShift        = 0;
+  static const int kFlagsICInLoopShift       = 3;
+  static const int kFlagsKindShift           = 4;
+  static const int kFlagsTypeShift           = 7;
+  static const int kFlagsArgumentsCountShift = 10;
+
+  static const int kFlagsICStateMask        = 0x00000007;  // 0000000111
+  static const int kFlagsICInLoopMask       = 0x00000008;  // 0000001000
+  static const int kFlagsKindMask           = 0x00000070;  // 0001110000
+  static const int kFlagsTypeMask           = 0x00000380;  // 1110000000
+  static const int kFlagsArgumentsCountMask = 0xFFFFFC00;
+
+  static const int kFlagsNotUsedInLookup =
+      (kFlagsICInLoopMask | kFlagsTypeMask);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
+};
+
+
+// All heap objects have a Map that describes their structure.
+//  A Map contains information about:
+//  - Size information about the object
+//  - How to iterate over an object (for garbage collection)
+class Map: public HeapObject {
+ public:
+  // Instance size.
+  inline int instance_size();
+  inline void set_instance_size(int value);
+
+  // Count of properties allocated in the object.
+  inline int inobject_properties();
+  inline void set_inobject_properties(int value);
+
+  // Count of property fields pre-allocated in the object when first allocated.
+  inline int pre_allocated_property_fields();
+  inline void set_pre_allocated_property_fields(int value);
+
+  // Instance type.
+  inline InstanceType instance_type();
+  inline void set_instance_type(InstanceType value);
+
+  // Tells how many unused property fields are available in the
+  // instance (only used for JSObject in fast mode).
+  inline int unused_property_fields();
+  inline void set_unused_property_fields(int value);
+
+  // Bit field.
+  inline byte bit_field();
+  inline void set_bit_field(byte value);
+
+  // Bit field 2.
+  inline byte bit_field2();
+  inline void set_bit_field2(byte value);
+
+  // Tells whether the object in the prototype property will be used
+  // for instances created from this function.  If the prototype
+  // property is set to a value that is not a JSObject, the prototype
+  // property will not be used to create instances of the function.
+  // See ECMA-262, 13.2.2.
+  inline void set_non_instance_prototype(bool value);
+  inline bool has_non_instance_prototype();
+
+  // Tells whether the instance with this map should be ignored by the
+  // __proto__ accessor.
+  inline void set_is_hidden_prototype() {
+    set_bit_field(bit_field() | (1 << kIsHiddenPrototype));
+  }
+
+  inline bool is_hidden_prototype() {
+    return ((1 << kIsHiddenPrototype) & bit_field()) != 0;
+  }
+
+  // Records and queries whether the instance has a named interceptor.
+  inline void set_has_named_interceptor() {
+    set_bit_field(bit_field() | (1 << kHasNamedInterceptor));
+  }
+
+  inline bool has_named_interceptor() {
+    return ((1 << kHasNamedInterceptor) & bit_field()) != 0;
+  }
+
+  // Records and queries whether the instance has an indexed interceptor.
+  inline void set_has_indexed_interceptor() {
+    set_bit_field(bit_field() | (1 << kHasIndexedInterceptor));
+  }
+
+  inline bool has_indexed_interceptor() {
+    return ((1 << kHasIndexedInterceptor) & bit_field()) != 0;
+  }
+
+  // Tells whether the instance is undetectable.
+  // An undetectable object is a special class of JSObject: 'typeof' operator
+  // returns undefined, ToBoolean returns false. Otherwise it behaves like
+  // a normal JS object.  It is useful for implementing undetectable
+  // document.all in Firefox & Safari.
+  // See https://bugzilla.mozilla.org/show_bug.cgi?id=248549.
+  inline void set_is_undetectable() {
+    set_bit_field(bit_field() | (1 << kIsUndetectable));
+  }
+
+  inline bool is_undetectable() {
+    return ((1 << kIsUndetectable) & bit_field()) != 0;
+  }
+
+  inline void set_needs_loading(bool value) {
+    if (value) {
+      set_bit_field2(bit_field2() | (1 << kNeedsLoading));
+    } else {
+      set_bit_field2(bit_field2() & ~(1 << kNeedsLoading));
+    }
+  }
+
+  // Does this object or function require a lazily loaded script to be
+  // run before being used?
+  inline bool needs_loading() {
+    return ((1 << kNeedsLoading) & bit_field2()) != 0;
+  }
+
+  // Tells whether the instance has a call-as-function handler.
+  inline void set_has_instance_call_handler() {
+    set_bit_field(bit_field() | (1 << kHasInstanceCallHandler));
+  }
+
+  inline bool has_instance_call_handler() {
+    return ((1 << kHasInstanceCallHandler) & bit_field()) != 0;
+  }
+
+  // Tells whether the instance needs security checks when accessing its
+  // properties.
+  inline void set_is_access_check_needed(bool access_check_needed);
+  inline bool is_access_check_needed();
+
+  // [prototype]: implicit prototype object.
+  DECL_ACCESSORS(prototype, Object)
+
+  // [constructor]: points back to the function responsible for this map.
+  DECL_ACCESSORS(constructor, Object)
+
+  // [instance descriptors]: describes the object.
+  DECL_ACCESSORS(instance_descriptors, DescriptorArray)
+
+  // [stub cache]: contains stubs compiled for this map.
+  DECL_ACCESSORS(code_cache, FixedArray)
+
+  // Returns a copy of the map.
+  Object* CopyDropDescriptors();
+
+  // Returns a copy of the map, with all transitions dropped from the
+  // instance descriptors.
+  Object* CopyDropTransitions();
+
+  // Returns the property index for name (only valid for FAST MODE).
+  int PropertyIndexFor(String* name);
+
+  // Returns the next free property index (only valid for FAST MODE).
+  int NextFreePropertyIndex();
+
+  // Returns the number of properties described in instance_descriptors.
+  int NumberOfDescribedProperties();
+
+  // Casting.
+  static inline Map* cast(Object* obj);
+
+  // Locate an accessor in the instance descriptor.
+  AccessorDescriptor* FindAccessor(String* name);
+
+  // Code cache operations.
+
+  // Clears the code cache.
+  inline void ClearCodeCache();
+
+  // Update code cache.
+  Object* UpdateCodeCache(String* name, Code* code);
+
+  // Returns the found code or undefined if absent.
+  Object* FindInCodeCache(String* name, Code::Flags flags);
+
+  // Returns the non-negative index of the code object if it is in the
+  // cache and -1 otherwise.
+  int IndexInCodeCache(Code* code);
+
+  // Removes a code object from the code cache at the given index.
+  void RemoveFromCodeCache(int index);
+
+  // For every transition in this map, makes the transition's
+  // target's prototype pointer point back to this map.
+  // This is undone in MarkCompactCollector::ClearNonLiveTransitions().
+  void CreateBackPointers();
+
+  // Set all map transitions from this map to dead maps to null.
+  // Also, restore the original prototype on the targets of these
+  // transitions, so that we do not process this map again while
+  // following back pointers.
+  void ClearNonLiveTransitions(Object* real_prototype);
+
+  // Dispatched behavior.
+  void MapIterateBody(ObjectVisitor* v);
+#ifdef DEBUG
+  void MapPrint();
+  void MapVerify();
+#endif
+
+  static const int kMaxPreAllocatedPropertyFields = 255;
+
+  // Layout description.
+  static const int kInstanceSizesOffset = HeapObject::kHeaderSize;
+  static const int kInstanceAttributesOffset = kInstanceSizesOffset + kIntSize;
+  static const int kPrototypeOffset = kInstanceAttributesOffset + kIntSize;
+  static const int kConstructorOffset = kPrototypeOffset + kPointerSize;
+  static const int kInstanceDescriptorsOffset =
+      kConstructorOffset + kPointerSize;
+  static const int kCodeCacheOffset = kInstanceDescriptorsOffset + kPointerSize;
+  static const int kSize = kCodeCacheOffset + kPointerSize;
+
+  // Byte offsets within kInstanceSizesOffset.
+  static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;
+  static const int kInObjectPropertiesByte = 1;
+  static const int kInObjectPropertiesOffset =
+      kInstanceSizesOffset + kInObjectPropertiesByte;
+  static const int kPreAllocatedPropertyFieldsByte = 2;
+  static const int kPreAllocatedPropertyFieldsOffset =
+      kInstanceSizesOffset + kPreAllocatedPropertyFieldsByte;
+  // The byte at position 3 is not in use at the moment.
+
+  // Byte offsets within kInstanceAttributesOffset attributes.
+  static const int kInstanceTypeOffset = kInstanceAttributesOffset + 0;
+  static const int kUnusedPropertyFieldsOffset = kInstanceAttributesOffset + 1;
+  static const int kBitFieldOffset = kInstanceAttributesOffset + 2;
+  static const int kBitField2Offset = kInstanceAttributesOffset + 3;
+
+  STATIC_CHECK(kInstanceTypeOffset == Internals::kMapInstanceTypeOffset);
+
+  // Bit positions for bit field.
+  static const int kUnused = 0;  // To be used for marking recently used maps.
+  static const int kHasNonInstancePrototype = 1;
+  static const int kIsHiddenPrototype = 2;
+  static const int kHasNamedInterceptor = 3;
+  static const int kHasIndexedInterceptor = 4;
+  static const int kIsUndetectable = 5;
+  static const int kHasInstanceCallHandler = 6;
+  static const int kIsAccessCheckNeeded = 7;
+
+  // Bit positions for bit field 2
+  static const int kNeedsLoading = 0;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Map);
+};
+
+
+// An abstract superclass, a marker class really, for simple structure classes.
+// It doesn't carry much functionality but allows struct classes to be
+// identified in the type system.
+class Struct: public HeapObject {
+ public:
+  inline void InitializeBody(int object_size);
+  static inline Struct* cast(Object* that);
+};
+
+
+// Script describes a script which has been added to the VM.
+class Script: public Struct {
+ public:
+  // Script types.
+  enum Type {
+    TYPE_NATIVE = 0,
+    TYPE_EXTENSION = 1,
+    TYPE_NORMAL = 2
+  };
+
+  // Script compilation types.
+  enum CompilationType {
+    COMPILATION_TYPE_HOST = 0,
+    COMPILATION_TYPE_EVAL = 1,
+    COMPILATION_TYPE_JSON = 2
+  };
+
+  // [source]: the script source.
+  DECL_ACCESSORS(source, Object)
+
+  // [name]: the script name.
+  DECL_ACCESSORS(name, Object)
+
+  // [id]: the script id.
+  DECL_ACCESSORS(id, Object)
+
+  // [line_offset]: script line offset in resource from where it was extracted.
+  DECL_ACCESSORS(line_offset, Smi)
+
+  // [column_offset]: script column offset in resource from where it was
+  // extracted.
+  DECL_ACCESSORS(column_offset, Smi)
+
+  // [data]: additional data associated with this script.
+  DECL_ACCESSORS(data, Object)
+
+  // [context_data]: context data for the context this script was compiled in.
+  DECL_ACCESSORS(context_data, Object)
+
+  // [wrapper]: the wrapper cache.
+  DECL_ACCESSORS(wrapper, Proxy)
+
+  // [type]: the script type.
+  DECL_ACCESSORS(type, Smi)
+
+  // [compilation_type]: how the script was compiled.
+  DECL_ACCESSORS(compilation_type, Smi)
+
+  // [line_ends]: array of line ends positions.
+  DECL_ACCESSORS(line_ends, Object)
+
+  // [eval_from_function]: for eval scripts, the function from which eval was
+  // called.
+  DECL_ACCESSORS(eval_from_function, Object)
+
+  // [eval_from_instructions_offset]: the instruction offset, within the code
+  // of the function from which eval was called, at which the eval call
+  // occurred.
+  DECL_ACCESSORS(eval_from_instructions_offset, Smi)
+
+  static inline Script* cast(Object* obj);
+
+#ifdef DEBUG
+  void ScriptPrint();
+  void ScriptVerify();
+#endif
+
+  static const int kSourceOffset = HeapObject::kHeaderSize;
+  static const int kNameOffset = kSourceOffset + kPointerSize;
+  static const int kLineOffsetOffset = kNameOffset + kPointerSize;
+  static const int kColumnOffsetOffset = kLineOffsetOffset + kPointerSize;
+  static const int kDataOffset = kColumnOffsetOffset + kPointerSize;
+  static const int kContextOffset = kDataOffset + kPointerSize;
+  static const int kWrapperOffset = kContextOffset + kPointerSize;
+  static const int kTypeOffset = kWrapperOffset + kPointerSize;
+  static const int kCompilationTypeOffset = kTypeOffset + kPointerSize;
+  static const int kLineEndsOffset = kCompilationTypeOffset + kPointerSize;
+  static const int kIdOffset = kLineEndsOffset + kPointerSize;
+  static const int kEvalFromFunctionOffset = kIdOffset + kPointerSize;
+  static const int kEvalFrominstructionsOffsetOffset =
+      kEvalFromFunctionOffset + kPointerSize;
+  static const int kSize = kEvalFrominstructionsOffsetOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Script);
+};
+
+
+// SharedFunctionInfo describes the JSFunction information that can be
+// shared by multiple instances of the function.
+class SharedFunctionInfo: public HeapObject {
+ public:
+  // [name]: Function name.
+  DECL_ACCESSORS(name, Object)
+
+  // [code]: Function code.
+  DECL_ACCESSORS(code, Code)
+
+  // [construct stub]: Code stub for constructing instances of this function.
+  DECL_ACCESSORS(construct_stub, Code)
+
+  // Returns whether this function has been compiled to native code yet.
+  inline bool is_compiled();
+
+  // [length]: The function length - usually the number of declared
+  // parameters.  At most 2^30 parameters are supported.
+  inline int length();
+  inline void set_length(int value);
+
+  // [formal parameter count]: The declared number of parameters.
+  inline int formal_parameter_count();
+  inline void set_formal_parameter_count(int value);
+
+  // Set the formal parameter count so the function code will be
+  // called without using argument adaptor frames.
+  inline void DontAdaptArguments();
+
+  // [expected_nof_properties]: Expected number of properties for the function.
+  inline int expected_nof_properties();
+  inline void set_expected_nof_properties(int value);
+
+  // [instance class name]: class name for instances.
+  DECL_ACCESSORS(instance_class_name, Object)
+
+  // [function data]: This field has been added for the benefit of the API.
+  // In the long run we don't want all functions to have this field but
+  // we can fix that when we have a better model for storing hidden data
+  // on objects.
+  DECL_ACCESSORS(function_data, Object)
+
+  // [script info]: Script from which the function originates.
+  DECL_ACCESSORS(script, Object)
+
+  // [start_position_and_type]: Field used to store both the source code
+  // position, whether or not the function is a function expression, and
+  // whether or not the function is a toplevel function.  The two least
+  // significant bits indicate whether the function is an expression and
+  // whether it is toplevel; the rest contains the source code position.
+  inline int start_position_and_type();
+  inline void set_start_position_and_type(int value);
+
+  // [debug info]: Debug information.
+  DECL_ACCESSORS(debug_info, Object)
+
+  // [inferred name]: Name inferred from variable or property
+  // assignment of this function. Used to facilitate debugging and
+  // profiling of JavaScript code written in OO style, where almost
+  // all functions are anonymous but are assigned to object
+  // properties.
+  DECL_ACCESSORS(inferred_name, String)
+
+  // Position of the 'function' token in the script source.
+  inline int function_token_position();
+  inline void set_function_token_position(int function_token_position);
+
+  // Position of this function in the script source.
+  inline int start_position();
+  inline void set_start_position(int start_position);
+
+  // End position of this function in the script source.
+  inline int end_position();
+  inline void set_end_position(int end_position);
+
+  // Is this function a function expression in the source code.
+  inline bool is_expression();
+  inline void set_is_expression(bool value);
+
+  // Is this function a top-level function (scripts, evals).
+  inline bool is_toplevel();
+  inline void set_is_toplevel(bool value);
+
+  // Bit field containing various information collected by the compiler to
+  // drive optimization.
+  inline int compiler_hints();
+  inline void set_compiler_hints(int value);
+
+  // Add information on assignments of the form this.x = ...;
+  void SetThisPropertyAssignmentsInfo(
+      bool has_only_this_property_assignments,
+      bool has_only_simple_this_property_assignments,
+      FixedArray* this_property_assignments);
+
+  // Clear information on assignments of the form this.x = ...;
+  void ClearThisPropertyAssignmentsInfo();
+
+  // Indicate that this function only consists of assignments of the form
+  // this.x = ...;.
+  inline bool has_only_this_property_assignments();
+
+  // Indicate that this function only consists of assignments of the form
+  // this.x = y; where y is either a constant or refers to an argument.
+  inline bool has_only_simple_this_property_assignments();
+
+  // For functions which only contain this-property assignments, this
+  // provides access to the names of the assigned properties.
+  DECL_ACCESSORS(this_property_assignments, Object)
+  inline int this_property_assignments_count();
+  inline void set_this_property_assignments_count(int value);
+  String* GetThisPropertyAssignmentName(int index);
+  bool IsThisPropertyAssignmentArgument(int index);
+  int GetThisPropertyAssignmentArgument(int index);
+  Object* GetThisPropertyAssignmentConstant(int index);
+
+  // [source code]: Source code for the function.
+  bool HasSourceCode();
+  Object* GetSourceCode();
+
+  // Calculate the instance size.
+  int CalculateInstanceSize();
+
+  // Calculate the number of in-object properties.
+  int CalculateInObjectProperties();
+
+  // Dispatched behavior.
+  void SharedFunctionInfoIterateBody(ObjectVisitor* v);
+  // Set max_length to -1 for unlimited length.
+  void SourceCodePrint(StringStream* accumulator, int max_length);
+#ifdef DEBUG
+  void SharedFunctionInfoPrint();
+  void SharedFunctionInfoVerify();
+#endif
+
+  // Casting.
+  static inline SharedFunctionInfo* cast(Object* obj);
+
+  // Constants.
+  static const int kDontAdaptArgumentsSentinel = -1;
+
+  // Layout description.
+  // (An even number of integers has a size that is a multiple of a pointer.)
+  static const int kNameOffset = HeapObject::kHeaderSize;
+  static const int kCodeOffset = kNameOffset + kPointerSize;
+  static const int kConstructStubOffset = kCodeOffset + kPointerSize;
+  static const int kLengthOffset = kConstructStubOffset + kPointerSize;
+  static const int kFormalParameterCountOffset = kLengthOffset + kIntSize;
+  static const int kExpectedNofPropertiesOffset =
+      kFormalParameterCountOffset + kIntSize;
+  static const int kStartPositionAndTypeOffset =
+      kExpectedNofPropertiesOffset + kIntSize;
+  static const int kEndPositionOffset = kStartPositionAndTypeOffset + kIntSize;
+  static const int kFunctionTokenPositionOffset = kEndPositionOffset + kIntSize;
+  static const int kInstanceClassNameOffset =
+      kFunctionTokenPositionOffset + kIntSize;
+  static const int kExternalReferenceDataOffset =
+      kInstanceClassNameOffset + kPointerSize;
+  static const int kScriptOffset = kExternalReferenceDataOffset + kPointerSize;
+  static const int kDebugInfoOffset = kScriptOffset + kPointerSize;
+  static const int kInferredNameOffset = kDebugInfoOffset + kPointerSize;
+  static const int kCompilerHintsOffset = kInferredNameOffset + kPointerSize;
+  static const int kThisPropertyAssignmentsOffset =
+      kCompilerHintsOffset + kPointerSize;
+  static const int kThisPropertyAssignmentsCountOffset =
+      kThisPropertyAssignmentsOffset + kPointerSize;
+  static const int kSize = kThisPropertyAssignmentsCountOffset + kPointerSize;
+
+ private:
+  // Bit positions in length_and_flg.
+  // The least significant bit is used as the flag.
+  static const int kFlagBit         = 0;
+  static const int kLengthShift     = 1;
+  static const int kLengthMask      = ~((1 << kLengthShift) - 1);
+
+  // Bit positions in start_position_and_type.
+  // The source code start position is in the 30 most significant bits of
+  // the start_position_and_type field.
+  static const int kIsExpressionBit = 0;
+  static const int kIsTopLevelBit   = 1;
+  static const int kStartPositionShift = 2;
+  static const int kStartPositionMask = ~((1 << kStartPositionShift) - 1);
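+
+  // Worked example (illustrative): a toplevel, non-expression function whose
+  // source starts at position 10 stores
+  //   (10 << kStartPositionShift) | (1 << kIsTopLevelBit) == 42
+  // in start_position_and_type.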
+
+  // Bit positions in compiler_hints.
+  static const int kHasOnlyThisPropertyAssignments = 0;
+  static const int kHasOnlySimpleThisPropertyAssignments = 1;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
+};
+
+
+// JSFunction describes JavaScript functions.
+class JSFunction: public JSObject {
+ public:
+  // [prototype_or_initial_map]:
+  DECL_ACCESSORS(prototype_or_initial_map, Object)
+
+  // [shared_function_info]: The information about the function that
+  // can be shared by instances.
+  DECL_ACCESSORS(shared, SharedFunctionInfo)
+
+  // [context]: The context for this function.
+  inline Context* context();
+  inline Object* unchecked_context();
+  inline void set_context(Object* context);
+
+  // [code]: The generated code object for this function.  Executed
+  // when the function is invoked, e.g. foo() or new foo(). See
+  // [[Call]] and [[Construct]] description in ECMA-262, section
+  // 8.6.2, page 27.
+  inline Code* code();
+  inline void set_code(Code* value);
+
+  // Tells whether this function is a context-independent boilerplate
+  // function.
+  inline bool IsBoilerplate();
+
+  // Tells whether this function is builtin.
+  inline bool IsBuiltin();
+
+  // [literals]: Fixed array holding the materialized literals.
+  //
+  // If the function contains object, regexp or array literals, the
+  // literals array prefix contains the object, regexp, and array
+  // function to be used when creating these literals.  This is
+  // necessary so that we do not dynamically look up the object, regexp
+  // or array functions.  If we performed a dynamic lookup, we might end
+  // up using functions from a new context that we should not have
+  // access to.
+  DECL_ACCESSORS(literals, FixedArray)
+
+  // The initial map for an object created by this constructor.
+  inline Map* initial_map();
+  inline void set_initial_map(Map* value);
+  inline bool has_initial_map();
+
+  // Get and set the prototype property on a JSFunction. If the
+  // function has an initial map the prototype is set on the initial
+  // map. Otherwise, the prototype is put in the initial map field
+  // until an initial map is needed.
+  inline bool has_prototype();
+  inline bool has_instance_prototype();
+  inline Object* prototype();
+  inline Object* instance_prototype();
+  Object* SetInstancePrototype(Object* value);
+  Object* SetPrototype(Object* value);
+
+  // Accessor for this function's initial map's [[class]]
+  // property. This is primarily used by ECMA native functions.  This
+  // method sets the class_name field of this function's initial map
+  // to a given value. It creates an initial map if this function does
+  // not have one. Note that this method does not copy the initial map
+  // if it has one already, but simply replaces it with the new value.
+  // Instances created afterwards will have a map whose [[class]] is
+  // set to 'value', but there are no guarantees for instances created
+  // before.
+  Object* SetInstanceClassName(String* name);
+
+  // Returns whether this function has been compiled to native code yet.
+  inline bool is_compiled();
+
+  // Casting.
+  static inline JSFunction* cast(Object* obj);
+
+  // Dispatched behavior.
+#ifdef DEBUG
+  void JSFunctionPrint();
+  void JSFunctionVerify();
+#endif
+
+  // Returns the number of allocated literals.
+  inline int NumberOfLiterals();
+
+  // Retrieve the global context from a function's literal array.
+  static Context* GlobalContextFromLiterals(FixedArray* literals);
+
+  // Layout descriptors.
+  static const int kPrototypeOrInitialMapOffset = JSObject::kHeaderSize;
+  static const int kSharedFunctionInfoOffset =
+      kPrototypeOrInitialMapOffset + kPointerSize;
+  static const int kContextOffset = kSharedFunctionInfoOffset + kPointerSize;
+  static const int kLiteralsOffset = kContextOffset + kPointerSize;
+  static const int kSize = kLiteralsOffset + kPointerSize;
+
+  // Layout of the literals array.
+  static const int kLiteralsPrefixSize = 1;
+  static const int kLiteralGlobalContextIndex = 0;
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSFunction);
+};
+
+
+// JSGlobalProxy's prototype must be a JSGlobalObject or null,
+// and the prototype is hidden. JSGlobalProxy always delegates
+// property accesses to its prototype if the prototype is not null.
+//
+// A JSGlobalProxy can be reinitialized which will preserve its identity.
+//
+// Accessing a JSGlobalProxy requires security check.
+
+class JSGlobalProxy : public JSObject {
+ public:
+  // [context]: the owner global context of this proxy object.
+  // It is the null value if this object is not used by any context.
+  DECL_ACCESSORS(context, Object)
+
+  // Casting.
+  static inline JSGlobalProxy* cast(Object* obj);
+
+  // Dispatched behavior.
+#ifdef DEBUG
+  void JSGlobalProxyPrint();
+  void JSGlobalProxyVerify();
+#endif
+
+  // Layout description.
+  static const int kContextOffset = JSObject::kHeaderSize;
+  static const int kSize = kContextOffset + kPointerSize;
+
+ private:
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalProxy);
+};
+
+
+// Forward declaration.
+class JSBuiltinsObject;
+
+// Common super class for JavaScript global objects and the special
+// builtins global objects.
+class GlobalObject: public JSObject {
+ public:
+  // [builtins]: the object holding the runtime routines written in JS.
+  DECL_ACCESSORS(builtins, JSBuiltinsObject)
+
+  // [global context]: the global context corresponding to this global object.
+  DECL_ACCESSORS(global_context, Context)
+
+  // [global receiver]: the global receiver object of the context
+  DECL_ACCESSORS(global_receiver, JSObject)
+
+  // Retrieve the property cell used to store a property.
+  Object* GetPropertyCell(LookupResult* result);
+
+  // Ensure that the global object has a cell for the given property name.
+  Object* EnsurePropertyCell(String* name);
+
+  // Casting.
+  static inline GlobalObject* cast(Object* obj);
+
+  // Layout description.
+  static const int kBuiltinsOffset = JSObject::kHeaderSize;
+  static const int kGlobalContextOffset = kBuiltinsOffset + kPointerSize;
+  static const int kGlobalReceiverOffset = kGlobalContextOffset + kPointerSize;
+  static const int kHeaderSize = kGlobalReceiverOffset + kPointerSize;
+
+ private:
+  friend class AGCCVersionRequiresThisClassToHaveAFriendSoHereItIs;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(GlobalObject);
+};
+
+
+// JavaScript global object.
+class JSGlobalObject: public GlobalObject {
+ public:
+
+  // Casting.
+  static inline JSGlobalObject* cast(Object* obj);
+
+  // Dispatched behavior.
+#ifdef DEBUG
+  void JSGlobalObjectPrint();
+  void JSGlobalObjectVerify();
+#endif
+
+  // Layout description.
+  static const int kSize = GlobalObject::kHeaderSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalObject);
+};
+
+
+// Builtins global object which holds the runtime routines written in
+// JavaScript.
+class JSBuiltinsObject: public GlobalObject {
+ public:
+  // Accessors for the runtime routines written in JavaScript.
+  inline Object* javascript_builtin(Builtins::JavaScript id);
+  inline void set_javascript_builtin(Builtins::JavaScript id, Object* value);
+
+  // Casting.
+  static inline JSBuiltinsObject* cast(Object* obj);
+
+  // Dispatched behavior.
+#ifdef DEBUG
+  void JSBuiltinsObjectPrint();
+  void JSBuiltinsObjectVerify();
+#endif
+
+  // Layout description.  The size of the builtins object includes
+  // room for one pointer per runtime routine written in javascript.
+  static const int kJSBuiltinsCount = Builtins::id_count;
+  static const int kJSBuiltinsOffset = GlobalObject::kHeaderSize;
+  static const int kSize =
+      kJSBuiltinsOffset + (kJSBuiltinsCount * kPointerSize);
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSBuiltinsObject);
+};
+
+
+// Representation for JS Wrapper objects, String, Number, Boolean, Date, etc.
+class JSValue: public JSObject {
+ public:
+  // [value]: the object being wrapped.
+  DECL_ACCESSORS(value, Object)
+
+  // Casting.
+  static inline JSValue* cast(Object* obj);
+
+  // Dispatched behavior.
+#ifdef DEBUG
+  void JSValuePrint();
+  void JSValueVerify();
+#endif
+
+  // Layout description.
+  static const int kValueOffset = JSObject::kHeaderSize;
+  static const int kSize = kValueOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSValue);
+};
+
+// Regular expressions
+// The regular expression holds a single reference to a FixedArray in
+// the kDataOffset field.
+// The FixedArray contains the following data:
+// - tag : type of regexp implementation (not compiled yet, atom or irregexp)
+// - reference to the original source string
+// - reference to the original flag string
+// If it is an atom regexp
+// - a reference to a literal string to search for
+// If it is an irregexp regexp:
+// - a reference to code for ASCII inputs (bytecode or compiled).
+// - a reference to code for UC16 inputs (bytecode or compiled).
+// - max number of registers used by irregexp implementations.
+// - number of capture registers (output values) of the regexp.
+class JSRegExp: public JSObject {
+ public:
+  // Meaning of Type:
+  // NOT_COMPILED: Initial value. No data has been stored in the JSRegExp yet.
+  // ATOM: A simple string to match against using an indexOf operation.
+  // IRREGEXP: Compiled with Irregexp.
+  // IRREGEXP_NATIVE: Compiled to native code with Irregexp.
+  enum Type { NOT_COMPILED, ATOM, IRREGEXP };
+  enum Flag { NONE = 0, GLOBAL = 1, IGNORE_CASE = 2, MULTILINE = 4 };
+
+  class Flags {
+   public:
+    explicit Flags(uint32_t value) : value_(value) { }
+    bool is_global() { return (value_ & GLOBAL) != 0; }
+    bool is_ignore_case() { return (value_ & IGNORE_CASE) != 0; }
+    bool is_multiline() { return (value_ & MULTILINE) != 0; }
+    uint32_t value() { return value_; }
+   private:
+    uint32_t value_;
+  };
+
+  DECL_ACCESSORS(data, Object)
+
+  inline Type TypeTag();
+  inline int CaptureCount();
+  inline Flags GetFlags();
+  inline String* Pattern();
+  inline Object* DataAt(int index);
+  // Set implementation data after the object has been prepared.
+  inline void SetDataAt(int index, Object* value);
+  static int code_index(bool is_ascii) {
+    if (is_ascii) {
+      return kIrregexpASCIICodeIndex;
+    } else {
+      return kIrregexpUC16CodeIndex;
+    }
+  }
+
+  static inline JSRegExp* cast(Object* obj);
+
+  // Dispatched behavior.
+#ifdef DEBUG
+  void JSRegExpVerify();
+#endif
+
+  static const int kDataOffset = JSObject::kHeaderSize;
+  static const int kSize = kDataOffset + kPointerSize;
+
+  // Indices in the data array.
+  static const int kTagIndex = 0;
+  static const int kSourceIndex = kTagIndex + 1;
+  static const int kFlagsIndex = kSourceIndex + 1;
+  static const int kDataIndex = kFlagsIndex + 1;
+  // The data fields are used in different ways depending on the
+  // value of the tag.
+  // Atom regexps (literal strings).
+  static const int kAtomPatternIndex = kDataIndex;
+
+  static const int kAtomDataSize = kAtomPatternIndex + 1;
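+
+  // For an ATOM regexp the data array therefore looks like this
+  // (illustrative; field contents follow the description at the top of this
+  // class):
+  //   [kTagIndex]         -> Smi(ATOM)
+  //   [kSourceIndex]      -> original source string
+  //   [kFlagsIndex]       -> original flags
+  //   [kAtomPatternIndex] -> literal string to search for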
+
+  // Irregexp compiled code or bytecode for ASCII. If compilation
+  // fails, this field holds an exception object that should be
+  // thrown if the regexp is used again.
+  static const int kIrregexpASCIICodeIndex = kDataIndex;
+  // Irregexp compiled code or bytecode for UC16.  If compilation
+  // fails, this field holds an exception object that should be
+  // thrown if the regexp is used again.
+  static const int kIrregexpUC16CodeIndex = kDataIndex + 1;
+  // Maximal number of registers used by either ASCII or UC16.
+  // Only used to check that there is enough stack space
+  static const int kIrregexpMaxRegisterCountIndex = kDataIndex + 2;
+  // Number of captures in the compiled regexp.
+  static const int kIrregexpCaptureCountIndex = kDataIndex + 3;
+
+  static const int kIrregexpDataSize = kIrregexpCaptureCountIndex + 1;
+};
+
+
+class CompilationCacheShape {
+ public:
+  static inline bool IsMatch(HashTableKey* key, Object* value) {
+    return key->IsMatch(value);
+  }
+
+  static inline uint32_t Hash(HashTableKey* key) {
+    return key->Hash();
+  }
+
+  static inline uint32_t HashForObject(HashTableKey* key, Object* object) {
+    return key->HashForObject(object);
+  }
+
+  static Object* AsObject(HashTableKey* key) {
+    return key->AsObject();
+  }
+
+  static const int kPrefixSize = 0;
+  static const int kEntrySize = 2;
+};
+
+class CompilationCacheTable: public HashTable<CompilationCacheShape,
+                                              HashTableKey*> {
+ public:
+  // Find cached value for a string key, otherwise return null.
+  Object* Lookup(String* src);
+  Object* LookupEval(String* src, Context* context);
+  Object* LookupRegExp(String* source, JSRegExp::Flags flags);
+  Object* Put(String* src, Object* value);
+  Object* PutEval(String* src, Context* context, Object* value);
+  Object* PutRegExp(String* src, JSRegExp::Flags flags, FixedArray* value);
+
+  static inline CompilationCacheTable* cast(Object* obj);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheTable);
+};
+
+
+enum AllowNullsFlag {ALLOW_NULLS, DISALLOW_NULLS};
+enum RobustnessFlag {ROBUST_STRING_TRAVERSAL, FAST_STRING_TRAVERSAL};
+
+
+class StringHasher {
+ public:
+  inline StringHasher(int length);
+
+  // Returns true if the hash of this string can be computed without
+  // looking at the contents.
+  inline bool has_trivial_hash();
+
+  // Add a character to the hash and update the array index calculation.
+  inline void AddCharacter(uc32 c);
+
+  // Adds a character to the hash but does not update the array index
+  // calculation.  This can only be called when it has been verified
+  // that the input is not an array index.
+  inline void AddCharacterNoIndex(uc32 c);
+
+  // Returns the value to store in the hash field of a string with
+  // the given length and contents.
+  uint32_t GetHashField();
+
+  // Returns true if the characters seen so far make up a legal array
+  // index.
+  bool is_array_index() { return is_array_index_; }
+
+  bool is_valid() { return is_valid_; }
+
+  void invalidate() { is_valid_ = false; }
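+
+  // A minimal usage sketch (illustrative; actual call sites differ):
+  //   StringHasher hasher(3);
+  //   hasher.AddCharacter('f');
+  //   hasher.AddCharacter('o');
+  //   hasher.AddCharacter('o');
+  //   uint32_t field = hasher.GetHashField();
+  // Had the characters formed a legal array index (e.g. "42"),
+  // is_array_index() would report true.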
+
+ private:
+
+  uint32_t array_index() {
+    ASSERT(is_array_index());
+    return array_index_;
+  }
+
+  inline uint32_t GetHash();
+
+  int length_;
+  uint32_t raw_running_hash_;
+  uint32_t array_index_;
+  bool is_array_index_;
+  bool is_first_char_;
+  bool is_valid_;
+};
+
+
+// The characteristics of a string are stored in its map.  Retrieving these
+// few bits of information is moderately expensive, involving two memory
+// loads where the second is dependent on the first.  To improve efficiency
+// the shape of the string is given its own class so that it can be retrieved
+// once and used for several string operations.  A StringShape is small enough
+// to be passed by value and is immutable, but be aware that flattening a
+// string can potentially alter its shape.  Also be aware that a GC caused by
+// something else can alter the shape of a string due to ConsString
+// shortcutting.  Keeping these restrictions in mind has proven to be error-
+// prone and so we no longer put StringShapes in variables unless there is a
+// concrete performance benefit at that particular point in the code.
+class StringShape BASE_EMBEDDED {
+ public:
+  inline explicit StringShape(String* s);
+  inline explicit StringShape(Map* s);
+  inline explicit StringShape(InstanceType t);
+  inline bool IsSequential();
+  inline bool IsExternal();
+  inline bool IsCons();
+  inline bool IsSliced();
+  inline bool IsExternalAscii();
+  inline bool IsExternalTwoByte();
+  inline bool IsSequentialAscii();
+  inline bool IsSequentialTwoByte();
+  inline bool IsSymbol();
+  inline StringRepresentationTag representation_tag();
+  inline uint32_t full_representation_tag();
+  inline uint32_t size_tag();
+#ifdef DEBUG
+  inline uint32_t type() { return type_; }
+  inline void invalidate() { valid_ = false; }
+  inline bool valid() { return valid_; }
+#else
+  inline void invalidate() { }
+#endif
+ private:
+  uint32_t type_;
+#ifdef DEBUG
+  inline void set_valid() { valid_ = true; }
+  bool valid_;
+#else
+  inline void set_valid() { }
+#endif
+};
+
+
+// The String abstract class captures JavaScript string values:
+//
+// Ecma-262:
+//  4.3.16 String Value
+//    A string value is a member of the type String and is a finite
+//    ordered sequence of zero or more 16-bit unsigned integer values.
+//
+// All string values have a length field.
+class String: public HeapObject {
+ public:
+  // Get and set the length of the string.
+  inline int length();
+  inline void set_length(int value);
+
+  // Get and set the uninterpreted length field of the string.  Notice
+  // that the length field is also used to cache the hash value of
+  // strings.  In order to get or set the actual length of the string
+  // use the length() and set_length methods.
+  inline uint32_t length_field();
+  inline void set_length_field(uint32_t value);
+
+  inline bool IsAsciiRepresentation();
+  inline bool IsTwoByteRepresentation();
+
+  // Get and set individual two byte chars in the string.
+  inline void Set(int index, uint16_t value);
+  // Get individual two byte char in the string.  Repeated calls
+  // to this method are not efficient unless the string is flat.
+  inline uint16_t Get(int index);
+
+  // Try to flatten the top level ConsString that is hiding behind this
+  // string.  This is a no-op unless the string is a ConsString or a
+  // SlicedString.  Flatten mutates the ConsString and might return a
+  // failure.
+  Object* TryFlatten();
+
+  // Try to flatten the string.  Checks first inline to see if it is necessary.
+  // Do not handle allocation failures.  After calling TryFlattenIfNotFlat, the
+  // string could still be a ConsString, in which case a failure is returned.
+  // Use FlattenString from Handles.cc to be sure to flatten.
+  inline Object* TryFlattenIfNotFlat();
+
+  Vector<const char> ToAsciiVector();
+  Vector<const uc16> ToUC16Vector();
+
+  // Mark the string as an undetectable object. It only applies to
+  // ascii and two byte string types.
+  bool MarkAsUndetectable();
+
+  // Slice the string and return a substring.
+  Object* Slice(int from, int to);
+
+  // String equality operations.
+  inline bool Equals(String* other);
+  bool IsEqualTo(Vector<const char> str);
+
+  // Return a UTF8 representation of the string.  The string is null
+  // terminated but may optionally contain nulls.  Length is returned
+  // in length_output if length_output is not a null pointer.  The string
+  // should be nearly flat, otherwise the performance of this method may
+  // be very slow (quadratic in the length).  Setting robustness_flag to
+  // ROBUST_STRING_TRAVERSAL invokes behaviour that is robust.  This means it
+  // handles unexpected data without causing assert failures and it does not
+  // do any heap allocations.  This is useful when printing stack traces.
+  SmartPointer<char> ToCString(AllowNullsFlag allow_nulls,
+                               RobustnessFlag robustness_flag,
+                               int offset,
+                               int length,
+                               int* length_output = 0);
+  SmartPointer<char> ToCString(
+      AllowNullsFlag allow_nulls = DISALLOW_NULLS,
+      RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL,
+      int* length_output = 0);
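+
+  // A usage sketch (illustrative; assumes SmartPointer exposes the buffer via
+  // operator*):
+  //   SmartPointer<char> chars = str->ToCString();
+  //   printf("%s\n", *chars);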
+
+  int Utf8Length();
+
+  // Return a 16 bit Unicode representation of the string.
+  // The string should be nearly flat, otherwise the performance of
+  // this method may be very bad.  Setting robustness_flag to
+  // ROBUST_STRING_TRAVERSAL invokes behaviour that is robust.  This means it
+  // handles unexpected data without causing assert failures and it does not
+  // do any heap allocations.  This is useful when printing stack traces.
+  SmartPointer<uc16> ToWideCString(
+      RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL);
+
+  // Tells whether the hash code has been computed.
+  inline bool HasHashCode();
+
+  // Returns a hash value used for the property table
+  inline uint32_t Hash();
+
+  static uint32_t ComputeLengthAndHashField(unibrow::CharacterStream* buffer,
+                                            int length);
+
+  static bool ComputeArrayIndex(unibrow::CharacterStream* buffer,
+                                uint32_t* index,
+                                int length);
+
+  // Externalization.
+  bool MakeExternal(v8::String::ExternalStringResource* resource);
+  bool MakeExternal(v8::String::ExternalAsciiStringResource* resource);
+
+  // Conversion.
+  inline bool AsArrayIndex(uint32_t* index);
+
+  // Casting.
+  static inline String* cast(Object* obj);
+
+  void PrintOn(FILE* out);
+
+  // For use during stack traces.  Performs rudimentary sanity check.
+  bool LooksValid();
+
+  // Dispatched behavior.
+  void StringShortPrint(StringStream* accumulator);
+#ifdef DEBUG
+  void StringPrint();
+  void StringVerify();
+#endif
+  inline bool IsFlat();
+
+  // Layout description.
+  static const int kLengthOffset = HeapObject::kHeaderSize;
+  static const int kSize = kLengthOffset + kIntSize;
+  // Notice: kSize is not pointer-size aligned if pointers are 64-bit.
+
+  // Limits on sizes of different types of strings.
+  static const int kMaxShortStringSize = 63;
+  static const int kMaxMediumStringSize = 16383;
+
+  static const int kMaxArrayIndexSize = 10;
+
+  // Max ascii char code.
+  static const int kMaxAsciiCharCode = unibrow::Utf8::kMaxOneByteChar;
+  static const unsigned kMaxAsciiCharCodeU = unibrow::Utf8::kMaxOneByteChar;
+  static const int kMaxUC16CharCode = 0xffff;
+
+  // Minimum length for a cons or sliced string.
+  static const int kMinNonFlatLength = 13;
+
+  // Mask constant for checking if a string has a computed hash code
+  // and if it is an array index.  The least significant bit indicates
+  // whether a hash code has been computed.  If the hash code has been
+  // computed the 2nd bit tells whether the string can be used as an
+  // array index.
+  static const int kHashComputedMask = 1;
+  static const int kIsArrayIndexMask = 1 << 1;
+  static const int kNofLengthBitFields = 2;
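+
+  // E.g. (illustrative): for a string whose hash has been computed and which
+  // is a valid array index,
+  //   (length_field() & (kHashComputedMask | kIsArrayIndexMask)) == 3,
+  // while the hash (or cached index) occupies the bits above kHashShift.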
+
+  // Array index strings this short can keep their index in the hash
+  // field.
+  static const int kMaxCachedArrayIndexLength = 7;
+
+  // Shift constants for retrieving length and hash code from
+  // length/hash field.
+  static const int kHashShift = kNofLengthBitFields;
+  static const int kShortLengthShift = kHashShift + kShortStringTag;
+  static const int kMediumLengthShift = kHashShift + kMediumStringTag;
+  static const int kLongLengthShift = kHashShift + kLongStringTag;
+
+  // Limit for truncation in short printing.
+  static const int kMaxShortPrintLength = 1024;
+
+  // Support for regular expressions.
+  const uc16* GetTwoByteData();
+  const uc16* GetTwoByteData(unsigned start);
+
+  // Support for StringInputBuffer
+  static const unibrow::byte* ReadBlock(String* input,
+                                        unibrow::byte* util_buffer,
+                                        unsigned capacity,
+                                        unsigned* remaining,
+                                        unsigned* offset);
+  static const unibrow::byte* ReadBlock(String** input,
+                                        unibrow::byte* util_buffer,
+                                        unsigned capacity,
+                                        unsigned* remaining,
+                                        unsigned* offset);
+
+  // Helper function for flattening strings.
+  template <typename sinkchar>
+  static void WriteToFlat(String* source,
+                          sinkchar* sink,
+                          int from,
+                          int to);
+
+ protected:
+  class ReadBlockBuffer {
+   public:
+    ReadBlockBuffer(unibrow::byte* util_buffer_,
+                    unsigned cursor_,
+                    unsigned capacity_,
+                    unsigned remaining_) :
+      util_buffer(util_buffer_),
+      cursor(cursor_),
+      capacity(capacity_),
+      remaining(remaining_) {
+    }
+    unibrow::byte* util_buffer;
+    unsigned       cursor;
+    unsigned       capacity;
+    unsigned       remaining;
+  };
+
+  // NOTE: If you call StringInputBuffer routines on strings that are
+  // too deeply nested trees of cons and slice strings, then this
+  // routine will overflow the stack. Strings that are merely deeply
+  // nested trees of cons strings do not have a problem apart from
+  // performance.
+
+  static inline const unibrow::byte* ReadBlock(String* input,
+                                               ReadBlockBuffer* buffer,
+                                               unsigned* offset,
+                                               unsigned max_chars);
+  static void ReadBlockIntoBuffer(String* input,
+                                  ReadBlockBuffer* buffer,
+                                  unsigned* offset_ptr,
+                                  unsigned max_chars);
+
+ private:
+  // Slow case of String::Equals.  This implementation works on any strings
+  // but it is most efficient on strings that are almost flat.
+  bool SlowEquals(String* other);
+
+  // Slow case of AsArrayIndex.
+  bool SlowAsArrayIndex(uint32_t* index);
+
+  // Compute and set the hash code.
+  uint32_t ComputeAndSetHash();
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(String);
+};
+
+
+// The SeqString abstract class captures sequential string values.
+class SeqString: public String {
+ public:
+
+  // Casting.
+  static inline SeqString* cast(Object* obj);
+
+  // Dispatched behaviour.
+  // For regexp code.
+  uint16_t* SeqStringGetTwoByteAddress();
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(SeqString);
+};
+
+
+// The AsciiString class captures sequential ascii string objects.
+// Each character in the AsciiString is an ascii character.
+class SeqAsciiString: public SeqString {
+ public:
+  // Dispatched behavior.
+  inline uint16_t SeqAsciiStringGet(int index);
+  inline void SeqAsciiStringSet(int index, uint16_t value);
+
+  // Get the address of the characters in this string.
+  inline Address GetCharsAddress();
+
+  inline char* GetChars();
+
+  // Casting
+  static inline SeqAsciiString* cast(Object* obj);
+
+  // Garbage collection support.  This method is called by the
+  // garbage collector to compute the actual size of an AsciiString
+  // instance.
+  inline int SeqAsciiStringSize(InstanceType instance_type);
+
+  // Computes the size for an AsciiString instance of a given length.
+  static int SizeFor(int length) {
+    return OBJECT_SIZE_ALIGN(kHeaderSize + length * kCharSize);
+  }
+
+  // Layout description.
+  static const int kHeaderSize = String::kSize;
+  static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
+
+  // Support for StringInputBuffer.
+  inline void SeqAsciiStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
+                                                unsigned* offset,
+                                                unsigned chars);
+  inline const unibrow::byte* SeqAsciiStringReadBlock(unsigned* remaining,
+                                                      unsigned* offset,
+                                                      unsigned chars);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(SeqAsciiString);
+};
+
+
+// The TwoByteString class captures sequential unicode string objects.
+// Each character in the TwoByteString is a two-byte uint16_t.
+class SeqTwoByteString: public SeqString {
+ public:
+  // Dispatched behavior.
+  inline uint16_t SeqTwoByteStringGet(int index);
+  inline void SeqTwoByteStringSet(int index, uint16_t value);
+
+  // Get the address of the characters in this string.
+  inline Address GetCharsAddress();
+
+  inline uc16* GetChars();
+
+  // For regexp code.
+  const uint16_t* SeqTwoByteStringGetData(unsigned start);
+
+  // Casting
+  static inline SeqTwoByteString* cast(Object* obj);
+
+  // Garbage collection support.  This method is called by the
+  // garbage collector to compute the actual size of a TwoByteString
+  // instance.
+  inline int SeqTwoByteStringSize(InstanceType instance_type);
+
+  // Computes the size for a TwoByteString instance of a given length.
+  static int SizeFor(int length) {
+    return OBJECT_SIZE_ALIGN(kHeaderSize + length * kShortSize);
+  }
+
+  // Layout description.
+  static const int kHeaderSize = String::kSize;
+  static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
+
+  // Support for StringInputBuffer.
+  inline void SeqTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
+                                                  unsigned* offset_ptr,
+                                                  unsigned chars);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(SeqTwoByteString);
+};
+
+
+// The ConsString class describes string values built by using the
+// addition operator on strings.  A ConsString is a pair where the
+// first and second components are pointers to other string values.
+// One or both components of a ConsString can be pointers to other
+// ConsStrings, creating a binary tree of ConsStrings where the leaves
+// are non-ConsString string values.  The string value represented by
+// a ConsString can be obtained by concatenating the leaf string
+// values in a left-to-right depth-first traversal of the tree.
+class ConsString: public String {
+ public:
+  // First string of the cons cell.
+  inline String* first();
+  // Doesn't check that the result is a string, even in debug mode.  This is
+  // useful during GC where the mark bits confuse the checks.
+  inline Object* unchecked_first();
+  inline void set_first(String* first,
+                        WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+  // Second string of the cons cell.
+  inline String* second();
+  // Doesn't check that the result is a string, even in debug mode.  This is
+  // useful during GC where the mark bits confuse the checks.
+  inline Object* unchecked_second();
+  inline void set_second(String* second,
+                         WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+  // Dispatched behavior.
+  uint16_t ConsStringGet(int index);
+
+  // Casting.
+  static inline ConsString* cast(Object* obj);
+
+  // Garbage collection support.  This method is called during garbage
+  // collection to iterate through the heap pointers in the body of
+  // the ConsString.
+  void ConsStringIterateBody(ObjectVisitor* v);
+
+  // Layout description.
+  static const int kFirstOffset = POINTER_SIZE_ALIGN(String::kSize);
+  static const int kSecondOffset = kFirstOffset + kPointerSize;
+  static const int kSize = kSecondOffset + kPointerSize;
+
+  // Support for StringInputBuffer.
+  inline const unibrow::byte* ConsStringReadBlock(ReadBlockBuffer* buffer,
+                                                  unsigned* offset_ptr,
+                                                  unsigned chars);
+  inline void ConsStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
+                                            unsigned* offset_ptr,
+                                            unsigned chars);
+
+  // Minimum length for a cons string.
+  static const int kMinLength = 13;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ConsString);
+};
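+
+// A minimal usage sketch (hedged: Factory::NewConsString,
+// Factory::NewStringFromAscii and CStrVector are assumed from other V8
+// headers, not declared here):
+//
+//   Handle<String> foo = Factory::NewStringFromAscii(CStrVector("foo"));
+//   Handle<String> bar = Factory::NewStringFromAscii(CStrVector("bar"));
+//   Handle<String> cons = Factory::NewConsString(foo, bar);
+//   // first() is *foo and second() is *bar; reading characters 0..2 goes
+//   // to the left leaf and characters 3..5 to the right leaf.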
+
+
+// The SlicedString class describes string values that are slices of
+// some other string.  SlicedStrings consist of a reference to an
+// underlying heap-allocated string value, a start index, and the
+// length field common to all strings.
+class SlicedString: public String {
+ public:
+  // The underlying string buffer.
+  inline String* buffer();
+  inline void set_buffer(String* buffer);
+
+  // The start index of the slice.
+  inline int start();
+  inline void set_start(int start);
+
+  // Dispatched behavior.
+  uint16_t SlicedStringGet(int index);
+
+  // Casting.
+  static inline SlicedString* cast(Object* obj);
+
+  // Garbage collection support.
+  void SlicedStringIterateBody(ObjectVisitor* v);
+
+  // Layout description
+#if V8_HOST_ARCH_64_BIT
+  // Optimizations expect the buffer to be located at the same offset as a
+  // ConsString's first substring.  In 64-bit mode we have room for the
+  // start offset before the buffer.
+  static const int kStartOffset = String::kSize;
+  static const int kBufferOffset = kStartOffset + kIntSize;
+  static const int kSize = kBufferOffset + kPointerSize;
+#else
+  static const int kBufferOffset = String::kSize;
+  static const int kStartOffset = kBufferOffset + kPointerSize;
+  static const int kSize = kStartOffset + kIntSize;
+#endif
+
+  // Support for StringInputBuffer.
+  inline const unibrow::byte* SlicedStringReadBlock(ReadBlockBuffer* buffer,
+                                                    unsigned* offset_ptr,
+                                                    unsigned chars);
+  inline void SlicedStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
+                                              unsigned* offset_ptr,
+                                              unsigned chars);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(SlicedString);
+};
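+
+// A hedged sketch of what the fields above represent (the parent string and
+// indices are invented for illustration):
+//
+//   // A SlicedString with buffer() == "JavaScript", start() == 4 and
+//   // length() == 6 reads as "Script"; SlicedStringGet(i) forwards to
+//   // buffer()->Get(start() + i) without copying the parent's characters.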
+
+
+// The ExternalString class describes string values that are backed by
+// a string resource that lies outside the V8 heap.  ExternalStrings
+// consist of the length field common to all strings and a pointer to
+// the external resource.  It is important to ensure (externally) that
+// the resource is not deallocated while the ExternalString is live in
+// the V8 heap.
+//
+// The API expects that all ExternalStrings are created through the
+// API.  Therefore, ExternalStrings should not be used internally.
+class ExternalString: public String {
+ public:
+  // Casting
+  static inline ExternalString* cast(Object* obj);
+
+  // Layout description.
+  static const int kResourceOffset = POINTER_SIZE_ALIGN(String::kSize);
+  static const int kSize = kResourceOffset + kPointerSize;
+
+  STATIC_CHECK(kResourceOffset == Internals::kStringResourceOffset);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalString);
+};
+
+
+// The ExternalAsciiString class is an external string backed by an
+// ASCII string.
+class ExternalAsciiString: public ExternalString {
+ public:
+  typedef v8::String::ExternalAsciiStringResource Resource;
+
+  // The underlying resource.
+  inline Resource* resource();
+  inline void set_resource(Resource* buffer);
+
+  // Dispatched behavior.
+  uint16_t ExternalAsciiStringGet(int index);
+
+  // Casting.
+  static inline ExternalAsciiString* cast(Object* obj);
+
+  // Support for StringInputBuffer.
+  const unibrow::byte* ExternalAsciiStringReadBlock(unsigned* remaining,
+                                                    unsigned* offset,
+                                                    unsigned chars);
+  inline void ExternalAsciiStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
+                                                     unsigned* offset,
+                                                     unsigned chars);
+
+  // Identify the map for the external string/symbol with a particular length.
+  static inline Map* StringMap(int length);
+  static inline Map* SymbolMap(int length);
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalAsciiString);
+};
+
+
+// The ExternalTwoByteString class is an external string backed by a UTF-16
+// encoded string.
+class ExternalTwoByteString: public ExternalString {
+ public:
+  typedef v8::String::ExternalStringResource Resource;
+
+  // The underlying string resource.
+  inline Resource* resource();
+  inline void set_resource(Resource* buffer);
+
+  // Dispatched behavior.
+  uint16_t ExternalTwoByteStringGet(int index);
+
+  // For regexp code.
+  const uint16_t* ExternalTwoByteStringGetData(unsigned start);
+
+  // Casting.
+  static inline ExternalTwoByteString* cast(Object* obj);
+
+  // Support for StringInputBuffer.
+  void ExternalTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
+                                                unsigned* offset_ptr,
+                                                unsigned chars);
+
+  // Identify the map for the external string/symbol with a particular length.
+  static inline Map* StringMap(int length);
+  static inline Map* SymbolMap(int length);
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalTwoByteString);
+};
+
+
+// Utility superclass for stack-allocated objects that must be updated
+// on GC.  It provides two ways for the GC to update instances: either
+// by iterating over them or by notifying them after GC.
+class Relocatable BASE_EMBEDDED {
+ public:
+  inline Relocatable() : prev_(top_) { top_ = this; }
+  virtual ~Relocatable() {
+    ASSERT_EQ(top_, this);
+    top_ = prev_;
+  }
+  virtual void IterateInstance(ObjectVisitor* v) { }
+  virtual void PostGarbageCollection() { }
+
+  static void PostGarbageCollectionProcessing();
+  static int ArchiveSpacePerThread();
+  static char* ArchiveState(char* to);
+  static char* RestoreState(char* from);
+  static void Iterate(ObjectVisitor* v);
+  static void Iterate(ObjectVisitor* v, Relocatable* top);
+  static char* Iterate(ObjectVisitor* v, char* t);
+ private:
+  static Relocatable* top_;
+  Relocatable* prev_;
+};
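+
+// A minimal sketch of a Relocatable subclass (hypothetical, for illustration
+// only): the base-class constructor links the instance into the static
+// chain, and PostGarbageCollectionProcessing() walks that chain after a
+// collection.
+//
+//   class ExampleCachedPointer : public Relocatable {
+//    public:
+//     explicit ExampleCachedPointer(String** location)
+//         : location_(location) {}
+//     virtual void PostGarbageCollection() {
+//       // Re-derive any raw pointers from *location_ here, since the
+//       // underlying object may have moved during GC.
+//     }
+//    private:
+//     String** location_;
+//   };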
+
+
+// A flat string reader provides random access to the contents of a
+// string independent of the character width of the string.  The handle
+// must be valid as long as the reader is being used.
+class FlatStringReader : public Relocatable {
+ public:
+  explicit FlatStringReader(Handle<String> str);
+  explicit FlatStringReader(Vector<const char> input);
+  void PostGarbageCollection();
+  inline uc32 Get(int index);
+  int length() { return length_; }
+ private:
+  String** str_;
+  bool is_ascii_;
+  int length_;
+  const void* start_;
+};
+
+
+// Note that StringInputBuffers are not valid across a GC!  To fix this
+// the class would have to store a String handle instead of a String*
+// and AsciiStringReadBlock would have to be modified to use memcpy.
+//
+// StringInputBuffer is able to traverse any string regardless of how
+// deeply nested a sequence of ConsStrings it is made of.  However,
+// performance will be better if deep strings are flattened before they
+// are traversed.  Since flattening requires memory allocation, it is
+// not always desirable (e.g. in debugging situations).
+class StringInputBuffer: public unibrow::InputBuffer<String, String*, 1024> {
+ public:
+  virtual void Seek(unsigned pos);
+  inline StringInputBuffer(): unibrow::InputBuffer<String, String*, 1024>() {}
+  inline StringInputBuffer(String* backing):
+      unibrow::InputBuffer<String, String*, 1024>(backing) {}
+};
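+
+// A hedged usage sketch (has_more() and GetNext() are assumed to come from
+// the unibrow::InputBuffer base class; only Seek() is declared here):
+//
+//   StringInputBuffer buffer(str);
+//   while (buffer.has_more()) {
+//     uc32 c = buffer.GetNext();
+//     // ... handle one character; works even if str is a deep ConsString ...
+//   }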
+
+
+class SafeStringInputBuffer
+  : public unibrow::InputBuffer<String, String**, 256> {
+ public:
+  virtual void Seek(unsigned pos);
+  inline SafeStringInputBuffer()
+      : unibrow::InputBuffer<String, String**, 256>() {}
+  inline SafeStringInputBuffer(String** backing)
+      : unibrow::InputBuffer<String, String**, 256>(backing) {}
+};
+
+
+template <typename T>
+class VectorIterator {
+ public:
+  VectorIterator(T* d, int l) : data_(Vector<const T>(d, l)), index_(0) { }
+  explicit VectorIterator(Vector<const T> data) : data_(data), index_(0) { }
+  T GetNext() { return data_[index_++]; }
+  bool has_more() { return index_ < data_.length(); }
+ private:
+  Vector<const T> data_;
+  int index_;
+};
+
+
+// The Oddball class describes the objects null, undefined, true, and false.
+class Oddball: public HeapObject {
+ public:
+  // [to_string]: Cached to_string computed at startup.
+  DECL_ACCESSORS(to_string, String)
+
+  // [to_number]: Cached to_number computed at startup.
+  DECL_ACCESSORS(to_number, Object)
+
+  // Casting.
+  static inline Oddball* cast(Object* obj);
+
+  // Dispatched behavior.
+  void OddballIterateBody(ObjectVisitor* v);
+#ifdef DEBUG
+  void OddballVerify();
+#endif
+
+  // Initialize the fields.
+  Object* Initialize(const char* to_string, Object* to_number);
+
+  // Layout description.
+  static const int kToStringOffset = HeapObject::kHeaderSize;
+  static const int kToNumberOffset = kToStringOffset + kPointerSize;
+  static const int kSize = kToNumberOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Oddball);
+};
+
+
+class JSGlobalPropertyCell: public HeapObject {
+ public:
+  // [value]: value of the global property.
+  DECL_ACCESSORS(value, Object)
+
+  // Casting.
+  static inline JSGlobalPropertyCell* cast(Object* obj);
+
+  // Dispatched behavior.
+  void JSGlobalPropertyCellIterateBody(ObjectVisitor* v);
+#ifdef DEBUG
+  void JSGlobalPropertyCellVerify();
+  void JSGlobalPropertyCellPrint();
+#endif
+
+  // Layout description.
+  static const int kValueOffset = HeapObject::kHeaderSize;
+  static const int kSize = kValueOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalPropertyCell);
+};
+
+
+// Proxy describes objects pointing from JavaScript to C structures.
+// Since they cannot contain references to JS HeapObjects, they can be
+// placed in old_data_space.
+class Proxy: public HeapObject {
+ public:
+  // [proxy]: field containing the address.
+  inline Address proxy();
+  inline void set_proxy(Address value);
+
+  // Casting.
+  static inline Proxy* cast(Object* obj);
+
+  // Dispatched behavior.
+  inline void ProxyIterateBody(ObjectVisitor* v);
+#ifdef DEBUG
+  void ProxyPrint();
+  void ProxyVerify();
+#endif
+
+  // Layout description.
+
+  static const int kProxyOffset = HeapObject::kHeaderSize;
+  static const int kSize = kProxyOffset + kPointerSize;
+
+  STATIC_CHECK(kProxyOffset == Internals::kProxyProxyOffset);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Proxy);
+};
+
+
+// The JSArray class describes JavaScript arrays.  Such an array can be
+// in one of two modes:
+//   - fast: backing storage is a FixedArray and length <= elements.length();
+//           push and pop can be used to grow and shrink the array.
+//   - slow: backing storage is a HashTable with numbers as keys.
+class JSArray: public JSObject {
+ public:
+  // [length]: The length property.
+  DECL_ACCESSORS(length, Object)
+
+  Object* JSArrayUpdateLengthFromIndex(uint32_t index, Object* value);
+
+  // Initialize the array with the given capacity. The function may
+  // fail due to out-of-memory situations, but only if the requested
+  // capacity is non-zero.
+  Object* Initialize(int capacity);
+
+  // Set the content of the array to the content of storage.
+  inline void SetContent(FixedArray* storage);
+
+  // Casting.
+  static inline JSArray* cast(Object* obj);
+
+  // Uses handles.  Ensures that the fixed array backing the JSArray has at
+  // least the stated size.
+  inline void EnsureSize(int minimum_size_of_backing_fixed_array);
+
+  // Dispatched behavior.
+#ifdef DEBUG
+  void JSArrayPrint();
+  void JSArrayVerify();
+#endif
+
+  // Number of element slots to pre-allocate for an empty array.
+  static const int kPreallocatedArrayElements = 4;
+
+  // Layout description.
+  static const int kLengthOffset = JSObject::kHeaderSize;
+  static const int kSize = kLengthOffset + kPointerSize;
+
+ private:
+  // Expand the fixed array backing of a fast-case JSArray to at least
+  // the requested size.
+  void Expand(int minimum_size_of_backing_fixed_array);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSArray);
+};
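+
+// A hedged sketch of the fast mode described above (Factory::NewJSArray and
+// Factory::NewFixedArray are assumed from the factory, not declared here):
+//
+//   Handle<JSArray> array = Factory::NewJSArray(0);
+//   Handle<FixedArray> storage = Factory::NewFixedArray(3);
+//   // ... fill storage->set(0..2, ...) ...
+//   array->SetContent(*storage);
+//   // The array is now in fast mode: its elements are the FixedArray and
+//   // its length is at most storage->length().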
+
+
+// An accessor must have a getter, but can have no setter.
+//
+// When setting a property, V8 searches accessors in prototypes.
+// If an accessor is found and it does not have a setter,
+// the request is ignored.
+//
+// If the accessor in the prototype has the READ_ONLY property attribute, then
+// a new value is added to the local object when the property is set.
+// This shadows the accessor in the prototype.
+class AccessorInfo: public Struct {
+ public:
+  DECL_ACCESSORS(getter, Object)
+  DECL_ACCESSORS(setter, Object)
+  DECL_ACCESSORS(data, Object)
+  DECL_ACCESSORS(name, Object)
+  DECL_ACCESSORS(flag, Smi)
+
+  inline bool all_can_read();
+  inline void set_all_can_read(bool value);
+
+  inline bool all_can_write();
+  inline void set_all_can_write(bool value);
+
+  inline bool prohibits_overwriting();
+  inline void set_prohibits_overwriting(bool value);
+
+  inline PropertyAttributes property_attributes();
+  inline void set_property_attributes(PropertyAttributes attributes);
+
+  static inline AccessorInfo* cast(Object* obj);
+
+#ifdef DEBUG
+  void AccessorInfoPrint();
+  void AccessorInfoVerify();
+#endif
+
+  static const int kGetterOffset = HeapObject::kHeaderSize;
+  static const int kSetterOffset = kGetterOffset + kPointerSize;
+  static const int kDataOffset = kSetterOffset + kPointerSize;
+  static const int kNameOffset = kDataOffset + kPointerSize;
+  static const int kFlagOffset = kNameOffset + kPointerSize;
+  static const int kSize = kFlagOffset + kPointerSize;
+
+ private:
+  // Bit positions in flag.
+  static const int kAllCanReadBit = 0;
+  static const int kAllCanWriteBit = 1;
+  static const int kProhibitsOverwritingBit = 2;
+  class AttributesField: public BitField<PropertyAttributes, 3, 3> {};
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(AccessorInfo);
+};
+
+
+class AccessCheckInfo: public Struct {
+ public:
+  DECL_ACCESSORS(named_callback, Object)
+  DECL_ACCESSORS(indexed_callback, Object)
+  DECL_ACCESSORS(data, Object)
+
+  static inline AccessCheckInfo* cast(Object* obj);
+
+#ifdef DEBUG
+  void AccessCheckInfoPrint();
+  void AccessCheckInfoVerify();
+#endif
+
+  static const int kNamedCallbackOffset   = HeapObject::kHeaderSize;
+  static const int kIndexedCallbackOffset = kNamedCallbackOffset + kPointerSize;
+  static const int kDataOffset = kIndexedCallbackOffset + kPointerSize;
+  static const int kSize = kDataOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(AccessCheckInfo);
+};
+
+
+class InterceptorInfo: public Struct {
+ public:
+  DECL_ACCESSORS(getter, Object)
+  DECL_ACCESSORS(setter, Object)
+  DECL_ACCESSORS(query, Object)
+  DECL_ACCESSORS(deleter, Object)
+  DECL_ACCESSORS(enumerator, Object)
+  DECL_ACCESSORS(data, Object)
+
+  static inline InterceptorInfo* cast(Object* obj);
+
+#ifdef DEBUG
+  void InterceptorInfoPrint();
+  void InterceptorInfoVerify();
+#endif
+
+  static const int kGetterOffset = HeapObject::kHeaderSize;
+  static const int kSetterOffset = kGetterOffset + kPointerSize;
+  static const int kQueryOffset = kSetterOffset + kPointerSize;
+  static const int kDeleterOffset = kQueryOffset + kPointerSize;
+  static const int kEnumeratorOffset = kDeleterOffset + kPointerSize;
+  static const int kDataOffset = kEnumeratorOffset + kPointerSize;
+  static const int kSize = kDataOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(InterceptorInfo);
+};
+
+
+class CallHandlerInfo: public Struct {
+ public:
+  DECL_ACCESSORS(callback, Object)
+  DECL_ACCESSORS(data, Object)
+
+  static inline CallHandlerInfo* cast(Object* obj);
+
+#ifdef DEBUG
+  void CallHandlerInfoPrint();
+  void CallHandlerInfoVerify();
+#endif
+
+  static const int kCallbackOffset = HeapObject::kHeaderSize;
+  static const int kDataOffset = kCallbackOffset + kPointerSize;
+  static const int kSize = kDataOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(CallHandlerInfo);
+};
+
+
+class TemplateInfo: public Struct {
+ public:
+  DECL_ACCESSORS(tag, Object)
+  DECL_ACCESSORS(property_list, Object)
+
+#ifdef DEBUG
+  void TemplateInfoVerify();
+#endif
+
+  static const int kTagOffset          = HeapObject::kHeaderSize;
+  static const int kPropertyListOffset = kTagOffset + kPointerSize;
+  static const int kHeaderSize         = kPropertyListOffset + kPointerSize;
+ protected:
+  friend class AGCCVersionRequiresThisClassToHaveAFriendSoHereItIs;
+  DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateInfo);
+};
+
+
+class FunctionTemplateInfo: public TemplateInfo {
+ public:
+  DECL_ACCESSORS(serial_number, Object)
+  DECL_ACCESSORS(call_code, Object)
+  DECL_ACCESSORS(property_accessors, Object)
+  DECL_ACCESSORS(prototype_template, Object)
+  DECL_ACCESSORS(parent_template, Object)
+  DECL_ACCESSORS(named_property_handler, Object)
+  DECL_ACCESSORS(indexed_property_handler, Object)
+  DECL_ACCESSORS(instance_template, Object)
+  DECL_ACCESSORS(class_name, Object)
+  DECL_ACCESSORS(signature, Object)
+  DECL_ACCESSORS(instance_call_handler, Object)
+  DECL_ACCESSORS(access_check_info, Object)
+  DECL_ACCESSORS(flag, Smi)
+
+  // Following properties use flag bits.
+  DECL_BOOLEAN_ACCESSORS(hidden_prototype)
+  DECL_BOOLEAN_ACCESSORS(undetectable)
+  // If the bit is set, object instances created by this function
+  // require an access check.
+  DECL_BOOLEAN_ACCESSORS(needs_access_check)
+
+  static inline FunctionTemplateInfo* cast(Object* obj);
+
+#ifdef DEBUG
+  void FunctionTemplateInfoPrint();
+  void FunctionTemplateInfoVerify();
+#endif
+
+  static const int kSerialNumberOffset = TemplateInfo::kHeaderSize;
+  static const int kCallCodeOffset = kSerialNumberOffset + kPointerSize;
+  static const int kPropertyAccessorsOffset = kCallCodeOffset + kPointerSize;
+  static const int kPrototypeTemplateOffset =
+      kPropertyAccessorsOffset + kPointerSize;
+  static const int kParentTemplateOffset =
+      kPrototypeTemplateOffset + kPointerSize;
+  static const int kNamedPropertyHandlerOffset =
+      kParentTemplateOffset + kPointerSize;
+  static const int kIndexedPropertyHandlerOffset =
+      kNamedPropertyHandlerOffset + kPointerSize;
+  static const int kInstanceTemplateOffset =
+      kIndexedPropertyHandlerOffset + kPointerSize;
+  static const int kClassNameOffset = kInstanceTemplateOffset + kPointerSize;
+  static const int kSignatureOffset = kClassNameOffset + kPointerSize;
+  static const int kInstanceCallHandlerOffset = kSignatureOffset + kPointerSize;
+  static const int kAccessCheckInfoOffset =
+      kInstanceCallHandlerOffset + kPointerSize;
+  static const int kFlagOffset = kAccessCheckInfoOffset + kPointerSize;
+  static const int kSize = kFlagOffset + kPointerSize;
+
+ private:
+  // Bit positions in the flag, starting from the least significant bit.
+  static const int kHiddenPrototypeBit   = 0;
+  static const int kUndetectableBit      = 1;
+  static const int kNeedsAccessCheckBit  = 2;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(FunctionTemplateInfo);
+};
+
+
+class ObjectTemplateInfo: public TemplateInfo {
+ public:
+  DECL_ACCESSORS(constructor, Object)
+  DECL_ACCESSORS(internal_field_count, Object)
+
+  static inline ObjectTemplateInfo* cast(Object* obj);
+
+#ifdef DEBUG
+  void ObjectTemplateInfoPrint();
+  void ObjectTemplateInfoVerify();
+#endif
+
+  static const int kConstructorOffset = TemplateInfo::kHeaderSize;
+  static const int kInternalFieldCountOffset =
+      kConstructorOffset + kPointerSize;
+  static const int kSize = kInternalFieldCountOffset + kPointerSize;
+};
+
+
+class SignatureInfo: public Struct {
+ public:
+  DECL_ACCESSORS(receiver, Object)
+  DECL_ACCESSORS(args, Object)
+
+  static inline SignatureInfo* cast(Object* obj);
+
+#ifdef DEBUG
+  void SignatureInfoPrint();
+  void SignatureInfoVerify();
+#endif
+
+  static const int kReceiverOffset = Struct::kHeaderSize;
+  static const int kArgsOffset     = kReceiverOffset + kPointerSize;
+  static const int kSize           = kArgsOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(SignatureInfo);
+};
+
+
+class TypeSwitchInfo: public Struct {
+ public:
+  DECL_ACCESSORS(types, Object)
+
+  static inline TypeSwitchInfo* cast(Object* obj);
+
+#ifdef DEBUG
+  void TypeSwitchInfoPrint();
+  void TypeSwitchInfoVerify();
+#endif
+
+  static const int kTypesOffset = Struct::kHeaderSize;
+  static const int kSize        = kTypesOffset + kPointerSize;
+};
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+// The DebugInfo class holds additional information for a function being
+// debugged.
+class DebugInfo: public Struct {
+ public:
+  // The shared function info for the source being debugged.
+  DECL_ACCESSORS(shared, SharedFunctionInfo)
+  // Code object for the original code.
+  DECL_ACCESSORS(original_code, Code)
+  // Code object for the patched code. This code object is the code object
+  // currently active for the function.
+  DECL_ACCESSORS(code, Code)
+  // Fixed array holding status information for each active break point.
+  DECL_ACCESSORS(break_points, FixedArray)
+
+  // Check if there is a break point at a code position.
+  bool HasBreakPoint(int code_position);
+  // Get the break point info object for a code position.
+  Object* GetBreakPointInfo(int code_position);
+  // Clear a break point.
+  static void ClearBreakPoint(Handle<DebugInfo> debug_info,
+                              int code_position,
+                              Handle<Object> break_point_object);
+  // Set a break point.
+  static void SetBreakPoint(Handle<DebugInfo> debug_info, int code_position,
+                            int source_position, int statement_position,
+                            Handle<Object> break_point_object);
+  // Get the break point objects for a code position.
+  Object* GetBreakPointObjects(int code_position);
+  // Find the break point info holding this break point object.
+  static Object* FindBreakPointInfo(Handle<DebugInfo> debug_info,
+                                    Handle<Object> break_point_object);
+  // Get the number of break points for this function.
+  int GetBreakPointCount();
+
+  static inline DebugInfo* cast(Object* obj);
+
+#ifdef DEBUG
+  void DebugInfoPrint();
+  void DebugInfoVerify();
+#endif
+
+  static const int kSharedFunctionInfoIndex = Struct::kHeaderSize;
+  static const int kOriginalCodeIndex = kSharedFunctionInfoIndex + kPointerSize;
+  static const int kPatchedCodeIndex = kOriginalCodeIndex + kPointerSize;
+  static const int kActiveBreakPointsCountIndex =
+      kPatchedCodeIndex + kPointerSize;
+  static const int kBreakPointsStateIndex =
+      kActiveBreakPointsCountIndex + kPointerSize;
+  static const int kSize = kBreakPointsStateIndex + kPointerSize;
+
+ private:
+  static const int kNoBreakPointInfo = -1;
+
+  // Lookup the index in the break_points array for a code position.
+  int GetBreakPointInfoIndex(int code_position);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(DebugInfo);
+};
+
+
+// The BreakPointInfo class holds information for break points set in a
+// function. The DebugInfo object holds a BreakPointInfo object for each code
+// position with one or more break points.
+class BreakPointInfo: public Struct {
+ public:
+  // The position in the code for the break point.
+  DECL_ACCESSORS(code_position, Smi)
+  // The position in the source for the break position.
+  DECL_ACCESSORS(source_position, Smi)
+  // The position in the source for the last statement before this break
+  // position.
+  DECL_ACCESSORS(statement_position, Smi)
+  // List of related JavaScript break points.
+  DECL_ACCESSORS(break_point_objects, Object)
+
+  // Removes a break point.
+  static void ClearBreakPoint(Handle<BreakPointInfo> info,
+                              Handle<Object> break_point_object);
+  // Set a break point.
+  static void SetBreakPoint(Handle<BreakPointInfo> info,
+                            Handle<Object> break_point_object);
+  // Check if break point info has this break point object.
+  static bool HasBreakPointObject(Handle<BreakPointInfo> info,
+                                  Handle<Object> break_point_object);
+  // Get the number of break points for this code position.
+  int GetBreakPointCount();
+
+  static inline BreakPointInfo* cast(Object* obj);
+
+#ifdef DEBUG
+  void BreakPointInfoPrint();
+  void BreakPointInfoVerify();
+#endif
+
+  static const int kCodePositionIndex = Struct::kHeaderSize;
+  static const int kSourcePositionIndex = kCodePositionIndex + kPointerSize;
+  static const int kStatementPositionIndex =
+      kSourcePositionIndex + kPointerSize;
+  static const int kBreakPointObjectsIndex =
+      kStatementPositionIndex + kPointerSize;
+  static const int kSize = kBreakPointObjectsIndex + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(BreakPointInfo);
+};
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
+
+#undef DECL_BOOLEAN_ACCESSORS
+#undef DECL_ACCESSORS
+
+
+// Abstract base class for visiting, and optionally modifying, the
+// pointers contained in Objects. Used in GC and serialization/deserialization.
+class ObjectVisitor BASE_EMBEDDED {
+ public:
+  virtual ~ObjectVisitor() {}
+
+  // Visits a contiguous array of pointers in the half-open range
+  // [start, end). Any or all of the values may be modified on return.
+  virtual void VisitPointers(Object** start, Object** end) = 0;
+
+  // To allow lazy clearing of inline caches, the visitor has
+  // a rich interface for iterating over Code objects.
+
+  // Visits a code target in the instruction stream.
+  virtual void VisitCodeTarget(RelocInfo* rinfo);
+
+  // Visits a runtime entry in the instruction stream.
+  virtual void VisitRuntimeEntry(RelocInfo* rinfo) {}
+
+  // Visits a debug call target in the instruction stream.
+  virtual void VisitDebugTarget(RelocInfo* rinfo);
+
+  // Handy shorthand for visiting a single pointer.
+  virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); }
+
+  // Visits a contiguous array of external references (references to the C++
+  // heap) in the half-open range [start, end). Any or all of the values
+  // may be modified on return.
+  virtual void VisitExternalReferences(Address* start, Address* end) {}
+
+  inline void VisitExternalReference(Address* p) {
+    VisitExternalReferences(p, p + 1);
+  }
+
+#ifdef DEBUG
+  // Intended for serialization/deserialization checking: insert, or
+  // check for the presence of, a tag at this position in the stream.
+  virtual void Synchronize(const char* tag) {}
+#endif
+};
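+
+// A minimal sketch of a concrete visitor (hypothetical, for illustration):
+// the GC and the serializer implement this interface to reach every heap
+// pointer an object holds.
+//
+//   class ExamplePointerCountingVisitor : public ObjectVisitor {
+//    public:
+//     ExamplePointerCountingVisitor() : count_(0) {}
+//     virtual void VisitPointers(Object** start, Object** end) {
+//       count_ += static_cast<int>(end - start);
+//     }
+//     int count() { return count_; }
+//    private:
+//     int count_;
+//   };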
+
+
+// BooleanBit is a helper class for setting and getting a bit in an
+// integer or Smi.
+class BooleanBit : public AllStatic {
+ public:
+  static inline bool get(Smi* smi, int bit_position) {
+    return get(smi->value(), bit_position);
+  }
+
+  static inline bool get(int value, int bit_position) {
+    return (value & (1 << bit_position)) != 0;
+  }
+
+  static inline Smi* set(Smi* smi, int bit_position, bool v) {
+    return Smi::FromInt(set(smi->value(), bit_position, v));
+  }
+
+  static inline int set(int value, int bit_position, bool v) {
+    if (v) {
+      value |= (1 << bit_position);
+    } else {
+      value &= ~(1 << bit_position);
+    }
+    return value;
+  }
+};
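+
+// A usage sketch based directly on the helpers above: this is the kind of
+// bit packing used by the Smi-valued flag fields earlier in this file.
+//
+//   Smi* flags = Smi::FromInt(0);
+//   flags = BooleanBit::set(flags, 2, true);   // set bit 2
+//   bool on = BooleanBit::get(flags, 2);       // true
+//   flags = BooleanBit::set(flags, 2, false);  // clear it again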
+
+} }  // namespace v8::internal
+
+#endif  // V8_OBJECTS_H_
diff --git a/src/oprofile-agent.cc b/src/oprofile-agent.cc
new file mode 100644
index 0000000..8aa3937
--- /dev/null
+++ b/src/oprofile-agent.cc
@@ -0,0 +1,116 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "oprofile-agent.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef ENABLE_OPROFILE_AGENT
+op_agent_t OProfileAgent::handle_ = NULL;
+#endif
+
+
+bool OProfileAgent::Initialize() {
+#ifdef ENABLE_OPROFILE_AGENT
+  if (FLAG_oprofile) {
+    if (handle_ != NULL) return false;
+
+    // Disable code moving by GC.
+    FLAG_always_compact = false;
+    FLAG_never_compact = true;
+
+    handle_ = op_open_agent();
+    return (handle_ != NULL);
+  } else {
+    return true;
+  }
+#else
+  if (FLAG_oprofile) {
+    OS::Print("Warning: --oprofile specified but binary compiled without "
+              "oprofile support.\n");
+  }
+  return true;
+#endif
+}
+
+
+void OProfileAgent::TearDown() {
+#ifdef ENABLE_OPROFILE_AGENT
+  if (handle_ != NULL) {
+    op_close_agent(handle_);
+  }
+#endif
+}
+
+
+void OProfileAgent::CreateNativeCodeRegion(const char* name,
+    const void* ptr, unsigned int size) {
+#ifdef ENABLE_OPROFILE_AGENT
+  if (handle_ == NULL) return;
+  op_write_native_code(handle_, name, (uint64_t)ptr, ptr, size);
+#endif
+}
+
+
+void OProfileAgent::CreateNativeCodeRegion(String* name,
+    const void* ptr, unsigned int size) {
+#ifdef ENABLE_OPROFILE_AGENT
+  if (handle_ != NULL) {
+    const char* func_name;
+    SmartPointer<char> str =
+        name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+    func_name = name->length() > 0 ? *str : "<anonymous>";
+    CreateNativeCodeRegion(func_name, ptr, size);
+  }
+#endif
+}
+
+
+void OProfileAgent::CreateNativeCodeRegion(String* name, String* source,
+    int line_num, const void* ptr, unsigned int size) {
+#ifdef ENABLE_OPROFILE_AGENT
+  if (handle_ != NULL) {
+    Vector<char> buf = Vector<char>::New(OProfileAgent::kFormattingBufSize);
+    const char* func_name;
+    SmartPointer<char> str =
+        name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+    func_name = name->length() > 0 ? *str : "<anonymous>";
+    SmartPointer<char> source_str =
+        source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+    if (v8::internal::OS::SNPrintF(buf, "%s %s:%d",
+                                   func_name, *source_str, line_num) != -1) {
+      CreateNativeCodeRegion(buf.start(), ptr, size);
+    } else {
+      CreateNativeCodeRegion("<script/func name too long>", ptr, size);
+    }
+  }
+#endif
+}
+} }  // namespace v8::internal
diff --git a/src/oprofile-agent.h b/src/oprofile-agent.h
new file mode 100644
index 0000000..4c299bf
--- /dev/null
+++ b/src/oprofile-agent.h
@@ -0,0 +1,69 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_OPROFILE_AGENT_H_
+#define V8_OPROFILE_AGENT_H_
+
+#include <stdlib.h>
+
+#include "globals.h"
+
+#ifdef ENABLE_OPROFILE_AGENT
+// opagent.h uses uint64_t type, which can be missing in
+// system headers (they have __uint64_t), but is defined
+// in V8's headers.
+#include <opagent.h>  // NOLINT
+#endif
+
+namespace v8 {
+namespace internal {
+
+class OProfileAgent {
+ public:
+  static bool Initialize();
+  static void TearDown();
+  static void CreateNativeCodeRegion(const char* name,
+                                     const void* ptr, unsigned int size);
+  static void CreateNativeCodeRegion(String* name,
+                                     const void* ptr, unsigned int size);
+  static void CreateNativeCodeRegion(String* name, String* source, int line_num,
+                                     const void* ptr, unsigned int size);
+#ifdef ENABLE_OPROFILE_AGENT
+  static bool is_enabled() { return handle_ != NULL; }
+
+ private:
+  static op_agent_t handle_;
+
+  // Size of the buffer that is used for composing code area names.
+  static const int kFormattingBufSize = 256;
+#else
+  static bool is_enabled() { return false; }
+#endif
+};
+} }  // namespace v8::internal
+
+#endif  // V8_OPROFILE_AGENT_H_
diff --git a/src/parser.cc b/src/parser.cc
new file mode 100644
index 0000000..3b24687
--- /dev/null
+++ b/src/parser.cc
@@ -0,0 +1,4835 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "api.h"
+#include "ast.h"
+#include "bootstrapper.h"
+#include "compiler.h"
+#include "platform.h"
+#include "runtime.h"
+#include "parser.h"
+#include "scopes.h"
+#include "string-stream.h"
+
+namespace v8 {
+namespace internal {
+
+class ParserFactory;
+class ParserLog;
+class TemporaryScope;
+class Target;
+
+template <typename T> class ZoneListWrapper;
+
+
+// PositionStack is used for on-stack allocation of token positions for
+// new expressions.  See ParseNewExpression.
+
+class PositionStack  {
+ public:
+  explicit PositionStack(bool* ok) : top_(NULL), ok_(ok) {}
+  ~PositionStack() { ASSERT(!*ok_ || is_empty()); }
+
+  class Element  {
+   public:
+    Element(PositionStack* stack, int value) {
+      previous_ = stack->top();
+      value_ = value;
+      stack->set_top(this);
+    }
+
+   private:
+    Element* previous() { return previous_; }
+    int value() { return value_; }
+    friend class PositionStack;
+    Element* previous_;
+    int value_;
+  };
+
+  bool is_empty() { return top_ == NULL; }
+  int pop() {
+    ASSERT(!is_empty());
+    int result = top_->value();
+    top_ = top_->previous();
+    return result;
+  }
+
+ private:
+  Element* top() { return top_; }
+  void set_top(Element* value) { top_ = value; }
+  Element* top_;
+  bool* ok_;
+};
+
+
+class Parser {
+ public:
+  Parser(Handle<Script> script, bool allow_natives_syntax,
+         v8::Extension* extension, bool is_pre_parsing,
+         ParserFactory* factory, ParserLog* log, ScriptDataImpl* pre_data);
+  virtual ~Parser() { }
+
+  // Pre-parse the program from the character stream; returns true on
+  // success, false if a stack-overflow happened during parsing.
+  bool PreParseProgram(Handle<String> source, unibrow::CharacterStream* stream);
+
+  void ReportMessage(const char* message, Vector<const char*> args);
+  virtual void ReportMessageAt(Scanner::Location loc,
+                               const char* message,
+                               Vector<const char*> args) = 0;
+
+
+  // Returns NULL if parsing failed.
+  FunctionLiteral* ParseProgram(Handle<String> source,
+                                unibrow::CharacterStream* stream,
+                                bool in_global_context);
+  FunctionLiteral* ParseLazy(Handle<String> source,
+                             Handle<String> name,
+                             int start_position, bool is_expression);
+
+  // The minimum number of contiguous assignments that will
+  // be treated as an initialization block. Benchmarks show that
+  // the overhead exceeds the savings below this limit.
+  static const int kMinInitializationBlock = 3;
+
+ protected:
+
+  enum Mode {
+    PARSE_LAZILY,
+    PARSE_EAGERLY
+  };
+
+  // Report syntax error
+  void ReportUnexpectedToken(Token::Value token);
+
+  Handle<Script> script_;
+  Scanner scanner_;
+
+  Scope* top_scope_;
+  int with_nesting_level_;
+
+  TemporaryScope* temp_scope_;
+  Mode mode_;
+
+  Target* target_stack_;  // for break, continue statements
+  bool allow_natives_syntax_;
+  v8::Extension* extension_;
+  ParserFactory* factory_;
+  ParserLog* log_;
+  bool is_pre_parsing_;
+  ScriptDataImpl* pre_data_;
+
+  bool inside_with() const  { return with_nesting_level_ > 0; }
+  ParserFactory* factory() const  { return factory_; }
+  ParserLog* log() const { return log_; }
+  Scanner& scanner()  { return scanner_; }
+  Mode mode() const  { return mode_; }
+  ScriptDataImpl* pre_data() const  { return pre_data_; }
+
+  // All ParseXXX functions take as the last argument an *ok parameter
+  // which is set to false if parsing failed; it is unchanged otherwise.
+  // By making the 'exception handling' explicit, we are forced to check
+  // for failure at the call sites.
+  void* ParseSourceElements(ZoneListWrapper<Statement>* processor,
+                            int end_token, bool* ok);
+  Statement* ParseStatement(ZoneStringList* labels, bool* ok);
+  Statement* ParseFunctionDeclaration(bool* ok);
+  Statement* ParseNativeDeclaration(bool* ok);
+  Block* ParseBlock(ZoneStringList* labels, bool* ok);
+  Block* ParseVariableStatement(bool* ok);
+  Block* ParseVariableDeclarations(bool accept_IN, Expression** var, bool* ok);
+  Statement* ParseExpressionOrLabelledStatement(ZoneStringList* labels,
+                                                bool* ok);
+  IfStatement* ParseIfStatement(ZoneStringList* labels, bool* ok);
+  Statement* ParseContinueStatement(bool* ok);
+  Statement* ParseBreakStatement(ZoneStringList* labels, bool* ok);
+  Statement* ParseReturnStatement(bool* ok);
+  Block* WithHelper(Expression* obj,
+                    ZoneStringList* labels,
+                    bool is_catch_block,
+                    bool* ok);
+  Statement* ParseWithStatement(ZoneStringList* labels, bool* ok);
+  CaseClause* ParseCaseClause(bool* default_seen_ptr, bool* ok);
+  SwitchStatement* ParseSwitchStatement(ZoneStringList* labels, bool* ok);
+  LoopStatement* ParseDoStatement(ZoneStringList* labels, bool* ok);
+  LoopStatement* ParseWhileStatement(ZoneStringList* labels, bool* ok);
+  Statement* ParseForStatement(ZoneStringList* labels, bool* ok);
+  Statement* ParseThrowStatement(bool* ok);
+  Expression* MakeCatchContext(Handle<String> id, VariableProxy* value);
+  TryStatement* ParseTryStatement(bool* ok);
+  DebuggerStatement* ParseDebuggerStatement(bool* ok);
+
+  Expression* ParseExpression(bool accept_IN, bool* ok);
+  Expression* ParseAssignmentExpression(bool accept_IN, bool* ok);
+  Expression* ParseConditionalExpression(bool accept_IN, bool* ok);
+  Expression* ParseBinaryExpression(int prec, bool accept_IN, bool* ok);
+  Expression* ParseUnaryExpression(bool* ok);
+  Expression* ParsePostfixExpression(bool* ok);
+  Expression* ParseLeftHandSideExpression(bool* ok);
+  Expression* ParseNewExpression(bool* ok);
+  Expression* ParseMemberExpression(bool* ok);
+  Expression* ParseNewPrefix(PositionStack* stack, bool* ok);
+  Expression* ParseMemberWithNewPrefixesExpression(PositionStack* stack,
+                                                   bool* ok);
+  Expression* ParsePrimaryExpression(bool* ok);
+  Expression* ParseArrayLiteral(bool* ok);
+  Expression* ParseObjectLiteral(bool* ok);
+  Expression* ParseRegExpLiteral(bool seen_equal, bool* ok);
+
+  // Decide if a property should be in the object boilerplate.
+  bool IsBoilerplateProperty(ObjectLiteral::Property* property);
+  // If the expression is a literal, return the literal value;
+  // if the expression is a materialized literal and is simple, return a
+  // compile-time value as encoded by CompileTimeValue::GetValue().
+  // Otherwise, return the undefined literal as the placeholder
+  // in the object literal boilerplate.
+  Handle<Object> GetBoilerplateValue(Expression* expression);
+
+  enum FunctionLiteralType {
+    EXPRESSION,
+    DECLARATION,
+    NESTED
+  };
+
+  ZoneList<Expression*>* ParseArguments(bool* ok);
+  FunctionLiteral* ParseFunctionLiteral(Handle<String> var_name,
+                                        int function_token_position,
+                                        FunctionLiteralType type,
+                                        bool* ok);
+
+
+  // Magical syntax support.
+  Expression* ParseV8Intrinsic(bool* ok);
+
+  INLINE(Token::Value peek()) { return scanner_.peek(); }
+  INLINE(Token::Value Next()) { return scanner_.Next(); }
+  INLINE(void Consume(Token::Value token));
+  void Expect(Token::Value token, bool* ok);
+  void ExpectSemicolon(bool* ok);
+
+  // Get odd-ball literals.
+  Literal* GetLiteralUndefined();
+  Literal* GetLiteralTheHole();
+  Literal* GetLiteralNumber(double value);
+
+  Handle<String> ParseIdentifier(bool* ok);
+  Handle<String> ParseIdentifierOrGetOrSet(bool* is_get,
+                                           bool* is_set,
+                                           bool* ok);
+
+  // Parser support
+  virtual VariableProxy* Declare(Handle<String> name, Variable::Mode mode,
+                                 FunctionLiteral* fun,
+                                 bool resolve,
+                                 bool* ok) = 0;
+
+  bool TargetStackContainsLabel(Handle<String> label);
+  BreakableStatement* LookupBreakTarget(Handle<String> label, bool* ok);
+  IterationStatement* LookupContinueTarget(Handle<String> label, bool* ok);
+
+  void RegisterTargetUse(BreakTarget* target, Target* stop);
+
+  // Create a number literal.
+  Literal* NewNumberLiteral(double value);
+
+  // Generate an AST node that throws a ReferenceError with the given type.
+  Expression* NewThrowReferenceError(Handle<String> type);
+
+  // Generate an AST node that throws a SyntaxError with the given
+  // type. The first argument may be null (in the handle sense) in
+  // which case no arguments are passed to the constructor.
+  Expression* NewThrowSyntaxError(Handle<String> type, Handle<Object> first);
+
+  // Generate an AST node that throws a TypeError with the given
+  // type. Both arguments must be non-null (in the handle sense).
+  Expression* NewThrowTypeError(Handle<String> type,
+                                Handle<Object> first,
+                                Handle<Object> second);
+
+  // Generic AST generator for throwing errors from compiled code.
+  Expression* NewThrowError(Handle<String> constructor,
+                            Handle<String> type,
+                            Vector< Handle<Object> > arguments);
+
+  friend class Target;
+  friend class TargetScope;
+  friend class LexicalScope;
+  friend class TemporaryScope;
+};
+
+
+template <typename T, int initial_size>
+class BufferedZoneList {
+ public:
+
+  BufferedZoneList() :
+    list_(NULL), last_(NULL) {}
+
+  // Adds an element to the end of the list.  The element is buffered and can
+  // be read using last() or removed using RemoveLast() until the next call
+  // to Add() or GetList().
+  void Add(T* value) {
+    if (last_ != NULL) {
+      if (list_ == NULL) {
+        list_ = new ZoneList<T*>(initial_size);
+      }
+      list_->Add(last_);
+    }
+    last_ = value;
+  }
+
+  T* last() {
+    ASSERT(last_ != NULL);
+    return last_;
+  }
+
+  T* RemoveLast() {
+    ASSERT(last_ != NULL);
+    T* result = last_;
+    if (list_ != NULL && list_->length() > 0)
+      last_ = list_->RemoveLast();
+    else
+      last_ = NULL;
+    return result;
+  }
+
+  T* Get(int i) {
+    ASSERT(0 <= i && i < length());
+    if (list_ == NULL) {
+      ASSERT_EQ(0, i);
+      return last_;
+    } else {
+      if (i == list_->length()) {
+        ASSERT(last_ != NULL);
+        return last_;
+      } else {
+        return list_->at(i);
+      }
+    }
+  }
+
+  void Clear() {
+    list_ = NULL;
+    last_ = NULL;
+  }
+
+  int length() {
+    int length = (list_ == NULL) ? 0 : list_->length();
+    return length + ((last_ == NULL) ? 0 : 1);
+  }
+
+  ZoneList<T*>* GetList() {
+    if (list_ == NULL) {
+      list_ = new ZoneList<T*>(initial_size);
+    }
+    if (last_ != NULL) {
+      list_->Add(last_);
+      last_ = NULL;
+    }
+    return list_;
+  }
+
+ private:
+  ZoneList<T*>* list_;
+  T* last_;
+};
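+
+// A hedged usage sketch (first_term and second_term stand for previously
+// built RegExpTree* values): the most recent element stays buffered, so
+// RemoveLast() can undo an Add() without touching the backing ZoneList.
+//
+//   BufferedZoneList<RegExpTree, 2> terms;
+//   terms.Add(first_term);                     // buffered in last_
+//   terms.Add(second_term);                    // first_term moves to list_
+//   RegExpTree* undone = terms.RemoveLast();   // returns second_term
+//   ZoneList<RegExpTree*>* all = terms.GetList();  // flushes the buffer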
+
+// Accumulates RegExp atoms and assertions into lists of terms and alternatives.
+class RegExpBuilder: public ZoneObject {
+ public:
+  RegExpBuilder();
+  void AddCharacter(uc16 character);
+  // "Adds" an empty expression. Does nothing except consume a
+  // following quantifier
+  void AddEmpty();
+  void AddAtom(RegExpTree* tree);
+  void AddAssertion(RegExpTree* tree);
+  void NewAlternative();  // '|'
+  void AddQuantifierToAtom(int min, int max, bool is_greedy);
+  RegExpTree* ToRegExp();
+ private:
+  void FlushCharacters();
+  void FlushText();
+  void FlushTerms();
+  bool pending_empty_;
+  ZoneList<uc16>* characters_;
+  BufferedZoneList<RegExpTree, 2> terms_;
+  BufferedZoneList<RegExpTree, 2> text_;
+  BufferedZoneList<RegExpTree, 2> alternatives_;
+#ifdef DEBUG
+  enum {ADD_NONE, ADD_CHAR, ADD_TERM, ADD_ASSERT, ADD_ATOM} last_added_;
+#define LAST(x) last_added_ = x;
+#else
+#define LAST(x)
+#endif
+};
+
+
+RegExpBuilder::RegExpBuilder()
+  : pending_empty_(false),
+    characters_(NULL),
+    terms_(),
+    alternatives_()
+#ifdef DEBUG
+  , last_added_(ADD_NONE)
+#endif
+  {}
+
+
+void RegExpBuilder::FlushCharacters() {
+  pending_empty_ = false;
+  if (characters_ != NULL) {
+    RegExpTree* atom = new RegExpAtom(characters_->ToConstVector());
+    characters_ = NULL;
+    text_.Add(atom);
+    LAST(ADD_ATOM);
+  }
+}
+
+
+void RegExpBuilder::FlushText() {
+  FlushCharacters();
+  int num_text = text_.length();
+  if (num_text == 0) {
+    return;
+  } else if (num_text == 1) {
+    terms_.Add(text_.last());
+  } else {
+    RegExpText* text = new RegExpText();
+    for (int i = 0; i < num_text; i++)
+      text_.Get(i)->AppendToText(text);
+    terms_.Add(text);
+  }
+  text_.Clear();
+}
+
+
+void RegExpBuilder::AddCharacter(uc16 c) {
+  pending_empty_ = false;
+  if (characters_ == NULL) {
+    characters_ = new ZoneList<uc16>(4);
+  }
+  characters_->Add(c);
+  LAST(ADD_CHAR);
+}
+
+
+void RegExpBuilder::AddEmpty() {
+  pending_empty_ = true;
+}
+
+
+void RegExpBuilder::AddAtom(RegExpTree* term) {
+  if (term->IsEmpty()) {
+    AddEmpty();
+    return;
+  }
+  if (term->IsTextElement()) {
+    FlushCharacters();
+    text_.Add(term);
+  } else {
+    FlushText();
+    terms_.Add(term);
+  }
+  LAST(ADD_ATOM);
+}
+
+
+void RegExpBuilder::AddAssertion(RegExpTree* assert) {
+  FlushText();
+  terms_.Add(assert);
+  LAST(ADD_ASSERT);
+}
+
+
+void RegExpBuilder::NewAlternative() {
+  FlushTerms();
+}
+
+
+void RegExpBuilder::FlushTerms() {
+  FlushText();
+  int num_terms = terms_.length();
+  RegExpTree* alternative;
+  if (num_terms == 0) {
+    alternative = RegExpEmpty::GetInstance();
+  } else if (num_terms == 1) {
+    alternative = terms_.last();
+  } else {
+    alternative = new RegExpAlternative(terms_.GetList());
+  }
+  alternatives_.Add(alternative);
+  terms_.Clear();
+  LAST(ADD_NONE);
+}
+
+
+RegExpTree* RegExpBuilder::ToRegExp() {
+  FlushTerms();
+  int num_alternatives = alternatives_.length();
+  if (num_alternatives == 0) {
+    return RegExpEmpty::GetInstance();
+  }
+  if (num_alternatives == 1) {
+    return alternatives_.last();
+  }
+  return new RegExpDisjunction(alternatives_.GetList());
+}
+
+
+void RegExpBuilder::AddQuantifierToAtom(int min, int max, bool is_greedy) {
+  if (pending_empty_) {
+    pending_empty_ = false;
+    return;
+  }
+  RegExpTree* atom;
+  if (characters_ != NULL) {
+    ASSERT(last_added_ == ADD_CHAR);
+    // Last atom was character.
+    Vector<const uc16> char_vector = characters_->ToConstVector();
+    int num_chars = char_vector.length();
+    if (num_chars > 1) {
+      Vector<const uc16> prefix = char_vector.SubVector(0, num_chars - 1);
+      text_.Add(new RegExpAtom(prefix));
+      char_vector = char_vector.SubVector(num_chars - 1, num_chars);
+    }
+    characters_ = NULL;
+    atom = new RegExpAtom(char_vector);
+    FlushText();
+  } else if (text_.length() > 0) {
+    ASSERT(last_added_ == ADD_ATOM);
+    atom = text_.RemoveLast();
+    FlushText();
+  } else if (terms_.length() > 0) {
+    ASSERT(last_added_ == ADD_ATOM);
+    atom = terms_.RemoveLast();
+    if (atom->max_match() == 0) {
+      // Guaranteed to only match an empty string.
+      LAST(ADD_TERM);
+      if (min == 0) {
+        return;
+      }
+      terms_.Add(atom);
+      return;
+    }
+  } else {
+    // Only call immediately after adding an atom or character!
+    UNREACHABLE();
+    return;
+  }
+  terms_.Add(new RegExpQuantifier(min, max, is_greedy, atom));
+  LAST(ADD_TERM);
+}
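+
+// A worked example of the prefix split above: for the pattern /abc+/ the
+// pending characters are 'a', 'b', 'c'.  The prefix "ab" is flushed as its
+// own atom and only the single-character atom "c" receives the quantifier,
+// so the builder ends up with terms equivalent to "ab" followed by "c+".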
+
+
+class RegExpParser {
+ public:
+  RegExpParser(FlatStringReader* in,
+               Handle<String>* error,
+               bool multiline_mode);
+  RegExpTree* ParsePattern();
+  RegExpTree* ParseDisjunction();
+  RegExpTree* ParseGroup();
+  RegExpTree* ParseCharacterClass();
+
+  // Parses a {...,...} quantifier and stores the range in the given
+  // out parameters.
+  bool ParseIntervalQuantifier(int* min_out, int* max_out);
+
+  // Parses and returns a single escaped character.  The character
+  // must not be 'b' or 'B' since they are usually handled specially.
+  uc32 ParseClassCharacterEscape();
+
+  // Checks whether the following characters form a hexadecimal number of
+  // 'length' digits, and sets the value if they do.
+  bool ParseHexEscape(int length, uc32* value);
+
+  uc32 ParseControlLetterEscape();
+  uc32 ParseOctalLiteral();
+
+  // Tries to parse the input as a back reference.  If successful, it
+  // stores the result in the output parameter and returns true.  If
+  // it fails, it pushes back the characters read so the same characters
+  // can be reparsed.
+  bool ParseBackReferenceIndex(int* index_out);
+
+  CharacterRange ParseClassAtom(uc16* char_class);
+  RegExpTree* ReportError(Vector<const char> message);
+  void Advance();
+  void Advance(int dist);
+  void Reset(int pos);
+
+  // Reports whether the pattern might be used as a literal search string.
+  // Only use if the result of the parse is a single atom node.
+  bool simple();
+  bool contains_anchor() { return contains_anchor_; }
+  void set_contains_anchor() { contains_anchor_ = true; }
+  int captures_started() { return captures_ == NULL ? 0 : captures_->length(); }
+  int position() { return next_pos_ - 1; }
+  bool failed() { return failed_; }
+
+  static const int kMaxCaptures = 1 << 16;
+  static const uc32 kEndMarker = (1 << 21);
+ private:
+  enum SubexpressionType {
+    INITIAL,
+    CAPTURE,  // All positive values represent captures.
+    POSITIVE_LOOKAHEAD,
+    NEGATIVE_LOOKAHEAD,
+    GROUPING
+  };
+
+  class RegExpParserState : public ZoneObject {
+   public:
+    RegExpParserState(RegExpParserState* previous_state,
+                      SubexpressionType group_type,
+                      int disjunction_capture_index)
+        : previous_state_(previous_state),
+          builder_(new RegExpBuilder()),
+          group_type_(group_type),
+          disjunction_capture_index_(disjunction_capture_index) {}
+    // Parser state of containing expression, if any.
+    RegExpParserState* previous_state() { return previous_state_; }
+    bool IsSubexpression() { return previous_state_ != NULL; }
+    // RegExpBuilder building this regexp's AST.
+    RegExpBuilder* builder() { return builder_; }
+    // Type of regexp being parsed (parenthesized group or entire regexp).
+    SubexpressionType group_type() { return group_type_; }
+    // Index in captures array of first capture in this sub-expression, if any.
+    // Also the capture index of this sub-expression itself, if group_type
+    // is CAPTURE.
+    int capture_index() { return disjunction_capture_index_; }
+   private:
+    // Linked list implementation of stack of states.
+    RegExpParserState* previous_state_;
+    // Builder for the stored disjunction.
+    RegExpBuilder* builder_;
+    // Stored disjunction type (capture, look-ahead or grouping), if any.
+    SubexpressionType group_type_;
+    // Stored disjunction's capture index (if any).
+    int disjunction_capture_index_;
+  };
+
+  uc32 current() { return current_; }
+  bool has_more() { return has_more_; }
+  bool has_next() { return next_pos_ < in()->length(); }
+  uc32 Next();
+  FlatStringReader* in() { return in_; }
+  void ScanForCaptures();
+  uc32 current_;
+  bool has_more_;
+  bool multiline_;
+  int next_pos_;
+  FlatStringReader* in_;
+  Handle<String>* error_;
+  bool simple_;
+  bool contains_anchor_;
+  ZoneList<RegExpCapture*>* captures_;
+  bool is_scanned_for_captures_;
+  // The capture count is only valid after we have scanned for captures.
+  int capture_count_;
+  bool failed_;
+};
+
+
+// A temporary scope stores information during parsing, just like
+// a plain scope.  However, temporary scopes are not kept around
+// after parsing or referenced by syntax trees so they can be stack-
+// allocated and hence used by the pre-parser.
+class TemporaryScope BASE_EMBEDDED {
+ public:
+  explicit TemporaryScope(Parser* parser);
+  ~TemporaryScope();
+
+  int NextMaterializedLiteralIndex() {
+    int next_index =
+        materialized_literal_count_ + JSFunction::kLiteralsPrefixSize;
+    materialized_literal_count_++;
+    return next_index;
+  }
+  int materialized_literal_count() { return materialized_literal_count_; }
+
+  void set_contains_array_literal() { contains_array_literal_ = true; }
+  bool contains_array_literal() { return contains_array_literal_; }
+
+  void SetThisPropertyAssignmentInfo(
+      bool only_this_property_assignments,
+      bool only_simple_this_property_assignments,
+      Handle<FixedArray> this_property_assignments) {
+    only_this_property_assignments_ = only_this_property_assignments;
+    only_simple_this_property_assignments_ =
+        only_simple_this_property_assignments;
+    this_property_assignments_ = this_property_assignments;
+  }
+  bool only_this_property_assignments() {
+    return only_this_property_assignments_;
+  }
+  bool only_simple_this_property_assignments() {
+    return only_simple_this_property_assignments_;
+  }
+  Handle<FixedArray> this_property_assignments() {
+    return this_property_assignments_;
+  }
+
+  void AddProperty() { expected_property_count_++; }
+  int expected_property_count() { return expected_property_count_; }
+ private:
+  // Captures the number of nodes that need materialization in the
+  // function, e.g. regexp literals and boilerplate for object literals.
+  int materialized_literal_count_;
+
+  // Captures whether or not the function contains array literals.  If
+  // the function contains array literals, we have to allocate space
+  // for the array constructor in the literals array of the function.
+  // This array constructor is used when creating the actual array
+  // literals.
+  bool contains_array_literal_;
+
+  // Properties count estimation.
+  int expected_property_count_;
+
+  bool only_this_property_assignments_;
+  bool only_simple_this_property_assignments_;
+  Handle<FixedArray> this_property_assignments_;
+
+  // Bookkeeping
+  Parser* parser_;
+  TemporaryScope* parent_;
+
+  friend class Parser;
+};
+
+
+TemporaryScope::TemporaryScope(Parser* parser)
+  : materialized_literal_count_(0),
+    contains_array_literal_(false),
+    expected_property_count_(0),
+    only_this_property_assignments_(false),
+    only_simple_this_property_assignments_(false),
+    this_property_assignments_(Factory::empty_fixed_array()),
+    parser_(parser),
+    parent_(parser->temp_scope_) {
+  parser->temp_scope_ = this;
+}
+
+
+TemporaryScope::~TemporaryScope() {
+  parser_->temp_scope_ = parent_;
+}
+
+
+// A zone list wrapper lets code either access a zone list or appear to
+// do so while actually ignoring all operations.
+template <typename T>
+class ZoneListWrapper {
+ public:
+  ZoneListWrapper() : list_(NULL) { }
+  explicit ZoneListWrapper(int size) : list_(new ZoneList<T*>(size)) { }
+  void Add(T* that) { if (list_) list_->Add(that); }
+  int length() { return list_->length(); }
+  ZoneList<T*>* elements() { return list_; }
+  T* at(int index) { return list_->at(index); }
+ private:
+  ZoneList<T*>* list_;
+};
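+// Note that a default-constructed wrapper is backed by a NULL list, so its
+// Add() calls are silently dropped; ParserFactory::NewList() below hands out
+// that flavor while pre-parsing and a real, sized list otherwise.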
+
+
+// Allocation macro that should be used to allocate objects that must
+// only be allocated in real parsing mode.  Note that in preparse mode
+// not only is the syntax tree not created, but the constructor
+// arguments are not evaluated either.
+#define NEW(expr) (is_pre_parsing_ ? NULL : new expr)
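+// For example, `NEW(Block(NULL, 1, true))` yields NULL while pre-parsing and
+// a freshly allocated Block otherwise (see the uses further below).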
+
+
+class ParserFactory BASE_EMBEDDED {
+ public:
+  explicit ParserFactory(bool is_pre_parsing) :
+      is_pre_parsing_(is_pre_parsing) { }
+
+  virtual ~ParserFactory() { }
+
+  virtual Scope* NewScope(Scope* parent, Scope::Type type, bool inside_with);
+
+  virtual Handle<String> LookupSymbol(const char* string, int length) {
+    return Handle<String>();
+  }
+
+  virtual Handle<String> EmptySymbol() {
+    return Handle<String>();
+  }
+
+  virtual Expression* NewProperty(Expression* obj, Expression* key, int pos) {
+    if (obj == VariableProxySentinel::this_proxy()) {
+      return Property::this_property();
+    } else {
+      return ValidLeftHandSideSentinel::instance();
+    }
+  }
+
+  virtual Expression* NewCall(Expression* expression,
+                              ZoneList<Expression*>* arguments,
+                              int pos) {
+    return Call::sentinel();
+  }
+
+  virtual Statement* EmptyStatement() {
+    return NULL;
+  }
+
+  template <typename T> ZoneListWrapper<T> NewList(int size) {
+    return is_pre_parsing_ ? ZoneListWrapper<T>() : ZoneListWrapper<T>(size);
+  }
+
+ private:
+  bool is_pre_parsing_;
+};
+
+
+class ParserLog BASE_EMBEDDED {
+ public:
+  virtual ~ParserLog() { }
+
+  // Records the occurrence of a function.  The returned object is
+  // only guaranteed to be valid until the next function has been
+  // logged.
+  virtual FunctionEntry LogFunction(int start) { return FunctionEntry(); }
+
+  virtual void LogError() { }
+};
+
+
+class AstBuildingParserFactory : public ParserFactory {
+ public:
+  AstBuildingParserFactory() : ParserFactory(false) { }
+
+  virtual Scope* NewScope(Scope* parent, Scope::Type type, bool inside_with);
+
+  virtual Handle<String> LookupSymbol(const char* string, int length) {
+    return Factory::LookupSymbol(Vector<const char>(string, length));
+  }
+
+  virtual Handle<String> EmptySymbol() {
+    return Factory::empty_symbol();
+  }
+
+  virtual Expression* NewProperty(Expression* obj, Expression* key, int pos) {
+    return new Property(obj, key, pos);
+  }
+
+  virtual Expression* NewCall(Expression* expression,
+                              ZoneList<Expression*>* arguments,
+                              int pos) {
+    return new Call(expression, arguments, pos);
+  }
+
+  virtual Statement* EmptyStatement();
+};
+
+
+class ParserRecorder: public ParserLog {
+ public:
+  ParserRecorder();
+  virtual FunctionEntry LogFunction(int start);
+  virtual void LogError() { }
+  virtual void LogMessage(Scanner::Location loc,
+                          const char* message,
+                          Vector<const char*> args);
+  void WriteString(Vector<const char> str);
+  static const char* ReadString(unsigned* start, int* chars);
+  List<unsigned>* store() { return &store_; }
+ private:
+  bool has_error_;
+  List<unsigned> store_;
+};
+
+
+FunctionEntry ScriptDataImpl::GetFunctionEnd(int start) {
+  if (nth(last_entry_).start_pos() > start) {
+    // If the last entry we looked up is higher than what we're
+    // looking for then it's useless and we reset it.
+    last_entry_ = 0;
+  }
+  for (int i = last_entry_; i < EntryCount(); i++) {
+    FunctionEntry entry = nth(i);
+    if (entry.start_pos() == start) {
+      last_entry_ = i;
+      return entry;
+    }
+  }
+  return FunctionEntry();
+}
+
+
+bool ScriptDataImpl::SanityCheck() {
+  if (store_.length() < static_cast<int>(ScriptDataImpl::kHeaderSize))
+    return false;
+  if (magic() != ScriptDataImpl::kMagicNumber)
+    return false;
+  if (version() != ScriptDataImpl::kCurrentVersion)
+    return false;
+  return true;
+}
+
+
+int ScriptDataImpl::EntryCount() {
+  return (store_.length() - kHeaderSize) / FunctionEntry::kSize;
+}
+
+
+FunctionEntry ScriptDataImpl::nth(int n) {
+  int offset = kHeaderSize + n * FunctionEntry::kSize;
+  return FunctionEntry(Vector<unsigned>(store_.start() + offset,
+                                        FunctionEntry::kSize));
+}
+
+
+ParserRecorder::ParserRecorder()
+  : has_error_(false), store_(4) {
+  Vector<unsigned> preamble = store()->AddBlock(0, ScriptDataImpl::kHeaderSize);
+  preamble[ScriptDataImpl::kMagicOffset] = ScriptDataImpl::kMagicNumber;
+  preamble[ScriptDataImpl::kVersionOffset] = ScriptDataImpl::kCurrentVersion;
+  preamble[ScriptDataImpl::kHasErrorOffset] = false;
+}
+
+
+void ParserRecorder::WriteString(Vector<const char> str) {
+  store()->Add(str.length());
+  for (int i = 0; i < str.length(); i++)
+    store()->Add(str[i]);
+}
+
+
+const char* ParserRecorder::ReadString(unsigned* start, int* chars) {
+  int length = start[0];
+  char* result = NewArray<char>(length + 1);
+  for (int i = 0; i < length; i++)
+    result[i] = start[i + 1];
+  result[length] = '\0';
+  if (chars != NULL) *chars = length;
+  return result;
+}
+
+
+void ParserRecorder::LogMessage(Scanner::Location loc, const char* message,
+                                Vector<const char*> args) {
+  if (has_error_) return;
+  store()->Rewind(ScriptDataImpl::kHeaderSize);
+  store()->at(ScriptDataImpl::kHasErrorOffset) = true;
+  store()->Add(loc.beg_pos);
+  store()->Add(loc.end_pos);
+  store()->Add(args.length());
+  WriteString(CStrVector(message));
+  for (int i = 0; i < args.length(); i++)
+    WriteString(CStrVector(args[i]));
+}
+
+
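+// For reference, the record written by ParserRecorder::LogMessage above (and
+// read back by MessageLocation, BuildMessage and BuildArgs below) consists of
+// the begin and end positions, the argument count, the length-prefixed
+// message string, and one length-prefixed string per argument, all following
+// the fixed-size header.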
+Scanner::Location ScriptDataImpl::MessageLocation() {
+  int beg_pos = Read(0);
+  int end_pos = Read(1);
+  return Scanner::Location(beg_pos, end_pos);
+}
+
+
+const char* ScriptDataImpl::BuildMessage() {
+  unsigned* start = ReadAddress(3);
+  return ParserRecorder::ReadString(start, NULL);
+}
+
+
+Vector<const char*> ScriptDataImpl::BuildArgs() {
+  int arg_count = Read(2);
+  const char** array = NewArray<const char*>(arg_count);
+  int pos = ScriptDataImpl::kHeaderSize + Read(3);
+  for (int i = 0; i < arg_count; i++) {
+    int count = 0;
+    array[i] = ParserRecorder::ReadString(ReadAddress(pos), &count);
+    pos += count + 1;
+  }
+  return Vector<const char*>(array, arg_count);
+}
+
+
+unsigned ScriptDataImpl::Read(int position) {
+  return store_[ScriptDataImpl::kHeaderSize + position];
+}
+
+
+unsigned* ScriptDataImpl::ReadAddress(int position) {
+  return &store_[ScriptDataImpl::kHeaderSize + position];
+}
+
+
+FunctionEntry ParserRecorder::LogFunction(int start) {
+  if (has_error_) return FunctionEntry();
+  FunctionEntry result(store()->AddBlock(0, FunctionEntry::kSize));
+  result.set_start_pos(start);
+  return result;
+}
+
+
+class AstBuildingParser : public Parser {
+ public:
+  AstBuildingParser(Handle<Script> script, bool allow_natives_syntax,
+                    v8::Extension* extension, ScriptDataImpl* pre_data)
+      : Parser(script, allow_natives_syntax, extension, false,
+               factory(), log(), pre_data) { }
+  virtual void ReportMessageAt(Scanner::Location loc, const char* message,
+                               Vector<const char*> args);
+  virtual VariableProxy* Declare(Handle<String> name, Variable::Mode mode,
+                                 FunctionLiteral* fun, bool resolve, bool* ok);
+  AstBuildingParserFactory* factory() { return &factory_; }
+  ParserLog* log() { return &log_; }
+
+ private:
+  ParserLog log_;
+  AstBuildingParserFactory factory_;
+};
+
+
+class PreParser : public Parser {
+ public:
+  PreParser(Handle<Script> script, bool allow_natives_syntax,
+            v8::Extension* extension)
+      : Parser(script, allow_natives_syntax, extension, true,
+               factory(), recorder(), NULL)
+      , factory_(true) { }
+  virtual void ReportMessageAt(Scanner::Location loc, const char* message,
+                               Vector<const char*> args);
+  virtual VariableProxy* Declare(Handle<String> name, Variable::Mode mode,
+                                 FunctionLiteral* fun, bool resolve, bool* ok);
+  ParserFactory* factory() { return &factory_; }
+  ParserRecorder* recorder() { return &recorder_; }
+
+ private:
+  ParserRecorder recorder_;
+  ParserFactory factory_;
+};
+
+
+Scope* AstBuildingParserFactory::NewScope(Scope* parent, Scope::Type type,
+                                          bool inside_with) {
+  Scope* result = new Scope(parent, type);
+  result->Initialize(inside_with);
+  return result;
+}
+
+
+Statement* AstBuildingParserFactory::EmptyStatement() {
+  // Use a statically allocated empty statement singleton to avoid
+  // allocating lots and lots of empty statements.
+  static v8::internal::EmptyStatement empty;
+  return &empty;
+}
+
+
+Scope* ParserFactory::NewScope(Scope* parent, Scope::Type type,
+                               bool inside_with) {
+  ASSERT(parent != NULL);
+  parent->type_ = type;
+  return parent;
+}
+
+
+VariableProxy* PreParser::Declare(Handle<String> name, Variable::Mode mode,
+                                  FunctionLiteral* fun, bool resolve,
+                                  bool* ok) {
+  return NULL;
+}
+
+
+
+// ----------------------------------------------------------------------------
+// Target is a support class to facilitate manipulation of the
+// Parser's target_stack_ (the stack of potential 'break' and
+// 'continue' statement targets). Upon construction, a new target is
+// added; it is removed upon destruction.
+
+class Target BASE_EMBEDDED {
+ public:
+  Target(Parser* parser, AstNode* node)
+      : parser_(parser), node_(node), previous_(parser_->target_stack_) {
+    parser_->target_stack_ = this;
+  }
+
+  ~Target() {
+    parser_->target_stack_ = previous_;
+  }
+
+  Target* previous() { return previous_; }
+  AstNode* node() { return node_; }
+
+ private:
+  Parser* parser_;
+  AstNode* node_;
+  Target* previous_;
+};
+
+
+class TargetScope BASE_EMBEDDED {
+ public:
+  explicit TargetScope(Parser* parser)
+      : parser_(parser), previous_(parser->target_stack_) {
+    parser->target_stack_ = NULL;
+  }
+
+  ~TargetScope() {
+    parser_->target_stack_ = previous_;
+  }
+
+ private:
+  Parser* parser_;
+  Target* previous_;
+};
+
+
+// ----------------------------------------------------------------------------
+// LexicalScope is a support class to facilitate manipulation of the
+// Parser's scope stack. The constructor sets the parser's top scope
+// to the incoming scope, and the destructor resets it.
+
+class LexicalScope BASE_EMBEDDED {
+ public:
+  LexicalScope(Parser* parser, Scope* scope)
+    : parser_(parser),
+      prev_scope_(parser->top_scope_),
+      prev_level_(parser->with_nesting_level_) {
+    parser_->top_scope_ = scope;
+    parser_->with_nesting_level_ = 0;
+  }
+
+  ~LexicalScope() {
+    parser_->top_scope_ = prev_scope_;
+    parser_->with_nesting_level_ = prev_level_;
+  }
+
+ private:
+  Parser* parser_;
+  Scope* prev_scope_;
+  int prev_level_;
+};
+
+
+// ----------------------------------------------------------------------------
+// The CHECK_OK macro is a convenient macro to enforce error
+// handling for functions that may fail (by returning !*ok).
+//
+// CAUTION: This macro appends extra statements after a call,
+// thus it must never be used where only a single statement
+// is correct (e.g. an if statement branch w/o braces)!
+
+#define CHECK_OK  ok);   \
+  if (!*ok) return NULL; \
+  ((void)0
+#define DUMMY )  // to make indentation work
+#undef DUMMY
+
+#define CHECK_FAILED  /**/);   \
+  if (failed_) return NULL; \
+  ((void)0
+#define DUMMY )  // to make indentation work
+#undef DUMMY
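+// As an illustration, a call written as `Expect(Token::IF, CHECK_OK);`
+// expands to `Expect(Token::IF, ok); if (!*ok) return NULL; ((void)0);`,
+// so the macro both passes the ok flag and bails out of the caller on
+// failure.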
+
+// ----------------------------------------------------------------------------
+// Implementation of Parser
+
+Parser::Parser(Handle<Script> script,
+               bool allow_natives_syntax,
+               v8::Extension* extension,
+               bool is_pre_parsing,
+               ParserFactory* factory,
+               ParserLog* log,
+               ScriptDataImpl* pre_data)
+    : script_(script),
+      scanner_(is_pre_parsing),
+      top_scope_(NULL),
+      with_nesting_level_(0),
+      temp_scope_(NULL),
+      target_stack_(NULL),
+      allow_natives_syntax_(allow_natives_syntax),
+      extension_(extension),
+      factory_(factory),
+      log_(log),
+      is_pre_parsing_(is_pre_parsing),
+      pre_data_(pre_data) {
+}
+
+
+bool Parser::PreParseProgram(Handle<String> source,
+                             unibrow::CharacterStream* stream) {
+  HistogramTimerScope timer(&Counters::pre_parse);
+  AssertNoZoneAllocation assert_no_zone_allocation;
+  AssertNoAllocation assert_no_allocation;
+  NoHandleAllocation no_handle_allocation;
+  scanner_.Init(source, stream, 0);
+  ASSERT(target_stack_ == NULL);
+  mode_ = PARSE_EAGERLY;
+  DummyScope top_scope;
+  LexicalScope scope(this, &top_scope);
+  TemporaryScope temp_scope(this);
+  ZoneListWrapper<Statement> processor;
+  bool ok = true;
+  ParseSourceElements(&processor, Token::EOS, &ok);
+  return !scanner().stack_overflow();
+}
+
+
+FunctionLiteral* Parser::ParseProgram(Handle<String> source,
+                                      unibrow::CharacterStream* stream,
+                                      bool in_global_context) {
+  CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
+
+  HistogramTimerScope timer(&Counters::parse);
+  Counters::total_parse_size.Increment(source->length());
+
+  // Initialize parser state.
+  source->TryFlattenIfNotFlat();
+  scanner_.Init(source, stream, 0);
+  ASSERT(target_stack_ == NULL);
+
+  // Compute the parsing mode.
+  mode_ = FLAG_lazy ? PARSE_LAZILY : PARSE_EAGERLY;
+  if (allow_natives_syntax_ || extension_ != NULL) mode_ = PARSE_EAGERLY;
+
+  Scope::Type type =
+    in_global_context
+      ? Scope::GLOBAL_SCOPE
+      : Scope::EVAL_SCOPE;
+  Handle<String> no_name = factory()->EmptySymbol();
+
+  FunctionLiteral* result = NULL;
+  { Scope* scope = factory()->NewScope(top_scope_, type, inside_with());
+    LexicalScope lexical_scope(this, scope);
+    TemporaryScope temp_scope(this);
+    ZoneListWrapper<Statement> body(16);
+    bool ok = true;
+    ParseSourceElements(&body, Token::EOS, &ok);
+    if (ok) {
+      result = NEW(FunctionLiteral(
+          no_name,
+          top_scope_,
+          body.elements(),
+          temp_scope.materialized_literal_count(),
+          temp_scope.contains_array_literal(),
+          temp_scope.expected_property_count(),
+          temp_scope.only_this_property_assignments(),
+          temp_scope.only_simple_this_property_assignments(),
+          temp_scope.this_property_assignments(),
+          0,
+          0,
+          source->length(),
+          false));
+    } else if (scanner().stack_overflow()) {
+      Top::StackOverflow();
+    }
+  }
+
+  // Make sure the target stack is empty.
+  ASSERT(target_stack_ == NULL);
+
+  // If there was a syntax error we have to get rid of the AST
+  // and it is not safe to do so before the scope has been deleted.
+  if (result == NULL) zone_scope.DeleteOnExit();
+  return result;
+}
+
+
+FunctionLiteral* Parser::ParseLazy(Handle<String> source,
+                                   Handle<String> name,
+                                   int start_position,
+                                   bool is_expression) {
+  CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
+  HistogramTimerScope timer(&Counters::parse_lazy);
+  source->TryFlattenIfNotFlat();
+  Counters::total_parse_size.Increment(source->length());
+  SafeStringInputBuffer buffer(source.location());
+
+  // Initialize parser state.
+  scanner_.Init(source, &buffer, start_position);
+  ASSERT(target_stack_ == NULL);
+  mode_ = PARSE_EAGERLY;
+
+  // Place holder for the result.
+  FunctionLiteral* result = NULL;
+
+  {
+    // Parse the function literal.
+    Handle<String> no_name = factory()->EmptySymbol();
+    Scope* scope =
+        factory()->NewScope(top_scope_, Scope::GLOBAL_SCOPE, inside_with());
+    LexicalScope lexical_scope(this, scope);
+    TemporaryScope temp_scope(this);
+
+    FunctionLiteralType type = is_expression ? EXPRESSION : DECLARATION;
+    bool ok = true;
+    result = ParseFunctionLiteral(name, RelocInfo::kNoPosition, type, &ok);
+    // Make sure the results agree.
+    ASSERT(ok == (result != NULL));
+    // The only errors should be stack overflows.
+    ASSERT(ok || scanner_.stack_overflow());
+  }
+
+  // Make sure the target stack is empty.
+  ASSERT(target_stack_ == NULL);
+
+  // If there was a stack overflow we have to get rid of AST and it is
+  // not safe to do before scope has been deleted.
+  if (result == NULL) {
+    Top::StackOverflow();
+    zone_scope.DeleteOnExit();
+  }
+  return result;
+}
+
+
+void Parser::ReportMessage(const char* type, Vector<const char*> args) {
+  Scanner::Location source_location = scanner_.location();
+  ReportMessageAt(source_location, type, args);
+}
+
+
+void AstBuildingParser::ReportMessageAt(Scanner::Location source_location,
+                                        const char* type,
+                                        Vector<const char*> args) {
+  MessageLocation location(script_,
+                           source_location.beg_pos, source_location.end_pos);
+  Handle<JSArray> array = Factory::NewJSArray(args.length());
+  for (int i = 0; i < args.length(); i++) {
+    SetElement(array, i, Factory::NewStringFromUtf8(CStrVector(args[i])));
+  }
+  Handle<Object> result = Factory::NewSyntaxError(type, array);
+  Top::Throw(*result, &location);
+}
+
+
+void PreParser::ReportMessageAt(Scanner::Location source_location,
+                                const char* type,
+                                Vector<const char*> args) {
+  recorder()->LogMessage(source_location, type, args);
+}
+
+
+// Base class containing common code for the different finder classes used by
+// the parser.
+class ParserFinder {
+ protected:
+  ParserFinder() {}
+  static Assignment* AsAssignment(Statement* stat) {
+    if (stat == NULL) return NULL;
+    ExpressionStatement* exp_stat = stat->AsExpressionStatement();
+    if (exp_stat == NULL) return NULL;
+    return exp_stat->expression()->AsAssignment();
+  }
+};
+
+
+// An InitializationBlockFinder finds and marks sequences of statements of the
+// form x.y.z.a = ...; x.y.z.b = ...; etc.
+class InitializationBlockFinder : public ParserFinder {
+ public:
+  InitializationBlockFinder()
+    : first_in_block_(NULL), last_in_block_(NULL), block_size_(0) {}
+
+  ~InitializationBlockFinder() {
+    if (InBlock()) EndBlock();
+  }
+
+  void Update(Statement* stat) {
+    Assignment* assignment = AsAssignment(stat);
+    if (InBlock()) {
+      if (BlockContinues(assignment)) {
+        UpdateBlock(assignment);
+      } else {
+        EndBlock();
+      }
+    }
+    if (!InBlock() && (assignment != NULL) &&
+        (assignment->op() == Token::ASSIGN)) {
+      StartBlock(assignment);
+    }
+  }
+
+ private:
+  // Returns true if the expressions appear to denote the same object.
+  // In the context of initialization blocks, we only consider expressions
+  // of the form 'x.y.z'.
+  static bool SameObject(Expression* e1, Expression* e2) {
+    VariableProxy* v1 = e1->AsVariableProxy();
+    VariableProxy* v2 = e2->AsVariableProxy();
+    if (v1 != NULL && v2 != NULL) {
+      return v1->name()->Equals(*v2->name());
+    }
+    Property* p1 = e1->AsProperty();
+    Property* p2 = e2->AsProperty();
+    if ((p1 == NULL) || (p2 == NULL)) return false;
+    Literal* key1 = p1->key()->AsLiteral();
+    Literal* key2 = p2->key()->AsLiteral();
+    if ((key1 == NULL) || (key2 == NULL)) return false;
+    if (!key1->handle()->IsString() || !key2->handle()->IsString()) {
+      return false;
+    }
+    String* name1 = String::cast(*key1->handle());
+    String* name2 = String::cast(*key2->handle());
+    if (!name1->Equals(name2)) return false;
+    return SameObject(p1->obj(), p2->obj());
+  }
+
+  // Returns true if the expressions appear to denote different properties
+  // of the same object.
+  static bool PropertyOfSameObject(Expression* e1, Expression* e2) {
+    Property* p1 = e1->AsProperty();
+    Property* p2 = e2->AsProperty();
+    if ((p1 == NULL) || (p2 == NULL)) return false;
+    return SameObject(p1->obj(), p2->obj());
+  }
+
+  bool BlockContinues(Assignment* assignment) {
+    if ((assignment == NULL) || (first_in_block_ == NULL)) return false;
+    if (assignment->op() != Token::ASSIGN) return false;
+    return PropertyOfSameObject(first_in_block_->target(),
+                                assignment->target());
+  }
+
+  void StartBlock(Assignment* assignment) {
+    first_in_block_ = assignment;
+    last_in_block_ = assignment;
+    block_size_ = 1;
+  }
+
+  void UpdateBlock(Assignment* assignment) {
+    last_in_block_ = assignment;
+    ++block_size_;
+  }
+
+  void EndBlock() {
+    if (block_size_ >= Parser::kMinInitializationBlock) {
+      first_in_block_->mark_block_start();
+      last_in_block_->mark_block_end();
+    }
+    last_in_block_ = first_in_block_ = NULL;
+    block_size_ = 0;
+  }
+
+  bool InBlock() { return first_in_block_ != NULL; }
+
+  Assignment* first_in_block_;
+  Assignment* last_in_block_;
+  int block_size_;
+
+  DISALLOW_COPY_AND_ASSIGN(InitializationBlockFinder);
+};
+
+
+// A ThisNamedPropertyAssigmentFinder finds and marks statements of the form
+// this.x = ...;, where x is a named property. It also determines whether a
+// function contains only assignments of this type.
+class ThisNamedPropertyAssigmentFinder : public ParserFinder {
+ public:
+  ThisNamedPropertyAssigmentFinder()
+      : only_this_property_assignments_(true),
+        only_simple_this_property_assignments_(true),
+        names_(NULL),
+        assigned_arguments_(NULL),
+        assigned_constants_(NULL) {}
+
+  void Update(Scope* scope, Statement* stat) {
+    // Bail out if the function already has non-this property assignment
+    // statements.
+    if (!only_this_property_assignments_) {
+      return;
+    }
+
+    // Check whether this statement is of the form this.x = ...;
+    Assignment* assignment = AsAssignment(stat);
+    if (IsThisPropertyAssignment(assignment)) {
+      HandleThisPropertyAssignment(scope, assignment);
+    } else {
+      only_this_property_assignments_ = false;
+      only_simple_this_property_assignments_ = false;
+    }
+  }
+
+  // Returns whether only statements of the form this.x = ...; were encountered.
+  bool only_this_property_assignments() {
+    return only_this_property_assignments_;
+  }
+
+  // Returns whether only statements of the form this.x = y;, where y is
+  // either a constant or a function argument, were encountered.
+  bool only_simple_this_property_assignments() {
+    return only_simple_this_property_assignments_;
+  }
+
+  // Returns a fixed array containing, for each assignment of the form
+  // this.x = y;, three elements: the property name, the index of the
+  // parameter assigned from (or -1), and the assigned constant (or the
+  // undefined value).
+  Handle<FixedArray> GetThisPropertyAssignments() {
+    if (names_ == NULL) {
+      return Factory::empty_fixed_array();
+    }
+    ASSERT(names_ != NULL);
+    ASSERT(assigned_arguments_ != NULL);
+    ASSERT_EQ(names_->length(), assigned_arguments_->length());
+    ASSERT_EQ(names_->length(), assigned_constants_->length());
+    Handle<FixedArray> assignments =
+        Factory::NewFixedArray(names_->length() * 3);
+    for (int i = 0; i < names_->length(); i++) {
+      assignments->set(i * 3, *names_->at(i));
+      assignments->set(i * 3 + 1, Smi::FromInt(assigned_arguments_->at(i)));
+      assignments->set(i * 3 + 2, *assigned_constants_->at(i));
+    }
+    return assignments;
+  }
+
+ private:
+  bool IsThisPropertyAssignment(Assignment* assignment) {
+    if (assignment != NULL) {
+      Property* property = assignment->target()->AsProperty();
+      return assignment->op() == Token::ASSIGN
+             && property != NULL
+             && property->obj()->AsVariableProxy() != NULL
+             && property->obj()->AsVariableProxy()->is_this();
+    }
+    return false;
+  }
+
+  void HandleThisPropertyAssignment(Scope* scope, Assignment* assignment) {
+    // Check that the property assigned to is a named property.
+    Property* property = assignment->target()->AsProperty();
+    ASSERT(property != NULL);
+    Literal* literal = property->key()->AsLiteral();
+    uint32_t dummy;
+    if (literal != NULL &&
+        literal->handle()->IsString() &&
+        !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
+      Handle<String> key = Handle<String>::cast(literal->handle());
+
+      // Check whether the value assigned is either a constant or matches the
+      // name of one of the arguments to the function.
+      if (assignment->value()->AsLiteral() != NULL) {
+        // Constant assigned.
+        Literal* literal = assignment->value()->AsLiteral();
+        AssignmentFromConstant(key, literal->handle());
+      } else if (assignment->value()->AsVariableProxy() != NULL) {
+        // Variable assigned.
+        Handle<String> name =
+            assignment->value()->AsVariableProxy()->name();
+        // Check whether the variable assigned matches an argument name.
+        int index = -1;
+        for (int i = 0; i < scope->num_parameters(); i++) {
+          if (*scope->parameter(i)->name() == *name) {
+            // Assigned from function argument.
+            index = i;
+            break;
+          }
+        }
+        if (index != -1) {
+          AssignmentFromParameter(key, index);
+        } else {
+          AssignmentFromSomethingElse(key);
+        }
+      } else {
+        AssignmentFromSomethingElse(key);
+      }
+    }
+  }
+
+  void AssignmentFromParameter(Handle<String> name, int index) {
+    EnsureAllocation();
+    names_->Add(name);
+    assigned_arguments_->Add(index);
+    assigned_constants_->Add(Factory::undefined_value());
+  }
+
+  void AssignmentFromConstant(Handle<String> name, Handle<Object> value) {
+    EnsureAllocation();
+    names_->Add(name);
+    assigned_arguments_->Add(-1);
+    assigned_constants_->Add(value);
+  }
+
+  void AssignmentFromSomethingElse(Handle<String> name) {
+    EnsureAllocation();
+    names_->Add(name);
+    assigned_arguments_->Add(-1);
+    assigned_constants_->Add(Factory::undefined_value());
+
+    // The this assignment is not a simple one.
+    only_simple_this_property_assignments_ = false;
+  }
+
+  void EnsureAllocation() {
+    if (names_ == NULL) {
+      ASSERT(assigned_arguments_ == NULL);
+      ASSERT(assigned_constants_ == NULL);
+      names_ = new ZoneStringList(4);
+      assigned_arguments_ = new ZoneList<int>(4);
+      assigned_constants_ = new ZoneObjectList(4);
+    }
+  }
+
+  bool only_this_property_assignments_;
+  bool only_simple_this_property_assignments_;
+  ZoneStringList* names_;
+  ZoneList<int>* assigned_arguments_;
+  ZoneObjectList* assigned_constants_;
+};
+
+
+void* Parser::ParseSourceElements(ZoneListWrapper<Statement>* processor,
+                                  int end_token,
+                                  bool* ok) {
+  // SourceElements ::
+  //   (Statement)* <end_token>
+
+  // Allocate a target stack to use for this set of source
+  // elements. This way, all scripts and functions get their own
+  // target stack thus avoiding illegal breaks and continues across
+  // functions.
+  TargetScope scope(this);
+
+  ASSERT(processor != NULL);
+  InitializationBlockFinder block_finder;
+  ThisNamedPropertyAssigmentFinder this_property_assignment_finder;
+  while (peek() != end_token) {
+    Statement* stat = ParseStatement(NULL, CHECK_OK);
+    if (stat == NULL || stat->IsEmpty()) continue;
+    // We find and mark the initialization blocks on top level code only.
+    // This is because the optimization prevents reuse of the map transitions,
+    // so it should be used only for code that will only be run once.
+    if (top_scope_->is_global_scope()) {
+      block_finder.Update(stat);
+    }
+    // Find and mark all assignments to named properties of 'this' (this.x =)
+    if (top_scope_->is_function_scope()) {
+      this_property_assignment_finder.Update(top_scope_, stat);
+    }
+    processor->Add(stat);
+  }
+
+  // Propagate the collected information on this property assignments.
+  if (top_scope_->is_function_scope()) {
+    if (this_property_assignment_finder.only_this_property_assignments()) {
+      temp_scope_->SetThisPropertyAssignmentInfo(
+          this_property_assignment_finder.only_this_property_assignments(),
+          this_property_assignment_finder.
+              only_simple_this_property_assignments(),
+          this_property_assignment_finder.GetThisPropertyAssignments());
+    }
+  }
+  return 0;
+}
+
+
+Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
+  // Statement ::
+  //   Block
+  //   VariableStatement
+  //   EmptyStatement
+  //   ExpressionStatement
+  //   IfStatement
+  //   IterationStatement
+  //   ContinueStatement
+  //   BreakStatement
+  //   ReturnStatement
+  //   WithStatement
+  //   LabelledStatement
+  //   SwitchStatement
+  //   ThrowStatement
+  //   TryStatement
+  //   DebuggerStatement
+
+  // Note: Since labels can only be used by 'break' and 'continue'
+  // statements, which themselves are only valid within blocks,
+  // iterations or 'switch' statements (i.e., BreakableStatements),
+  // labels can simply be ignored in all other cases; except for the
+  // trivial labeled break statement 'label: break label', which is
+  // parsed into an empty statement.
+
+  // Keep the source position of the statement
+  int statement_pos = scanner().peek_location().beg_pos;
+  Statement* stmt = NULL;
+  switch (peek()) {
+    case Token::LBRACE:
+      return ParseBlock(labels, ok);
+
+    case Token::CONST:  // fall through
+    case Token::VAR:
+      stmt = ParseVariableStatement(ok);
+      break;
+
+    case Token::SEMICOLON:
+      Next();
+      return factory()->EmptyStatement();
+
+    case Token::IF:
+      stmt = ParseIfStatement(labels, ok);
+      break;
+
+    case Token::DO:
+      stmt = ParseDoStatement(labels, ok);
+      break;
+
+    case Token::WHILE:
+      stmt = ParseWhileStatement(labels, ok);
+      break;
+
+    case Token::FOR:
+      stmt = ParseForStatement(labels, ok);
+      break;
+
+    case Token::CONTINUE:
+      stmt = ParseContinueStatement(ok);
+      break;
+
+    case Token::BREAK:
+      stmt = ParseBreakStatement(labels, ok);
+      break;
+
+    case Token::RETURN:
+      stmt = ParseReturnStatement(ok);
+      break;
+
+    case Token::WITH:
+      stmt = ParseWithStatement(labels, ok);
+      break;
+
+    case Token::SWITCH:
+      stmt = ParseSwitchStatement(labels, ok);
+      break;
+
+    case Token::THROW:
+      stmt = ParseThrowStatement(ok);
+      break;
+
+    case Token::TRY: {
+      // NOTE: It is somewhat complicated to have labels on
+      // try-statements. When breaking out of a try-finally statement,
+      // one must take great care not to treat it as a
+      // fall-through. It is much easier just to wrap the entire
+      // try-statement in a statement block and put the labels there.
+      Block* result = NEW(Block(labels, 1, false));
+      Target target(this, result);
+      TryStatement* statement = ParseTryStatement(CHECK_OK);
+      if (statement) {
+        statement->set_statement_pos(statement_pos);
+      }
+      if (result) result->AddStatement(statement);
+      return result;
+    }
+
+    case Token::FUNCTION:
+      return ParseFunctionDeclaration(ok);
+
+    case Token::NATIVE:
+      return ParseNativeDeclaration(ok);
+
+    case Token::DEBUGGER:
+      stmt = ParseDebuggerStatement(ok);
+      break;
+
+    default:
+      stmt = ParseExpressionOrLabelledStatement(labels, ok);
+  }
+
+  // Store the source position of the statement
+  if (stmt != NULL) stmt->set_statement_pos(statement_pos);
+  return stmt;
+}
+
+
+VariableProxy* AstBuildingParser::Declare(Handle<String> name,
+                                          Variable::Mode mode,
+                                          FunctionLiteral* fun,
+                                          bool resolve,
+                                          bool* ok) {
+  Variable* var = NULL;
+  // If we are inside a function, a declaration of a variable
+  // is a truly local variable, and the scope of the variable
+  // is always the function scope.
+
+  // If a function scope exists, then we can statically declare this
+  // variable and also set its mode. In any case, a Declaration node
+  // will be added to the scope so that the declaration can be added
+  // to the corresponding activation frame at runtime if necessary.
+  // For instance declarations inside an eval scope need to be added
+  // to the calling function context.
+  if (top_scope_->is_function_scope()) {
+    // Declare the variable in the function scope.
+    var = top_scope_->LocalLookup(name);
+    if (var == NULL) {
+      // Declare the name.
+      var = top_scope_->DeclareLocal(name, mode);
+    } else {
+      // The name was declared before; check for conflicting
+      // re-declarations. If the previous declaration was a const or the
+      // current declaration is a const then we have a conflict. There is
+      // similar code in runtime.cc in the Declare functions.
+      if ((mode == Variable::CONST) || (var->mode() == Variable::CONST)) {
+        // We only have vars and consts in declarations.
+        ASSERT(var->mode() == Variable::VAR ||
+               var->mode() == Variable::CONST);
+        const char* type = (var->mode() == Variable::VAR) ? "var" : "const";
+        Handle<String> type_string =
+            Factory::NewStringFromUtf8(CStrVector(type), TENURED);
+        Expression* expression =
+            NewThrowTypeError(Factory::redeclaration_symbol(),
+                              type_string, name);
+        top_scope_->SetIllegalRedeclaration(expression);
+      }
+    }
+  }
+
+  // We add a declaration node for every declaration. The compiler
+  // will only generate code if necessary. In particular, declarations
+  // for inner local variables that do not represent functions won't
+  // result in any generated code.
+  //
+  // Note that we always add an unresolved proxy even if it's not
+  // used, simply because we don't know in this method (w/o extra
+  // parameters) if the proxy is needed or not. The proxy will be
+  // bound during variable resolution time unless it was pre-bound
+  // below.
+  //
+  // WARNING: This will lead to multiple declaration nodes for the
+  // same variable if it is declared several times. This is not a
+  // semantic issue as long as we keep the source order, but it may be
+  // a performance issue since it may lead to repeated
+  // Runtime::DeclareContextSlot() calls.
+  VariableProxy* proxy = top_scope_->NewUnresolved(name, inside_with());
+  top_scope_->AddDeclaration(NEW(Declaration(proxy, mode, fun)));
+
+  // For global const variables we bind the proxy to a variable.
+  if (mode == Variable::CONST && top_scope_->is_global_scope()) {
+    ASSERT(resolve);  // should be set by all callers
+    Variable::Kind kind = Variable::NORMAL;
+    var = NEW(Variable(top_scope_, name, Variable::CONST, true, kind));
+  }
+
+  // If requested and we have a local variable, bind the proxy to the variable
+  // at parse-time. This is used for functions (and consts) declared inside
+  // statements: the corresponding function (or const) variable must be in the
+  // function scope and not a statement-local scope, e.g. as provided with a
+  // 'with' statement:
+  //
+  //   with (obj) {
+  //     function f() {}
+  //   }
+  //
+  // which is translated into:
+  //
+  //   with (obj) {
+  //     // in this case this is not: 'var f; f = function () {};'
+  //     var f = function () {};
+  //   }
+  //
+  // Note that if 'f' is accessed from inside the 'with' statement, it
+  // will be allocated in the context (because we must be able to look
+  // it up dynamically) but it will also be accessed statically, i.e.,
+  // with a context slot index and a context chain length for this
+  // initialization code. Thus, inside the 'with' statement, we need
+  // both access to the static and the dynamic context chain; the
+  // runtime needs to provide both.
+  if (resolve && var != NULL) proxy->BindTo(var);
+
+  return proxy;
+}
+
+
+// Language extension which is only enabled for source files loaded
+// through the API's extension mechanism.  A native function
+// declaration is resolved by looking up the function through a
+// callback provided by the extension.
+Statement* Parser::ParseNativeDeclaration(bool* ok) {
+  if (extension_ == NULL) {
+    ReportUnexpectedToken(Token::NATIVE);
+    *ok = false;
+    return NULL;
+  }
+
+  Expect(Token::NATIVE, CHECK_OK);
+  Expect(Token::FUNCTION, CHECK_OK);
+  Handle<String> name = ParseIdentifier(CHECK_OK);
+  Expect(Token::LPAREN, CHECK_OK);
+  bool done = (peek() == Token::RPAREN);
+  while (!done) {
+    ParseIdentifier(CHECK_OK);
+    done = (peek() == Token::RPAREN);
+    if (!done) Expect(Token::COMMA, CHECK_OK);
+  }
+  Expect(Token::RPAREN, CHECK_OK);
+  Expect(Token::SEMICOLON, CHECK_OK);
+
+  if (is_pre_parsing_) return NULL;
+
+  // Make sure that the function containing the native declaration
+  // isn't lazily compiled. The extension structures are only
+  // accessible while parsing the first time, not when reparsing
+  // because of lazy compilation.
+  top_scope_->ForceEagerCompilation();
+
+  // Compute the function template for the native function.
+  v8::Handle<v8::FunctionTemplate> fun_template =
+      extension_->GetNativeFunction(v8::Utils::ToLocal(name));
+  ASSERT(!fun_template.IsEmpty());
+
+  // Instantiate the function and create a boilerplate function from it.
+  Handle<JSFunction> fun = Utils::OpenHandle(*fun_template->GetFunction());
+  const int literals = fun->NumberOfLiterals();
+  Handle<Code> code = Handle<Code>(fun->shared()->code());
+  Handle<JSFunction> boilerplate =
+      Factory::NewFunctionBoilerplate(name, literals, false, code);
+
+  // Copy the function data to the boilerplate. Used by
+  // builtins.cc:HandleApiCall to perform argument type checks and to
+  // find the right native code to call.
+  boilerplate->shared()->set_function_data(fun->shared()->function_data());
+  int parameters = fun->shared()->formal_parameter_count();
+  boilerplate->shared()->set_formal_parameter_count(parameters);
+
+  // TODO(1240846): It's weird that native function declarations are
+  // introduced dynamically when we meet their declarations, whereas
+  // other functions are set up when entering the surrounding scope.
+  FunctionBoilerplateLiteral* lit =
+      NEW(FunctionBoilerplateLiteral(boilerplate));
+  VariableProxy* var = Declare(name, Variable::VAR, NULL, true, CHECK_OK);
+  return NEW(ExpressionStatement(
+      new Assignment(Token::INIT_VAR, var, lit, RelocInfo::kNoPosition)));
+}
+
+
+Statement* Parser::ParseFunctionDeclaration(bool* ok) {
+  // FunctionDeclaration ::
+  //   'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
+  Expect(Token::FUNCTION, CHECK_OK);
+  int function_token_position = scanner().location().beg_pos;
+  Handle<String> name = ParseIdentifier(CHECK_OK);
+  FunctionLiteral* fun = ParseFunctionLiteral(name,
+                                              function_token_position,
+                                              DECLARATION,
+                                              CHECK_OK);
+  // Even if we're not at the top-level of the global or a function
+  // scope, we treat it as such and introduce the function with its
+  // initial value upon entering the corresponding scope.
+  Declare(name, Variable::VAR, fun, true, CHECK_OK);
+  return factory()->EmptyStatement();
+}
+
+
+Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) {
+  // Block ::
+  //   '{' Statement* '}'
+
+  // Note that a Block does not introduce a new execution scope!
+  // (ECMA-262, 3rd, 12.2)
+  //
+  // Construct block expecting 16 statements.
+  Block* result = NEW(Block(labels, 16, false));
+  Target target(this, result);
+  Expect(Token::LBRACE, CHECK_OK);
+  while (peek() != Token::RBRACE) {
+    Statement* stat = ParseStatement(NULL, CHECK_OK);
+    if (stat && !stat->IsEmpty()) result->AddStatement(stat);
+  }
+  Expect(Token::RBRACE, CHECK_OK);
+  return result;
+}
+
+
+Block* Parser::ParseVariableStatement(bool* ok) {
+  // VariableStatement ::
+  //   VariableDeclarations ';'
+
+  Expression* dummy;  // to satisfy the ParseVariableDeclarations() signature
+  Block* result = ParseVariableDeclarations(true, &dummy, CHECK_OK);
+  ExpectSemicolon(CHECK_OK);
+  return result;
+}
+
+
+// If the variable declaration declares exactly one non-const
+// variable, then *var is set to that variable. In all other cases,
+// *var is untouched; in particular, it is the caller's responsibility
+// to initialize it properly. This mechanism is used for the parsing
+// of 'for-in' loops.
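+// For example, with 'for (var x in obj) ...' the declaration part reports
+// 'x' back through *var so the enclosing for-in construct can use it as its
+// iteration target.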
+Block* Parser::ParseVariableDeclarations(bool accept_IN,
+                                         Expression** var,
+                                         bool* ok) {
+  // VariableDeclarations ::
+  //   ('var' | 'const') (Identifier ('=' AssignmentExpression)?)+[',']
+
+  Variable::Mode mode = Variable::VAR;
+  bool is_const = false;
+  if (peek() == Token::VAR) {
+    Consume(Token::VAR);
+  } else if (peek() == Token::CONST) {
+    Consume(Token::CONST);
+    mode = Variable::CONST;
+    is_const = true;
+  } else {
+    UNREACHABLE();  // by current callers
+  }
+
+  // The scope of a variable/const declared anywhere inside a function
+  // is the entire function (ECMA-262, 3rd, 10.1.3, and 12.2). Thus we can
+  // transform a source-level variable/const declaration into a (Function)
+  // Scope declaration, and rewrite the source-level initialization into an
+  // assignment statement. We use a block to collect multiple assignments.
+  //
+  // We mark the block as initializer block because we don't want the
+  // rewriter to add a '.result' assignment to such a block (to get compliant
+  // behavior for code such as print(eval('var x = 7')), and for cosmetic
+  // reasons when pretty-printing. Also, unless an assignment (initialization)
+  // is inside an initializer block, it is ignored.
+  //
+  // Create new block with one expected declaration.
+  Block* block = NEW(Block(NULL, 1, true));
+  VariableProxy* last_var = NULL;  // the last variable declared
+  int nvars = 0;  // the number of variables declared
+  do {
+    // Parse variable name.
+    if (nvars > 0) Consume(Token::COMMA);
+    Handle<String> name = ParseIdentifier(CHECK_OK);
+
+    // Declare variable.
+    // Note that we must *always* handle the initial value via a separate init
+    // assignment for variables and constants because the value must be assigned
+    // when the variable is encountered in the source. But the variable/constant
+    // is declared (and set to 'undefined') upon entering the function within
+    // which the variable or constant is declared. Only function variables have
+    // an initial value in the declaration (because they are initialized upon
+    // entering the function).
+    //
+    // If we have a const declaration, in an inner scope, the proxy is always
+    // bound to the declared variable (independent of possibly surrounding with
+    // statements).
+    last_var = Declare(name, mode, NULL,
+                       is_const /* always bound for CONST! */,
+                       CHECK_OK);
+    nvars++;
+
+    // Parse initialization expression if present and/or needed. A
+    // declaration of the form:
+    //
+    //    var v = x;
+    //
+    // is syntactic sugar for:
+    //
+    //    var v; v = x;
+    //
+    // In particular, we need to re-lookup 'v' as it may be a
+    // different 'v' than the 'v' in the declaration (if we are inside
+    // a 'with' statement that makes an object property with name 'v'
+    // visible).
+    //
+    // However, note that const declarations are different! A const
+    // declaration of the form:
+    //
+    //   const c = x;
+    //
+    // is *not* syntactic sugar for:
+    //
+    //   const c; c = x;
+    //
+    // The "variable" c initialized to x is the same as the declared
+    // one - there is no re-lookup (see the last parameter of the
+    // Declare() call above).
+
+    Expression* value = NULL;
+    int position = -1;
+    if (peek() == Token::ASSIGN) {
+      Expect(Token::ASSIGN, CHECK_OK);
+      position = scanner().location().beg_pos;
+      value = ParseAssignmentExpression(accept_IN, CHECK_OK);
+    }
+
+    // Make sure that 'const c' actually initializes 'c' to undefined
+    // even though it seems like a stupid thing to do.
+    if (value == NULL && is_const) {
+      value = GetLiteralUndefined();
+    }
+
+    // Global variable declarations must be compiled in a specific
+    // way. When the script containing the global variable declaration
+    // is entered, the global variable must be declared, so that if it
+    // doesn't exist (not even in a prototype of the global object) it
+    // gets created with an initial undefined value. This is handled
+    // by the declarations part of the function representing the
+    // top-level global code; see Runtime::DeclareGlobalVariable. If
+    // it already exists (in the object or in a prototype), it is
+    // *not* touched until the variable declaration statement is
+    // executed.
+    //
+    // Executing the variable declaration statement will always
+    // guarantee to give the global object a "local" variable; a
+    // variable defined in the global object and not in any
+    // prototype. This way, global variable declarations can shadow
+    // properties in the prototype chain, but only after the variable
+    // declaration statement has been executed. This is important in
+    // browsers where the global object (window) has lots of
+    // properties defined in prototype objects.
+
+    if (!is_pre_parsing_ && top_scope_->is_global_scope()) {
+      // Compute the arguments for the runtime call.
+      ZoneList<Expression*>* arguments = new ZoneList<Expression*>(2);
+      // Be careful not to assign a value to the global variable if
+      // we're in a with. The initialization value should not
+      // necessarily be stored in the global object in that case,
+      // which is why we need to generate a separate assignment node.
+      arguments->Add(NEW(Literal(name)));  // we have at least 1 parameter
+      if (is_const || (value != NULL && !inside_with())) {
+        arguments->Add(value);
+        value = NULL;  // zap the value to avoid the unnecessary assignment
+      }
+      // Construct the call to Runtime::DeclareGlobal{Variable,Const}Locally
+      // and add it to the initialization statement block. Note that
+      // this function does different things depending on if we have
+      // 1 or 2 parameters.
+      CallRuntime* initialize;
+      if (is_const) {
+        initialize =
+          NEW(CallRuntime(
+            Factory::InitializeConstGlobal_symbol(),
+            Runtime::FunctionForId(Runtime::kInitializeConstGlobal),
+            arguments));
+      } else {
+        initialize =
+          NEW(CallRuntime(
+            Factory::InitializeVarGlobal_symbol(),
+            Runtime::FunctionForId(Runtime::kInitializeVarGlobal),
+            arguments));
+      }
+      block->AddStatement(NEW(ExpressionStatement(initialize)));
+    }
+
+    // Add an assignment node to the initialization statement block if
+    // we still have a pending initialization value. We must distinguish
+    // between variables and constants: Variable initializations are simply
+    // assignments (with all the consequences if they are inside a 'with'
+    // statement - they may change a 'with' object property). Constant
+    // initializations always assign to the declared constant which is
+    // always at the function scope level. This is only relevant for
+    // dynamically looked-up variables and constants (the start context
+    // for constant lookups is always the function context, while it is
+    // the top context for variables). Sigh...
+    if (value != NULL) {
+      Token::Value op = (is_const ? Token::INIT_CONST : Token::INIT_VAR);
+      Assignment* assignment = NEW(Assignment(op, last_var, value, position));
+      if (block) block->AddStatement(NEW(ExpressionStatement(assignment)));
+    }
+  } while (peek() == Token::COMMA);
+
+  if (!is_const && nvars == 1) {
+    // We have a single, non-const variable.
+    if (is_pre_parsing_) {
+      // If we're preparsing then we need to set the var to something
+      // in order for for-in loops to parse correctly.
+      *var = ValidLeftHandSideSentinel::instance();
+    } else {
+      ASSERT(last_var != NULL);
+      *var = last_var;
+    }
+  }
+
+  return block;
+}
+
+
+static bool ContainsLabel(ZoneStringList* labels, Handle<String> label) {
+  ASSERT(!label.is_null());
+  if (labels != NULL)
+    for (int i = labels->length(); i-- > 0; )
+      if (labels->at(i).is_identical_to(label))
+        return true;
+
+  return false;
+}
+
+
+Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
+                                                      bool* ok) {
+  // ExpressionStatement | LabelledStatement ::
+  //   Expression ';'
+  //   Identifier ':' Statement
+
+  Expression* expr = ParseExpression(true, CHECK_OK);
+  if (peek() == Token::COLON && expr &&
+      expr->AsVariableProxy() != NULL &&
+      !expr->AsVariableProxy()->is_this()) {
+    VariableProxy* var = expr->AsVariableProxy();
+    Handle<String> label = var->name();
+    // TODO(1240780): We don't check for redeclaration of labels
+    // during preparsing since keeping track of the set of active
+    // labels requires nontrivial changes to the way scopes are
+    // structured.  However, these are probably changes we want to
+    // make later anyway so we should go back and fix this then.
+    if (!is_pre_parsing_) {
+      if (ContainsLabel(labels, label) || TargetStackContainsLabel(label)) {
+        SmartPointer<char> c_string = label->ToCString(DISALLOW_NULLS);
+        const char* elms[2] = { "Label", *c_string };
+        Vector<const char*> args(elms, 2);
+        ReportMessage("redeclaration", args);
+        *ok = false;
+        return NULL;
+      }
+      if (labels == NULL) labels = new ZoneStringList(4);
+      labels->Add(label);
+      // Remove the "ghost" variable that turned out to be a label
+      // from the top scope. This way, we don't try to resolve it
+      // during the scope processing.
+      top_scope_->RemoveUnresolved(var);
+    }
+    Expect(Token::COLON, CHECK_OK);
+    return ParseStatement(labels, ok);
+  }
+
+  // Parsed expression statement.
+  ExpectSemicolon(CHECK_OK);
+  return NEW(ExpressionStatement(expr));
+}
+
+
+IfStatement* Parser::ParseIfStatement(ZoneStringList* labels, bool* ok) {
+  // IfStatement ::
+  //   'if' '(' Expression ')' Statement ('else' Statement)?
+
+  Expect(Token::IF, CHECK_OK);
+  Expect(Token::LPAREN, CHECK_OK);
+  Expression* condition = ParseExpression(true, CHECK_OK);
+  Expect(Token::RPAREN, CHECK_OK);
+  Statement* then_statement = ParseStatement(labels, CHECK_OK);
+  Statement* else_statement = NULL;
+  if (peek() == Token::ELSE) {
+    Next();
+    else_statement = ParseStatement(labels, CHECK_OK);
+  } else if (!is_pre_parsing_) {
+    else_statement = factory()->EmptyStatement();
+  }
+  return NEW(IfStatement(condition, then_statement, else_statement));
+}
+
+
+Statement* Parser::ParseContinueStatement(bool* ok) {
+  // ContinueStatement ::
+  //   'continue' Identifier? ';'
+
+  Expect(Token::CONTINUE, CHECK_OK);
+  Handle<String> label = Handle<String>::null();
+  Token::Value tok = peek();
+  if (!scanner_.has_line_terminator_before_next() &&
+      tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
+    label = ParseIdentifier(CHECK_OK);
+  }
+  IterationStatement* target = NULL;
+  if (!is_pre_parsing_) {
+    target = LookupContinueTarget(label, CHECK_OK);
+    if (target == NULL) {
+      // Illegal continue statement.  To be consistent with KJS we delay
+      // reporting of the syntax error until runtime.
+      Handle<String> error_type = Factory::illegal_continue_symbol();
+      if (!label.is_null()) error_type = Factory::unknown_label_symbol();
+      Expression* throw_error = NewThrowSyntaxError(error_type, label);
+      return NEW(ExpressionStatement(throw_error));
+    }
+  }
+  ExpectSemicolon(CHECK_OK);
+  return NEW(ContinueStatement(target));
+}
+
+
+Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) {
+  // BreakStatement ::
+  //   'break' Identifier? ';'
+
+  Expect(Token::BREAK, CHECK_OK);
+  Handle<String> label;
+  Token::Value tok = peek();
+  if (!scanner_.has_line_terminator_before_next() &&
+      tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
+    label = ParseIdentifier(CHECK_OK);
+  }
+  // Parse labeled break statements that target themselves into
+  // empty statements, e.g. 'l1: l2: l3: break l2;'
+  if (!label.is_null() && ContainsLabel(labels, label)) {
+    return factory()->EmptyStatement();
+  }
+  BreakableStatement* target = NULL;
+  if (!is_pre_parsing_) {
+    target = LookupBreakTarget(label, CHECK_OK);
+    if (target == NULL) {
+      // Illegal break statement.  To be consistent with KJS we delay
+      // reporting of the syntax error until runtime.
+      Handle<String> error_type = Factory::illegal_break_symbol();
+      if (!label.is_null()) error_type = Factory::unknown_label_symbol();
+      Expression* throw_error = NewThrowSyntaxError(error_type, label);
+      return NEW(ExpressionStatement(throw_error));
+    }
+  }
+  ExpectSemicolon(CHECK_OK);
+  return NEW(BreakStatement(target));
+}
+
+
+Statement* Parser::ParseReturnStatement(bool* ok) {
+  // ReturnStatement ::
+  //   'return' Expression? ';'
+
+  // Consume the return token. It is necessary to do this before
+  // reporting any errors on it, because of the way errors are
+  // reported (underlining).
+  Expect(Token::RETURN, CHECK_OK);
+
+  // An ECMAScript program is considered syntactically incorrect if it
+  // contains a return statement that is not within the body of a
+  // function. See ECMA-262, section 12.9, page 67.
+  //
+  // To be consistent with KJS we report the syntax error at runtime.
+  if (!is_pre_parsing_ && !top_scope_->is_function_scope()) {
+    Handle<String> type = Factory::illegal_return_symbol();
+    Expression* throw_error = NewThrowSyntaxError(type, Handle<Object>::null());
+    return NEW(ExpressionStatement(throw_error));
+  }
+
+  Token::Value tok = peek();
+  if (scanner_.has_line_terminator_before_next() ||
+      tok == Token::SEMICOLON ||
+      tok == Token::RBRACE ||
+      tok == Token::EOS) {
+    ExpectSemicolon(CHECK_OK);
+    return NEW(ReturnStatement(GetLiteralUndefined()));
+  }
+
+  Expression* expr = ParseExpression(true, CHECK_OK);
+  ExpectSemicolon(CHECK_OK);
+  return NEW(ReturnStatement(expr));
+}
+
+
+Block* Parser::WithHelper(Expression* obj,
+                          ZoneStringList* labels,
+                          bool is_catch_block,
+                          bool* ok) {
+  // Parse the statement and collect escaping labels.
+  ZoneList<BreakTarget*>* target_list = NEW(ZoneList<BreakTarget*>(0));
+  TargetCollector collector(target_list);
+  Statement* stat;
+  { Target target(this, &collector);
+    with_nesting_level_++;
+    top_scope_->RecordWithStatement();
+    stat = ParseStatement(labels, CHECK_OK);
+    with_nesting_level_--;
+  }
+  // Create resulting block with two statements.
+  // 1: Evaluate the with expression.
+  // 2: The try-finally block evaluating the body.
+  Block* result = NEW(Block(NULL, 2, false));
+
+  if (result != NULL) {
+    result->AddStatement(NEW(WithEnterStatement(obj, is_catch_block)));
+
+    // Create body block.
+    Block* body = NEW(Block(NULL, 1, false));
+    body->AddStatement(stat);
+
+    // Create exit block.
+    Block* exit = NEW(Block(NULL, 1, false));
+    exit->AddStatement(NEW(WithExitStatement()));
+
+    // Return a try-finally statement.
+    TryFinally* wrapper = NEW(TryFinally(body, exit));
+    wrapper->set_escaping_targets(collector.targets());
+    result->AddStatement(wrapper);
+  }
+  return result;
+}
+
+
+Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) {
+  // WithStatement ::
+  //   'with' '(' Expression ')' Statement
+
+  Expect(Token::WITH, CHECK_OK);
+  Expect(Token::LPAREN, CHECK_OK);
+  Expression* expr = ParseExpression(true, CHECK_OK);
+  Expect(Token::RPAREN, CHECK_OK);
+
+  return WithHelper(expr, labels, false, CHECK_OK);
+}
+
+
+CaseClause* Parser::ParseCaseClause(bool* default_seen_ptr, bool* ok) {
+  // CaseClause ::
+  //   'case' Expression ':' Statement*
+  //   'default' ':' Statement*
+
+  Expression* label = NULL;  // NULL expression indicates default case
+  if (peek() == Token::CASE) {
+    Expect(Token::CASE, CHECK_OK);
+    label = ParseExpression(true, CHECK_OK);
+  } else {
+    Expect(Token::DEFAULT, CHECK_OK);
+    if (*default_seen_ptr) {
+      ReportMessage("multiple_defaults_in_switch",
+                    Vector<const char*>::empty());
+      *ok = false;
+      return NULL;
+    }
+    *default_seen_ptr = true;
+  }
+  Expect(Token::COLON, CHECK_OK);
+
+  ZoneListWrapper<Statement> statements = factory()->NewList<Statement>(5);
+  while (peek() != Token::CASE &&
+         peek() != Token::DEFAULT &&
+         peek() != Token::RBRACE) {
+    Statement* stat = ParseStatement(NULL, CHECK_OK);
+    statements.Add(stat);
+  }
+
+  return NEW(CaseClause(label, statements.elements()));
+}
+
+
+SwitchStatement* Parser::ParseSwitchStatement(ZoneStringList* labels,
+                                              bool* ok) {
+  // SwitchStatement ::
+  //   'switch' '(' Expression ')' '{' CaseClause* '}'
+
+  SwitchStatement* statement = NEW(SwitchStatement(labels));
+  Target target(this, statement);
+
+  Expect(Token::SWITCH, CHECK_OK);
+  Expect(Token::LPAREN, CHECK_OK);
+  Expression* tag = ParseExpression(true, CHECK_OK);
+  Expect(Token::RPAREN, CHECK_OK);
+
+  bool default_seen = false;
+  ZoneListWrapper<CaseClause> cases = factory()->NewList<CaseClause>(4);
+  Expect(Token::LBRACE, CHECK_OK);
+  while (peek() != Token::RBRACE) {
+    CaseClause* clause = ParseCaseClause(&default_seen, CHECK_OK);
+    cases.Add(clause);
+  }
+  Expect(Token::RBRACE, CHECK_OK);
+
+  if (statement) statement->Initialize(tag, cases.elements());
+  return statement;
+}
+
+
+Statement* Parser::ParseThrowStatement(bool* ok) {
+  // ThrowStatement ::
+  //   'throw' Expression ';'
+
+  Expect(Token::THROW, CHECK_OK);
+  int pos = scanner().location().beg_pos;
+  if (scanner_.has_line_terminator_before_next()) {
+    ReportMessage("newline_after_throw", Vector<const char*>::empty());
+    *ok = false;
+    return NULL;
+  }
+  Expression* exception = ParseExpression(true, CHECK_OK);
+  ExpectSemicolon(CHECK_OK);
+
+  return NEW(ExpressionStatement(new Throw(exception, pos)));
+}
+
+
+TryStatement* Parser::ParseTryStatement(bool* ok) {
+  // TryStatement ::
+  //   'try' Block Catch
+  //   'try' Block Finally
+  //   'try' Block Catch Finally
+  //
+  // Catch ::
+  //   'catch' '(' Identifier ')' Block
+  //
+  // Finally ::
+  //   'finally' Block
+
+  Expect(Token::TRY, CHECK_OK);
+
+  ZoneList<BreakTarget*>* target_list = NEW(ZoneList<BreakTarget*>(0));
+  TargetCollector collector(target_list);
+  Block* try_block;
+
+  { Target target(this, &collector);
+    try_block = ParseBlock(NULL, CHECK_OK);
+  }
+
+  Block* catch_block = NULL;
+  VariableProxy* catch_var = NULL;
+  Block* finally_block = NULL;
+
+  Token::Value tok = peek();
+  if (tok != Token::CATCH && tok != Token::FINALLY) {
+    ReportMessage("no_catch_or_finally", Vector<const char*>::empty());
+    *ok = false;
+    return NULL;
+  }
+
+  // If we can break out from the catch block and there is a finally block,
+  // then we will need to collect jump targets from the catch block. Since
+  // we don't know yet if there will be a finally block, we always collect
+  // the jump targets.
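+  // For example, in 'l: try { } catch (e) { break l; } finally { }' the
+  // break in the catch block escapes through the finally block, so its
+  // target must be known to the try-finally statement built below.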
+  ZoneList<BreakTarget*>* catch_target_list = NEW(ZoneList<BreakTarget*>(0));
+  TargetCollector catch_collector(catch_target_list);
+  bool has_catch = false;
+  if (tok == Token::CATCH) {
+    has_catch = true;
+    Consume(Token::CATCH);
+
+    Expect(Token::LPAREN, CHECK_OK);
+    Handle<String> name = ParseIdentifier(CHECK_OK);
+    Expect(Token::RPAREN, CHECK_OK);
+
+    if (peek() == Token::LBRACE) {
+      // Allocate a temporary for holding the caught exception while
+      // executing the catch block.
+      catch_var = top_scope_->NewTemporary(Factory::catch_var_symbol());
+      Literal* name_literal = NEW(Literal(name));
+      Expression* obj = NEW(CatchExtensionObject(name_literal, catch_var));
+      { Target target(this, &catch_collector);
+        catch_block = WithHelper(obj, NULL, true, CHECK_OK);
+      }
+    } else {
+      Expect(Token::LBRACE, CHECK_OK);
+    }
+
+    tok = peek();
+  }
+
+  if (tok == Token::FINALLY || !has_catch) {
+    Consume(Token::FINALLY);
+    // Parse the finally block.
+    finally_block = ParseBlock(NULL, CHECK_OK);
+  }
+
+  // Simplify the AST nodes by converting:
+  //   'try { } catch { } finally { }'
+  // to:
+  //   'try { try { } catch { } } finally { }'
+
+  if (!is_pre_parsing_ && catch_block != NULL && finally_block != NULL) {
+    TryCatch* statement = NEW(TryCatch(try_block, catch_var, catch_block));
+    statement->set_escaping_targets(collector.targets());
+    try_block = NEW(Block(NULL, 1, false));
+    try_block->AddStatement(statement);
+    catch_block = NULL;
+  }
+
+  TryStatement* result = NULL;
+  if (!is_pre_parsing_) {
+    if (catch_block != NULL) {
+      ASSERT(finally_block == NULL);
+      result = NEW(TryCatch(try_block, catch_var, catch_block));
+      result->set_escaping_targets(collector.targets());
+    } else {
+      ASSERT(finally_block != NULL);
+      result = NEW(TryFinally(try_block, finally_block));
+      // Add the jump targets of the try block and the catch block.
+      for (int i = 0; i < collector.targets()->length(); i++) {
+        catch_collector.AddTarget(collector.targets()->at(i));
+      }
+      result->set_escaping_targets(catch_collector.targets());
+    }
+  }
+
+  return result;
+}
+
+
+LoopStatement* Parser::ParseDoStatement(ZoneStringList* labels, bool* ok) {
+  // DoStatement ::
+  //   'do' Statement 'while' '(' Expression ')' ';'
+
+  LoopStatement* loop = NEW(LoopStatement(labels, LoopStatement::DO_LOOP));
+  Target target(this, loop);
+
+  Expect(Token::DO, CHECK_OK);
+  Statement* body = ParseStatement(NULL, CHECK_OK);
+  Expect(Token::WHILE, CHECK_OK);
+  Expect(Token::LPAREN, CHECK_OK);
+  Expression* cond = ParseExpression(true, CHECK_OK);
+  Expect(Token::RPAREN, CHECK_OK);
+
+  // Allow do-statements to be terminated with and without
+  // semi-colons. This allows code such as 'do;while(0)return' to
+  // parse, which would not be the case if we had used the
+  // ExpectSemicolon() functionality here.
+  if (peek() == Token::SEMICOLON) Consume(Token::SEMICOLON);
+
+  if (loop) loop->Initialize(NULL, cond, NULL, body);
+  return loop;
+}
+
+
+LoopStatement* Parser::ParseWhileStatement(ZoneStringList* labels, bool* ok) {
+  // WhileStatement ::
+  //   'while' '(' Expression ')' Statement
+
+  LoopStatement* loop = NEW(LoopStatement(labels, LoopStatement::WHILE_LOOP));
+  Target target(this, loop);
+
+  Expect(Token::WHILE, CHECK_OK);
+  Expect(Token::LPAREN, CHECK_OK);
+  Expression* cond = ParseExpression(true, CHECK_OK);
+  Expect(Token::RPAREN, CHECK_OK);
+  Statement* body = ParseStatement(NULL, CHECK_OK);
+
+  if (loop) loop->Initialize(NULL, cond, NULL, body);
+  return loop;
+}
+
+
+Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
+  // ForStatement ::
+  //   'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
+
+  Statement* init = NULL;
+
+  Expect(Token::FOR, CHECK_OK);
+  Expect(Token::LPAREN, CHECK_OK);
+  if (peek() != Token::SEMICOLON) {
+    if (peek() == Token::VAR || peek() == Token::CONST) {
+      Expression* each = NULL;
+      Block* variable_statement =
+          ParseVariableDeclarations(false, &each, CHECK_OK);
+      if (peek() == Token::IN && each != NULL) {
+        ForInStatement* loop = NEW(ForInStatement(labels));
+        Target target(this, loop);
+
+        Expect(Token::IN, CHECK_OK);
+        Expression* enumerable = ParseExpression(true, CHECK_OK);
+        Expect(Token::RPAREN, CHECK_OK);
+
+        Statement* body = ParseStatement(NULL, CHECK_OK);
+        if (is_pre_parsing_) {
+          return NULL;
+        } else {
+          loop->Initialize(each, enumerable, body);
+          Block* result = NEW(Block(NULL, 2, false));
+          result->AddStatement(variable_statement);
+          result->AddStatement(loop);
+          // Parsed for-in loop w/ variable/const declaration.
+          return result;
+        }
+
+      } else {
+        init = variable_statement;
+      }
+
+    } else {
+      Expression* expression = ParseExpression(false, CHECK_OK);
+      if (peek() == Token::IN) {
+        // Signal a reference error if the expression is an invalid
+        // left-hand side expression.  We could report this as a syntax
+        // error here but for compatibility with JSC we choose to report
+        // the error at runtime.
+        if (expression == NULL || !expression->IsValidLeftHandSide()) {
+          Handle<String> type = Factory::invalid_lhs_in_for_in_symbol();
+          expression = NewThrowReferenceError(type);
+        }
+        ForInStatement* loop = NEW(ForInStatement(labels));
+        Target target(this, loop);
+
+        Expect(Token::IN, CHECK_OK);
+        Expression* enumerable = ParseExpression(true, CHECK_OK);
+        Expect(Token::RPAREN, CHECK_OK);
+
+        Statement* body = ParseStatement(NULL, CHECK_OK);
+        if (loop) loop->Initialize(expression, enumerable, body);
+
+        // Parsed for-in loop.
+        return loop;
+
+      } else {
+        init = NEW(ExpressionStatement(expression));
+      }
+    }
+  }
+
+  // Standard 'for' loop
+  LoopStatement* loop = NEW(LoopStatement(labels, LoopStatement::FOR_LOOP));
+  Target target(this, loop);
+
+  // Parsed initializer at this point.
+  Expect(Token::SEMICOLON, CHECK_OK);
+
+  Expression* cond = NULL;
+  if (peek() != Token::SEMICOLON) {
+    cond = ParseExpression(true, CHECK_OK);
+  }
+  Expect(Token::SEMICOLON, CHECK_OK);
+
+  Statement* next = NULL;
+  if (peek() != Token::RPAREN) {
+    Expression* exp = ParseExpression(true, CHECK_OK);
+    next = NEW(ExpressionStatement(exp));
+  }
+  Expect(Token::RPAREN, CHECK_OK);
+
+  Statement* body = ParseStatement(NULL, CHECK_OK);
+
+  if (loop) loop->Initialize(init, cond, next, body);
+  return loop;
+}
+
+
+// Precedence = 1
+Expression* Parser::ParseExpression(bool accept_IN, bool* ok) {
+  // Expression ::
+  //   AssignmentExpression
+  //   Expression ',' AssignmentExpression
+
+  Expression* result = ParseAssignmentExpression(accept_IN, CHECK_OK);
+  while (peek() == Token::COMMA) {
+    Expect(Token::COMMA, CHECK_OK);
+    Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
+    result = NEW(BinaryOperation(Token::COMMA, result, right));
+  }
+  return result;
+}
+
+
+// Precedence = 2
+Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
+  // AssignmentExpression ::
+  //   ConditionalExpression
+  //   LeftHandSideExpression AssignmentOperator AssignmentExpression
+
+  Expression* expression = ParseConditionalExpression(accept_IN, CHECK_OK);
+
+  if (!Token::IsAssignmentOp(peek())) {
+    // Parsed conditional expression only (no assignment).
+    return expression;
+  }
+
+  // Signal a reference error if the expression is an invalid left-hand
+  // side expression.  We could report this as a syntax error here but
+  // for compatibility with JSC we choose to report the error at
+  // runtime.
+  if (expression == NULL || !expression->IsValidLeftHandSide()) {
+    Handle<String> type = Factory::invalid_lhs_in_assignment_symbol();
+    expression = NewThrowReferenceError(type);
+  }
+
+  Token::Value op = Next();  // Get assignment operator.
+  int pos = scanner().location().beg_pos;
+  Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
+
+  // TODO(1231235): We try to estimate the set of properties set by
+  // constructors. We define a new property whenever there is an
+  // assignment to a property of 'this'. We should probably only add
+  // properties if we haven't seen them before. Otherwise we'll
+  // probably overestimate the number of properties.
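+  // For example, every 'this.x = ...' seen here bumps the expected
+  // property count recorded for the enclosing function.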
+  Property* property = expression ? expression->AsProperty() : NULL;
+  if (op == Token::ASSIGN &&
+      property != NULL &&
+      property->obj()->AsVariableProxy() != NULL &&
+      property->obj()->AsVariableProxy()->is_this()) {
+    temp_scope_->AddProperty();
+  }
+
+  return NEW(Assignment(op, expression, right, pos));
+}
+
+
+// Precedence = 3
+Expression* Parser::ParseConditionalExpression(bool accept_IN, bool* ok) {
+  // ConditionalExpression ::
+  //   LogicalOrExpression
+  //   LogicalOrExpression '?' AssignmentExpression ':' AssignmentExpression
+
+  // We start using the binary expression parser for prec >= 4 only!
+  Expression* expression = ParseBinaryExpression(4, accept_IN, CHECK_OK);
+  if (peek() != Token::CONDITIONAL) return expression;
+  Consume(Token::CONDITIONAL);
+  // In parsing the first assignment expression in conditional
+  // expressions we always accept the 'in' keyword; see ECMA-262,
+  // section 11.12, page 58.
+  Expression* left = ParseAssignmentExpression(true, CHECK_OK);
+  Expect(Token::COLON, CHECK_OK);
+  Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
+  return NEW(Conditional(expression, left, right));
+}
+
+
+static int Precedence(Token::Value tok, bool accept_IN) {
+  if (tok == Token::IN && !accept_IN)
+    return 0;  // 0 precedence will terminate binary expression parsing
+
+  return Token::Precedence(tok);
+}
+
+
+// Precedence >= 4
+Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
+  ASSERT(prec >= 4);
+  Expression* x = ParseUnaryExpression(CHECK_OK);
+  for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
+    // prec1 >= 4
+    while (Precedence(peek(), accept_IN) == prec1) {
+      Token::Value op = Next();
+      Expression* y = ParseBinaryExpression(prec1 + 1, accept_IN, CHECK_OK);
+
+      // Compute some expressions involving only number literals.
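+      // For example, '3 * 4' is folded into the single literal 12 here.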
+      if (x && x->AsLiteral() && x->AsLiteral()->handle()->IsNumber() &&
+          y && y->AsLiteral() && y->AsLiteral()->handle()->IsNumber()) {
+        double x_val = x->AsLiteral()->handle()->Number();
+        double y_val = y->AsLiteral()->handle()->Number();
+
+        switch (op) {
+          case Token::ADD:
+            x = NewNumberLiteral(x_val + y_val);
+            continue;
+          case Token::SUB:
+            x = NewNumberLiteral(x_val - y_val);
+            continue;
+          case Token::MUL:
+            x = NewNumberLiteral(x_val * y_val);
+            continue;
+          case Token::DIV:
+            x = NewNumberLiteral(x_val / y_val);
+            continue;
+          case Token::BIT_OR:
+            x = NewNumberLiteral(DoubleToInt32(x_val) | DoubleToInt32(y_val));
+            continue;
+          case Token::BIT_AND:
+            x = NewNumberLiteral(DoubleToInt32(x_val) & DoubleToInt32(y_val));
+            continue;
+          case Token::BIT_XOR:
+            x = NewNumberLiteral(DoubleToInt32(x_val) ^ DoubleToInt32(y_val));
+            continue;
+          case Token::SHL: {
+            int value = DoubleToInt32(x_val) << (DoubleToInt32(y_val) & 0x1f);
+            x = NewNumberLiteral(value);
+            continue;
+          }
+          case Token::SHR: {
+            uint32_t shift = DoubleToInt32(y_val) & 0x1f;
+            uint32_t value = DoubleToUint32(x_val) >> shift;
+            x = NewNumberLiteral(value);
+            continue;
+          }
+          case Token::SAR: {
+            uint32_t shift = DoubleToInt32(y_val) & 0x1f;
+            int value = ArithmeticShiftRight(DoubleToInt32(x_val), shift);
+            x = NewNumberLiteral(value);
+            continue;
+          }
+          default:
+            break;
+        }
+      }
+
+      // Convert constant divisions to multiplications for speed.
+      if (op == Token::DIV &&
+          y && y->AsLiteral() && y->AsLiteral()->handle()->IsNumber()) {
+        double y_val = y->AsLiteral()->handle()->Number();
+        int64_t y_int = static_cast<int64_t>(y_val);
+        // There are rounding issues with this optimization, but they don't
+        // apply if the divisor has a reciprocal that can be precisely
+        // represented as a floating point number.  This is the case
+        // if the number is an integer power of 2.  Negative integer powers of
+        // 2 work too, but for -2, -1, 1 and 2 we don't do the strength
+        // reduction because the inlined optimistic idiv has a reasonable
+        // chance of succeeding by producing a Smi answer with no remainder.
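+        // For example, 'x / 8' becomes 'x * 0.125' since 0.125 is exactly
+        // representable, while 'x / 3' is left alone.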
+        if (static_cast<double>(y_int) == y_val &&
+            (IsPowerOf2(y_int) || IsPowerOf2(-y_int)) &&
+            (y_int > 2 || y_int < -2)) {
+          y = NewNumberLiteral(1 / y_val);
+          op = Token::MUL;
+        }
+      }
+
+      // For now we distinguish between comparisons and other binary
+      // operations.  (We could combine the two and get rid of this
+      // code and an AST node eventually.)
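+      // For example, 'a != b' is built as '!(a == b)' below.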
+      if (Token::IsCompareOp(op)) {
+        // We have a comparison.
+        Token::Value cmp = op;
+        switch (op) {
+          case Token::NE: cmp = Token::EQ; break;
+          case Token::NE_STRICT: cmp = Token::EQ_STRICT; break;
+          default: break;
+        }
+        x = NEW(CompareOperation(cmp, x, y));
+        if (cmp != op) {
+          // The comparison was negated - add a NOT.
+          x = NEW(UnaryOperation(Token::NOT, x));
+        }
+
+      } else {
+        // We have a "normal" binary operation.
+        x = NEW(BinaryOperation(op, x, y));
+      }
+    }
+  }
+  return x;
+}
+
+
+Expression* Parser::ParseUnaryExpression(bool* ok) {
+  // UnaryExpression ::
+  //   PostfixExpression
+  //   'delete' UnaryExpression
+  //   'void' UnaryExpression
+  //   'typeof' UnaryExpression
+  //   '++' UnaryExpression
+  //   '--' UnaryExpression
+  //   '+' UnaryExpression
+  //   '-' UnaryExpression
+  //   '~' UnaryExpression
+  //   '!' UnaryExpression
+
+  Token::Value op = peek();
+  if (Token::IsUnaryOp(op)) {
+    op = Next();
+    Expression* expression = ParseUnaryExpression(CHECK_OK);
+
+    // Compute some expressions involving only number literals.
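+    // For example, '-1' becomes a single number literal and '~0' folds
+    // to the literal -1.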
+    if (expression != NULL && expression->AsLiteral() &&
+        expression->AsLiteral()->handle()->IsNumber()) {
+      double value = expression->AsLiteral()->handle()->Number();
+      switch (op) {
+        case Token::ADD:
+          return expression;
+        case Token::SUB:
+          return NewNumberLiteral(-value);
+        case Token::BIT_NOT:
+          return NewNumberLiteral(~DoubleToInt32(value));
+        default: break;
+      }
+    }
+
+    return NEW(UnaryOperation(op, expression));
+
+  } else if (Token::IsCountOp(op)) {
+    op = Next();
+    Expression* expression = ParseUnaryExpression(CHECK_OK);
+    // Signal a reference error if the expression is an invalid
+    // left-hand side expression.  We could report this as a syntax
+    // error here but for compatibility with JSC we choose to report the
+    // error at runtime.
+    if (expression == NULL || !expression->IsValidLeftHandSide()) {
+      Handle<String> type = Factory::invalid_lhs_in_prefix_op_symbol();
+      expression = NewThrowReferenceError(type);
+    }
+    return NEW(CountOperation(true /* prefix */, op, expression));
+
+  } else {
+    return ParsePostfixExpression(ok);
+  }
+}
+
+
+Expression* Parser::ParsePostfixExpression(bool* ok) {
+  // PostfixExpression ::
+  //   LeftHandSideExpression ('++' | '--')?
+
+  Expression* expression = ParseLeftHandSideExpression(CHECK_OK);
+  if (!scanner_.has_line_terminator_before_next() && Token::IsCountOp(peek())) {
+    // Signal a reference error if the expression is an invalid
+    // left-hand side expression.  We could report this as a syntax
+    // error here but for compatibility with JSC we choose to report the
+    // error at runtime.
+    if (expression == NULL || !expression->IsValidLeftHandSide()) {
+      Handle<String> type = Factory::invalid_lhs_in_postfix_op_symbol();
+      expression = NewThrowReferenceError(type);
+    }
+    Token::Value next = Next();
+    expression = NEW(CountOperation(false /* postfix */, next, expression));
+  }
+  return expression;
+}
+
+
+Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
+  // LeftHandSideExpression ::
+  //   (NewExpression | MemberExpression) ...
+
+  Expression* result;
+  if (peek() == Token::NEW) {
+    result = ParseNewExpression(CHECK_OK);
+  } else {
+    result = ParseMemberExpression(CHECK_OK);
+  }
+
+  while (true) {
+    switch (peek()) {
+      case Token::LBRACK: {
+        Consume(Token::LBRACK);
+        int pos = scanner().location().beg_pos;
+        Expression* index = ParseExpression(true, CHECK_OK);
+        result = factory()->NewProperty(result, index, pos);
+        Expect(Token::RBRACK, CHECK_OK);
+        break;
+      }
+
+      case Token::LPAREN: {
+        int pos = scanner().location().beg_pos;
+        ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
+
+        // Keep track of eval() calls since they disable all local variable
+        // optimizations.
+        // The calls that need special treatment are the
+        // direct (i.e. not aliased) eval calls. These calls are all of the
+        // form eval(...) with no explicit receiver object where eval is not
+        // declared in the current scope chain. These calls are marked as
+        // potentially direct eval calls. Whether they are actually direct calls
+        // to eval is determined at run time.
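+        // For example, 'eval(s)' in a scope with no declaration of 'eval'
+        // is marked as potentially direct, while 'var e = eval; e(s)' is
+        // an aliased call and needs no special treatment.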
+        if (!is_pre_parsing_) {
+          VariableProxy* callee = result->AsVariableProxy();
+          if (callee != NULL && callee->IsVariable(Factory::eval_symbol())) {
+            Handle<String> name = callee->name();
+            Variable* var = top_scope_->Lookup(name);
+            if (var == NULL) {
+              top_scope_->RecordEvalCall();
+            }
+          }
+        }
+        result = factory()->NewCall(result, args, pos);
+        break;
+      }
+
+      case Token::PERIOD: {
+        Consume(Token::PERIOD);
+        int pos = scanner().location().beg_pos;
+        Handle<String> name = ParseIdentifier(CHECK_OK);
+        result = factory()->NewProperty(result, NEW(Literal(name)), pos);
+        break;
+      }
+
+      default:
+        return result;
+    }
+  }
+}
+
+
+Expression* Parser::ParseNewPrefix(PositionStack* stack, bool* ok) {
+  // NewExpression ::
+  //   ('new')+ MemberExpression
+
+  // The grammar for new expressions is pretty warped. The keyword
+  // 'new' can either be a part of the new expression (where it isn't
+  // followed by an argument list) or a part of the member expression,
+  // where it must be followed by an argument list. To accommodate
+  // this, we parse the 'new' keywords greedily and keep track of how
+  // many we have parsed. This information is then passed on to the
+  // member expression parser, which is only allowed to match argument
+  // lists as long as it has 'new' prefixes left.
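+  // For example, 'new Foo()' matches the argument list with the 'new',
+  // while 'new Foo' has none, so the pending 'new' prefix is completed
+  // with an empty argument list below.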
+  Expect(Token::NEW, CHECK_OK);
+  PositionStack::Element pos(stack, scanner().location().beg_pos);
+
+  Expression* result;
+  if (peek() == Token::NEW) {
+    result = ParseNewPrefix(stack, CHECK_OK);
+  } else {
+    result = ParseMemberWithNewPrefixesExpression(stack, CHECK_OK);
+  }
+
+  if (!stack->is_empty()) {
+    int last = stack->pop();
+    result = NEW(CallNew(result, new ZoneList<Expression*>(0), last));
+  }
+  return result;
+}
+
+
+Expression* Parser::ParseNewExpression(bool* ok) {
+  PositionStack stack(ok);
+  return ParseNewPrefix(&stack, ok);
+}
+
+
+Expression* Parser::ParseMemberExpression(bool* ok) {
+  return ParseMemberWithNewPrefixesExpression(NULL, ok);
+}
+
+
+Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
+                                                         bool* ok) {
+  // MemberExpression ::
+  //   (PrimaryExpression | FunctionLiteral)
+  //     ('[' Expression ']' | '.' Identifier | Arguments)*
+
+  // Parse the initial primary or function expression.
+  Expression* result = NULL;
+  if (peek() == Token::FUNCTION) {
+    Expect(Token::FUNCTION, CHECK_OK);
+    int function_token_position = scanner().location().beg_pos;
+    Handle<String> name;
+    if (peek() == Token::IDENTIFIER) name = ParseIdentifier(CHECK_OK);
+    result = ParseFunctionLiteral(name, function_token_position,
+                                  NESTED, CHECK_OK);
+  } else {
+    result = ParsePrimaryExpression(CHECK_OK);
+  }
+
+  while (true) {
+    switch (peek()) {
+      case Token::LBRACK: {
+        Consume(Token::LBRACK);
+        int pos = scanner().location().beg_pos;
+        Expression* index = ParseExpression(true, CHECK_OK);
+        result = factory()->NewProperty(result, index, pos);
+        Expect(Token::RBRACK, CHECK_OK);
+        break;
+      }
+      case Token::PERIOD: {
+        Consume(Token::PERIOD);
+        int pos = scanner().location().beg_pos;
+        Handle<String> name = ParseIdentifier(CHECK_OK);
+        result = factory()->NewProperty(result, NEW(Literal(name)), pos);
+        break;
+      }
+      case Token::LPAREN: {
+        if ((stack == NULL) || stack->is_empty()) return result;
+        // Consume one of the new prefixes (already parsed).
+        ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
+        int last = stack->pop();
+        result = NEW(CallNew(result, args, last));
+        break;
+      }
+      default:
+        return result;
+    }
+  }
+}
+
+
+DebuggerStatement* Parser::ParseDebuggerStatement(bool* ok) {
+  // In ECMA-262 'debugger' is defined as a reserved keyword. In some browser
+  // contexts this is used as a statement which invokes the debugger as if a
+  // break point is present.
+  // DebuggerStatement ::
+  //   'debugger' ';'
+
+  Expect(Token::DEBUGGER, CHECK_OK);
+  ExpectSemicolon(CHECK_OK);
+  return NEW(DebuggerStatement());
+}
+
+
+void Parser::ReportUnexpectedToken(Token::Value token) {
+  // We don't report stack overflows here, to avoid increasing the
+  // stack depth even further.  Instead we report it after parsing is
+  // over, in ParseProgram.
+  if (token == Token::ILLEGAL && scanner().stack_overflow())
+    return;
+  // Four of the tokens are treated specially
+  switch (token) {
+  case Token::EOS:
+    return ReportMessage("unexpected_eos", Vector<const char*>::empty());
+  case Token::NUMBER:
+    return ReportMessage("unexpected_token_number",
+                         Vector<const char*>::empty());
+  case Token::STRING:
+    return ReportMessage("unexpected_token_string",
+                         Vector<const char*>::empty());
+  case Token::IDENTIFIER:
+    return ReportMessage("unexpected_token_identifier",
+                         Vector<const char*>::empty());
+  default:
+    const char* name = Token::String(token);
+    ASSERT(name != NULL);
+    ReportMessage("unexpected_token", Vector<const char*>(&name, 1));
+  }
+}
+
+
+Expression* Parser::ParsePrimaryExpression(bool* ok) {
+  // PrimaryExpression ::
+  //   'this'
+  //   'null'
+  //   'true'
+  //   'false'
+  //   Identifier
+  //   Number
+  //   String
+  //   ArrayLiteral
+  //   ObjectLiteral
+  //   RegExpLiteral
+  //   '(' Expression ')'
+
+  Expression* result = NULL;
+  switch (peek()) {
+    case Token::THIS: {
+      Consume(Token::THIS);
+      if (is_pre_parsing_) {
+        result = VariableProxySentinel::this_proxy();
+      } else {
+        VariableProxy* recv = top_scope_->receiver();
+        recv->var_uses()->RecordRead(1);
+        result = recv;
+      }
+      break;
+    }
+
+    case Token::NULL_LITERAL:
+      Consume(Token::NULL_LITERAL);
+      result = NEW(Literal(Factory::null_value()));
+      break;
+
+    case Token::TRUE_LITERAL:
+      Consume(Token::TRUE_LITERAL);
+      result = NEW(Literal(Factory::true_value()));
+      break;
+
+    case Token::FALSE_LITERAL:
+      Consume(Token::FALSE_LITERAL);
+      result = NEW(Literal(Factory::false_value()));
+      break;
+
+    case Token::IDENTIFIER: {
+      Handle<String> name = ParseIdentifier(CHECK_OK);
+      if (is_pre_parsing_) {
+        result = VariableProxySentinel::identifier_proxy();
+      } else {
+        result = top_scope_->NewUnresolved(name, inside_with());
+      }
+      break;
+    }
+
+    case Token::NUMBER: {
+      Consume(Token::NUMBER);
+      double value =
+        StringToDouble(scanner_.literal_string(), ALLOW_HEX | ALLOW_OCTALS);
+      result = NewNumberLiteral(value);
+      break;
+    }
+
+    case Token::STRING: {
+      Consume(Token::STRING);
+      Handle<String> symbol =
+          factory()->LookupSymbol(scanner_.literal_string(),
+                                  scanner_.literal_length());
+      result = NEW(Literal(symbol));
+      break;
+    }
+
+    case Token::ASSIGN_DIV:
+      result = ParseRegExpLiteral(true, CHECK_OK);
+      break;
+
+    case Token::DIV:
+      result = ParseRegExpLiteral(false, CHECK_OK);
+      break;
+
+    case Token::LBRACK:
+      result = ParseArrayLiteral(CHECK_OK);
+      break;
+
+    case Token::LBRACE:
+      result = ParseObjectLiteral(CHECK_OK);
+      break;
+
+    case Token::LPAREN:
+      Consume(Token::LPAREN);
+      result = ParseExpression(true, CHECK_OK);
+      Expect(Token::RPAREN, CHECK_OK);
+      break;
+
+    case Token::MOD:
+      if (allow_natives_syntax_ || extension_ != NULL) {
+        result = ParseV8Intrinsic(CHECK_OK);
+        break;
+      }
+      // If we're not allowing special syntax we fall through to the
+      // default case.
+
+    default: {
+      Token::Value tok = peek();
+      // Token::Peek returns the value of the next token but
+      // location() gives info about the current token.
+      // Therefore, we need to read ahead to the next token.
+      Next();
+      ReportUnexpectedToken(tok);
+      *ok = false;
+      return NULL;
+    }
+  }
+
+  return result;
+}
+
+
+Expression* Parser::ParseArrayLiteral(bool* ok) {
+  // ArrayLiteral ::
+  //   '[' Expression? (',' Expression?)* ']'
+
+  ZoneListWrapper<Expression> values = factory()->NewList<Expression>(4);
+  Expect(Token::LBRACK, CHECK_OK);
+  while (peek() != Token::RBRACK) {
+    Expression* elem;
+    if (peek() == Token::COMMA) {
+      elem = GetLiteralTheHole();
+    } else {
+      elem = ParseAssignmentExpression(true, CHECK_OK);
+    }
+    values.Add(elem);
+    if (peek() != Token::RBRACK) {
+      Expect(Token::COMMA, CHECK_OK);
+    }
+  }
+  Expect(Token::RBRACK, CHECK_OK);
+
+  // Update the scope information before the pre-parsing bailout.
+  temp_scope_->set_contains_array_literal();
+  int literal_index = temp_scope_->NextMaterializedLiteralIndex();
+
+  if (is_pre_parsing_) return NULL;
+
+  // Allocate a fixed array with all the literals.
+  Handle<FixedArray> literals =
+      Factory::NewFixedArray(values.length(), TENURED);
+
+  // Fill in the literals.
+  bool is_simple = true;
+  int depth = 1;
+  for (int i = 0; i < values.length(); i++) {
+    MaterializedLiteral* m_literal = values.at(i)->AsMaterializedLiteral();
+    if (m_literal != NULL && m_literal->depth() + 1 > depth) {
+      depth = m_literal->depth() + 1;
+    }
+    Handle<Object> boilerplate_value = GetBoilerplateValue(values.at(i));
+    if (boilerplate_value->IsUndefined()) {
+      literals->set_the_hole(i);
+      is_simple = false;
+    } else {
+      literals->set(i, *boilerplate_value);
+    }
+  }
+
+  return NEW(ArrayLiteral(literals, values.elements(),
+                          literal_index, is_simple, depth));
+}
+
+
+bool Parser::IsBoilerplateProperty(ObjectLiteral::Property* property) {
+  return property != NULL &&
+         property->kind() != ObjectLiteral::Property::PROTOTYPE;
+}
+
+
+bool CompileTimeValue::IsCompileTimeValue(Expression* expression) {
+  MaterializedLiteral* lit = expression->AsMaterializedLiteral();
+  return lit != NULL && lit->is_simple();
+}
+
+Handle<FixedArray> CompileTimeValue::GetValue(Expression* expression) {
+  ASSERT(IsCompileTimeValue(expression));
+  Handle<FixedArray> result = Factory::NewFixedArray(2, TENURED);
+  ObjectLiteral* object_literal = expression->AsObjectLiteral();
+  if (object_literal != NULL) {
+    ASSERT(object_literal->is_simple());
+    result->set(kTypeSlot, Smi::FromInt(OBJECT_LITERAL));
+    result->set(kElementsSlot, *object_literal->constant_properties());
+  } else {
+    ArrayLiteral* array_literal = expression->AsArrayLiteral();
+    ASSERT(array_literal != NULL && array_literal->is_simple());
+    result->set(kTypeSlot, Smi::FromInt(ARRAY_LITERAL));
+    result->set(kElementsSlot, *array_literal->literals());
+  }
+  return result;
+}
+
+
+CompileTimeValue::Type CompileTimeValue::GetType(Handle<FixedArray> value) {
+  Smi* type_value = Smi::cast(value->get(kTypeSlot));
+  return static_cast<Type>(type_value->value());
+}
+
+
+Handle<FixedArray> CompileTimeValue::GetElements(Handle<FixedArray> value) {
+  return Handle<FixedArray>(FixedArray::cast(value->get(kElementsSlot)));
+}
+
+
+Handle<Object> Parser::GetBoilerplateValue(Expression* expression) {
+  if (expression->AsLiteral() != NULL) {
+    return expression->AsLiteral()->handle();
+  }
+  if (CompileTimeValue::IsCompileTimeValue(expression)) {
+    return CompileTimeValue::GetValue(expression);
+  }
+  return Factory::undefined_value();
+}
+
+
+Expression* Parser::ParseObjectLiteral(bool* ok) {
+  // ObjectLiteral ::
+  //   '{' (
+  //       ((Identifier | String | Number) ':' AssignmentExpression)
+  //     | (('get' | 'set') FunctionLiteral)
+  //    )*[','] '}'
+
+  ZoneListWrapper<ObjectLiteral::Property> properties =
+      factory()->NewList<ObjectLiteral::Property>(4);
+  int number_of_boilerplate_properties = 0;
+
+  Expect(Token::LBRACE, CHECK_OK);
+  while (peek() != Token::RBRACE) {
+    Literal* key = NULL;
+    switch (peek()) {
+      case Token::IDENTIFIER: {
+        // Store identifier keys as literal symbols to avoid
+        // resolving them when compiling code for the object
+        // literal.
+        bool is_getter = false;
+        bool is_setter = false;
+        Handle<String> id =
+            ParseIdentifierOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
+        if (is_getter || is_setter) {
+          // Special handling of getter and setter syntax.
+          if (peek() == Token::IDENTIFIER) {
+            Handle<String> name = ParseIdentifier(CHECK_OK);
+            FunctionLiteral* value =
+                ParseFunctionLiteral(name, RelocInfo::kNoPosition,
+                                     DECLARATION, CHECK_OK);
+            ObjectLiteral::Property* property =
+                NEW(ObjectLiteral::Property(is_getter, value));
+            if (IsBoilerplateProperty(property))
+              number_of_boilerplate_properties++;
+            properties.Add(property);
+            if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
+            continue;  // restart the while
+          }
+        }
+        key = NEW(Literal(id));
+        break;
+      }
+
+      case Token::STRING: {
+        Consume(Token::STRING);
+        Handle<String> string =
+            factory()->LookupSymbol(scanner_.literal_string(),
+                                    scanner_.literal_length());
+        uint32_t index;
+        if (!string.is_null() && string->AsArrayIndex(&index)) {
+          key = NewNumberLiteral(index);
+        } else {
+          key = NEW(Literal(string));
+        }
+        break;
+      }
+
+      case Token::NUMBER: {
+        Consume(Token::NUMBER);
+        double value =
+          StringToDouble(scanner_.literal_string(), ALLOW_HEX | ALLOW_OCTALS);
+        key = NewNumberLiteral(value);
+        break;
+      }
+
+      default:
+        Expect(Token::RBRACE, CHECK_OK);
+        break;
+    }
+
+    Expect(Token::COLON, CHECK_OK);
+    Expression* value = ParseAssignmentExpression(true, CHECK_OK);
+
+    ObjectLiteral::Property* property =
+        NEW(ObjectLiteral::Property(key, value));
+
+    // Count CONSTANT or COMPUTED properties to maintain the enumeration order.
+    if (IsBoilerplateProperty(property)) number_of_boilerplate_properties++;
+    properties.Add(property);
+
+    // TODO(1240767): Consider allowing trailing comma.
+    if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
+  }
+  Expect(Token::RBRACE, CHECK_OK);
+  // Computation of literal_index must happen before pre parse bailout.
+  int literal_index = temp_scope_->NextMaterializedLiteralIndex();
+  if (is_pre_parsing_) return NULL;
+
+  Handle<FixedArray> constant_properties =
+      Factory::NewFixedArray(number_of_boilerplate_properties * 2, TENURED);
+  int position = 0;
+  bool is_simple = true;
+  int depth = 1;
+  for (int i = 0; i < properties.length(); i++) {
+    ObjectLiteral::Property* property = properties.at(i);
+    if (!IsBoilerplateProperty(property)) {
+      is_simple = false;
+      continue;
+    }
+    MaterializedLiteral* m_literal = property->value()->AsMaterializedLiteral();
+    if (m_literal != NULL && m_literal->depth() + 1 > depth) {
+      depth = m_literal->depth() + 1;
+    }
+
+    // Add CONSTANT and COMPUTED properties to boilerplate. Use undefined
+    // value for COMPUTED properties, the real value is filled in at
+    // runtime. The enumeration order is maintained.
+    Handle<Object> key = property->key()->handle();
+    Handle<Object> value = GetBoilerplateValue(property->value());
+    is_simple = is_simple && !value->IsUndefined();
+
+    // Add name, value pair to the fixed array.
+    constant_properties->set(position++, *key);
+    constant_properties->set(position++, *value);
+  }
+
+  return new ObjectLiteral(constant_properties,
+                           properties.elements(),
+                           literal_index,
+                           is_simple,
+                           depth);
+}
+
+
+Expression* Parser::ParseRegExpLiteral(bool seen_equal, bool* ok) {
+  if (!scanner_.ScanRegExpPattern(seen_equal)) {
+    Next();
+    ReportMessage("unterminated_regexp", Vector<const char*>::empty());
+    *ok = false;
+    return NULL;
+  }
+
+  int literal_index = temp_scope_->NextMaterializedLiteralIndex();
+
+  if (is_pre_parsing_) {
+    // If we're preparsing we just do all the parsing stuff without
+    // building anything.
+    if (!scanner_.ScanRegExpFlags()) {
+      Next();
+      ReportMessage("invalid_regexp_flags", Vector<const char*>::empty());
+      *ok = false;
+      return NULL;
+    }
+    Next();
+    return NULL;
+  }
+
+  Handle<String> js_pattern =
+      Factory::NewStringFromUtf8(scanner_.next_literal(), TENURED);
+  scanner_.ScanRegExpFlags();
+  Handle<String> js_flags =
+      Factory::NewStringFromUtf8(scanner_.next_literal(), TENURED);
+  Next();
+
+  return new RegExpLiteral(js_pattern, js_flags, literal_index);
+}
+
+
+ZoneList<Expression*>* Parser::ParseArguments(bool* ok) {
+  // Arguments ::
+  //   '(' (AssignmentExpression)*[','] ')'
+
+  ZoneListWrapper<Expression> result = factory()->NewList<Expression>(4);
+  Expect(Token::LPAREN, CHECK_OK);
+  bool done = (peek() == Token::RPAREN);
+  while (!done) {
+    Expression* argument = ParseAssignmentExpression(true, CHECK_OK);
+    result.Add(argument);
+    done = (peek() == Token::RPAREN);
+    if (!done) Expect(Token::COMMA, CHECK_OK);
+  }
+  Expect(Token::RPAREN, CHECK_OK);
+  return result.elements();
+}
+
+
+FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
+                                              int function_token_position,
+                                              FunctionLiteralType type,
+                                              bool* ok) {
+  // Function ::
+  //   '(' FormalParameterList? ')' '{' FunctionBody '}'
+
+  bool is_named = !var_name.is_null();
+
+  // The name associated with this function. If it's a function expression,
+  // this is the actual function name, otherwise this is the name of the
+  // variable declared and initialized with the function (expression). In
+  // that case, we don't have a function name (it's empty).
+  Handle<String> name = is_named ? var_name : factory()->EmptySymbol();
+  // The function name, if any.
+  Handle<String> function_name = factory()->EmptySymbol();
+  if (is_named && (type == EXPRESSION || type == NESTED)) {
+    function_name = name;
+  }
+
+  int num_parameters = 0;
+  // Parse function body.
+  { Scope::Type type = Scope::FUNCTION_SCOPE;
+    Scope* scope = factory()->NewScope(top_scope_, type, inside_with());
+    LexicalScope lexical_scope(this, scope);
+    TemporaryScope temp_scope(this);
+    top_scope_->SetScopeName(name);
+
+    //  FormalParameterList ::
+    //    '(' (Identifier)*[','] ')'
+    Expect(Token::LPAREN, CHECK_OK);
+    int start_pos = scanner_.location().beg_pos;
+    bool done = (peek() == Token::RPAREN);
+    while (!done) {
+      Handle<String> param_name = ParseIdentifier(CHECK_OK);
+      if (!is_pre_parsing_) {
+        top_scope_->AddParameter(top_scope_->DeclareLocal(param_name,
+                                                          Variable::VAR));
+        num_parameters++;
+      }
+      done = (peek() == Token::RPAREN);
+      if (!done) Expect(Token::COMMA, CHECK_OK);
+    }
+    Expect(Token::RPAREN, CHECK_OK);
+
+    Expect(Token::LBRACE, CHECK_OK);
+    ZoneListWrapper<Statement> body = factory()->NewList<Statement>(8);
+
+    // If we have a named function expression, we add a local variable
+    // declaration to the body of the function with the name of the
+    // function and let it refer to the function itself (closure).
+    // NOTE: We create a proxy and resolve it here so that in the
+    // future we can change the AST to only refer to VariableProxies
+    // instead of Variables and Proxies as is the case now.
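+    // For example, in 'var f = function g() { return g; }' the name 'g'
+    // refers to the function itself inside its own body, which is what
+    // the synthetic assignment added here provides.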
+    if (!function_name.is_null() && function_name->length() > 0) {
+      Variable* fvar = top_scope_->DeclareFunctionVar(function_name);
+      VariableProxy* fproxy =
+          top_scope_->NewUnresolved(function_name, inside_with());
+      fproxy->BindTo(fvar);
+      body.Add(new ExpressionStatement(
+                   new Assignment(Token::INIT_VAR, fproxy,
+                                  NEW(ThisFunction()),
+                                  RelocInfo::kNoPosition)));
+    }
+
+    // Determine if the function will be lazily compiled. The mode can
+    // only be PARSE_LAZILY if the --lazy flag is true.
+    bool is_lazily_compiled =
+        mode() == PARSE_LAZILY && top_scope_->HasTrivialOuterContext();
+
+    int materialized_literal_count;
+    int expected_property_count;
+    bool contains_array_literal;
+    bool only_this_property_assignments;
+    bool only_simple_this_property_assignments;
+    Handle<FixedArray> this_property_assignments;
+    if (is_lazily_compiled && pre_data() != NULL) {
+      FunctionEntry entry = pre_data()->GetFunctionEnd(start_pos);
+      int end_pos = entry.end_pos();
+      Counters::total_preparse_skipped.Increment(end_pos - start_pos);
+      scanner_.SeekForward(end_pos);
+      materialized_literal_count = entry.literal_count();
+      expected_property_count = entry.property_count();
+      only_this_property_assignments = false;
+      only_simple_this_property_assignments = false;
+      this_property_assignments = Factory::empty_fixed_array();
+      contains_array_literal = entry.contains_array_literal();
+    } else {
+      ParseSourceElements(&body, Token::RBRACE, CHECK_OK);
+      materialized_literal_count = temp_scope.materialized_literal_count();
+      expected_property_count = temp_scope.expected_property_count();
+      contains_array_literal = temp_scope.contains_array_literal();
+      only_this_property_assignments =
+          temp_scope.only_this_property_assignments();
+      only_simple_this_property_assignments =
+          temp_scope.only_simple_this_property_assignments();
+      this_property_assignments = temp_scope.this_property_assignments();
+    }
+
+    Expect(Token::RBRACE, CHECK_OK);
+    int end_pos = scanner_.location().end_pos;
+
+    FunctionEntry entry = log()->LogFunction(start_pos);
+    if (entry.is_valid()) {
+      entry.set_end_pos(end_pos);
+      entry.set_literal_count(materialized_literal_count);
+      entry.set_property_count(expected_property_count);
+      entry.set_contains_array_literal(contains_array_literal);
+    }
+
+    FunctionLiteral* function_literal =
+        NEW(FunctionLiteral(name,
+                            top_scope_,
+                            body.elements(),
+                            materialized_literal_count,
+                            contains_array_literal,
+                            expected_property_count,
+                            only_this_property_assignments,
+                            only_simple_this_property_assignments,
+                            this_property_assignments,
+                            num_parameters,
+                            start_pos,
+                            end_pos,
+                            function_name->length() > 0));
+    if (!is_pre_parsing_) {
+      function_literal->set_function_token_position(function_token_position);
+    }
+    return function_literal;
+  }
+}
+
+
+Expression* Parser::ParseV8Intrinsic(bool* ok) {
+  // CallRuntime ::
+  //   '%' Identifier Arguments
+
+  Expect(Token::MOD, CHECK_OK);
+  Handle<String> name = ParseIdentifier(CHECK_OK);
+  Runtime::Function* function =
+      Runtime::FunctionForName(scanner_.literal_string());
+  ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
+  if (function == NULL && extension_ != NULL) {
+    // The extension structures are only accessible while parsing the
+    // very first time, not when reparsing because of lazy compilation.
+    top_scope_->ForceEagerCompilation();
+  }
+
+  // Check for built-in macros.
+  if (!is_pre_parsing_) {
+    if (function == Runtime::FunctionForId(Runtime::kIS_VAR)) {
+      // %IS_VAR(x)
+      //   evaluates to x if x is a variable,
+      //   leads to a parse error otherwise
+      if (args->length() == 1 && args->at(0)->AsVariableProxy() != NULL) {
+        return args->at(0);
+      }
+      *ok = false;
+    // Check here for other macros.
+    // } else if (function == Runtime::FunctionForId(Runtime::kIS_VAR)) {
+    //   ...
+    }
+
+    if (!*ok) {
+      // We found a macro but it failed.
+      ReportMessage("unable_to_parse", Vector<const char*>::empty());
+      return NULL;
+    }
+  }
+
+  // Otherwise we have a runtime call.
+  return NEW(CallRuntime(name, function, args));
+}
+
+
+void Parser::Consume(Token::Value token) {
+  Token::Value next = Next();
+  USE(next);
+  USE(token);
+  ASSERT(next == token);
+}
+
+
+void Parser::Expect(Token::Value token, bool* ok) {
+  Token::Value next = Next();
+  if (next == token) return;
+  ReportUnexpectedToken(next);
+  *ok = false;
+}
+
+
+void Parser::ExpectSemicolon(bool* ok) {
+  // Check for automatic semicolon insertion according to
+  // the rules given in ECMA-262, section 7.9, page 21.
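+  // For example, in 'x = 1\ny = 2' the line terminator allows the missing
+  // ';' after 'x = 1'; a closing '}' or the end of input does the same.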
+  Token::Value tok = peek();
+  if (tok == Token::SEMICOLON) {
+    Next();
+    return;
+  }
+  if (scanner_.has_line_terminator_before_next() ||
+      tok == Token::RBRACE ||
+      tok == Token::EOS) {
+    return;
+  }
+  Expect(Token::SEMICOLON, ok);
+}
+
+
+Literal* Parser::GetLiteralUndefined() {
+  return NEW(Literal(Factory::undefined_value()));
+}
+
+
+Literal* Parser::GetLiteralTheHole() {
+  return NEW(Literal(Factory::the_hole_value()));
+}
+
+
+Literal* Parser::GetLiteralNumber(double value) {
+  return NewNumberLiteral(value);
+}
+
+
+Handle<String> Parser::ParseIdentifier(bool* ok) {
+  Expect(Token::IDENTIFIER, ok);
+  if (!*ok) return Handle<String>();
+  return factory()->LookupSymbol(scanner_.literal_string(),
+                                 scanner_.literal_length());
+}
+
+// This function reads an identifier and determines whether or not it
+// is 'get' or 'set'.  The reason for not using ParseIdentifier and
+// checking on the output is that this involves heap allocation which
+// we can't do during preparsing.
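+// For example, when parsing '{ get answer() { return 42; } }' the token
+// 'get' is recognized here and the getter syntax is then handled by
+// ParseObjectLiteral.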
+Handle<String> Parser::ParseIdentifierOrGetOrSet(bool* is_get,
+                                                 bool* is_set,
+                                                 bool* ok) {
+  Expect(Token::IDENTIFIER, ok);
+  if (!*ok) return Handle<String>();
+  if (scanner_.literal_length() == 3) {
+    const char* token = scanner_.literal_string();
+    *is_get = strcmp(token, "get") == 0;
+    *is_set = !*is_get && strcmp(token, "set") == 0;
+  }
+  return factory()->LookupSymbol(scanner_.literal_string(),
+                                 scanner_.literal_length());
+}
+
+
+// ----------------------------------------------------------------------------
+// Parser support
+
+
+bool Parser::TargetStackContainsLabel(Handle<String> label) {
+  for (Target* t = target_stack_; t != NULL; t = t->previous()) {
+    BreakableStatement* stat = t->node()->AsBreakableStatement();
+    if (stat != NULL && ContainsLabel(stat->labels(), label))
+      return true;
+  }
+  return false;
+}
+
+
+BreakableStatement* Parser::LookupBreakTarget(Handle<String> label, bool* ok) {
+  bool anonymous = label.is_null();
+  for (Target* t = target_stack_; t != NULL; t = t->previous()) {
+    BreakableStatement* stat = t->node()->AsBreakableStatement();
+    if (stat == NULL) continue;
+    if ((anonymous && stat->is_target_for_anonymous()) ||
+        (!anonymous && ContainsLabel(stat->labels(), label))) {
+      RegisterTargetUse(stat->break_target(), t->previous());
+      return stat;
+    }
+  }
+  return NULL;
+}
+
+
+IterationStatement* Parser::LookupContinueTarget(Handle<String> label,
+                                                 bool* ok) {
+  bool anonymous = label.is_null();
+  for (Target* t = target_stack_; t != NULL; t = t->previous()) {
+    IterationStatement* stat = t->node()->AsIterationStatement();
+    if (stat == NULL) continue;
+
+    ASSERT(stat->is_target_for_anonymous());
+    if (anonymous || ContainsLabel(stat->labels(), label)) {
+      RegisterTargetUse(stat->continue_target(), t->previous());
+      return stat;
+    }
+  }
+  return NULL;
+}
+
+
+void Parser::RegisterTargetUse(BreakTarget* target, Target* stop) {
+  // Register that a break target found at the given stop in the
+  // target stack has been used from the top of the target stack. Add
+  // the break target to any TargetCollectors passed on the stack.
+  for (Target* t = target_stack_; t != stop; t = t->previous()) {
+    TargetCollector* collector = t->node()->AsTargetCollector();
+    if (collector != NULL) collector->AddTarget(target);
+  }
+}
+
+
+Literal* Parser::NewNumberLiteral(double number) {
+  return NEW(Literal(Factory::NewNumber(number, TENURED)));
+}
+
+
+Expression* Parser::NewThrowReferenceError(Handle<String> type) {
+  return NewThrowError(Factory::MakeReferenceError_symbol(),
+                       type, HandleVector<Object>(NULL, 0));
+}
+
+
+Expression* Parser::NewThrowSyntaxError(Handle<String> type,
+                                        Handle<Object> first) {
+  int argc = first.is_null() ? 0 : 1;
+  Vector< Handle<Object> > arguments = HandleVector<Object>(&first, argc);
+  return NewThrowError(Factory::MakeSyntaxError_symbol(), type, arguments);
+}
+
+
+Expression* Parser::NewThrowTypeError(Handle<String> type,
+                                      Handle<Object> first,
+                                      Handle<Object> second) {
+  ASSERT(!first.is_null() && !second.is_null());
+  Handle<Object> elements[] = { first, second };
+  Vector< Handle<Object> > arguments =
+      HandleVector<Object>(elements, ARRAY_SIZE(elements));
+  return NewThrowError(Factory::MakeTypeError_symbol(), type, arguments);
+}
+
+
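+// Builds an expression that throws the result of calling the given error
+// constructor (a runtime function) with the message type and the arguments
+// packed into a JSArray. Returns NULL when only pre-parsing.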
+Expression* Parser::NewThrowError(Handle<String> constructor,
+                                  Handle<String> type,
+                                  Vector< Handle<Object> > arguments) {
+  if (is_pre_parsing_) return NULL;
+
+  int argc = arguments.length();
+  Handle<JSArray> array = Factory::NewJSArray(argc, TENURED);
+  ASSERT(array->IsJSArray() && array->HasFastElements());
+  for (int i = 0; i < argc; i++) {
+    Handle<Object> element = arguments[i];
+    if (!element.is_null()) {
+      array->SetFastElement(i, *element);
+    }
+  }
+  ZoneList<Expression*>* args = new ZoneList<Expression*>(2);
+  args->Add(new Literal(type));
+  args->Add(new Literal(array));
+  return new Throw(new CallRuntime(constructor, NULL, args),
+                   scanner().location().beg_pos);
+}
+
+
+// ----------------------------------------------------------------------------
+// Regular expressions
+
+
+RegExpParser::RegExpParser(FlatStringReader* in,
+                           Handle<String>* error,
+                           bool multiline)
+  : current_(kEndMarker),
+    has_more_(true),
+    multiline_(multiline),
+    next_pos_(0),
+    in_(in),
+    error_(error),
+    simple_(false),
+    contains_anchor_(false),
+    captures_(NULL),
+    is_scanned_for_captures_(false),
+    capture_count_(0),
+    failed_(false) {
+  Advance(1);
+}
+
+
+uc32 RegExpParser::Next() {
+  if (has_next()) {
+    return in()->Get(next_pos_);
+  } else {
+    return kEndMarker;
+  }
+}
+
+
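+// Advances to the next character. Parsing is aborted with an error if the
+// C++ stack overflows or the zone allocates excessively while advancing.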
+void RegExpParser::Advance() {
+  if (next_pos_ < in()->length()) {
+    StackLimitCheck check;
+    if (check.HasOverflowed()) {
+      ReportError(CStrVector(Top::kStackOverflowMessage));
+    } else if (Zone::excess_allocation()) {
+      ReportError(CStrVector("Regular expression too large"));
+    } else {
+      current_ = in()->Get(next_pos_);
+      next_pos_++;
+    }
+  } else {
+    current_ = kEndMarker;
+    has_more_ = false;
+  }
+}
+
+
+void RegExpParser::Reset(int pos) {
+  next_pos_ = pos;
+  Advance();
+}
+
+
+void RegExpParser::Advance(int dist) {
+  for (int i = 0; i < dist; i++)
+    Advance();
+}
+
+
+bool RegExpParser::simple() {
+  return simple_;
+}
+
+RegExpTree* RegExpParser::ReportError(Vector<const char> message) {
+  failed_ = true;
+  *error_ = Factory::NewStringFromAscii(message, NOT_TENURED);
+  // Zip to the end to make sure that no more input is read.
+  current_ = kEndMarker;
+  next_pos_ = in()->length();
+  return NULL;
+}
+
+
+// Pattern ::
+//   Disjunction
+RegExpTree* RegExpParser::ParsePattern() {
+  RegExpTree* result = ParseDisjunction(CHECK_FAILED);
+  ASSERT(!has_more());
+  // If the result of parsing is a literal string atom, and it has the
+  // same length as the input, then the atom is identical to the input.
+  if (result->IsAtom() && result->AsAtom()->length() == in()->length()) {
+    simple_ = true;
+  }
+  return result;
+}
+
+
+// Disjunction ::
+//   Alternative
+//   Alternative | Disjunction
+// Alternative ::
+//   [empty]
+//   Term Alternative
+// Term ::
+//   Assertion
+//   Atom
+//   Atom Quantifier
+RegExpTree* RegExpParser::ParseDisjunction() {
+  // Used to store current state while parsing subexpressions.
+  RegExpParserState initial_state(NULL, INITIAL, 0);
+  RegExpParserState* stored_state = &initial_state;
+  // Cache the builder in a local variable for quick access.
+  RegExpBuilder* builder = initial_state.builder();
+  while (true) {
+    switch (current()) {
+    case kEndMarker:
+      if (stored_state->IsSubexpression()) {
+        // Inside a parenthesized group when hitting end of input.
+        ReportError(CStrVector("Unterminated group") CHECK_FAILED);
+      }
+      ASSERT_EQ(INITIAL, stored_state->group_type());
+      // Parsing completed successfully.
+      return builder->ToRegExp();
+    case ')': {
+      if (!stored_state->IsSubexpression()) {
+        ReportError(CStrVector("Unmatched ')'") CHECK_FAILED);
+      }
+      ASSERT_NE(INITIAL, stored_state->group_type());
+
+      Advance();
+      // End disjunction parsing and convert builder content to new single
+      // regexp atom.
+      RegExpTree* body = builder->ToRegExp();
+
+      int end_capture_index = captures_started();
+
+      int capture_index = stored_state->capture_index();
+      SubexpressionType type = stored_state->group_type();
+
+      // Restore previous state.
+      stored_state = stored_state->previous_state();
+      builder = stored_state->builder();
+
+      // Build result of subexpression.
+      if (type == CAPTURE) {
+        RegExpCapture* capture = new RegExpCapture(body, capture_index);
+        captures_->at(capture_index - 1) = capture;
+        body = capture;
+      } else if (type != GROUPING) {
+        ASSERT(type == POSITIVE_LOOKAHEAD || type == NEGATIVE_LOOKAHEAD);
+        bool is_positive = (type == POSITIVE_LOOKAHEAD);
+        body = new RegExpLookahead(body,
+                                   is_positive,
+                                   end_capture_index - capture_index,
+                                   capture_index);
+      }
+      builder->AddAtom(body);
+      break;
+    }
+    case '|': {
+      Advance();
+      builder->NewAlternative();
+      continue;
+    }
+    case '*':
+    case '+':
+    case '?':
+      return ReportError(CStrVector("Nothing to repeat"));
+    case '^': {
+      Advance();
+      if (multiline_) {
+        builder->AddAssertion(
+            new RegExpAssertion(RegExpAssertion::START_OF_LINE));
+      } else {
+        builder->AddAssertion(
+            new RegExpAssertion(RegExpAssertion::START_OF_INPUT));
+        set_contains_anchor();
+      }
+      continue;
+    }
+    case '$': {
+      Advance();
+      RegExpAssertion::Type type =
+          multiline_ ? RegExpAssertion::END_OF_LINE :
+                       RegExpAssertion::END_OF_INPUT;
+      builder->AddAssertion(new RegExpAssertion(type));
+      continue;
+    }
+    case '.': {
+      Advance();
+      // '.' matches everything except the line terminators:
+      // \x0a, \x0d, \u2028 and \u2029.
+      ZoneList<CharacterRange>* ranges = new ZoneList<CharacterRange>(2);
+      CharacterRange::AddClassEscape('.', ranges);
+      RegExpTree* atom = new RegExpCharacterClass(ranges, false);
+      builder->AddAtom(atom);
+      break;
+    }
+    case '(': {
+      SubexpressionType type = CAPTURE;
+      Advance();
+      if (current() == '?') {
+        switch (Next()) {
+          case ':':
+            type = GROUPING;
+            break;
+          case '=':
+            type = POSITIVE_LOOKAHEAD;
+            break;
+          case '!':
+            type = NEGATIVE_LOOKAHEAD;
+            break;
+          default:
+            ReportError(CStrVector("Invalid group") CHECK_FAILED);
+            break;
+        }
+        Advance(2);
+      } else {
+        if (captures_ == NULL) {
+          captures_ = new ZoneList<RegExpCapture*>(2);
+        }
+        if (captures_started() >= kMaxCaptures) {
+          ReportError(CStrVector("Too many captures") CHECK_FAILED);
+        }
+        captures_->Add(NULL);
+      }
+      // Store current state and begin new disjunction parsing.
+      stored_state = new RegExpParserState(stored_state,
+                                           type,
+                                           captures_started());
+      builder = stored_state->builder();
+      break;
+    }
+    case '[': {
+      RegExpTree* atom = ParseCharacterClass(CHECK_FAILED);
+      builder->AddAtom(atom);
+      break;
+    }
+    // Atom ::
+    //   \ AtomEscape
+    case '\\':
+      switch (Next()) {
+      case kEndMarker:
+        return ReportError(CStrVector("\\ at end of pattern"));
+      case 'b':
+        Advance(2);
+        builder->AddAssertion(
+            new RegExpAssertion(RegExpAssertion::BOUNDARY));
+        continue;
+      case 'B':
+        Advance(2);
+        builder->AddAssertion(
+            new RegExpAssertion(RegExpAssertion::NON_BOUNDARY));
+        continue;
+        // AtomEscape ::
+        //   CharacterClassEscape
+        //
+        // CharacterClassEscape :: one of
+        //   d D s S w W
+      case 'd': case 'D': case 's': case 'S': case 'w': case 'W': {
+        uc32 c = Next();
+        Advance(2);
+        ZoneList<CharacterRange>* ranges = new ZoneList<CharacterRange>(2);
+        CharacterRange::AddClassEscape(c, ranges);
+        RegExpTree* atom = new RegExpCharacterClass(ranges, false);
+        builder->AddAtom(atom);
+        break;
+      }
+      case '1': case '2': case '3': case '4': case '5': case '6':
+      case '7': case '8': case '9': {
+        int index = 0;
+        if (ParseBackReferenceIndex(&index)) {
+          RegExpCapture* capture = NULL;
+          if (captures_ != NULL && index <= captures_->length()) {
+            capture = captures_->at(index - 1);
+          }
+          if (capture == NULL) {
+            builder->AddEmpty();
+            break;
+          }
+          RegExpTree* atom = new RegExpBackReference(capture);
+          builder->AddAtom(atom);
+          break;
+        }
+        uc32 first_digit = Next();
+        if (first_digit == '8' || first_digit == '9') {
+          // Treat as identity escape
+          builder->AddCharacter(first_digit);
+          Advance(2);
+          break;
+        }
+      }
+      // FALLTHROUGH
+      case '0': {
+        Advance();
+        uc32 octal = ParseOctalLiteral();
+        builder->AddCharacter(octal);
+        break;
+      }
+      // ControlEscape :: one of
+      //   f n r t v
+      case 'f':
+        Advance(2);
+        builder->AddCharacter('\f');
+        break;
+      case 'n':
+        Advance(2);
+        builder->AddCharacter('\n');
+        break;
+      case 'r':
+        Advance(2);
+        builder->AddCharacter('\r');
+        break;
+      case 't':
+        Advance(2);
+        builder->AddCharacter('\t');
+        break;
+      case 'v':
+        Advance(2);
+        builder->AddCharacter('\v');
+        break;
+      case 'c': {
+        Advance(2);
+        uc32 control = ParseControlLetterEscape();
+        builder->AddCharacter(control);
+        break;
+      }
+      case 'x': {
+        Advance(2);
+        uc32 value;
+        if (ParseHexEscape(2, &value)) {
+          builder->AddCharacter(value);
+        } else {
+          builder->AddCharacter('x');
+        }
+        break;
+      }
+      case 'u': {
+        Advance(2);
+        uc32 value;
+        if (ParseHexEscape(4, &value)) {
+          builder->AddCharacter(value);
+        } else {
+          builder->AddCharacter('u');
+        }
+        break;
+      }
+      default:
+        // Identity escape.
+        builder->AddCharacter(Next());
+        Advance(2);
+        break;
+      }
+      break;
+    case '{': {
+      int dummy;
+      if (ParseIntervalQuantifier(&dummy, &dummy)) {
+        ReportError(CStrVector("Nothing to repeat") CHECK_FAILED);
+      }
+      // fallthrough
+    }
+    default:
+      builder->AddCharacter(current());
+      Advance();
+      break;
+    }  // end switch(current())
+
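+    // If the atom just added is followed by a quantifier, parse the
+    // quantifier here and attach it to that atom.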
+    int min;
+    int max;
+    switch (current()) {
+    // QuantifierPrefix ::
+    //   *
+    //   +
+    //   ?
+    //   {
+    case '*':
+      min = 0;
+      max = RegExpTree::kInfinity;
+      Advance();
+      break;
+    case '+':
+      min = 1;
+      max = RegExpTree::kInfinity;
+      Advance();
+      break;
+    case '?':
+      min = 0;
+      max = 1;
+      Advance();
+      break;
+    case '{':
+      if (ParseIntervalQuantifier(&min, &max)) {
+        if (max < min) {
+          ReportError(CStrVector("numbers out of order in {} quantifier.")
+                      CHECK_FAILED);
+        }
+        break;
+      } else {
+        continue;
+      }
+    default:
+      continue;
+    }
+    bool is_greedy = true;
+    if (current() == '?') {
+      is_greedy = false;
+      Advance();
+    }
+    builder->AddQuantifierToAtom(min, max, is_greedy);
+  }
+}
+
+class SourceCharacter {
+ public:
+  static bool Is(uc32 c) {
+    switch (c) {
+      // case ']': case '}':
+      // In SpiderMonkey and JSC these are treated as source characters,
+      // so we do too.
+      case '^': case '$': case '\\': case '.': case '*': case '+':
+      case '?': case '(': case ')': case '[': case '{': case '|':
+      case RegExpParser::kEndMarker:
+        return false;
+      default:
+        return true;
+    }
+  }
+};
+
+
+static unibrow::Predicate<SourceCharacter> source_character;
+
+
+static inline bool IsSourceCharacter(uc32 c) {
+  return source_character.get(c);
+}
+
+#ifdef DEBUG
+// Currently only used in an ASSERT.
+static bool IsSpecialClassEscape(uc32 c) {
+  switch (c) {
+    case 'd': case 'D':
+    case 's': case 'S':
+    case 'w': case 'W':
+      return true;
+    default:
+      return false;
+  }
+}
+#endif
+
+
+// In order to know whether an escape is a backreference or not we have to scan
+// the entire regexp and find the number of capturing parentheses.  However we
+// don't want to scan the regexp twice unless it is necessary.  This mini-parser
+// is called when needed.  It can see the difference between capturing and
+// noncapturing parentheses and can skip character classes and backslash-escaped
+// characters.
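+// For example, when /(a)\2(b)/ reaches the \2 only one capture has been
+// started, so the scan is needed to learn that the pattern contains two
+// capturing groups and \2 is a back reference rather than an octal escape.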
+void RegExpParser::ScanForCaptures() {
+  // Start with the number of captures started before the current position.
+  int capture_count = captures_started();
+  // Add count of captures after this position.
+  int n;
+  while ((n = current()) != kEndMarker) {
+    Advance();
+    switch (n) {
+      case '\\':
+        Advance();
+        break;
+      case '[': {
+        int c;
+        while ((c = current()) != kEndMarker) {
+          Advance();
+          if (c == '\\') {
+            Advance();
+          } else {
+            if (c == ']') break;
+          }
+        }
+        break;
+      }
+      case '(':
+        if (current() != '?') capture_count++;
+        break;
+    }
+  }
+  capture_count_ = capture_count;
+  is_scanned_for_captures_ = true;
+}
+
+
+bool RegExpParser::ParseBackReferenceIndex(int* index_out) {
+  ASSERT_EQ('\\', current());
+  ASSERT('1' <= Next() && Next() <= '9');
+  // Try to parse a decimal literal that is no greater than the total number
+  // of left capturing parentheses in the input.
+  int start = position();
+  int value = Next() - '0';
+  Advance(2);
+  while (true) {
+    uc32 c = current();
+    if (IsDecimalDigit(c)) {
+      value = 10 * value + (c - '0');
+      if (value > kMaxCaptures) {
+        Reset(start);
+        return false;
+      }
+      Advance();
+    } else {
+      break;
+    }
+  }
+  if (value > captures_started()) {
+    if (!is_scanned_for_captures_) {
+      int saved_position = position();
+      ScanForCaptures();
+      Reset(saved_position);
+    }
+    if (value > capture_count_) {
+      Reset(start);
+      return false;
+    }
+  }
+  *index_out = value;
+  return true;
+}
+
+
+// QuantifierPrefix ::
+//   { DecimalDigits }
+//   { DecimalDigits , }
+//   { DecimalDigits , DecimalDigits }
+//
+// Returns true if parsing succeeds, and sets the min_out and max_out
+// values. Values are truncated to RegExpTree::kInfinity if they overflow.
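+// For example, "{2,4}" yields min_out == 2 and max_out == 4, "{3}" yields
+// 3 for both, and "{3,}" yields max_out == RegExpTree::kInfinity.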
+bool RegExpParser::ParseIntervalQuantifier(int* min_out, int* max_out) {
+  ASSERT_EQ(current(), '{');
+  int start = position();
+  Advance();
+  int min = 0;
+  if (!IsDecimalDigit(current())) {
+    Reset(start);
+    return false;
+  }
+  while (IsDecimalDigit(current())) {
+    int next = current() - '0';
+    if (min > (RegExpTree::kInfinity - next) / 10) {
+      // Overflow. Skip past the remaining decimal digits and clamp the
+      // value to RegExpTree::kInfinity.
+      do {
+        Advance();
+      } while (IsDecimalDigit(current()));
+      min = RegExpTree::kInfinity;
+      break;
+    }
+    min = 10 * min + next;
+    Advance();
+  }
+  int max = 0;
+  if (current() == '}') {
+    max = min;
+    Advance();
+  } else if (current() == ',') {
+    Advance();
+    if (current() == '}') {
+      max = RegExpTree::kInfinity;
+      Advance();
+    } else {
+      while (IsDecimalDigit(current())) {
+        int next = current() - '0';
+        if (max > (RegExpTree::kInfinity - next) / 10) {
+          do {
+            Advance();
+          } while (IsDecimalDigit(current()));
+          max = RegExpTree::kInfinity;
+          break;
+        }
+        max = 10 * max + next;
+        Advance();
+      }
+      if (current() != '}') {
+        Reset(start);
+        return false;
+      }
+      Advance();
+    }
+  } else {
+    Reset(start);
+    return false;
+  }
+  *min_out = min;
+  *max_out = max;
+  return true;
+}
+
+
+// Upper and lower case letters differ by one bit.
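+// For example, 'a' is 0x61 and 'A' is 0x41; clearing bit 0x20 maps a lower
+// case letter to its upper case counterpart.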
+STATIC_CHECK(('a' ^ 'A') == 0x20);
+
+uc32 RegExpParser::ParseControlLetterEscape() {
+  if (!has_more())
+    return 'c';
+  uc32 letter = current() & ~(0x20);  // Collapse upper and lower case letters.
+  if (letter < 'A' || 'Z' < letter) {
+    // Non-spec error-correction: "\c" followed by non-control letter is
+    // interpreted as an IdentityEscape of 'c'.
+    return 'c';
+  }
+  Advance();
+  return letter & 0x1f;  // Remainder modulo 32, per specification.
+}
+
+
+uc32 RegExpParser::ParseOctalLiteral() {
+  ASSERT('0' <= current() && current() <= '7');
+  // For compatibility with some other browsers (not all), we parse
+  // up to three octal digits with a value below 256.
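+// For example, "\101" yields 0101 == 65 ('A'), while "\477" stops after
+// "\47" because consuming a third digit would produce a value above 255.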
+  uc32 value = current() - '0';
+  Advance();
+  if ('0' <= current() && current() <= '7') {
+    value = value * 8 + current() - '0';
+    Advance();
+    if (value < 32 && '0' <= current() && current() <= '7') {
+      value = value * 8 + current() - '0';
+      Advance();
+    }
+  }
+  return value;
+}
+
+
+bool RegExpParser::ParseHexEscape(int length, uc32 *value) {
+  int start = position();
+  uc32 val = 0;
+  bool done = false;
+  for (int i = 0; !done; i++) {
+    uc32 c = current();
+    int d = HexValue(c);
+    if (d < 0) {
+      Reset(start);
+      return false;
+    }
+    val = val * 16 + d;
+    Advance();
+    if (i == length - 1) {
+      done = true;
+    }
+  }
+  *value = val;
+  return true;
+}
+
+
+uc32 RegExpParser::ParseClassCharacterEscape() {
+  ASSERT(current() == '\\');
+  ASSERT(has_next() && !IsSpecialClassEscape(Next()));
+  Advance();
+  switch (current()) {
+    case 'b':
+      Advance();
+      return '\b';
+    // ControlEscape :: one of
+    //   f n r t v
+    case 'f':
+      Advance();
+      return '\f';
+    case 'n':
+      Advance();
+      return '\n';
+    case 'r':
+      Advance();
+      return '\r';
+    case 't':
+      Advance();
+      return '\t';
+    case 'v':
+      Advance();
+      return '\v';
+    case 'c':
+      Advance();
+      return ParseControlLetterEscape();
+    case '0': case '1': case '2': case '3': case '4': case '5':
+    case '6': case '7':
+      // For compatibility, we interpret a decimal escape that isn't
+      // a back reference (and therefore either \0 or not valid according
+      // to the specification) as a 1..3 digit octal character code.
+      return ParseOctalLiteral();
+    case 'x': {
+      Advance();
+      uc32 value;
+      if (ParseHexEscape(2, &value)) {
+        return value;
+      }
+      // If \x is not followed by a two-digit hexadecimal, treat it
+      // as an identity escape.
+      return 'x';
+    }
+    case 'u': {
+      Advance();
+      uc32 value;
+      if (ParseHexEscape(4, &value)) {
+        return value;
+      }
+      // If \u is not followed by a four-digit hexadecimal, treat it
+      // as an identity escape.
+      return 'u';
+    }
+    default: {
+      // Extended identity escape. We accept any character that hasn't
+      // been matched by a more specific case, not just the subset required
+      // by the ECMAScript specification.
+      uc32 result = current();
+      Advance();
+      return result;
+    }
+  }
+  return 0;
+}
+
+
+CharacterRange RegExpParser::ParseClassAtom(uc16* char_class) {
+  ASSERT_EQ(0, *char_class);
+  uc32 first = current();
+  if (first == '\\') {
+    switch (Next()) {
+      case 'w': case 'W': case 'd': case 'D': case 's': case 'S': {
+        *char_class = Next();
+        Advance(2);
+        return CharacterRange::Singleton(0);  // Return dummy value.
+      }
+      case kEndMarker:
+        return ReportError(CStrVector("\\ at end of pattern"));
+      default:
+        uc32 c = ParseClassCharacterEscape(CHECK_FAILED);
+        return CharacterRange::Singleton(c);
+    }
+  } else {
+    Advance();
+    return CharacterRange::Singleton(first);
+  }
+}
+
+
+RegExpTree* RegExpParser::ParseCharacterClass() {
+  static const char* kUnterminated = "Unterminated character class";
+  static const char* kRangeOutOfOrder = "Range out of order in character class";
+
+  ASSERT_EQ(current(), '[');
+  Advance();
+  bool is_negated = false;
+  if (current() == '^') {
+    is_negated = true;
+    Advance();
+  }
+  ZoneList<CharacterRange>* ranges = new ZoneList<CharacterRange>(2);
+  while (has_more() && current() != ']') {
+    uc16 char_class = 0;
+    CharacterRange first = ParseClassAtom(&char_class CHECK_FAILED);
+    if (char_class) {
+      CharacterRange::AddClassEscape(char_class, ranges);
+      continue;
+    }
+    if (current() == '-') {
+      Advance();
+      if (current() == kEndMarker) {
+        // If we reach the end we break out of the loop and let the
+        // following code report an error.
+        break;
+      } else if (current() == ']') {
+        ranges->Add(first);
+        ranges->Add(CharacterRange::Singleton('-'));
+        break;
+      }
+      CharacterRange next = ParseClassAtom(&char_class CHECK_FAILED);
+      if (char_class) {
+        ranges->Add(first);
+        ranges->Add(CharacterRange::Singleton('-'));
+        CharacterRange::AddClassEscape(char_class, ranges);
+        continue;
+      }
+      if (first.from() > next.to()) {
+        return ReportError(CStrVector(kRangeOutOfOrder) CHECK_FAILED);
+      }
+      ranges->Add(CharacterRange::Range(first.from(), next.to()));
+    } else {
+      ranges->Add(first);
+    }
+  }
+  if (!has_more()) {
+    return ReportError(CStrVector(kUnterminated) CHECK_FAILED);
+  }
+  Advance();
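+  // An empty class such as [] matches nothing and [^] matches everything;
+  // both are represented by negating a class that contains every character.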
+  if (ranges->length() == 0) {
+    ranges->Add(CharacterRange::Everything());
+    is_negated = !is_negated;
+  }
+  return new RegExpCharacterClass(ranges, is_negated);
+}
+
+
+// ----------------------------------------------------------------------------
+// The Parser interface.
+
+// MakeAST() is just a wrapper for the corresponding Parser calls
+// so we don't have to expose the entire Parser class in the .h file.
+
+static bool always_allow_natives_syntax = false;
+
+
+ParserMessage::~ParserMessage() {
+  for (int i = 0; i < args().length(); i++)
+    DeleteArray(args()[i]);
+  DeleteArray(args().start());
+}
+
+
+ScriptDataImpl::~ScriptDataImpl() {
+  store_.Dispose();
+}
+
+
+int ScriptDataImpl::Length() {
+  return store_.length();
+}
+
+
+unsigned* ScriptDataImpl::Data() {
+  return store_.start();
+}
+
+
+ScriptDataImpl* PreParse(Handle<String> source,
+                         unibrow::CharacterStream* stream,
+                         v8::Extension* extension) {
+  Handle<Script> no_script;
+  bool allow_natives_syntax =
+      always_allow_natives_syntax ||
+      FLAG_allow_natives_syntax ||
+      Bootstrapper::IsActive();
+  PreParser parser(no_script, allow_natives_syntax, extension);
+  if (!parser.PreParseProgram(source, stream)) return NULL;
+  // The list owns the backing store, so we need to clone the vector.
+  // That way, the result is exactly the right size rather than the
+  // roughly 50% over-allocation the backing store typically carries.
+  Vector<unsigned> store = parser.recorder()->store()->ToVector().Clone();
+  return new ScriptDataImpl(store);
+}
+
+
+bool ParseRegExp(FlatStringReader* input,
+                 bool multiline,
+                 RegExpCompileData* result) {
+  ASSERT(result != NULL);
+  RegExpParser parser(input, &result->error, multiline);
+  RegExpTree* tree = parser.ParsePattern();
+  if (parser.failed()) {
+    ASSERT(tree == NULL);
+    ASSERT(!result->error.is_null());
+  } else {
+    ASSERT(tree != NULL);
+    ASSERT(result->error.is_null());
+    result->tree = tree;
+    int capture_count = parser.captures_started();
+    result->simple = tree->IsAtom() && parser.simple() && capture_count == 0;
+    result->contains_anchor = parser.contains_anchor();
+    result->capture_count = capture_count;
+  }
+  return !parser.failed();
+}
+
+
+FunctionLiteral* MakeAST(bool compile_in_global_context,
+                         Handle<Script> script,
+                         v8::Extension* extension,
+                         ScriptDataImpl* pre_data) {
+  bool allow_natives_syntax =
+      always_allow_natives_syntax ||
+      FLAG_allow_natives_syntax ||
+      Bootstrapper::IsActive();
+  AstBuildingParser parser(script, allow_natives_syntax, extension, pre_data);
+  if (pre_data != NULL && pre_data->has_error()) {
+    Scanner::Location loc = pre_data->MessageLocation();
+    const char* message = pre_data->BuildMessage();
+    Vector<const char*> args = pre_data->BuildArgs();
+    parser.ReportMessageAt(loc, message, args);
+    DeleteArray(message);
+    for (int i = 0; i < args.length(); i++)
+      DeleteArray(args[i]);
+    DeleteArray(args.start());
+    return NULL;
+  }
+  Handle<String> source = Handle<String>(String::cast(script->source()));
+  SafeStringInputBuffer input(source.location());
+  FunctionLiteral* result = parser.ParseProgram(source,
+      &input, compile_in_global_context);
+  return result;
+}
+
+
+FunctionLiteral* MakeLazyAST(Handle<Script> script,
+                             Handle<String> name,
+                             int start_position,
+                             int end_position,
+                             bool is_expression) {
+  bool allow_natives_syntax_before = always_allow_natives_syntax;
+  always_allow_natives_syntax = true;
+  // Always allow natives syntax while parsing the lazy function body.
+  AstBuildingParser parser(script, true, NULL, NULL);
+  always_allow_natives_syntax = allow_natives_syntax_before;
+  // Parse the function by pulling the function source from the script source.
+  Handle<String> script_source(String::cast(script->source()));
+  FunctionLiteral* result =
+      parser.ParseLazy(SubString(script_source, start_position, end_position),
+                       name,
+                       start_position,
+                       is_expression);
+  return result;
+}
+
+
+#undef NEW
+
+
+} }  // namespace v8::internal
diff --git a/src/parser.h b/src/parser.h
new file mode 100644
index 0000000..86e1f74
--- /dev/null
+++ b/src/parser.h
@@ -0,0 +1,202 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PARSER_H_
+#define V8_PARSER_H_
+
+#include "scanner.h"
+#include "allocation.h"
+
+namespace v8 {
+namespace internal {
+
+
+class ParserMessage : public Malloced {
+ public:
+  ParserMessage(Scanner::Location loc, const char* message,
+                Vector<const char*> args)
+      : loc_(loc),
+        message_(message),
+        args_(args) { }
+  ~ParserMessage();
+  Scanner::Location location() { return loc_; }
+  const char* message() { return message_; }
+  Vector<const char*> args() { return args_; }
+ private:
+  Scanner::Location loc_;
+  const char* message_;
+  Vector<const char*> args_;
+};
+
+
+class FunctionEntry BASE_EMBEDDED {
+ public:
+  explicit FunctionEntry(Vector<unsigned> backing) : backing_(backing) { }
+  FunctionEntry() : backing_(Vector<unsigned>::empty()) { }
+
+  int start_pos() { return backing_[kStartPosOffset]; }
+  void set_start_pos(int value) { backing_[kStartPosOffset] = value; }
+
+  int end_pos() { return backing_[kEndPosOffset]; }
+  void set_end_pos(int value) { backing_[kEndPosOffset] = value; }
+
+  int literal_count() { return backing_[kLiteralCountOffset]; }
+  void set_literal_count(int value) { backing_[kLiteralCountOffset] = value; }
+
+  int property_count() { return backing_[kPropertyCountOffset]; }
+  void set_property_count(int value) { backing_[kPropertyCountOffset] = value; }
+
+  bool contains_array_literal() {
+    return backing_[kContainsArrayLiteralOffset] != 0;
+  }
+  void set_contains_array_literal(bool value) {
+    backing_[kContainsArrayLiteralOffset] = value ? 1 : 0;
+  }
+
+  bool is_valid() { return backing_.length() > 0; }
+
+  static const int kSize = 5;
+
+ private:
+  Vector<unsigned> backing_;
+  static const int kStartPosOffset = 0;
+  static const int kEndPosOffset = 1;
+  static const int kLiteralCountOffset = 2;
+  static const int kPropertyCountOffset = 3;
+  static const int kContainsArrayLiteralOffset = 4;
+};
+
+
+class ScriptDataImpl : public ScriptData {
+ public:
+  explicit ScriptDataImpl(Vector<unsigned> store)
+      : store_(store),
+        last_entry_(0) { }
+  virtual ~ScriptDataImpl();
+  virtual int Length();
+  virtual unsigned* Data();
+  FunctionEntry GetFunctionEnd(int start);
+  bool SanityCheck();
+
+  Scanner::Location MessageLocation();
+  const char* BuildMessage();
+  Vector<const char*> BuildArgs();
+
+  bool has_error() { return store_[kHasErrorOffset]; }
+  unsigned magic() { return store_[kMagicOffset]; }
+  unsigned version() { return store_[kVersionOffset]; }
+
+  static const unsigned kMagicNumber = 0xBadDead;
+  static const unsigned kCurrentVersion = 1;
+
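+  // store_ starts with a fixed header at the offsets below: a magic number,
+  // a version, an error flag and a size field.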
+  static const unsigned kMagicOffset = 0;
+  static const unsigned kVersionOffset = 1;
+  static const unsigned kHasErrorOffset = 2;
+  static const unsigned kSizeOffset = 3;
+  static const unsigned kHeaderSize = 4;
+
+ private:
+  unsigned Read(int position);
+  unsigned* ReadAddress(int position);
+  int EntryCount();
+  FunctionEntry nth(int n);
+
+  Vector<unsigned> store_;
+
+  // The last entry returned.  This is used to make lookup faster: the
+  // entry requested next is typically adjacent to it, so lookup is
+  // usually much faster if we start from the last entry.
+  int last_entry_;
+};
+
+
+// The parser: Takes a script and context information, and builds a
+// FunctionLiteral AST node. Returns NULL and deallocates any allocated
+// AST nodes if parsing failed.
+FunctionLiteral* MakeAST(bool compile_in_global_context,
+                         Handle<Script> script,
+                         v8::Extension* extension,
+                         ScriptDataImpl* pre_data);
+
+
+ScriptDataImpl* PreParse(Handle<String> source,
+                         unibrow::CharacterStream* stream,
+                         v8::Extension* extension);
+
+
+bool ParseRegExp(FlatStringReader* input,
+                 bool multiline,
+                 RegExpCompileData* result);
+
+
+// Support for doing lazy compilation. The script is the script containing the
+// full source where the function is declared. The start_position and
+// end_position specify the part of the script source which has the source
+// for the function declaration in the form:
+//
+//    (<formal parameters>) { <function body> }
+//
+// without any function keyword or name.
+//
+FunctionLiteral* MakeLazyAST(Handle<Script> script,
+                             Handle<String> name,
+                             int start_position,
+                             int end_position,
+                             bool is_expression);
+
+
+// Support for handling complex values (array and object literals) that
+// can be fully handled at compile time.
+class CompileTimeValue: public AllStatic {
+ public:
+  enum Type {
+    OBJECT_LITERAL,
+    ARRAY_LITERAL
+  };
+
+  static bool IsCompileTimeValue(Expression* expression);
+
+  // Get the value as a compile time value.
+  static Handle<FixedArray> GetValue(Expression* expression);
+
+  // Get the type of a compile time value returned by GetValue().
+  static Type GetType(Handle<FixedArray> value);
+
+  // Get the elements array of a compile time value returned by GetValue().
+  static Handle<FixedArray> GetElements(Handle<FixedArray> value);
+
+ private:
+  static const int kTypeSlot = 0;
+  static const int kElementsSlot = 1;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(CompileTimeValue);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_PARSER_H_
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
new file mode 100644
index 0000000..73d6eeb
--- /dev/null
+++ b/src/platform-freebsd.cc
@@ -0,0 +1,645 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Platform specific code for FreeBSD goes here. For the POSIX compatible parts
+// the implementation is in platform-posix.cc.
+
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/types.h>
+#include <sys/ucontext.h>
+#include <stdlib.h>
+
+#include <sys/types.h>  // mmap & munmap
+#include <sys/mman.h>   // mmap & munmap
+#include <sys/stat.h>   // open
+#include <sys/fcntl.h>  // open
+#include <unistd.h>     // getpagesize
+#include <execinfo.h>   // backtrace, backtrace_symbols
+#include <strings.h>    // index
+#include <errno.h>
+#include <stdarg.h>
+#include <limits.h>
+
+#undef MAP_TYPE
+
+#include "v8.h"
+
+#include "platform.h"
+
+
+namespace v8 {
+namespace internal {
+
+// 0 is never a valid thread id on FreeBSD since tids and pids share a
+// name space and pid 0 is used to kill the group (see man 2 kill).
+static const pthread_t kNoThread = (pthread_t) 0;
+
+
+double ceiling(double x) {
+    // Match the behavior of ceil() on OS X: values in (-1.0, 0.0)
+    // round up to -0.0.
+    if (-1.0 < x && x < 0.0) {
+        return -0.0;
+    } else {
+        return ceil(x);
+    }
+}
+
+
+void OS::Setup() {
+  // Seed the random number generator.
+  // Convert the current time to a 64-bit integer first, before converting it
+  // to an unsigned. Going directly can cause an overflow and the seed to be
+  // set to all ones. The seed will be identical for different instances that
+  // call this setup code within the same millisecond.
+  uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
+  srandom(static_cast<unsigned int>(seed));
+}
+
+
+double OS::nan_value() {
+  return NAN;
+}
+
+
+int OS::ActivationFrameAlignment() {
+  // 16 byte alignment on FreeBSD
+  return 16;
+}
+
+
+// We keep the lowest and highest addresses mapped as a quick way of
+// determining that pointers are outside the heap (used mostly in assertions
+// and verification).  The estimate is conservative, i.e., not all addresses in
+// 'allocated' space are actually allocated to our heap.  The range is
+// [lowest, highest), inclusive on the low end and exclusive on the high end.
+static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
+static void* highest_ever_allocated = reinterpret_cast<void*>(0);
+
+
+static void UpdateAllocatedSpaceLimits(void* address, int size) {
+  lowest_ever_allocated = Min(lowest_ever_allocated, address);
+  highest_ever_allocated =
+      Max(highest_ever_allocated,
+          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
+}
+
+
+bool OS::IsOutsideAllocatedSpace(void* address) {
+  return address < lowest_ever_allocated || address >= highest_ever_allocated;
+}
+
+
+size_t OS::AllocateAlignment() {
+  return getpagesize();
+}
+
+
+void* OS::Allocate(const size_t requested,
+                   size_t* allocated,
+                   bool executable) {
+  const size_t msize = RoundUp(requested, getpagesize());
+  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+
+  if (mbase == MAP_FAILED) {
+    LOG(StringEvent("OS::Allocate", "mmap failed"));
+    return NULL;
+  }
+  *allocated = msize;
+  UpdateAllocatedSpaceLimits(mbase, msize);
+  return mbase;
+}
+
+
+void OS::Free(void* buf, const size_t length) {
+  // TODO(1240712): munmap has a return value which is ignored here.
+  int result = munmap(buf, length);
+  USE(result);
+  ASSERT(result == 0);
+}
+
+
+#ifdef ENABLE_HEAP_PROTECTION
+
+void OS::Protect(void* address, size_t size) {
+  UNIMPLEMENTED();
+}
+
+
+void OS::Unprotect(void* address, size_t size, bool is_executable) {
+  UNIMPLEMENTED();
+}
+
+#endif
+
+
+void OS::Sleep(int milliseconds) {
+  unsigned int ms = static_cast<unsigned int>(milliseconds);
+  usleep(1000 * ms);
+}
+
+
+void OS::Abort() {
+  // Redirect to std abort to signal abnormal program termination.
+  abort();
+}
+
+
+void OS::DebugBreak() {
+#if defined(__arm__) || defined(__thumb__)
+  asm("bkpt 0");
+#else
+  asm("int $3");
+#endif
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+  PosixMemoryMappedFile(FILE* file, void* memory, int size)
+    : file_(file), memory_(memory), size_(size) { }
+  virtual ~PosixMemoryMappedFile();
+  virtual void* memory() { return memory_; }
+ private:
+  FILE* file_;
+  void* memory_;
+  int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+    void* initial) {
+  FILE* file = fopen(name, "w+");
+  if (file == NULL) return NULL;
+  int result = fwrite(initial, size, 1, file);
+  if (result < 1) {
+    fclose(file);
+    return NULL;
+  }
+  void* memory =
+      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+  if (memory_) munmap(memory_, size_);
+  fclose(file_);
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+static unsigned StringToLong(char* buffer) {
+  return static_cast<unsigned>(strtol(buffer, NULL, 16));  // NOLINT
+}
+#endif
+
+
+void OS::LogSharedLibraryAddresses() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
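+  // Parse /proc/self/maps. Each line starts with an 8-digit hex start
+  // address, a '-', an 8-digit hex end address and the permission bits;
+  // only executable mappings that name a file are logged.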
+  static const int MAP_LENGTH = 1024;
+  int fd = open("/proc/self/maps", O_RDONLY);
+  if (fd < 0) return;
+  while (true) {
+    char addr_buffer[11];
+    addr_buffer[0] = '0';
+    addr_buffer[1] = 'x';
+    addr_buffer[10] = 0;
+    int result = read(fd, addr_buffer + 2, 8);
+    if (result < 8) break;
+    unsigned start = StringToLong(addr_buffer);
+    result = read(fd, addr_buffer + 2, 1);
+    if (result < 1) break;
+    if (addr_buffer[2] != '-') break;
+    result = read(fd, addr_buffer + 2, 8);
+    if (result < 8) break;
+    unsigned end = StringToLong(addr_buffer);
+    char buffer[MAP_LENGTH];
+    int bytes_read = -1;
+    do {
+      bytes_read++;
+      if (bytes_read >= MAP_LENGTH - 1)
+        break;
+      result = read(fd, buffer + bytes_read, 1);
+      if (result < 1) break;
+    } while (buffer[bytes_read] != '\n');
+    buffer[bytes_read] = 0;
+    // Ignore mappings that are not executable.
+    if (buffer[3] != 'x') continue;
+    char* start_of_path = index(buffer, '/');
+    // There may be no filename in this line.  Skip to next.
+    if (start_of_path == NULL) continue;
+    buffer[bytes_read] = 0;
+    LOG(SharedLibraryEvent(start_of_path, start, end));
+  }
+  close(fd);
+#endif
+}
+
+
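+// Captures the current stack with backtrace() and fills in each frame's text
+// from backtrace_symbols(), truncating it to kStackWalkMaxTextLen.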
+int OS::StackWalk(Vector<OS::StackFrame> frames) {
+  int frames_size = frames.length();
+  void** addresses = NewArray<void*>(frames_size);
+
+  int frames_count = backtrace(addresses, frames_size);
+
+  char** symbols;
+  symbols = backtrace_symbols(addresses, frames_count);
+  if (symbols == NULL) {
+    DeleteArray(addresses);
+    return kStackWalkError;
+  }
+
+  for (int i = 0; i < frames_count; i++) {
+    frames[i].address = addresses[i];
+    // Format a text representation of the frame based on the information
+    // available.
+    SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
+             "%s",
+             symbols[i]);
+    // Make sure line termination is in place.
+    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
+  }
+
+  DeleteArray(addresses);
+  free(symbols);
+
+  return frames_count;
+}
+
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
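+// Reserves a range of virtual memory with an inaccessible, non-reserved
+// mapping (PROT_NONE, MAP_NORESERVE); pages are made usable later by Commit().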
+VirtualMemory::VirtualMemory(size_t size) {
+  address_ = mmap(NULL, size, PROT_NONE,
+                  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                  kMmapFd, kMmapFdOffset);
+  size_ = size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+  }
+}
+
+
+bool VirtualMemory::IsReserved() {
+  return address_ != MAP_FAILED;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
+  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(address, size, prot,
+                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+                         kMmapFd, kMmapFdOffset)) {
+    return false;
+  }
+
+  UpdateAllocatedSpaceLimits(address, size);
+  return true;
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  return mmap(address, size, PROT_NONE,
+              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
+              kMmapFd, kMmapFdOffset) != MAP_FAILED;
+}
+
+
+class ThreadHandle::PlatformData : public Malloced {
+ public:
+  explicit PlatformData(ThreadHandle::Kind kind) {
+    Initialize(kind);
+  }
+
+  void Initialize(ThreadHandle::Kind kind) {
+    switch (kind) {
+      case ThreadHandle::SELF: thread_ = pthread_self(); break;
+      case ThreadHandle::INVALID: thread_ = kNoThread; break;
+    }
+  }
+  pthread_t thread_;  // Thread handle for pthread.
+};
+
+
+ThreadHandle::ThreadHandle(Kind kind) {
+  data_ = new PlatformData(kind);
+}
+
+
+void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
+  data_->Initialize(kind);
+}
+
+
+ThreadHandle::~ThreadHandle() {
+  delete data_;
+}
+
+
+bool ThreadHandle::IsSelf() const {
+  return pthread_equal(data_->thread_, pthread_self());
+}
+
+
+bool ThreadHandle::IsValid() const {
+  return data_->thread_ != kNoThread;
+}
+
+
+Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
+}
+
+
+Thread::~Thread() {
+}
+
+
+static void* ThreadEntry(void* arg) {
+  Thread* thread = reinterpret_cast<Thread*>(arg);
+  // This is also initialized by the first argument to pthread_create() but we
+  // don't know which thread will run first (the original thread or the new
+  // one) so we initialize it here too.
+  thread->thread_handle_data()->thread_ = pthread_self();
+  ASSERT(thread->IsValid());
+  thread->Run();
+  return NULL;
+}
+
+
+void Thread::Start() {
+  pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
+  ASSERT(IsValid());
+}
+
+
+void Thread::Join() {
+  pthread_join(thread_handle_data()->thread_, NULL);
+}
+
+
+Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
+  pthread_key_t key;
+  int result = pthread_key_create(&key, NULL);
+  USE(result);
+  ASSERT(result == 0);
+  return static_cast<LocalStorageKey>(key);
+}
+
+
+void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
+  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+  int result = pthread_key_delete(pthread_key);
+  USE(result);
+  ASSERT(result == 0);
+}
+
+
+void* Thread::GetThreadLocal(LocalStorageKey key) {
+  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+  return pthread_getspecific(pthread_key);
+}
+
+
+void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
+  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+  pthread_setspecific(pthread_key, value);
+}
+
+
+void Thread::YieldCPU() {
+  sched_yield();
+}
+
+
+class FreeBSDMutex : public Mutex {
+ public:
+
+  FreeBSDMutex() {
+    pthread_mutexattr_t attrs;
+    int result = pthread_mutexattr_init(&attrs);
+    ASSERT(result == 0);
+    result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
+    ASSERT(result == 0);
+    result = pthread_mutex_init(&mutex_, &attrs);
+    ASSERT(result == 0);
+  }
+
+  virtual ~FreeBSDMutex() { pthread_mutex_destroy(&mutex_); }
+
+  virtual int Lock() {
+    int result = pthread_mutex_lock(&mutex_);
+    return result;
+  }
+
+  virtual int Unlock() {
+    int result = pthread_mutex_unlock(&mutex_);
+    return result;
+  }
+
+ private:
+  pthread_mutex_t mutex_;   // Pthread mutex for POSIX platforms.
+};
+
+
+Mutex* OS::CreateMutex() {
+  return new FreeBSDMutex();
+}
+
+
+class FreeBSDSemaphore : public Semaphore {
+ public:
+  explicit FreeBSDSemaphore(int count) { sem_init(&sem_, 0, count); }
+  virtual ~FreeBSDSemaphore() { sem_destroy(&sem_); }
+
+  virtual void Wait();
+  virtual bool Wait(int timeout);
+  virtual void Signal() { sem_post(&sem_); }
+ private:
+  sem_t sem_;
+};
+
+
+void FreeBSDSemaphore::Wait() {
+  while (true) {
+    int result = sem_wait(&sem_);
+    if (result == 0) return;  // Successfully got semaphore.
+    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
+  }
+}
+
+
+bool FreeBSDSemaphore::Wait(int timeout) {
+  const long kOneSecondMicros = 1000000;  // NOLINT
+
+  // Split timeout into second and microsecond parts.
+  struct timeval delta;
+  delta.tv_usec = timeout % kOneSecondMicros;
+  delta.tv_sec = timeout / kOneSecondMicros;
+
+  struct timeval current_time;
+  // Get the current time.
+  if (gettimeofday(&current_time, NULL) == -1) {
+    return false;
+  }
+
+  // Calculate time for end of timeout.
+  struct timeval end_time;
+  timeradd(&current_time, &delta, &end_time);
+
+  struct timespec ts;
+  TIMEVAL_TO_TIMESPEC(&end_time, &ts);
+  while (true) {
+    int result = sem_timedwait(&sem_, &ts);
+    if (result == 0) return true;  // Successfully got semaphore.
+    if (result == -1 && errno == ETIMEDOUT) return false;  // Timeout.
+    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
+  }
+}
+
+
+Semaphore* OS::CreateSemaphore(int count) {
+  return new FreeBSDSemaphore(count);
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+static Sampler* active_sampler_ = NULL;
+
+static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
+  USE(info);
+  if (signal != SIGPROF) return;
+  if (active_sampler_ == NULL) return;
+
+  TickSample sample;
+
+  // If profiling, we extract the current pc, sp and fp.
+  if (active_sampler_->IsProfiling()) {
+    // Extracting the sample from the context is extremely machine dependent.
+    ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
+    mcontext_t& mcontext = ucontext->uc_mcontext;
+#if V8_HOST_ARCH_IA32
+    sample.pc = mcontext.mc_eip;
+    sample.sp = mcontext.mc_esp;
+    sample.fp = mcontext.mc_ebp;
+#elif V8_HOST_ARCH_X64
+    sample.pc = mcontext.mc_rip;
+    sample.sp = mcontext.mc_rsp;
+    sample.fp = mcontext.mc_rbp;
+#elif V8_HOST_ARCH_ARM
+    sample.pc = mcontext.mc_r15;
+    sample.sp = mcontext.mc_r13;
+    sample.fp = mcontext.mc_r11;
+#endif
+    active_sampler_->SampleStack(&sample);
+  }
+
+  // We always sample the VM state.
+  sample.state = Logger::state();
+
+  active_sampler_->Tick(&sample);
+}
+
+
+class Sampler::PlatformData : public Malloced {
+ public:
+  PlatformData() {
+    signal_handler_installed_ = false;
+  }
+
+  bool signal_handler_installed_;
+  struct sigaction old_signal_handler_;
+  struct itimerval old_timer_value_;
+};
+
+
+Sampler::Sampler(int interval, bool profiling)
+    : interval_(interval), profiling_(profiling), active_(false) {
+  data_ = new PlatformData();
+}
+
+
+Sampler::~Sampler() {
+  delete data_;
+}
+
+
+void Sampler::Start() {
+  // There can only be one active sampler at a time on POSIX
+  // platforms.
+  if (active_sampler_ != NULL) return;
+
+  // Request profiling signals.
+  struct sigaction sa;
+  sa.sa_sigaction = ProfilerSignalHandler;
+  sigemptyset(&sa.sa_mask);
+  sa.sa_flags = SA_SIGINFO;
+  if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
+  data_->signal_handler_installed_ = true;
+
+  // Set the itimer to generate a tick for each interval.
+  itimerval itimer;
+  itimer.it_interval.tv_sec = interval_ / 1000;
+  itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
+  itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
+  itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
+  setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);
+
+  // Set this sampler as the active sampler.
+  active_sampler_ = this;
+  active_ = true;
+}
+
+
+void Sampler::Stop() {
+  // Restore old signal handler
+  if (data_->signal_handler_installed_) {
+    setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
+    sigaction(SIGPROF, &data_->old_signal_handler_, 0);
+    data_->signal_handler_installed_ = false;
+  }
+
+  // This sampler is no longer the active sampler.
+  active_sampler_ = NULL;
+  active_ = false;
+}
+
+#endif  // ENABLE_LOGGING_AND_PROFILING
+
+} }  // namespace v8::internal
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
new file mode 100644
index 0000000..fe4c31f
--- /dev/null
+++ b/src/platform-linux.cc
@@ -0,0 +1,751 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Platform specific code for Linux goes here. For the POSIX compatible parts
+// the implementation is in platform-posix.cc.
+
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/types.h>
+#include <stdlib.h>
+
+// Ubuntu Dapper requires memory pages to be marked as
+// executable. Otherwise, OS raises an exception when executing code
+// in that page.
+#include <sys/types.h>  // mmap & munmap
+#include <sys/mman.h>   // mmap & munmap
+#include <sys/stat.h>   // open
+#include <fcntl.h>      // open
+#include <unistd.h>     // sysconf
+#ifdef __GLIBC__
+#include <execinfo.h>   // backtrace, backtrace_symbols
+#endif  // def __GLIBC__
+#include <strings.h>    // index
+#include <errno.h>
+#include <stdarg.h>
+
+#undef MAP_TYPE
+
+#include "v8.h"
+
+#include "platform.h"
+#include "top.h"
+#include "v8threads.h"
+
+
+namespace v8 {
+namespace internal {
+
+// 0 is never a valid thread id on Linux since tids and pids share a
+// name space and pid 0 is reserved (see man 2 kill).
+static const pthread_t kNoThread = (pthread_t) 0;
+
+
+double ceiling(double x) {
+  return ceil(x);
+}
+
+
+void OS::Setup() {
+  // Seed the random number generator.
+  // Convert the current time to a 64-bit integer first, before converting it
+  // to an unsigned. Going directly can cause an overflow and the seed to be
+  // set to all ones. The seed will be identical for different instances that
+  // call this setup code within the same millisecond.
+  uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
+  srandom(static_cast<unsigned int>(seed));
+}
+
+
+double OS::nan_value() {
+  return NAN;
+}
+
+
+int OS::ActivationFrameAlignment() {
+#ifdef V8_TARGET_ARCH_ARM
+  // On EABI ARM targets this is required for fp correctness in the
+  // runtime system.
+  return 8;
+#else
+  // With gcc 4.4 the tree vectorization optimiser can generate code
+  // that requires 16 byte alignment such as movdqa on x86.
+  return 16;
+#endif
+}
+
+
+// We keep the lowest and highest addresses mapped as a quick way of
+// determining that pointers are outside the heap (used mostly in assertions
+// and verification).  The estimate is conservative, i.e., not all addresses in
+// 'allocated' space are actually allocated to our heap.  The range is
+// [lowest, highest), inclusive on the low end and exclusive on the high end.
+static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
+static void* highest_ever_allocated = reinterpret_cast<void*>(0);
+
+
+static void UpdateAllocatedSpaceLimits(void* address, int size) {
+  lowest_ever_allocated = Min(lowest_ever_allocated, address);
+  highest_ever_allocated =
+      Max(highest_ever_allocated,
+          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
+}
+
+
+bool OS::IsOutsideAllocatedSpace(void* address) {
+  return address < lowest_ever_allocated || address >= highest_ever_allocated;
+}
+
+
+size_t OS::AllocateAlignment() {
+  return sysconf(_SC_PAGESIZE);
+}
+
+
+void* OS::Allocate(const size_t requested,
+                   size_t* allocated,
+                   bool is_executable) {
+  const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  if (mbase == MAP_FAILED) {
+    LOG(StringEvent("OS::Allocate", "mmap failed"));
+    return NULL;
+  }
+  *allocated = msize;
+  UpdateAllocatedSpaceLimits(mbase, msize);
+  return mbase;
+}
+
+
+void OS::Free(void* address, const size_t size) {
+  // TODO(1240712): munmap has a return value which is ignored here.
+  int result = munmap(address, size);
+  USE(result);
+  ASSERT(result == 0);
+}
+
+
+#ifdef ENABLE_HEAP_PROTECTION
+
+void OS::Protect(void* address, size_t size) {
+  // TODO(1240712): mprotect has a return value which is ignored here.
+  mprotect(address, size, PROT_READ);
+}
+
+
+void OS::Unprotect(void* address, size_t size, bool is_executable) {
+  // TODO(1240712): mprotect has a return value which is ignored here.
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  mprotect(address, size, prot);
+}
+
+#endif
+
+
+void OS::Sleep(int milliseconds) {
+  unsigned int ms = static_cast<unsigned int>(milliseconds);
+  usleep(1000 * ms);
+}
+
+
+void OS::Abort() {
+  // Redirect to std abort to signal abnormal program termination.
+  abort();
+}
+
+
+void OS::DebugBreak() {
+// TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
+//  which is the architecture of generated code).
+#if defined(__arm__) || defined(__thumb__)
+  asm("bkpt 0");
+#else
+  asm("int $3");
+#endif
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+  PosixMemoryMappedFile(FILE* file, void* memory, int size)
+    : file_(file), memory_(memory), size_(size) { }
+  virtual ~PosixMemoryMappedFile();
+  virtual void* memory() { return memory_; }
+ private:
+  FILE* file_;
+  void* memory_;
+  int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+    void* initial) {
+  FILE* file = fopen(name, "w+");
+  if (file == NULL) return NULL;
+  int result = fwrite(initial, size, 1, file);
+  if (result < 1) {
+    fclose(file);
+    return NULL;
+  }
+  void* memory =
+      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+  if (memory_) munmap(memory_, size_);
+  fclose(file_);
+}
+
+
+void OS::LogSharedLibraryAddresses() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // This function assumes that the layout of the file is as follows:
+  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
+  // If we encounter an unexpected situation we abort scanning further entries.
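+  // A typical line looks like (the fields after the permission bits are
+  // skipped until the optional path):
+  //   08048000-08056000 r-xp 00000000 03:0c 64593   /usr/sbin/gpm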
+  FILE *fp = fopen("/proc/self/maps", "r");
+  if (fp == NULL) return;
+
+  // Allocate enough room to be able to store a full file name.
+  const int kLibNameLen = FILENAME_MAX + 1;
+  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
+
+  // This loop will terminate once the scanning hits an EOF.
+  while (true) {
+    uintptr_t start, end;
+    char attr_r, attr_w, attr_x, attr_p;
+    // Parse the addresses and permission bits at the beginning of the line.
+    if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
+    if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
+
+    int c;
+    if (attr_r == 'r' && attr_x == 'x') {
+      // Found a readable and executable entry. Skip characters until we reach
+      // the beginning of the filename or the end of the line.
+      do {
+        c = getc(fp);
+      } while ((c != EOF) && (c != '\n') && (c != '/'));
+      if (c == EOF) break;  // EOF: Was unexpected, just exit.
+
+      // Process the filename if found.
+      if (c == '/') {
+        ungetc(c, fp);  // Push the '/' back into the stream to be read below.
+
+        // Read to the end of the line. Exit if the read fails.
+        if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
+
+        // Drop the newline character read by fgets. We do not need to check
+        // for a zero-length string because we know that we at least read the
+        // '/' character.
+        lib_name[strlen(lib_name) - 1] = '\0';
+      } else {
+        // No library name found, just record the raw address range.
+        snprintf(lib_name, kLibNameLen,
+                 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
+      }
+      LOG(SharedLibraryEvent(lib_name, start, end));
+    } else {
+      // Entry not describing executable data. Skip to end of line to set up
+      // reading the next entry.
+      do {
+        c = getc(fp);
+      } while ((c != EOF) && (c != '\n'));
+      if (c == EOF) break;
+    }
+  }
+  free(lib_name);
+  fclose(fp);
+#endif
+}
+
+
+int OS::StackWalk(Vector<OS::StackFrame> frames) {
+  // backtrace is a glibc extension.
+#ifdef __GLIBC__
+  int frames_size = frames.length();
+  void** addresses = NewArray<void*>(frames_size);
+
+  int frames_count = backtrace(addresses, frames_size);
+
+  char** symbols;
+  symbols = backtrace_symbols(addresses, frames_count);
+  if (symbols == NULL) {
+    DeleteArray(addresses);
+    return kStackWalkError;
+  }
+
+  for (int i = 0; i < frames_count; i++) {
+    frames[i].address = addresses[i];
+    // Format a text representation of the frame based on the information
+    // available.
+    SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
+             "%s",
+             symbols[i]);
+    // Make sure line termination is in place.
+    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
+  }
+
+  DeleteArray(addresses);
+  free(symbols);
+
+  return frames_count;
+#else  // ndef __GLIBC__
+  return 0;
+#endif  // ndef __GLIBC__
+}
+
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
+VirtualMemory::VirtualMemory(size_t size) {
+  address_ = mmap(NULL, size, PROT_NONE,
+                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+                  kMmapFd, kMmapFdOffset);
+  size_ = size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+  }
+}
+
+
+bool VirtualMemory::IsReserved() {
+  return address_ != MAP_FAILED;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(address, size, prot,
+                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+                         kMmapFd, kMmapFdOffset)) {
+    return false;
+  }
+
+  UpdateAllocatedSpaceLimits(address, size);
+  return true;
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  return mmap(address, size, PROT_NONE,
+              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
+              kMmapFd, kMmapFdOffset) != MAP_FAILED;
+}
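+// Illustrative usage (a sketch, not code in this file; reserved_size and
+// page_size are hypothetical caller-chosen values): reserve a large range up
+// front, then commit pages lazily as they are needed:
+//   VirtualMemory vm(reserved_size);        // PROT_NONE reservation
+//   if (vm.IsReserved()) vm.Commit(vm.address(), page_size, false);
+//   ...
+//   vm.Uncommit(vm.address(), page_size);   // return the pages to the OS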
+
+
+class ThreadHandle::PlatformData : public Malloced {
+ public:
+  explicit PlatformData(ThreadHandle::Kind kind) {
+    Initialize(kind);
+  }
+
+  void Initialize(ThreadHandle::Kind kind) {
+    switch (kind) {
+      case ThreadHandle::SELF: thread_ = pthread_self(); break;
+      case ThreadHandle::INVALID: thread_ = kNoThread; break;
+    }
+  }
+
+  pthread_t thread_;  // Thread handle for pthread.
+};
+
+
+ThreadHandle::ThreadHandle(Kind kind) {
+  data_ = new PlatformData(kind);
+}
+
+
+void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
+  data_->Initialize(kind);
+}
+
+
+ThreadHandle::~ThreadHandle() {
+  delete data_;
+}
+
+
+bool ThreadHandle::IsSelf() const {
+  return pthread_equal(data_->thread_, pthread_self());
+}
+
+
+bool ThreadHandle::IsValid() const {
+  return data_->thread_ != kNoThread;
+}
+
+
+Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
+}
+
+
+Thread::~Thread() {
+}
+
+
+static void* ThreadEntry(void* arg) {
+  Thread* thread = reinterpret_cast<Thread*>(arg);
+  // This is also initialized by the first argument to pthread_create() but we
+  // don't know which thread will run first (the original thread or the new
+  // one) so we initialize it here too.
+  thread->thread_handle_data()->thread_ = pthread_self();
+  ASSERT(thread->IsValid());
+  thread->Run();
+  return NULL;
+}
+
+
+void Thread::Start() {
+  pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
+  ASSERT(IsValid());
+}
+
+
+void Thread::Join() {
+  pthread_join(thread_handle_data()->thread_, NULL);
+}
+
+
+Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
+  pthread_key_t key;
+  int result = pthread_key_create(&key, NULL);
+  USE(result);
+  ASSERT(result == 0);
+  return static_cast<LocalStorageKey>(key);
+}
+
+
+void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
+  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+  int result = pthread_key_delete(pthread_key);
+  USE(result);
+  ASSERT(result == 0);
+}
+
+
+void* Thread::GetThreadLocal(LocalStorageKey key) {
+  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+  return pthread_getspecific(pthread_key);
+}
+
+
+void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
+  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+  pthread_setspecific(pthread_key, value);
+}
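+// Usage sketch (some_pointer is a hypothetical value; the key is typically
+// created once and reused):
+//   static Thread::LocalStorageKey key = Thread::CreateThreadLocalKey();
+//   Thread::SetThreadLocal(key, some_pointer);
+//   void* value = Thread::GetThreadLocal(key);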
+
+
+void Thread::YieldCPU() {
+  sched_yield();
+}
+
+
+class LinuxMutex : public Mutex {
+ public:
+
+  LinuxMutex() {
+    pthread_mutexattr_t attrs;
+    int result = pthread_mutexattr_init(&attrs);
+    ASSERT(result == 0);
+    result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
+    ASSERT(result == 0);
+    result = pthread_mutex_init(&mutex_, &attrs);
+    ASSERT(result == 0);
+  }
+
+  virtual ~LinuxMutex() { pthread_mutex_destroy(&mutex_); }
+
+  virtual int Lock() {
+    int result = pthread_mutex_lock(&mutex_);
+    return result;
+  }
+
+  virtual int Unlock() {
+    int result = pthread_mutex_unlock(&mutex_);
+    return result;
+  }
+
+ private:
+  pthread_mutex_t mutex_;   // Pthread mutex for POSIX platforms.
+};
+
+
+Mutex* OS::CreateMutex() {
+  return new LinuxMutex();
+}
+
+
+class LinuxSemaphore : public Semaphore {
+ public:
+  explicit LinuxSemaphore(int count) {  sem_init(&sem_, 0, count); }
+  virtual ~LinuxSemaphore() { sem_destroy(&sem_); }
+
+  virtual void Wait();
+  virtual bool Wait(int timeout);
+  virtual void Signal() { sem_post(&sem_); }
+ private:
+  sem_t sem_;
+};
+
+
+void LinuxSemaphore::Wait() {
+  while (true) {
+    int result = sem_wait(&sem_);
+    if (result == 0) return;  // Successfully got semaphore.
+    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
+  }
+}
+
+
+#ifndef TIMEVAL_TO_TIMESPEC
+#define TIMEVAL_TO_TIMESPEC(tv, ts) do {                            \
+    (ts)->tv_sec = (tv)->tv_sec;                                    \
+    (ts)->tv_nsec = (tv)->tv_usec * 1000;                           \
+} while (false)
+#endif
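+// (timeval carries microseconds while timespec carries nanoseconds, hence the
+// factor of 1000 above; the macro is only defined here for C libraries that
+// do not provide it themselves.)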
+
+
+bool LinuxSemaphore::Wait(int timeout) {
+  const long kOneSecondMicros = 1000000;  // NOLINT
+
+  // Split timeout into second and microsecond parts.
+  struct timeval delta;
+  delta.tv_usec = timeout % kOneSecondMicros;
+  delta.tv_sec = timeout / kOneSecondMicros;
+
+  struct timeval current_time;
+  // Get the current time.
+  if (gettimeofday(&current_time, NULL) == -1) {
+    return false;
+  }
+
+  // Calculate time for end of timeout.
+  struct timeval end_time;
+  timeradd(&current_time, &delta, &end_time);
+
+  struct timespec ts;
+  TIMEVAL_TO_TIMESPEC(&end_time, &ts);
+  // Wait for semaphore signalled or timeout.
+  while (true) {
+    int result = sem_timedwait(&sem_, &ts);
+    if (result == 0) return true;  // Successfully got semaphore.
+    if (result > 0) {
+      // For glibc prior to 2.3.4 sem_timedwait returns the error instead of -1.
+      errno = result;
+      result = -1;
+    }
+    if (result == -1 && errno == ETIMEDOUT) return false;  // Timeout.
+    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
+  }
+}
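+// Note that the timeout above is expressed in microseconds, so, for example,
+// Wait(500 * 1000) blocks for at most roughly half a second.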
+
+
+Semaphore* OS::CreateSemaphore(int count) {
+  return new LinuxSemaphore(count);
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+static Sampler* active_sampler_ = NULL;
+static pthread_t vm_thread_ = 0;
+
+
+#if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
+// Android runs a fairly new Linux kernel, so signal info is there,
+// but the C library doesn't have the structs defined.
+
+struct sigcontext {
+  uint32_t trap_no;
+  uint32_t error_code;
+  uint32_t oldmask;
+  uint32_t gregs[16];
+  uint32_t arm_cpsr;
+  uint32_t fault_address;
+};
+typedef uint32_t __sigset_t;
+typedef struct sigcontext mcontext_t;
+typedef struct ucontext {
+  uint32_t uc_flags;
+  struct ucontext *uc_link;
+  stack_t uc_stack;
+  mcontext_t uc_mcontext;
+  __sigset_t uc_sigmask;
+} ucontext_t;
+enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11};
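+// gregs[] above holds r0..r15 in order, so R15 is the program counter, R13
+// the stack pointer and R11 the frame pointer read by the signal handler.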
+
+#endif
+
+
+// A function that determines if a signal handler is called in the context
+// of a VM thread.
+//
+// The problem is that SIGPROF can be delivered to an arbitrary thread
+// (see http://code.google.com/p/google-perftools/issues/detail?id=106#c2)
+// So, if the signal is being handled in the context of a non-VM thread,
+// it means that the VM thread is running, and trying to sample its stack can
+// cause a crash.
+static inline bool IsVmThread() {
+  // In the case of a single VM thread, this check is enough.
+  if (pthread_equal(pthread_self(), vm_thread_)) return true;
+  // If there are multiple threads that use VM, they must have a thread id
+  // stored in TLS. To verify that the thread is really executing VM,
+  // we check Top's data. Given that ThreadManager::RestoreThread first
+  // restores ThreadLocalTop from TLS, and only then erases the TLS value,
+  // reading Top::thread_id() should not be affected by races.
+  if (ThreadManager::HasId() && !ThreadManager::IsArchived() &&
+      ThreadManager::CurrentId() == Top::thread_id()) {
+    return true;
+  }
+  return false;
+}
+
+
+static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
+  USE(info);
+  if (signal != SIGPROF) return;
+  if (active_sampler_ == NULL) return;
+
+  TickSample sample;
+
+  // If profiling, we extract the current pc and sp.
+  if (active_sampler_->IsProfiling()) {
+    // Extracting the sample from the context is extremely machine dependent.
+    ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
+    mcontext_t& mcontext = ucontext->uc_mcontext;
+#if V8_HOST_ARCH_IA32
+    sample.pc = mcontext.gregs[REG_EIP];
+    sample.sp = mcontext.gregs[REG_ESP];
+    sample.fp = mcontext.gregs[REG_EBP];
+#elif V8_HOST_ARCH_X64
+    sample.pc = mcontext.gregs[REG_RIP];
+    sample.sp = mcontext.gregs[REG_RSP];
+    sample.fp = mcontext.gregs[REG_RBP];
+#elif V8_HOST_ARCH_ARM
+// An undefined macro evaluates to 0, so this applies to Android's Bionic also.
+#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
+    sample.pc = mcontext.gregs[R15];
+    sample.sp = mcontext.gregs[R13];
+    sample.fp = mcontext.gregs[R11];
+#else
+    sample.pc = mcontext.arm_pc;
+    sample.sp = mcontext.arm_sp;
+    sample.fp = mcontext.arm_fp;
+#endif
+#endif
+    if (IsVmThread())
+      active_sampler_->SampleStack(&sample);
+  }
+
+  // We always sample the VM state.
+  sample.state = Logger::state();
+
+  active_sampler_->Tick(&sample);
+}
+
+
+class Sampler::PlatformData : public Malloced {
+ public:
+  PlatformData() {
+    signal_handler_installed_ = false;
+  }
+
+  bool signal_handler_installed_;
+  struct sigaction old_signal_handler_;
+  struct itimerval old_timer_value_;
+};
+
+
+Sampler::Sampler(int interval, bool profiling)
+    : interval_(interval), profiling_(profiling), active_(false) {
+  data_ = new PlatformData();
+}
+
+
+Sampler::~Sampler() {
+  delete data_;
+}
+
+
+void Sampler::Start() {
+  // There can only be one active sampler at a time on POSIX
+  // platforms.
+  if (active_sampler_ != NULL) return;
+
+  vm_thread_ = pthread_self();
+
+  // Request profiling signals.
+  struct sigaction sa;
+  sa.sa_sigaction = ProfilerSignalHandler;
+  sigemptyset(&sa.sa_mask);
+  sa.sa_flags = SA_SIGINFO;
+  if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
+  data_->signal_handler_installed_ = true;
+
+  // Set the itimer to generate a tick for each interval.
+  itimerval itimer;
+  itimer.it_interval.tv_sec = interval_ / 1000;
+  itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
+  itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
+  itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
+  setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);
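+  // For example, interval_ = 10 yields it_interval = {0 s, 10000 us}, i.e. a
+  // SIGPROF tick roughly every 10 ms.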
+
+  // Set this sampler as the active sampler.
+  active_sampler_ = this;
+  active_ = true;
+}
+
+
+void Sampler::Stop() {
+  // Restore old signal handler
+  if (data_->signal_handler_installed_) {
+    setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
+    sigaction(SIGPROF, &data_->old_signal_handler_, 0);
+    data_->signal_handler_installed_ = false;
+  }
+
+  // This sampler is no longer the active sampler.
+  active_sampler_ = NULL;
+  active_ = false;
+}
+
+
+#endif  // ENABLE_LOGGING_AND_PROFILING
+
+} }  // namespace v8::internal
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
new file mode 100644
index 0000000..0b236a5
--- /dev/null
+++ b/src/platform-macos.cc
@@ -0,0 +1,640 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Platform specific code for MacOS goes here. For the POSIX compatible parts
+// the implementation is in platform-posix.cc.
+
+#include <unistd.h>
+#include <sys/mman.h>
+#include <mach/mach_init.h>
+#include <mach-o/dyld.h>
+#include <mach-o/getsect.h>
+
+#include <AvailabilityMacros.h>
+
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <mach/mach.h>
+#include <mach/semaphore.h>
+#include <mach/task.h>
+#include <mach/vm_statistics.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/types.h>
+#include <stdarg.h>
+#include <stdlib.h>
+
+#include <errno.h>
+
+#undef MAP_TYPE
+
+#include "v8.h"
+
+#include "platform.h"
+
+// Manually define these here as weak imports, rather than including execinfo.h.
+// This lets us launch on 10.4 which does not have these calls.
+extern "C" {
+  extern int backtrace(void**, int) __attribute__((weak_import));
+  extern char** backtrace_symbols(void* const*, int)
+      __attribute__((weak_import));
+  extern void backtrace_symbols_fd(void* const*, int, int)
+      __attribute__((weak_import));
+}
+
+
+namespace v8 {
+namespace internal {
+
+// 0 is never a valid thread id on Mac OS X since a pthread_t is
+// a pointer.
+static const pthread_t kNoThread = (pthread_t) 0;
+
+
+double ceiling(double x) {
+  // Correct Mac OS X Leopard 'ceil' behavior.
+  if (-1.0 < x && x < 0.0) {
+    return -0.0;
+  } else {
+    return ceil(x);
+  }
+}
+
+
+void OS::Setup() {
+  // Seed the random number generator.
+  // Convert the current time to a 64-bit integer first, before converting it
+  // to an unsigned. Going directly will cause an overflow and the seed to be
+  // set to all ones. The seed will be identical for different instances that
+  // call this setup code within the same millisecond.
+  uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
+  srandom(static_cast<unsigned int>(seed));
+}
+
+
+// We keep the lowest and highest addresses mapped as a quick way of
+// determining that pointers are outside the heap (used mostly in assertions
+// and verification).  The estimate is conservative, i.e., not all addresses in
+// 'allocated' space are actually allocated to our heap.  The range is
+// [lowest, highest), inclusive on the low end and exclusive on the high end.
+static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
+static void* highest_ever_allocated = reinterpret_cast<void*>(0);
+
+
+static void UpdateAllocatedSpaceLimits(void* address, int size) {
+  lowest_ever_allocated = Min(lowest_ever_allocated, address);
+  highest_ever_allocated =
+      Max(highest_ever_allocated,
+          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
+}
+
+
+bool OS::IsOutsideAllocatedSpace(void* address) {
+  return address < lowest_ever_allocated || address >= highest_ever_allocated;
+}
+
+
+size_t OS::AllocateAlignment() {
+  return getpagesize();
+}
+
+
+// Constants used for mmap.
+// kMmapFd is used to pass vm_alloc flags to tag the region with the user
+// defined tag 255. This helps identify V8-allocated regions in memory analysis
+// tools like vmmap(1).
+static const int kMmapFd = VM_MAKE_TAG(255);
+static const off_t kMmapFdOffset = 0;
+
+
+void* OS::Allocate(const size_t requested,
+                   size_t* allocated,
+                   bool is_executable) {
+  const size_t msize = RoundUp(requested, getpagesize());
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  void* mbase = mmap(NULL, msize, prot,
+                     MAP_PRIVATE | MAP_ANON,
+                     kMmapFd, kMmapFdOffset);
+  if (mbase == MAP_FAILED) {
+    LOG(StringEvent("OS::Allocate", "mmap failed"));
+    return NULL;
+  }
+  *allocated = msize;
+  UpdateAllocatedSpaceLimits(mbase, msize);
+  return mbase;
+}
+
+
+void OS::Free(void* address, const size_t size) {
+  // TODO(1240712): munmap has a return value which is ignored here.
+  int result = munmap(address, size);
+  USE(result);
+  ASSERT(result == 0);
+}
+
+
+#ifdef ENABLE_HEAP_PROTECTION
+
+void OS::Protect(void* address, size_t size) {
+  UNIMPLEMENTED();
+}
+
+
+void OS::Unprotect(void* address, size_t size, bool is_executable) {
+  UNIMPLEMENTED();
+}
+
+#endif
+
+
+void OS::Sleep(int milliseconds) {
+  usleep(1000 * milliseconds);
+}
+
+
+void OS::Abort() {
+  // Redirect to std abort to signal abnormal program termination
+  abort();
+}
+
+
+void OS::DebugBreak() {
+  asm("int $3");
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+  PosixMemoryMappedFile(FILE* file, void* memory, int size)
+    : file_(file), memory_(memory), size_(size) { }
+  virtual ~PosixMemoryMappedFile();
+  virtual void* memory() { return memory_; }
+ private:
+  FILE* file_;
+  void* memory_;
+  int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+    void* initial) {
+  FILE* file = fopen(name, "w+");
+  if (file == NULL) return NULL;
+  fwrite(initial, size, 1, file);
+  void* memory =
+      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+  if (memory_) munmap(memory_, size_);
+  fclose(file_);
+}
+
+
+void OS::LogSharedLibraryAddresses() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  unsigned int images_count = _dyld_image_count();
+  for (unsigned int i = 0; i < images_count; ++i) {
+    const mach_header* header = _dyld_get_image_header(i);
+    if (header == NULL) continue;
+#if V8_HOST_ARCH_X64
+    uint64_t size;
+    char* code_ptr = getsectdatafromheader_64(
+        reinterpret_cast<const mach_header_64*>(header),
+        SEG_TEXT,
+        SECT_TEXT,
+        &size);
+#else
+    unsigned int size;
+    char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
+#endif
+    if (code_ptr == NULL) continue;
+    const uintptr_t slide = _dyld_get_image_vmaddr_slide(i);
+    const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
+    LOG(SharedLibraryEvent(_dyld_get_image_name(i), start, start + size));
+  }
+#endif  // ENABLE_LOGGING_AND_PROFILING
+}
+
+
+double OS::nan_value() {
+  return NAN;
+}
+
+
+int OS::ActivationFrameAlignment() {
+  // OS X activation frames must be 16 byte-aligned; see "Mac OS X ABI
+  // Function Call Guide".
+  return 16;
+}
+
+
+int OS::StackWalk(Vector<StackFrame> frames) {
+  // If the weak link to execinfo failed, i.e. we are on 10.4, just return 0.
+  if (backtrace == NULL)
+    return 0;
+
+  int frames_size = frames.length();
+  void** addresses = NewArray<void*>(frames_size);
+  int frames_count = backtrace(addresses, frames_size);
+
+  char** symbols;
+  symbols = backtrace_symbols(addresses, frames_count);
+  if (symbols == NULL) {
+    DeleteArray(addresses);
+    return kStackWalkError;
+  }
+
+  for (int i = 0; i < frames_count; i++) {
+    frames[i].address = addresses[i];
+    // Format a text representation of the frame based on the information
+    // available.
+    SNPrintF(MutableCStrVector(frames[i].text,
+                               kStackWalkMaxTextLen),
+             "%s",
+             symbols[i]);
+    // Make sure line termination is in place.
+    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
+  }
+
+  DeleteArray(addresses);
+  free(symbols);
+
+  return frames_count;
+}
+
+
+VirtualMemory::VirtualMemory(size_t size) {
+  address_ = mmap(NULL, size, PROT_NONE,
+                  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                  kMmapFd, kMmapFdOffset);
+  size_ = size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+  }
+}
+
+
+bool VirtualMemory::IsReserved() {
+  return address_ != MAP_FAILED;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(address, size, prot,
+                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+                         kMmapFd, kMmapFdOffset)) {
+    return false;
+  }
+
+  UpdateAllocatedSpaceLimits(address, size);
+  return true;
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  return mmap(address, size, PROT_NONE,
+              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
+              kMmapFd, kMmapFdOffset) != MAP_FAILED;
+}
+
+
+class ThreadHandle::PlatformData : public Malloced {
+ public:
+  explicit PlatformData(ThreadHandle::Kind kind) {
+    Initialize(kind);
+  }
+
+  void Initialize(ThreadHandle::Kind kind) {
+    switch (kind) {
+      case ThreadHandle::SELF: thread_ = pthread_self(); break;
+      case ThreadHandle::INVALID: thread_ = kNoThread; break;
+    }
+  }
+  pthread_t thread_;  // Thread handle for pthread.
+};
+
+
+ThreadHandle::ThreadHandle(Kind kind) {
+  data_ = new PlatformData(kind);
+}
+
+
+void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
+  data_->Initialize(kind);
+}
+
+
+ThreadHandle::~ThreadHandle() {
+  delete data_;
+}
+
+
+bool ThreadHandle::IsSelf() const {
+  return pthread_equal(data_->thread_, pthread_self());
+}
+
+
+bool ThreadHandle::IsValid() const {
+  return data_->thread_ != kNoThread;
+}
+
+
+Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
+}
+
+
+Thread::~Thread() {
+}
+
+
+static void* ThreadEntry(void* arg) {
+  Thread* thread = reinterpret_cast<Thread*>(arg);
+  // This is also initialized by the first argument to pthread_create() but we
+  // don't know which thread will run first (the original thread or the new
+  // one) so we initialize it here too.
+  thread->thread_handle_data()->thread_ = pthread_self();
+  ASSERT(thread->IsValid());
+  thread->Run();
+  return NULL;
+}
+
+
+void Thread::Start() {
+  pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
+}
+
+
+void Thread::Join() {
+  pthread_join(thread_handle_data()->thread_, NULL);
+}
+
+
+Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
+  pthread_key_t key;
+  int result = pthread_key_create(&key, NULL);
+  USE(result);
+  ASSERT(result == 0);
+  return static_cast<LocalStorageKey>(key);
+}
+
+
+void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
+  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+  int result = pthread_key_delete(pthread_key);
+  USE(result);
+  ASSERT(result == 0);
+}
+
+
+void* Thread::GetThreadLocal(LocalStorageKey key) {
+  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+  return pthread_getspecific(pthread_key);
+}
+
+
+void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
+  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+  pthread_setspecific(pthread_key, value);
+}
+
+
+void Thread::YieldCPU() {
+  sched_yield();
+}
+
+
+class MacOSMutex : public Mutex {
+ public:
+
+  MacOSMutex() {
+    pthread_mutexattr_t attr;
+    pthread_mutexattr_init(&attr);
+    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+    pthread_mutex_init(&mutex_, &attr);
+  }
+
+  ~MacOSMutex() { pthread_mutex_destroy(&mutex_); }
+
+  int Lock() { return pthread_mutex_lock(&mutex_); }
+
+  int Unlock() { return pthread_mutex_unlock(&mutex_); }
+
+ private:
+  pthread_mutex_t mutex_;
+};
+
+
+Mutex* OS::CreateMutex() {
+  return new MacOSMutex();
+}
+
+
+class MacOSSemaphore : public Semaphore {
+ public:
+  explicit MacOSSemaphore(int count) {
+    semaphore_create(mach_task_self(), &semaphore_, SYNC_POLICY_FIFO, count);
+  }
+
+  ~MacOSSemaphore() {
+    semaphore_destroy(mach_task_self(), semaphore_);
+  }
+
+  // The MacOS mach semaphore documentation claims it does not have spurious
+  // wakeups, the way pthreads semaphores do.  So the code from the Linux
+  // platform is not needed here.
+  void Wait() { semaphore_wait(semaphore_); }
+
+  bool Wait(int timeout);
+
+  void Signal() { semaphore_signal(semaphore_); }
+
+ private:
+  semaphore_t semaphore_;
+};
+
+
+bool MacOSSemaphore::Wait(int timeout) {
+  mach_timespec_t ts;
+  ts.tv_sec = timeout / 1000000;
+  ts.tv_nsec = (timeout % 1000000) * 1000;
+  return semaphore_timedwait(semaphore_, ts) != KERN_OPERATION_TIMED_OUT;
+}
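+// As in the Linux implementation, the timeout is in microseconds; it is split
+// into the seconds and nanoseconds that mach_timespec_t expects.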
+
+
+Semaphore* OS::CreateSemaphore(int count) {
+  return new MacOSSemaphore(count);
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+class Sampler::PlatformData : public Malloced {
+ public:
+  explicit PlatformData(Sampler* sampler)
+      : sampler_(sampler),
+        task_self_(mach_task_self()),
+        profiled_thread_(0),
+        sampler_thread_(0) {
+  }
+
+  Sampler* sampler_;
+  // Note: for profiled_thread_, Mach primitives are used instead of pthreads
+  // because the latter do not provide the required thread manipulation
+  // primitives. For details, consult the "Mac OS X Internals" book, Section 7.3.
+  mach_port_t task_self_;
+  thread_act_t profiled_thread_;
+  pthread_t sampler_thread_;
+
+  // Sampler thread handler.
+  void Runner() {
+    // Loop until the sampler is disengaged.
+    while (sampler_->IsActive()) {
+      TickSample sample;
+
+      // If profiling, we record the pc and sp of the profiled thread.
+      if (sampler_->IsProfiling()
+          && KERN_SUCCESS == thread_suspend(profiled_thread_)) {
+#if V8_HOST_ARCH_X64
+        thread_state_flavor_t flavor = x86_THREAD_STATE64;
+        x86_thread_state64_t state;
+        mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
+#if __DARWIN_UNIX03
+#define REGISTER_FIELD(name) __r ## name
+#else
+#define REGISTER_FIELD(name) r ## name
+#endif  // __DARWIN_UNIX03
+#elif V8_HOST_ARCH_IA32
+        thread_state_flavor_t flavor = i386_THREAD_STATE;
+        i386_thread_state_t state;
+        mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
+#if __DARWIN_UNIX03
+#define REGISTER_FIELD(name) __e ## name
+#else
+#define REGISTER_FIELD(name) e ## name
+#endif  // __DARWIN_UNIX03
+#else
+#error Unsupported Mac OS X host architecture.
+#endif  // V8_HOST_ARCH
+
+        if (thread_get_state(profiled_thread_,
+                             flavor,
+                             reinterpret_cast<natural_t*>(&state),
+                             &count) == KERN_SUCCESS) {
+          sample.pc = state.REGISTER_FIELD(ip);
+          sample.sp = state.REGISTER_FIELD(sp);
+          sample.fp = state.REGISTER_FIELD(bp);
+          sampler_->SampleStack(&sample);
+        }
+        thread_resume(profiled_thread_);
+      }
+
+      // We always sample the VM state.
+      sample.state = Logger::state();
+      // Invoke tick handler with program counter and stack pointer.
+      sampler_->Tick(&sample);
+
+      // Wait until next sampling.
+      usleep(sampler_->interval_ * 1000);
+    }
+  }
+};
+
+#undef REGISTER_FIELD
+
+
+// Entry point for sampler thread.
+static void* SamplerEntry(void* arg) {
+  Sampler::PlatformData* data =
+      reinterpret_cast<Sampler::PlatformData*>(arg);
+  data->Runner();
+  return 0;
+}
+
+
+Sampler::Sampler(int interval, bool profiling)
+    : interval_(interval), profiling_(profiling), active_(false) {
+  data_ = new PlatformData(this);
+}
+
+
+Sampler::~Sampler() {
+  delete data_;
+}
+
+
+void Sampler::Start() {
+  // If we are profiling, we need to be able to access the calling
+  // thread.
+  if (IsProfiling()) {
+    data_->profiled_thread_ = mach_thread_self();
+  }
+
+  // Create sampler thread with high priority.
+  // According to POSIX spec, when SCHED_FIFO policy is used, a thread
+  // runs until it exits or blocks.
+  pthread_attr_t sched_attr;
+  sched_param fifo_param;
+  pthread_attr_init(&sched_attr);
+  pthread_attr_setinheritsched(&sched_attr, PTHREAD_EXPLICIT_SCHED);
+  pthread_attr_setschedpolicy(&sched_attr, SCHED_FIFO);
+  fifo_param.sched_priority = sched_get_priority_max(SCHED_FIFO);
+  pthread_attr_setschedparam(&sched_attr, &fifo_param);
+
+  active_ = true;
+  pthread_create(&data_->sampler_thread_, &sched_attr, SamplerEntry, data_);
+}
+
+
+void Sampler::Stop() {
+  // Setting active to false triggers termination of the sampler
+  // thread.
+  active_ = false;
+
+  // Wait for sampler thread to terminate.
+  pthread_join(data_->sampler_thread_, NULL);
+
+  // Deallocate Mach port for thread.
+  if (IsProfiling()) {
+    mach_port_deallocate(data_->task_self_, data_->profiled_thread_);
+  }
+}
+
+#endif  // ENABLE_LOGGING_AND_PROFILING
+
+} }  // namespace v8::internal
diff --git a/src/platform-nullos.cc b/src/platform-nullos.cc
new file mode 100644
index 0000000..c0cf7f4
--- /dev/null
+++ b/src/platform-nullos.cc
@@ -0,0 +1,436 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Platform specific code for NULLOS goes here.
+
+// Minimal include to get access to abort, fprintf and friends for bootstrapping
+// messages.
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "platform.h"
+
+
+namespace v8 {
+namespace internal {
+
+// Give V8 the opportunity to override the default ceil behaviour.
+double ceiling(double x) {
+  UNIMPLEMENTED();
+  return 0;
+}
+
+
+// Initialize OS class early in the V8 startup.
+void OS::Setup() {
+  // Seed the random number generator.
+  UNIMPLEMENTED();
+}
+
+
+// Returns the accumulated user time for thread.
+int OS::GetUserTime(uint32_t* secs,  uint32_t* usecs) {
+  UNIMPLEMENTED();
+  *secs = 0;
+  *usecs = 0;
+  return 0;
+}
+
+
+// Returns current time as the number of milliseconds since
+// 00:00:00 UTC, January 1, 1970.
+double OS::TimeCurrentMillis() {
+  UNIMPLEMENTED();
+  return 0;
+}
+
+
+// Returns ticks in microsecond resolution.
+int64_t OS::Ticks() {
+  UNIMPLEMENTED();
+  return 0;
+}
+
+
+// Returns a string identifying the current timezone taking into
+// account daylight saving.
+const char* OS::LocalTimezone(double time) {
+  UNIMPLEMENTED();
+  return "<none>";
+}
+
+
+// Returns the daylight savings offset in milliseconds for the given time.
+double OS::DaylightSavingsOffset(double time) {
+  UNIMPLEMENTED();
+  return 0;
+}
+
+
+// Returns the local time offset in milliseconds east of UTC without
+// taking daylight savings time into account.
+double OS::LocalTimeOffset() {
+  UNIMPLEMENTED();
+  return 0;
+}
+
+
+// Print (debug) message to console.
+void OS::Print(const char* format, ...) {
+  UNIMPLEMENTED();
+}
+
+
+// Print (debug) message to console.
+void OS::VPrint(const char* format, va_list args) {
+  // Minimalistic implementation for bootstrapping.
+  vfprintf(stdout, format, args);
+}
+
+
+// Print error message to console.
+void OS::PrintError(const char* format, ...) {
+  // Minimalistic implementation for bootstrapping.
+  va_list args;
+  va_start(args, format);
+  VPrintError(format, args);
+  va_end(args);
+}
+
+
+// Print error message to console.
+void OS::VPrintError(const char* format, va_list args) {
+  // Minimalistic implementation for bootstrapping.
+  vfprintf(stderr, format, args);
+}
+
+
+int OS::SNPrintF(char* str, size_t size, const char* format, ...) {
+  UNIMPLEMENTED();
+  return 0;
+}
+
+
+int OS::VSNPrintF(char* str, size_t size, const char* format, va_list args) {
+  UNIMPLEMENTED();
+  return 0;
+}
+
+
+double OS::nan_value() {
+  UNIMPLEMENTED();
+  return 0;
+}
+
+bool OS::IsOutsideAllocatedSpace(void* address) {
+  UNIMPLEMENTED();
+  return false;
+}
+
+
+size_t OS::AllocateAlignment() {
+  UNIMPLEMENTED();
+  return 0;
+}
+
+
+void* OS::Allocate(const size_t requested,
+                   size_t* allocated,
+                   bool executable) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
+void OS::Free(void* buf, const size_t length) {
+  // TODO(1240712): potential system call return value which is ignored here.
+  UNIMPLEMENTED();
+}
+
+
+#ifdef ENABLE_HEAP_PROTECTION
+
+void OS::Protect(void* address, size_t size) {
+  UNIMPLEMENTED();
+}
+
+
+void OS::Unprotect(void* address, size_t size, bool is_executable) {
+  UNIMPLEMENTED();
+}
+
+#endif
+
+
+void OS::Sleep(int milliseconds) {
+  UNIMPLEMENTED();
+}
+
+
+void OS::Abort() {
+  // Minimalistic implementation for bootstrapping.
+  abort();
+}
+
+
+void OS::DebugBreak() {
+  UNIMPLEMENTED();
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+    void* initial) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
+void OS::LogSharedLibraryAddresses() {
+  UNIMPLEMENTED();
+}
+
+
+int OS::StackWalk(Vector<OS::StackFrame> frames) {
+  UNIMPLEMENTED();
+  return 0;
+}
+
+
+VirtualMemory::VirtualMemory(size_t size, void* address_hint) {
+  UNIMPLEMENTED();
+}
+
+
+VirtualMemory::~VirtualMemory() {
+  UNIMPLEMENTED();
+}
+
+
+bool VirtualMemory::IsReserved() {
+  UNIMPLEMENTED();
+  return false;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
+  UNIMPLEMENTED();
+  return false;
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  UNIMPLEMENTED();
+  return false;
+}
+
+
+class ThreadHandle::PlatformData : public Malloced {
+ public:
+  explicit PlatformData(ThreadHandle::Kind kind) {
+    UNIMPLEMENTED();
+  }
+
+  void* pd_data_;
+};
+
+
+ThreadHandle::ThreadHandle(Kind kind) {
+  UNIMPLEMENTED();
+  // Shared setup follows.
+  data_ = new PlatformData(kind);
+}
+
+
+void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
+  UNIMPLEMENTED();
+}
+
+
+ThreadHandle::~ThreadHandle() {
+  UNIMPLEMENTED();
+  // Shared tear down follows.
+  delete data_;
+}
+
+
+bool ThreadHandle::IsSelf() const {
+  UNIMPLEMENTED();
+  return false;
+}
+
+
+bool ThreadHandle::IsValid() const {
+  UNIMPLEMENTED();
+  return false;
+}
+
+
+Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
+  UNIMPLEMENTED();
+}
+
+
+Thread::~Thread() {
+  UNIMPLEMENTED();
+}
+
+
+void Thread::Start() {
+  UNIMPLEMENTED();
+}
+
+
+void Thread::Join() {
+  UNIMPLEMENTED();
+}
+
+
+Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
+  UNIMPLEMENTED();
+  return static_cast<LocalStorageKey>(0);
+}
+
+
+void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
+  UNIMPLEMENTED();
+}
+
+
+void* Thread::GetThreadLocal(LocalStorageKey key) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
+void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
+  UNIMPLEMENTED();
+}
+
+
+void Thread::YieldCPU() {
+  UNIMPLEMENTED();
+}
+
+
+class NullMutex : public Mutex {
+ public:
+  NullMutex() : data_(NULL) {
+    UNIMPLEMENTED();
+  }
+
+  virtual ~NullMutex() {
+    UNIMPLEMENTED();
+  }
+
+  virtual int Lock() {
+    UNIMPLEMENTED();
+    return 0;
+  }
+
+  virtual int Unlock() {
+    UNIMPLEMENTED();
+    return 0;
+  }
+
+ private:
+  void* data_;
+};
+
+
+Mutex* OS::CreateMutex() {
+  UNIMPLEMENTED();
+  return new NullMutex();
+}
+
+
+class NullSemaphore : public Semaphore {
+ public:
+  explicit NullSemaphore(int count) : data_(NULL) {
+    UNIMPLEMENTED();
+  }
+
+  virtual ~NullSemaphore() {
+    UNIMPLEMENTED();
+  }
+
+  virtual void Wait() {
+    UNIMPLEMENTED();
+  }
+
+  virtual void Signal() {
+    UNIMPLEMENTED();
+  }
+ private:
+  void* data_;
+};
+
+
+Semaphore* OS::CreateSemaphore(int count) {
+  UNIMPLEMENTED();
+  return new NullSemaphore(count);
+}
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+class ProfileSampler::PlatformData : public Malloced {
+ public:
+  PlatformData() {
+    UNIMPLEMENTED();
+  }
+};
+
+
+ProfileSampler::ProfileSampler(int interval) {
+  UNIMPLEMENTED();
+  // Shared setup follows.
+  data_ = new PlatformData();
+  interval_ = interval;
+  active_ = false;
+}
+
+
+ProfileSampler::~ProfileSampler() {
+  UNIMPLEMENTED();
+  // Shared tear down follows.
+  delete data_;
+}
+
+
+void ProfileSampler::Start() {
+  UNIMPLEMENTED();
+}
+
+
+void ProfileSampler::Stop() {
+  UNIMPLEMENTED();
+}
+
+#endif  // ENABLE_LOGGING_AND_PROFILING
+
+} }  // namespace v8::internal
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
new file mode 100644
index 0000000..b8fe967
--- /dev/null
+++ b/src/platform-posix.cc
@@ -0,0 +1,367 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Platform specific code for POSIX goes here. This is not a platform on its
+// own but contains the parts which are the same across the POSIX platforms
+// Linux, Mac OS and FreeBSD.
+
+#include <unistd.h>
+#include <errno.h>
+#include <time.h>
+
+#include <sys/socket.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <sys/types.h>
+
+#include <arpa/inet.h>
+#include <netinet/in.h>
+#include <netdb.h>
+
+#if defined(ANDROID)
+#define LOG_TAG "v8"
+#include <utils/Log.h>  // LOG_PRI_VA
+#endif
+
+#include "v8.h"
+
+#include "platform.h"
+
+namespace v8 {
+namespace internal {
+
+
+// ----------------------------------------------------------------------------
+// POSIX date/time support.
+//
+
+int OS::GetUserTime(uint32_t* secs,  uint32_t* usecs) {
+  struct rusage usage;
+
+  if (getrusage(RUSAGE_SELF, &usage) < 0) return -1;
+  *secs = usage.ru_utime.tv_sec;
+  *usecs = usage.ru_utime.tv_usec;
+  return 0;
+}
+
+
+double OS::TimeCurrentMillis() {
+  struct timeval tv;
+  if (gettimeofday(&tv, NULL) < 0) return 0.0;
+  return (static_cast<double>(tv.tv_sec) * 1000) +
+         (static_cast<double>(tv.tv_usec) / 1000);
+}
+
+
+int64_t OS::Ticks() {
+  // gettimeofday has microsecond resolution.
+  struct timeval tv;
+  if (gettimeofday(&tv, NULL) < 0)
+    return 0;
+  return (static_cast<int64_t>(tv.tv_sec) * 1000000) + tv.tv_usec;
+}
+
+
+const char* OS::LocalTimezone(double time) {
+  if (isnan(time)) return "";
+  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+  struct tm* t = localtime(&tv);
+  if (NULL == t) return "";
+  return t->tm_zone;
+}
+
+
+double OS::DaylightSavingsOffset(double time) {
+  if (isnan(time)) return nan_value();
+  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+  struct tm* t = localtime(&tv);
+  if (NULL == t) return nan_value();
+  return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
+}
+
+
+double OS::LocalTimeOffset() {
+  time_t tv = time(NULL);
+  struct tm* t = localtime(&tv);
+  // tm_gmtoff includes any daylight savings offset, so subtract it.
+  return static_cast<double>(t->tm_gmtoff * msPerSecond -
+                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
+// ----------------------------------------------------------------------------
+// POSIX stdio support.
+//
+
+FILE* OS::FOpen(const char* path, const char* mode) {
+  return fopen(path, mode);
+}
+
+
+const char* OS::LogFileOpenMode = "w";
+
+
+void OS::Print(const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  VPrint(format, args);
+  va_end(args);
+}
+
+
+void OS::VPrint(const char* format, va_list args) {
+#if defined(ANDROID)
+  LOG_PRI_VA(ANDROID_LOG_INFO, LOG_TAG, format, args);
+#else
+  vprintf(format, args);
+#endif
+}
+
+
+void OS::PrintError(const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  VPrintError(format, args);
+  va_end(args);
+}
+
+
+void OS::VPrintError(const char* format, va_list args) {
+#if defined(ANDROID)
+  LOG_PRI_VA(ANDROID_LOG_ERROR, LOG_TAG, format, args);
+#else
+  vfprintf(stderr, format, args);
+#endif
+}
+
+
+int OS::SNPrintF(Vector<char> str, const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  int result = VSNPrintF(str, format, args);
+  va_end(args);
+  return result;
+}
+
+
+int OS::VSNPrintF(Vector<char> str,
+                  const char* format,
+                  va_list args) {
+  int n = vsnprintf(str.start(), str.length(), format, args);
+  if (n < 0 || n >= str.length()) {
+    str[str.length() - 1] = '\0';
+    return -1;
+  } else {
+    return n;
+  }
+}
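+// Usage sketch (buffer and milliseconds are illustrative locals): format into
+// a fixed buffer, with the terminating NUL guaranteed even on truncation:
+//   char buffer[128];
+//   OS::SNPrintF(MutableCStrVector(buffer, sizeof(buffer)), "%d ms", milliseconds);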
+
+
+// ----------------------------------------------------------------------------
+// POSIX string support.
+//
+
+char* OS::StrChr(char* str, int c) {
+  return strchr(str, c);
+}
+
+
+void OS::StrNCpy(Vector<char> dest, const char* src, size_t n) {
+  strncpy(dest.start(), src, n);
+}
+
+
+// ----------------------------------------------------------------------------
+// POSIX socket support.
+//
+
+class POSIXSocket : public Socket {
+ public:
+  explicit POSIXSocket() {
+    // Create the socket.
+    socket_ = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+  }
+  explicit POSIXSocket(int socket): socket_(socket) { }
+  virtual ~POSIXSocket() { Shutdown(); }
+
+  // Server initialization.
+  bool Bind(const int port);
+  bool Listen(int backlog) const;
+  Socket* Accept() const;
+
+  // Client initialization.
+  bool Connect(const char* host, const char* port);
+
+  // Shutdown socket for both read and write.
+  bool Shutdown();
+
+  // Data transmission.
+  int Send(const char* data, int len) const;
+  int Receive(char* data, int len) const;
+
+  bool SetReuseAddress(bool reuse_address);
+
+  bool IsValid() const { return socket_ != -1; }
+
+ private:
+  int socket_;
+};
+
+
+bool POSIXSocket::Bind(const int port) {
+  if (!IsValid())  {
+    return false;
+  }
+
+  sockaddr_in addr;
+  memset(&addr, 0, sizeof(addr));
+  addr.sin_family = AF_INET;
+  addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+  addr.sin_port = htons(port);
+  int status = bind(socket_,
+                    reinterpret_cast<struct sockaddr *>(&addr),
+                    sizeof(addr));
+  return status == 0;
+}
+
+
+bool POSIXSocket::Listen(int backlog) const {
+  if (!IsValid()) {
+    return false;
+  }
+
+  int status = listen(socket_, backlog);
+  return status == 0;
+}
+
+
+Socket* POSIXSocket::Accept() const {
+  if (!IsValid()) {
+    return NULL;
+  }
+
+  int socket = accept(socket_, NULL, NULL);
+  if (socket == -1) {
+    return NULL;
+  } else {
+    return new POSIXSocket(socket);
+  }
+}
+
+
+bool POSIXSocket::Connect(const char* host, const char* port) {
+  if (!IsValid()) {
+    return false;
+  }
+
+  // Lookup host and port.
+  struct addrinfo *result = NULL;
+  struct addrinfo hints;
+  memset(&hints, 0, sizeof(addrinfo));
+  hints.ai_family = AF_INET;
+  hints.ai_socktype = SOCK_STREAM;
+  hints.ai_protocol = IPPROTO_TCP;
+  int status = getaddrinfo(host, port, &hints, &result);
+  if (status != 0) {
+    return false;
+  }
+
+  // Connect.
+  status = connect(socket_, result->ai_addr, result->ai_addrlen);
+  freeaddrinfo(result);
+  return status == 0;
+}
+
+
+bool POSIXSocket::Shutdown() {
+  if (IsValid()) {
+    // Shutdown socket for both read and write.
+    int status = shutdown(socket_, SHUT_RDWR);
+    close(socket_);
+    socket_ = -1;
+    return status == 0;
+  }
+  return true;
+}
+
+
+int POSIXSocket::Send(const char* data, int len) const {
+  int status = send(socket_, data, len, 0);
+  return status;
+}
+
+
+int POSIXSocket::Receive(char* data, int len) const {
+  int status = recv(socket_, data, len, 0);
+  return status;
+}
+
+
+bool POSIXSocket::SetReuseAddress(bool reuse_address) {
+  int on = reuse_address ? 1 : 0;
+  int status = setsockopt(socket_, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
+  return status == 0;
+}
+
+
+bool Socket::Setup() {
+  // Nothing to do on POSIX.
+  return true;
+}
+
+
+int Socket::LastError() {
+  return errno;
+}
+
+
+uint16_t Socket::HToN(uint16_t value) {
+  return htons(value);
+}
+
+
+uint16_t Socket::NToH(uint16_t value) {
+  return ntohs(value);
+}
+
+
+uint32_t Socket::HToN(uint32_t value) {
+  return htonl(value);
+}
+
+
+uint32_t Socket::NToH(uint32_t value) {
+  return ntohl(value);
+}
+
+
+Socket* OS::CreateSocket() {
+  return new POSIXSocket();
+}
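+// Usage sketch (the port and payload are illustrative): a server-side caller
+// would typically do
+//   Socket* server = OS::CreateSocket();
+//   server->SetReuseAddress(true);
+//   if (server->Bind(5858) && server->Listen(1)) {
+//     Socket* client = server->Accept();  // blocks until a client connects
+//     if (client != NULL) client->Send("hi", 2);
+//   }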
+
+
+} }  // namespace v8::internal
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
new file mode 100644
index 0000000..d4a183d
--- /dev/null
+++ b/src/platform-win32.cc
@@ -0,0 +1,1890 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Platform specific code for Win32.
+#ifndef WIN32_LEAN_AND_MEAN
+// WIN32_LEAN_AND_MEAN implies NOCRYPT and NOGDI.
+#define WIN32_LEAN_AND_MEAN
+#endif
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif
+#ifndef NOKERNEL
+#define NOKERNEL
+#endif
+#ifndef NOUSER
+#define NOUSER
+#endif
+#ifndef NOSERVICE
+#define NOSERVICE
+#endif
+#ifndef NOSOUND
+#define NOSOUND
+#endif
+#ifndef NOMCX
+#define NOMCX
+#endif
+// Require Windows 2000 or higher (this is required for the IsDebuggerPresent
+// function to be present).
+#ifndef _WIN32_WINNT
+#define _WIN32_WINNT 0x500
+#endif
+
+#include <windows.h>
+
+#include <time.h>  // For LocalOffset() implementation.
+#include <mmsystem.h>  // For timeGetTime().
+#ifdef __MINGW32__
+// Require Windows XP or higher when compiling with MinGW. This is for MinGW
+// header files to expose getaddrinfo.
+#undef _WIN32_WINNT
+#define _WIN32_WINNT 0x501
+#endif  // __MINGW32__
+#ifndef __MINGW32__
+#include <dbghelp.h>  // For SymLoadModule64 et al.
+#endif  // __MINGW32__
+#include <limits.h>  // For INT_MAX et al.
+#include <tlhelp32.h>  // For Module32First et al.
+
+// These additional WIN32 includes have to be right here as the #undef's below
+// make it impossible to have them elsewhere.
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#include <process.h>  // for _beginthreadex()
+#include <stdlib.h>
+
+#undef VOID
+#undef DELETE
+#undef IN
+#undef THIS
+#undef CONST
+#undef NAN
+#undef GetObject
+#undef CreateMutex
+#undef CreateSemaphore
+
+#include "v8.h"
+
+#include "platform.h"
+
+// Extra POSIX/ANSI routines for Win32 when using Visual Studio C++. Please
+// refer to The Open Group Base Specification for specification of the correct
+// semantics for these functions.
+// (http://www.opengroup.org/onlinepubs/000095399/)
+#ifdef _MSC_VER
+
+namespace v8 {
+namespace internal {
+
+// Test for finite value - usually defined in math.h
+int isfinite(double x) {
+  return _finite(x);
+}
+
+}  // namespace internal
+}  // namespace v8
+
+// Test for a NaN (not a number) value - usually defined in math.h
+int isnan(double x) {
+  return _isnan(x);
+}
+
+
+// Test for infinity - usually defined in math.h
+int isinf(double x) {
+  return (_fpclass(x) & (_FPCLASS_PINF | _FPCLASS_NINF)) != 0;
+}
+
+
+// Test if x is less than y and neither is NaN - usually defined in math.h
+int isless(double x, double y) {
+  return isnan(x) || isnan(y) ? 0 : x < y;
+}
+
+
+// Test if x is greater than y and neither is NaN - usually defined in math.h
+int isgreater(double x, double y) {
+  return isnan(x) || isnan(y) ? 0 : x > y;
+}
+
+
+// Classify floating point number - usually defined in math.h
+int fpclassify(double x) {
+  // Use the MS-specific _fpclass() for classification.
+  int flags = _fpclass(x);
+
+  // Determine class. We cannot use a switch statement because
+  // the _FPCLASS_ constants are defined as flags.
+  if (flags & (_FPCLASS_PN | _FPCLASS_NN)) return FP_NORMAL;
+  if (flags & (_FPCLASS_PZ | _FPCLASS_NZ)) return FP_ZERO;
+  if (flags & (_FPCLASS_PD | _FPCLASS_ND)) return FP_SUBNORMAL;
+  if (flags & (_FPCLASS_PINF | _FPCLASS_NINF)) return FP_INFINITE;
+
+  // All cases should be covered by the code above.
+  ASSERT(flags & (_FPCLASS_SNAN | _FPCLASS_QNAN));
+  return FP_NAN;
+}
+
+
+// Test sign - usually defined in math.h
+int signbit(double x) {
+  // We need to take care of the special case of both positive
+  // and negative versions of zero.
+  if (x == 0)
+    return _fpclass(x) & _FPCLASS_NZ;
+  else
+    return x < 0;
+}
+
+
+// Case-insensitive bounded string comparisons. Use _strnicmp() on Win32. Usually
+// defined in strings.h.
+int strncasecmp(const char* s1, const char* s2, int n) {
+  return _strnicmp(s1, s2, n);
+}
+
+#endif  // _MSC_VER
+
+
+// Extra functions for MinGW. Most of these are the _s functions which are in
+// the Microsoft Visual Studio C++ CRT.
+#ifdef __MINGW32__
+
+int localtime_s(tm* out_tm, const time_t* time) {
+  tm* posix_local_time_struct = localtime(time);
+  if (posix_local_time_struct == NULL) return 1;
+  *out_tm = *posix_local_time_struct;
+  return 0;
+}
+
+
+// Not sure this is the correct interpretation of _mkgmtime
+time_t _mkgmtime(tm* timeptr) {
+  return mktime(timeptr);
+}
+
+
+int fopen_s(FILE** pFile, const char* filename, const char* mode) {
+  *pFile = fopen(filename, mode);
+  return *pFile != NULL ? 0 : 1;
+}
+
+
+int _vsnprintf_s(char* buffer, size_t sizeOfBuffer, size_t count,
+                 const char* format, va_list argptr) {
+  return _vsnprintf(buffer, sizeOfBuffer, format, argptr);
+}
+#define _TRUNCATE 0
+
+
+int strncpy_s(char* strDest, size_t numberOfElements,
+              const char* strSource, size_t count) {
+  strncpy(strDest, strSource, count);
+  return 0;
+}
+
+#endif  // __MINGW32__
+
+// Generate a pseudo-random number in the range 0-2^31-1. Usually
+// defined in stdlib.h. Missing in both Microsoft Visual Studio C++ and MinGW.
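+// Note that rand() only returns values up to RAND_MAX (0x7fff on the Microsoft
+// CRT), so the range produced here is narrower than that of POSIX random().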
+int random() {
+  return rand();
+}
+
+
+namespace v8 {
+namespace internal {
+
+double ceiling(double x) {
+  return ceil(x);
+}
+
+// ----------------------------------------------------------------------------
+// The Time class represents time on win32. A timestamp is represented as
+// a 64-bit integer in 100-nanosecond intervals since January 1, 1601 (UTC).
+// JavaScript timestamps are represented as doubles in milliseconds since
+// 00:00:00 UTC, January 1, 1970.
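+// For reference, kTimeEpoc below (116444736000000000) is the number of
+// 100-nanosecond intervals between January 1, 1601 and January 1, 1970, so a
+// JavaScript time of 0 ms corresponds to a Win32 timestamp of kTimeEpoc.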
+
+class Time {
+ public:
+  // Constructors.
+  Time();
+  explicit Time(double jstime);
+  Time(int year, int mon, int day, int hour, int min, int sec);
+
+  // Convert timestamp to JavaScript representation.
+  double ToJSTime();
+
+  // Set timestamp to current time.
+  void SetToCurrentTime();
+
+  // Returns the local timezone offset in milliseconds east of UTC. This is
+  // the number of milliseconds you must add to UTC to get local time, i.e.
+  // LocalOffset(CET) = 3600000 and LocalOffset(PST) = -28800000. This
+  // routine also takes into account whether daylight saving is in effect
+  // at the time.
+  int64_t LocalOffset();
+
+  // Returns the daylight savings time offset for the time in milliseconds.
+  int64_t DaylightSavingsOffset();
+
+  // Returns a string identifying the current timezone for the
+  // timestamp taking into account daylight saving.
+  char* LocalTimezone();
+
+ private:
+  // Constants for time conversion.
+  static const int64_t kTimeEpoc = 116444736000000000LL;
+  static const int64_t kTimeScaler = 10000;
+  static const int64_t kMsPerMinute = 60000;
+
+  // Constants for timezone information.
+  static const int kTzNameSize = 128;
+  static const bool kShortTzNames = false;
+
+  // Timezone information. We need to have static buffers for the
+  // timezone names because we return pointers to these in
+  // LocalTimezone().
+  static bool tz_initialized_;
+  static TIME_ZONE_INFORMATION tzinfo_;
+  static char std_tz_name_[kTzNameSize];
+  static char dst_tz_name_[kTzNameSize];
+
+  // Initialize the timezone information (if not already done).
+  static void TzSet();
+
+  // Guess the name of the timezone from the bias.
+  static const char* GuessTimezoneNameFromBias(int bias);
+
+  // Return whether or not daylight savings time is in effect at this time.
+  bool InDST();
+
+  // Return the difference (in milliseconds) between this timestamp and
+  // another timestamp.
+  int64_t Diff(Time* other);
+
+  // Accessor for FILETIME representation.
+  FILETIME& ft() { return time_.ft_; }
+
+  // Accessor for integer representation.
+  int64_t& t() { return time_.t_; }
+
+  // Although win32 uses 64-bit integers for representing timestamps,
+  // these are packed into a FILETIME structure. The FILETIME structure
+  // is just a struct representing a 64-bit integer. The TimeStamp union
+  // allows access to both a FILETIME and an integer representation of
+  // the timestamp.
+  union TimeStamp {
+    FILETIME ft_;
+    int64_t t_;
+  };
+
+  TimeStamp time_;
+};
+
+// Static variables.
+bool Time::tz_initialized_ = false;
+TIME_ZONE_INFORMATION Time::tzinfo_;
+char Time::std_tz_name_[kTzNameSize];
+char Time::dst_tz_name_[kTzNameSize];
+
+
+// Initialize timestamp to start of epoch.
+Time::Time() {
+  t() = 0;
+}
+
+
+// Initialize timestamp from a JavaScript timestamp.
+Time::Time(double jstime) {
+  t() = static_cast<int64_t>(jstime) * kTimeScaler + kTimeEpoc;
+}
+
+
+// Initialize timestamp from date/time components.
+Time::Time(int year, int mon, int day, int hour, int min, int sec) {
+  SYSTEMTIME st;
+  st.wYear = year;
+  st.wMonth = mon;
+  st.wDay = day;
+  st.wHour = hour;
+  st.wMinute = min;
+  st.wSecond = sec;
+  st.wMilliseconds = 0;
+  SystemTimeToFileTime(&st, &ft());
+}
+
+
+// Convert timestamp to JavaScript timestamp.
+double Time::ToJSTime() {
+  return static_cast<double>((t() - kTimeEpoc) / kTimeScaler);
+}
+
+
+// Guess the name of the timezone from the bias.
+// The guess is very biased towards the northern hemisphere.
+const char* Time::GuessTimezoneNameFromBias(int bias) {
+  static const int kHour = 60;
+  switch (-bias) {
+    case -9*kHour: return "Alaska";
+    case -8*kHour: return "Pacific";
+    case -7*kHour: return "Mountain";
+    case -6*kHour: return "Central";
+    case -5*kHour: return "Eastern";
+    case -4*kHour: return "Atlantic";
+    case  0*kHour: return "GMT";
+    case +1*kHour: return "Central Europe";
+    case +2*kHour: return "Eastern Europe";
+    case +3*kHour: return "Russia";
+    case +5*kHour + 30: return "India";
+    case +8*kHour: return "China";
+    case +9*kHour: return "Japan";
+    case +12*kHour: return "New Zealand";
+    default: return "Local";
+  }
+}
+
+
+// Initialize timezone information. The timezone information is obtained from
+// Windows. If we cannot get the timezone information we fall back to CET.
+// Please note that this code is not thread-safe.
+void Time::TzSet() {
+  // Just return if timezone information has already been initialized.
+  if (tz_initialized_) return;
+
+  // Initialize POSIX time zone data.
+  _tzset();
+  // Obtain timezone information from operating system.
+  memset(&tzinfo_, 0, sizeof(tzinfo_));
+  if (GetTimeZoneInformation(&tzinfo_) == TIME_ZONE_ID_INVALID) {
+    // If we cannot get timezone information we fall back to CET.
+    tzinfo_.Bias = -60;
+    tzinfo_.StandardDate.wMonth = 10;
+    tzinfo_.StandardDate.wDay = 5;
+    tzinfo_.StandardDate.wHour = 3;
+    tzinfo_.StandardBias = 0;
+    tzinfo_.DaylightDate.wMonth = 3;
+    tzinfo_.DaylightDate.wDay = 5;
+    tzinfo_.DaylightDate.wHour = 2;
+    tzinfo_.DaylightBias = -60;
+  }
+
+  // Make standard and DST timezone names.
+  OS::SNPrintF(Vector<char>(std_tz_name_, kTzNameSize),
+               "%S",
+               tzinfo_.StandardName);
+  std_tz_name_[kTzNameSize - 1] = '\0';
+  OS::SNPrintF(Vector<char>(dst_tz_name_, kTzNameSize),
+               "%S",
+               tzinfo_.DaylightName);
+  dst_tz_name_[kTzNameSize - 1] = '\0';
+
+  // If the OS returned an empty string or a resource id (like "@tzres.dll,-211")
+  // simply guess the name from the UTC bias of the timezone.
+  // To properly resolve the resource identifier requires a library load,
+  // which is not possible in a sandbox.
+  if (std_tz_name_[0] == '\0' || std_tz_name_[0] == '@') {
+    OS::SNPrintF(Vector<char>(std_tz_name_, kTzNameSize - 1),
+                 "%s Standard Time",
+                 GuessTimezoneNameFromBias(tzinfo_.Bias));
+  }
+  if (dst_tz_name_[0] == '\0' || dst_tz_name_[0] == '@') {
+    OS::SNPrintF(Vector<char>(dst_tz_name_, kTzNameSize - 1),
+                 "%s Daylight Time",
+                 GuessTimezoneNameFromBias(tzinfo_.Bias));
+  }
+
+  // Timezone information initialized.
+  tz_initialized_ = true;
+}
+
+
+// Return the difference in milliseconds between this and another timestamp.
+int64_t Time::Diff(Time* other) {
+  return (t() - other->t()) / kTimeScaler;
+}
+
+
+// Set timestamp to current time.
+void Time::SetToCurrentTime() {
+  // The default GetSystemTimeAsFileTime has a ~15.5ms resolution.
+  // Because we're fast, we like fast timers which have at least a
+  // 1ms resolution.
+  //
+  // timeGetTime() provides 1ms granularity when combined with
+  // timeBeginPeriod().  If the host application for v8 wants fast
+  // timers, it can use timeBeginPeriod to increase the resolution.
+  //
+  // Using timeGetTime() has a drawback because it is a 32-bit value
+  // and hence rolls over every ~49 days.
+  //
+  // To use the clock, we use GetSystemTimeAsFileTime as our base;
+  // and then use timeGetTime to extrapolate current time from the
+  // start time.  To deal with rollovers, we resync the clock
+  // any time more than kMaxClockElapsedTime has passed or
+  // whenever timeGetTime rolls over.
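+  // (A 32-bit millisecond counter wraps after 2^32 ms, which is roughly 49.7
+  // days.)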
+
+  static bool initialized = false;
+  static TimeStamp init_time;
+  static DWORD init_ticks;
+  static const int64_t kHundredNanosecondsPerSecond = 10000000;
+  static const int64_t kMaxClockElapsedTime =
+      60*kHundredNanosecondsPerSecond;  // 1 minute
+
+  // If we are uninitialized, we need to resync the clock.
+  bool needs_resync = !initialized;
+
+  // Get the current time.
+  TimeStamp time_now;
+  GetSystemTimeAsFileTime(&time_now.ft_);
+  DWORD ticks_now = timeGetTime();
+
+  // Check if we need to resync due to clock rollover.
+  needs_resync |= ticks_now < init_ticks;
+
+  // Check if we need to resync due to elapsed time.
+  needs_resync |= (time_now.t_ - init_time.t_) > kMaxClockElapsedTime;
+
+  // Resync the clock if necessary.
+  if (needs_resync) {
+    GetSystemTimeAsFileTime(&init_time.ft_);
+    init_ticks = ticks_now = timeGetTime();
+    initialized = true;
+  }
+
+  // Finally, compute the actual time.  Why is this so hard.
+  DWORD elapsed = ticks_now - init_ticks;
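+  // 'elapsed' is in milliseconds; multiplying by 10000 converts it to the
+  // 100-nanosecond units used by FILETIME.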
+  this->time_.t_ = init_time.t_ + (static_cast<int64_t>(elapsed) * 10000);
+}
+
+
+// Return the local timezone offset in milliseconds east of UTC. This
+// takes into account whether daylight saving is in effect at the time.
+// Only times in the 32-bit Unix range may be passed to this function.
+// Also, adding the time-zone offset to the input must not overflow.
+// The function EquivalentTime() in date-delay.js guarantees this.
+int64_t Time::LocalOffset() {
+  // Initialize timezone information, if needed.
+  TzSet();
+
+  Time rounded_to_second(*this);
+  rounded_to_second.t() = rounded_to_second.t() / 1000 / kTimeScaler *
+      1000 * kTimeScaler;
+  // Convert to local time using POSIX localtime function.
+  // Windows XP Service Pack 3 made SystemTimeToTzSpecificLocalTime()
+  // very slow.  Other browsers use localtime().
+
+  // Convert from JavaScript milliseconds past 1/1/1970 0:00:00 to
+  // POSIX seconds past 1/1/1970 0:00:00.
+  double unchecked_posix_time = rounded_to_second.ToJSTime() / 1000;
+  if (unchecked_posix_time > INT_MAX || unchecked_posix_time < 0) {
+    return 0;
+  }
+  // Because _USE_32BIT_TIME_T is defined, time_t is a 32-bit int.
+  time_t posix_time = static_cast<time_t>(unchecked_posix_time);
+
+  // Convert to local time, as struct with fields for day, hour, year, etc.
+  tm posix_local_time_struct;
+  if (localtime_s(&posix_local_time_struct, &posix_time)) return 0;
+  // Convert local time in struct to POSIX time as if it were a UTC time.
+  time_t local_posix_time = _mkgmtime(&posix_local_time_struct);
+  Time localtime(1000.0 * local_posix_time);
+
+  return localtime.Diff(&rounded_to_second);
+}
+
+
+// Return whether or not daylight savings time is in effect at this time.
+bool Time::InDST() {
+  // Initialize timezone information, if needed.
+  TzSet();
+
+  // Determine if DST is in effect at the specified time.
+  bool in_dst = false;
+  if (tzinfo_.StandardDate.wMonth != 0 || tzinfo_.DaylightDate.wMonth != 0) {
+    // Get the local timezone offset for the timestamp in milliseconds.
+    int64_t offset = LocalOffset();
+
+    // Compute the offset for DST. The bias parameters in the timezone info
+    // are specified in minutes. These must be converted to milliseconds.
+    int64_t dstofs = -(tzinfo_.Bias + tzinfo_.DaylightBias) * kMsPerMinute;
+
+    // If the local time offset equals the timezone bias plus the daylight
+    // bias then DST is in effect.
+    in_dst = offset == dstofs;
+  }
+
+  return in_dst;
+}
+
+
+// Return the daylight savings time offset for this time.
+int64_t Time::DaylightSavingsOffset() {
+  return InDST() ? 60 * kMsPerMinute : 0;
+}
+
+
+// Returns a string identifying the current timezone for the
+// timestamp taking into account daylight saving.
+char* Time::LocalTimezone() {
+  // Return the standard or DST time zone name based on whether daylight
+  // saving is in effect at the given time.
+  return InDST() ? dst_tz_name_ : std_tz_name_;
+}
+
+
+void OS::Setup() {
+  // Seed the random number generator.
+  // Convert the current time to a 64-bit integer first, before converting it
+  // to an unsigned. Going directly can cause an overflow and the seed to be
+  // set to all ones. The seed will be identical for different instances that
+  // call this setup code within the same millisecond.
+  uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
+  srand(static_cast<unsigned int>(seed));
+}
+
+
+// Returns the accumulated user time for the thread.
+int OS::GetUserTime(uint32_t* secs,  uint32_t* usecs) {
+  FILETIME dummy;
+  uint64_t usertime;
+
+  // Get the amount of time that the thread has executed in user mode.
+  if (!GetThreadTimes(GetCurrentThread(), &dummy, &dummy, &dummy,
+                      reinterpret_cast<FILETIME*>(&usertime))) return -1;
+
+  // Adjust the resolution to micro-seconds.
+  usertime /= 10;
+
+  // Convert to seconds and microseconds
+  *secs = static_cast<uint32_t>(usertime / 1000000);
+  *usecs = static_cast<uint32_t>(usertime % 1000000);
+  return 0;
+}
+
+
+// Returns current time as the number of milliseconds since
+// 00:00:00 UTC, January 1, 1970.
+double OS::TimeCurrentMillis() {
+  Time t;
+  t.SetToCurrentTime();
+  return t.ToJSTime();
+}
+
+// Returns the tick counter based on timeGetTime.
+int64_t OS::Ticks() {
+  return timeGetTime() * 1000;  // Convert to microseconds.
+}
+
+
+// Returns a string identifying the current timezone taking into
+// account daylight saving.
+const char* OS::LocalTimezone(double time) {
+  return Time(time).LocalTimezone();
+}
+
+
+// Returns the local time offset in milliseconds east of UTC without
+// taking daylight savings time into account.
+double OS::LocalTimeOffset() {
+  // Use current time, rounded to the millisecond.
+  Time t(TimeCurrentMillis());
+  // Time::LocalOffset includes any daylight savings offset, so subtract it.
+  return static_cast<double>(t.LocalOffset() - t.DaylightSavingsOffset());
+}
+
+
+// Returns the daylight savings offset in milliseconds for the given
+// time.
+double OS::DaylightSavingsOffset(double time) {
+  int64_t offset = Time(time).DaylightSavingsOffset();
+  return static_cast<double>(offset);
+}
+
+
+// ----------------------------------------------------------------------------
+// Win32 console output.
+//
+// If a Win32 application is linked as a console application it has a normal
+// standard output and standard error. In this case normal printf works fine
+// for output. However, if the application is linked as a GUI application,
+// the process doesn't have a console, and therefore (debugging) output is lost.
+// This is the case if we are embedded in a windows program (like a browser).
+// In order to be able to get debug output in this case we use the debugging
+// facility OutputDebugString. This output goes to the active debugger for the
+// process (if any). Otherwise the output can be monitored using DBMON.EXE.
+
+enum OutputMode {
+  UNKNOWN,  // Output method has not yet been determined.
+  CONSOLE,  // Output is written to stdout.
+  ODS       // Output is written to debug facility.
+};
+
+static OutputMode output_mode = UNKNOWN;  // Current output mode.
+
+
+// Determine if the process has a console for output.
+static bool HasConsole() {
+  // Only check the first time. Possible race conditions are not a problem,
+  // because all threads will eventually determine the same mode.
+  if (output_mode == UNKNOWN) {
+    // We cannot just check that the standard output is attached to a console
+    // because this would fail if output is redirected to a file. Therefore we
+    // say that a process does not have an output console if either the
+    // standard output handle is invalid or its file type is unknown.
+    if (GetStdHandle(STD_OUTPUT_HANDLE) != INVALID_HANDLE_VALUE &&
+        GetFileType(GetStdHandle(STD_OUTPUT_HANDLE)) != FILE_TYPE_UNKNOWN)
+      output_mode = CONSOLE;
+    else
+      output_mode = ODS;
+  }
+  return output_mode == CONSOLE;
+}
+
+
+static void VPrintHelper(FILE* stream, const char* format, va_list args) {
+  if (HasConsole()) {
+    vfprintf(stream, format, args);
+  } else {
+    // It is important to use safe print here in order to avoid
+    // overflowing the buffer. We might truncate the output, but this
+    // does not crash.
+    EmbeddedVector<char, 4096> buffer;
+    OS::VSNPrintF(buffer, format, args);
+    OutputDebugStringA(buffer.start());
+  }
+}
+
+
+FILE* OS::FOpen(const char* path, const char* mode) {
+  FILE* result;
+  if (fopen_s(&result, path, mode) == 0) {
+    return result;
+  } else {
+    return NULL;
+  }
+}
+
+
+// Open log file in binary mode to avoid \n -> \r\n conversion.
+const char* OS::LogFileOpenMode = "wb";
+
+
+// Print (debug) message to console.
+void OS::Print(const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  VPrint(format, args);
+  va_end(args);
+}
+
+
+void OS::VPrint(const char* format, va_list args) {
+  VPrintHelper(stdout, format, args);
+}
+
+
+// Print error message to console.
+void OS::PrintError(const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  VPrintError(format, args);
+  va_end(args);
+}
+
+
+void OS::VPrintError(const char* format, va_list args) {
+  VPrintHelper(stderr, format, args);
+}
+
+
+int OS::SNPrintF(Vector<char> str, const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  int result = VSNPrintF(str, format, args);
+  va_end(args);
+  return result;
+}
+
+
+int OS::VSNPrintF(Vector<char> str, const char* format, va_list args) {
+  int n = _vsnprintf_s(str.start(), str.length(), _TRUNCATE, format, args);
+  // Make sure to zero-terminate the string if the output was
+  // truncated or if there was an error.
+  if (n < 0 || n >= str.length()) {
+    str[str.length() - 1] = '\0';
+    return -1;
+  } else {
+    return n;
+  }
+}
+
+
+char* OS::StrChr(char* str, int c) {
+  return const_cast<char*>(strchr(str, c));
+}
+
+
+void OS::StrNCpy(Vector<char> dest, const char* src, size_t n) {
+  int result = strncpy_s(dest.start(), dest.length(), src, n);
+  USE(result);
+  ASSERT(result == 0);
+}
+
+
+// We keep the lowest and highest addresses mapped as a quick way of
+// determining that pointers are outside the heap (used mostly in assertions
+// and verification).  The estimate is conservative, i.e., not all addresses in
+// 'allocated' space are actually allocated to our heap.  The range is
+// [lowest, highest), inclusive on the low end and exclusive on the high end.
+static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
+static void* highest_ever_allocated = reinterpret_cast<void*>(0);
+
+
+static void UpdateAllocatedSpaceLimits(void* address, int size) {
+  lowest_ever_allocated = Min(lowest_ever_allocated, address);
+  highest_ever_allocated =
+      Max(highest_ever_allocated,
+          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
+}
+
+
+bool OS::IsOutsideAllocatedSpace(void* pointer) {
+  if (pointer < lowest_ever_allocated || pointer >= highest_ever_allocated)
+    return true;
+  // Ask the Windows API
+  if (IsBadWritePtr(pointer, 1))
+    return true;
+  return false;
+}
+
+
+// Get the system's page size used by VirtualAlloc() or the next power
+// of two. The reason for always returning a power of two is that the
+// rounding up in OS::Allocate expects that.
+static size_t GetPageSize() {
+  static size_t page_size = 0;
+  if (page_size == 0) {
+    SYSTEM_INFO info;
+    GetSystemInfo(&info);
+    page_size = RoundUpToPowerOf2(info.dwPageSize);
+  }
+  return page_size;
+}
+
+
+// The allocation alignment is the guaranteed alignment for
+// VirtualAlloc'ed blocks of memory.
+size_t OS::AllocateAlignment() {
+  static size_t allocate_alignment = 0;
+  if (allocate_alignment == 0) {
+    SYSTEM_INFO info;
+    GetSystemInfo(&info);
+    allocate_alignment = info.dwAllocationGranularity;
+  }
+  return allocate_alignment;
+}
+
+
+void* OS::Allocate(const size_t requested,
+                   size_t* allocated,
+                   bool is_executable) {
+  // VirtualAlloc rounds allocated size to page size automatically.
+  size_t msize = RoundUp(requested, GetPageSize());
+
+  // Windows XP SP2 enables Data Execution Prevention (DEP).
+  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+  LPVOID mbase = VirtualAlloc(NULL, msize, MEM_COMMIT | MEM_RESERVE, prot);
+  if (mbase == NULL) {
+    LOG(StringEvent("OS::Allocate", "VirtualAlloc failed"));
+    return NULL;
+  }
+
+  ASSERT(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment()));
+
+  *allocated = msize;
+  UpdateAllocatedSpaceLimits(mbase, msize);
+  return mbase;
+}
+
+
+void OS::Free(void* address, const size_t size) {
+  // TODO(1240712): VirtualFree has a return value which is ignored here.
+  VirtualFree(address, 0, MEM_RELEASE);
+  USE(size);
+}
+
+
+#ifdef ENABLE_HEAP_PROTECTION
+
+void OS::Protect(void* address, size_t size) {
+  // TODO(1240712): VirtualProtect has a return value which is ignored here.
+  DWORD old_protect;
+  VirtualProtect(address, size, PAGE_READONLY, &old_protect);
+}
+
+
+void OS::Unprotect(void* address, size_t size, bool is_executable) {
+  // TODO(1240712): VirtualProtect has a return value which is ignored here.
+  DWORD new_protect = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+  DWORD old_protect;
+  VirtualProtect(address, size, new_protect, &old_protect);
+}
+
+#endif
+
+
+void OS::Sleep(int milliseconds) {
+  ::Sleep(milliseconds);
+}
+
+
+void OS::Abort() {
+  if (!IsDebuggerPresent()) {
+#ifdef _MSC_VER
+    // Make the MSVCRT do a silent abort.
+    _set_abort_behavior(0, _WRITE_ABORT_MSG);
+    _set_abort_behavior(0, _CALL_REPORTFAULT);
+#endif  // _MSC_VER
+    abort();
+  } else {
+    DebugBreak();
+  }
+}
+
+
+void OS::DebugBreak() {
+#ifdef _MSC_VER
+  __debugbreak();
+#else
+  ::DebugBreak();
+#endif
+}
+
+
+class Win32MemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+  Win32MemoryMappedFile(HANDLE file, HANDLE file_mapping, void* memory)
+    : file_(file), file_mapping_(file_mapping), memory_(memory) { }
+  virtual ~Win32MemoryMappedFile();
+  virtual void* memory() { return memory_; }
+ private:
+  HANDLE file_;
+  HANDLE file_mapping_;
+  void* memory_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+    void* initial) {
+  // Open a physical file
+  HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
+      FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, 0, NULL);
+  if (file == NULL) return NULL;
+  // Create a file mapping for the physical file
+  HANDLE file_mapping = CreateFileMapping(file, NULL,
+      PAGE_READWRITE, 0, static_cast<DWORD>(size), NULL);
+  if (file_mapping == NULL) return NULL;
+  // Map a view of the file into memory
+  void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
+  if (memory) memmove(memory, initial, size);
+  return new Win32MemoryMappedFile(file, file_mapping, memory);
+}
+
+
+Win32MemoryMappedFile::~Win32MemoryMappedFile() {
+  if (memory_ != NULL)
+    UnmapViewOfFile(memory_);
+  CloseHandle(file_mapping_);
+  CloseHandle(file_);
+}
+
+
+// The following code loads functions defined in DbgHelp.h and TlHelp32.h
+// dynamically. This is to avoid depending on dbghelp.dll and tlhelp32.dll when
+// running (the functions in tlhelp32.dll have been moved to kernel32.dll at
+// some point, so loading the functions defined in TlHelp32.h dynamically might
+// not be necessary any more - at least for some versions of Windows).
+
+// Function pointers to functions dynamically loaded from dbghelp.dll.
+#define DBGHELP_FUNCTION_LIST(V)  \
+  V(SymInitialize)                \
+  V(SymGetOptions)                \
+  V(SymSetOptions)                \
+  V(SymGetSearchPath)             \
+  V(SymLoadModule64)              \
+  V(StackWalk64)                  \
+  V(SymGetSymFromAddr64)          \
+  V(SymGetLineFromAddr64)         \
+  V(SymFunctionTableAccess64)     \
+  V(SymGetModuleBase64)
+
+// Function pointers to functions dynamically loaded from kernel32.dll
+// (declared in TlHelp32.h).
+#define TLHELP32_FUNCTION_LIST(V)  \
+  V(CreateToolhelp32Snapshot)      \
+  V(Module32FirstW)                \
+  V(Module32NextW)
+
+// Define the decoration to use for the type and variable name used for
+// dynamically loaded DLL functions.
+#define DLL_FUNC_TYPE(name) _##name##_
+#define DLL_FUNC_VAR(name) _##name
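+// For example, DLL_FUNC_TYPE(SymInitialize) expands to _SymInitialize_ and
+// DLL_FUNC_VAR(SymInitialize) expands to _SymInitialize.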
+
+// Define the type for each dynamically loaded DLL function. The function
+// definitions are copied from DbgHelp.h and TlHelp32.h. The IN and VOID macros
+// from the Windows include files are redefined here to have the function
+// definitions to be as close to the ones in the original .h files as possible.
+#ifndef IN
+#define IN
+#endif
+#ifndef VOID
+#define VOID void
+#endif
+
+// DbgHelp isn't supported on MinGW yet
+#ifndef __MINGW32__
+// DbgHelp.h functions.
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymInitialize))(IN HANDLE hProcess,
+                                                       IN PSTR UserSearchPath,
+                                                       IN BOOL fInvadeProcess);
+typedef DWORD (__stdcall *DLL_FUNC_TYPE(SymGetOptions))(VOID);
+typedef DWORD (__stdcall *DLL_FUNC_TYPE(SymSetOptions))(IN DWORD SymOptions);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetSearchPath))(
+    IN HANDLE hProcess,
+    OUT PSTR SearchPath,
+    IN DWORD SearchPathLength);
+typedef DWORD64 (__stdcall *DLL_FUNC_TYPE(SymLoadModule64))(
+    IN HANDLE hProcess,
+    IN HANDLE hFile,
+    IN PSTR ImageName,
+    IN PSTR ModuleName,
+    IN DWORD64 BaseOfDll,
+    IN DWORD SizeOfDll);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(StackWalk64))(
+    DWORD MachineType,
+    HANDLE hProcess,
+    HANDLE hThread,
+    LPSTACKFRAME64 StackFrame,
+    PVOID ContextRecord,
+    PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
+    PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
+    PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
+    PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetSymFromAddr64))(
+    IN HANDLE hProcess,
+    IN DWORD64 qwAddr,
+    OUT PDWORD64 pdwDisplacement,
+    OUT PIMAGEHLP_SYMBOL64 Symbol);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetLineFromAddr64))(
+    IN HANDLE hProcess,
+    IN DWORD64 qwAddr,
+    OUT PDWORD pdwDisplacement,
+    OUT PIMAGEHLP_LINE64 Line64);
+// DbgHelp.h typedefs. Implementation found in dbghelp.dll.
+typedef PVOID (__stdcall *DLL_FUNC_TYPE(SymFunctionTableAccess64))(
+    HANDLE hProcess,
+    DWORD64 AddrBase);  // DbgHelp.h typedef PFUNCTION_TABLE_ACCESS_ROUTINE64
+typedef DWORD64 (__stdcall *DLL_FUNC_TYPE(SymGetModuleBase64))(
+    HANDLE hProcess,
+    DWORD64 AddrBase);  // DbgHelp.h typedef PGET_MODULE_BASE_ROUTINE64
+
+// TlHelp32.h functions.
+typedef HANDLE (__stdcall *DLL_FUNC_TYPE(CreateToolhelp32Snapshot))(
+    DWORD dwFlags,
+    DWORD th32ProcessID);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(Module32FirstW))(HANDLE hSnapshot,
+                                                        LPMODULEENTRY32W lpme);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(Module32NextW))(HANDLE hSnapshot,
+                                                       LPMODULEENTRY32W lpme);
+
+#undef IN
+#undef VOID
+
+// Declare a variable for each dynamically loaded DLL function.
+#define DEF_DLL_FUNCTION(name) DLL_FUNC_TYPE(name) DLL_FUNC_VAR(name) = NULL;
+DBGHELP_FUNCTION_LIST(DEF_DLL_FUNCTION)
+TLHELP32_FUNCTION_LIST(DEF_DLL_FUNCTION)
+#undef DEF_DLL_FUNCTION
+
+// Load the functions. This function has a lot of "ugly" macros in order to
+// keep down code duplication.
+
+static bool LoadDbgHelpAndTlHelp32() {
+  static bool dbghelp_loaded = false;
+
+  if (dbghelp_loaded) return true;
+
+  HMODULE module;
+
+  // Load functions from the dbghelp.dll module.
+  module = LoadLibrary(TEXT("dbghelp.dll"));
+  if (module == NULL) {
+    return false;
+  }
+
+#define LOAD_DLL_FUNC(name)                                                 \
+  DLL_FUNC_VAR(name) =                                                      \
+      reinterpret_cast<DLL_FUNC_TYPE(name)>(GetProcAddress(module, #name));
+
+DBGHELP_FUNCTION_LIST(LOAD_DLL_FUNC)
+
+#undef LOAD_DLL_FUNC
+
+  // Load functions from the kernel32.dll module (the TlHelp32.h functions used
+  // to be in tlhelp32.dll but have now moved to kernel32.dll).
+  module = LoadLibrary(TEXT("kernel32.dll"));
+  if (module == NULL) {
+    return false;
+  }
+
+#define LOAD_DLL_FUNC(name)                                                 \
+  DLL_FUNC_VAR(name) =                                                      \
+      reinterpret_cast<DLL_FUNC_TYPE(name)>(GetProcAddress(module, #name));
+
+TLHELP32_FUNCTION_LIST(LOAD_DLL_FUNC)
+
+#undef LOAD_DLL_FUNC
+
+  // Check that all functions were loaded.
+  bool result =
+#define DLL_FUNC_LOADED(name) (DLL_FUNC_VAR(name) != NULL) &&
+
+DBGHELP_FUNCTION_LIST(DLL_FUNC_LOADED)
+TLHELP32_FUNCTION_LIST(DLL_FUNC_LOADED)
+
+#undef DLL_FUNC_LOADED
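+  // The DLL_FUNC_LOADED expansions above build an && chain which the 'true'
+  // below terminates, e.g. (_SymInitialize != NULL) && ... && true.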
+  true;
+
+  dbghelp_loaded = result;
+  return result;
+  // NOTE: The modules are never unloaded and will stay around until the
+  // application is closed.
+}
+
+
+// Load the symbols for generating stack traces.
+static bool LoadSymbols(HANDLE process_handle) {
+  static bool symbols_loaded = false;
+
+  if (symbols_loaded) return true;
+
+  BOOL ok;
+
+  // Initialize the symbol engine.
+  ok = _SymInitialize(process_handle,  // hProcess
+                      NULL,            // UserSearchPath
+                      FALSE);          // fInvadeProcess
+  if (!ok) return false;
+
+  DWORD options = _SymGetOptions();
+  options |= SYMOPT_LOAD_LINES;
+  options |= SYMOPT_FAIL_CRITICAL_ERRORS;
+  options = _SymSetOptions(options);
+
+  char buf[OS::kStackWalkMaxNameLen] = {0};
+  ok = _SymGetSearchPath(process_handle, buf, OS::kStackWalkMaxNameLen);
+  if (!ok) {
+    int err = GetLastError();
+    PrintF("%d\n", err);
+    return false;
+  }
+
+  HANDLE snapshot = _CreateToolhelp32Snapshot(
+      TH32CS_SNAPMODULE,       // dwFlags
+      GetCurrentProcessId());  // th32ProcessId
+  if (snapshot == INVALID_HANDLE_VALUE) return false;
+  MODULEENTRY32W module_entry;
+  module_entry.dwSize = sizeof(module_entry);  // Set the size of the structure.
+  BOOL cont = _Module32FirstW(snapshot, &module_entry);
+  while (cont) {
+    DWORD64 base;
+    // NOTE: the SymLoadModule64 function has the peculiarity of accepting
+    // both Unicode and ASCII strings even though the parameter is PSTR.
+    base = _SymLoadModule64(
+        process_handle,                                       // hProcess
+        0,                                                    // hFile
+        reinterpret_cast<PSTR>(module_entry.szExePath),       // ImageName
+        reinterpret_cast<PSTR>(module_entry.szModule),        // ModuleName
+        reinterpret_cast<DWORD64>(module_entry.modBaseAddr),  // BaseOfDll
+        module_entry.modBaseSize);                            // SizeOfDll
+    if (base == 0) {
+      int err = GetLastError();
+      if (err != ERROR_MOD_NOT_FOUND &&
+          err != ERROR_INVALID_HANDLE) return false;
+    }
+    LOG(SharedLibraryEvent(
+            module_entry.szExePath,
+            reinterpret_cast<unsigned int>(module_entry.modBaseAddr),
+            reinterpret_cast<unsigned int>(module_entry.modBaseAddr +
+                                           module_entry.modBaseSize)));
+    cont = _Module32NextW(snapshot, &module_entry);
+  }
+  CloseHandle(snapshot);
+
+  symbols_loaded = true;
+  return true;
+}
+
+
+void OS::LogSharedLibraryAddresses() {
+  // SharedLibraryEvents are logged when loading symbol information.
+  // Only the shared libraries loaded at the time of the call to
+  // LogSharedLibraryAddresses are logged.  DLLs loaded after
+  // initialization are not accounted for.
+  if (!LoadDbgHelpAndTlHelp32()) return;
+  HANDLE process_handle = GetCurrentProcess();
+  LoadSymbols(process_handle);
+}
+
+
+// Walk the stack using the facilities in dbghelp.dll and tlhelp32.dll
+
+// Switch off warning 4748 (/GS can not protect parameters and local variables
+// from local buffer overrun because optimizations are disabled in function) as
+// it is triggered by the use of inline assembler.
+#pragma warning(push)
+#pragma warning(disable : 4748)
+int OS::StackWalk(Vector<OS::StackFrame> frames) {
+  BOOL ok;
+
+  // Load the required functions from DLL's.
+  if (!LoadDbgHelpAndTlHelp32()) return kStackWalkError;
+
+  // Get the process and thread handles.
+  HANDLE process_handle = GetCurrentProcess();
+  HANDLE thread_handle = GetCurrentThread();
+
+  // Read the symbols.
+  if (!LoadSymbols(process_handle)) return kStackWalkError;
+
+  // Capture current context.
+  CONTEXT context;
+  memset(&context, 0, sizeof(context));
+  context.ContextFlags = CONTEXT_CONTROL;
+#ifdef  _WIN64
+  // TODO(X64): Implement context capture.
+#else
+  __asm    call x
+  __asm x: pop eax
+  __asm    mov context.Eip, eax
+  __asm    mov context.Ebp, ebp
+  __asm    mov context.Esp, esp
+  // NOTE: At some point, we could use RtlCaptureContext(&context) to
+  // capture the context instead of inline assembler. However it is
+  // only available on XP, Vista, Server 2003 and Server 2008 which
+  // might not be sufficient.
+#endif
+
+  // Initialize the stack walking
+  STACKFRAME64 stack_frame;
+  memset(&stack_frame, 0, sizeof(stack_frame));
+#ifdef  _WIN64
+  stack_frame.AddrPC.Offset = context.Rip;
+  stack_frame.AddrFrame.Offset = context.Rbp;
+  stack_frame.AddrStack.Offset = context.Rsp;
+#else
+  stack_frame.AddrPC.Offset = context.Eip;
+  stack_frame.AddrFrame.Offset = context.Ebp;
+  stack_frame.AddrStack.Offset = context.Esp;
+#endif
+  stack_frame.AddrPC.Mode = AddrModeFlat;
+  stack_frame.AddrFrame.Mode = AddrModeFlat;
+  stack_frame.AddrStack.Mode = AddrModeFlat;
+  int frames_count = 0;
+
+  // Collect stack frames.
+  int frames_size = frames.length();
+  while (frames_count < frames_size) {
+    ok = _StackWalk64(
+        IMAGE_FILE_MACHINE_I386,    // MachineType
+        process_handle,             // hProcess
+        thread_handle,              // hThread
+        &stack_frame,               // StackFrame
+        &context,                   // ContextRecord
+        NULL,                       // ReadMemoryRoutine
+        _SymFunctionTableAccess64,  // FunctionTableAccessRoutine
+        _SymGetModuleBase64,        // GetModuleBaseRoutine
+        NULL);                      // TranslateAddress
+    if (!ok) break;
+
+    // Store the address.
+    ASSERT((stack_frame.AddrPC.Offset >> 32) == 0);  // 32-bit address.
+    frames[frames_count].address =
+        reinterpret_cast<void*>(stack_frame.AddrPC.Offset);
+
+    // Try to locate a symbol for this frame.
+    DWORD64 symbol_displacement;
+    IMAGEHLP_SYMBOL64* symbol = NULL;
+    symbol = NewArray<IMAGEHLP_SYMBOL64>(kStackWalkMaxNameLen);
+    if (!symbol) return kStackWalkError;  // Out of memory.
+    memset(symbol, 0, sizeof(IMAGEHLP_SYMBOL64) + kStackWalkMaxNameLen);
+    symbol->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL64);
+    symbol->MaxNameLength = kStackWalkMaxNameLen;
+    ok = _SymGetSymFromAddr64(process_handle,             // hProcess
+                              stack_frame.AddrPC.Offset,  // Address
+                              &symbol_displacement,       // Displacement
+                              symbol);                    // Symbol
+    if (ok) {
+      // Try to locate more source information for the symbol.
+      IMAGEHLP_LINE64 Line;
+      memset(&Line, 0, sizeof(Line));
+      Line.SizeOfStruct = sizeof(Line);
+      DWORD line_displacement;
+      ok = _SymGetLineFromAddr64(
+          process_handle,             // hProcess
+          stack_frame.AddrPC.Offset,  // dwAddr
+          &line_displacement,         // pdwDisplacement
+          &Line);                     // Line
+      // Format a text representation of the frame based on the information
+      // available.
+      if (ok) {
+        SNPrintF(MutableCStrVector(frames[frames_count].text,
+                                   kStackWalkMaxTextLen),
+                 "%s %s:%d:%d",
+                 symbol->Name, Line.FileName, Line.LineNumber,
+                 line_displacement);
+      } else {
+        SNPrintF(MutableCStrVector(frames[frames_count].text,
+                                   kStackWalkMaxTextLen),
+                 "%s",
+                 symbol->Name);
+      }
+      // Make sure line termination is in place.
+      frames[frames_count].text[kStackWalkMaxTextLen - 1] = '\0';
+    } else {
+      // No text representation of this frame
+      frames[frames_count].text[0] = '\0';
+
+      // Continue if we are just missing a module (for non C/C++ frames a
+      // module will never be found).
+      int err = GetLastError();
+      if (err != ERROR_MOD_NOT_FOUND) {
+        DeleteArray(symbol);
+        break;
+      }
+    }
+    DeleteArray(symbol);
+
+    frames_count++;
+  }
+
+  // Return the number of frames filled in.
+  return frames_count;
+}
+
+// Restore warnings to previous settings.
+#pragma warning(pop)
+
+#else  // __MINGW32__
+void OS::LogSharedLibraryAddresses() { }
+int OS::StackWalk(Vector<OS::StackFrame> frames) { return 0; }
+#endif  // __MINGW32__
+
+
+double OS::nan_value() {
+#ifdef _MSC_VER
+  static const __int64 nanval = 0xfff8000000000000;
+  return *reinterpret_cast<const double*>(&nanval);
+#else  // _MSC_VER
+  return NAN;
+#endif  // _MSC_VER
+}
+
+
+int OS::ActivationFrameAlignment() {
+#ifdef _WIN64
+  return 16;  // Windows 64-bit ABI requires the stack to be 16-byte aligned.
+#else
+  return 8;  // Floating-point math runs faster with 8-byte alignment.
+#endif
+}
+
+
+bool VirtualMemory::IsReserved() {
+  return address_ != NULL;
+}
+
+
+VirtualMemory::VirtualMemory(size_t size) {
+  address_ = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
+  size_ = size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    if (0 == VirtualFree(address(), 0, MEM_RELEASE)) address_ = NULL;
+  }
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+  if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
+    return false;
+  }
+
+  UpdateAllocatedSpaceLimits(address, size);
+  return true;
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  ASSERT(IsReserved());
+  return VirtualFree(address, size, MEM_DECOMMIT) != FALSE;
+}
+
+
+// ----------------------------------------------------------------------------
+// Win32 thread support.
+
+// Definition of invalid thread handle and id.
+static const HANDLE kNoThread = INVALID_HANDLE_VALUE;
+static const DWORD kNoThreadId = 0;
+
+
+class ThreadHandle::PlatformData : public Malloced {
+ public:
+  explicit PlatformData(ThreadHandle::Kind kind) {
+    Initialize(kind);
+  }
+
+  void Initialize(ThreadHandle::Kind kind) {
+    switch (kind) {
+      case ThreadHandle::SELF: tid_ = GetCurrentThreadId(); break;
+      case ThreadHandle::INVALID: tid_ = kNoThreadId; break;
+    }
+  }
+  DWORD tid_;  // Win32 thread identifier.
+};
+
+
+// Entry point for threads. The supplied argument is a pointer to the thread
+// object. The entry function dispatches to the run method in the thread
+// object. It is important that this function has __stdcall calling
+// convention.
+static unsigned int __stdcall ThreadEntry(void* arg) {
+  Thread* thread = reinterpret_cast<Thread*>(arg);
+  // This is also initialized by the last parameter to _beginthreadex() but we
+  // don't know which thread will run first (the original thread or the new
+  // one) so we initialize it here too.
+  thread->thread_handle_data()->tid_ = GetCurrentThreadId();
+  thread->Run();
+  return 0;
+}
+
+
+// Initialize thread handle to invalid handle.
+ThreadHandle::ThreadHandle(ThreadHandle::Kind kind) {
+  data_ = new PlatformData(kind);
+}
+
+
+ThreadHandle::~ThreadHandle() {
+  delete data_;
+}
+
+
+// The thread is running if it has the same id as the current thread.
+bool ThreadHandle::IsSelf() const {
+  return GetCurrentThreadId() == data_->tid_;
+}
+
+
+// Test for invalid thread handle.
+bool ThreadHandle::IsValid() const {
+  return data_->tid_ != kNoThreadId;
+}
+
+
+void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
+  data_->Initialize(kind);
+}
+
+
+class Thread::PlatformData : public Malloced {
+ public:
+  explicit PlatformData(HANDLE thread) : thread_(thread) {}
+  HANDLE thread_;
+};
+
+
+// Initialize a Win32 thread object. The thread has an invalid thread
+// handle until it is started.
+
+Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
+  data_ = new PlatformData(kNoThread);
+}
+
+
+// Close our own handle for the thread.
+Thread::~Thread() {
+  if (data_->thread_ != kNoThread) CloseHandle(data_->thread_);
+  delete data_;
+}
+
+
+// Create a new thread. It is important to use _beginthreadex() instead of
+// the Win32 function CreateThread(), because CreateThread() does not
+// initialize thread-specific structures in the C runtime library.
+void Thread::Start() {
+  data_->thread_ = reinterpret_cast<HANDLE>(
+      _beginthreadex(NULL,
+                     0,
+                     ThreadEntry,
+                     this,
+                     0,
+                     reinterpret_cast<unsigned int*>(
+                         &thread_handle_data()->tid_)));
+  ASSERT(IsValid());
+}
+
+
+// Wait for thread to terminate.
+void Thread::Join() {
+  WaitForSingleObject(data_->thread_, INFINITE);
+}
+
+
+Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
+  DWORD result = TlsAlloc();
+  ASSERT(result != TLS_OUT_OF_INDEXES);
+  return static_cast<LocalStorageKey>(result);
+}
+
+
+void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
+  BOOL result = TlsFree(static_cast<DWORD>(key));
+  USE(result);
+  ASSERT(result);
+}
+
+
+void* Thread::GetThreadLocal(LocalStorageKey key) {
+  return TlsGetValue(static_cast<DWORD>(key));
+}
+
+
+void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
+  BOOL result = TlsSetValue(static_cast<DWORD>(key), value);
+  USE(result);
+  ASSERT(result);
+}
+
+
+
+void Thread::YieldCPU() {
+  Sleep(0);
+}
+
+
+// ----------------------------------------------------------------------------
+// Win32 mutex support.
+//
+// On Win32 mutexes are implemented using CRITICAL_SECTION objects. These are
+// faster than Win32 Mutex objects because they are implemented using user mode
+// atomic instructions. Therefore we only do ring transitions if there is lock
+// contention.
+
+class Win32Mutex : public Mutex {
+ public:
+
+  Win32Mutex() { InitializeCriticalSection(&cs_); }
+
+  ~Win32Mutex() { DeleteCriticalSection(&cs_); }
+
+  int Lock() {
+    EnterCriticalSection(&cs_);
+    return 0;
+  }
+
+  int Unlock() {
+    LeaveCriticalSection(&cs_);
+    return 0;
+  }
+
+ private:
+  CRITICAL_SECTION cs_;  // Critical section used for mutex
+};
+
+
+Mutex* OS::CreateMutex() {
+  return new Win32Mutex();
+}
+
+
+// ----------------------------------------------------------------------------
+// Win32 semaphore support.
+//
+// On Win32 semaphores are implemented using Win32 Semaphore objects. The
+// semaphores are anonymous. Also, the semaphores are initialized to have
+// no upper limit on count.
+
+
+class Win32Semaphore : public Semaphore {
+ public:
+  explicit Win32Semaphore(int count) {
+    sem = ::CreateSemaphoreA(NULL, count, 0x7fffffff, NULL);
+  }
+
+  ~Win32Semaphore() {
+    CloseHandle(sem);
+  }
+
+  void Wait() {
+    WaitForSingleObject(sem, INFINITE);
+  }
+
+  bool Wait(int timeout) {
+    // The timeout argument is given in microseconds; the Windows API expects
+    // milliseconds.
+    DWORD millis_timeout = timeout / 1000;
+    return WaitForSingleObject(sem, millis_timeout) != WAIT_TIMEOUT;
+  }
+
+  void Signal() {
+    LONG dummy;
+    ReleaseSemaphore(sem, 1, &dummy);
+  }
+
+ private:
+  HANDLE sem;
+};
+
+
+Semaphore* OS::CreateSemaphore(int count) {
+  return new Win32Semaphore(count);
+}
+
+
+// ----------------------------------------------------------------------------
+// Win32 socket support.
+//
+
+class Win32Socket : public Socket {
+ public:
+  explicit Win32Socket() {
+    // Create the socket.
+    socket_ = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+  }
+  explicit Win32Socket(SOCKET socket): socket_(socket) { }
+  virtual ~Win32Socket() { Shutdown(); }
+
+  // Server initialization.
+  bool Bind(const int port);
+  bool Listen(int backlog) const;
+  Socket* Accept() const;
+
+  // Client initialization.
+  bool Connect(const char* host, const char* port);
+
+  // Shutdown socket for both read and write.
+  bool Shutdown();
+
+  // Data transmission.
+  int Send(const char* data, int len) const;
+  int Receive(char* data, int len) const;
+
+  bool SetReuseAddress(bool reuse_address);
+
+  bool IsValid() const { return socket_ != INVALID_SOCKET; }
+
+ private:
+  SOCKET socket_;
+};
+
+
+bool Win32Socket::Bind(const int port) {
+  if (!IsValid())  {
+    return false;
+  }
+
+  sockaddr_in addr;
+  memset(&addr, 0, sizeof(addr));
+  addr.sin_family = AF_INET;
+  addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+  addr.sin_port = htons(port);
+  int status = bind(socket_,
+                    reinterpret_cast<struct sockaddr *>(&addr),
+                    sizeof(addr));
+  return status == 0;
+}
+
+
+bool Win32Socket::Listen(int backlog) const {
+  if (!IsValid()) {
+    return false;
+  }
+
+  int status = listen(socket_, backlog);
+  return status == 0;
+}
+
+
+Socket* Win32Socket::Accept() const {
+  if (!IsValid()) {
+    return NULL;
+  }
+
+  SOCKET socket = accept(socket_, NULL, NULL);
+  if (socket == INVALID_SOCKET) {
+    return NULL;
+  } else {
+    return new Win32Socket(socket);
+  }
+}
+
+
+bool Win32Socket::Connect(const char* host, const char* port) {
+  if (!IsValid()) {
+    return false;
+  }
+
+  // Lookup host and port.
+  struct addrinfo *result = NULL;
+  struct addrinfo hints;
+  memset(&hints, 0, sizeof(addrinfo));
+  hints.ai_family = AF_INET;
+  hints.ai_socktype = SOCK_STREAM;
+  hints.ai_protocol = IPPROTO_TCP;
+  int status = getaddrinfo(host, port, &hints, &result);
+  if (status != 0) {
+    return false;
+  }
+
+  // Connect.
+  status = connect(socket_, result->ai_addr, result->ai_addrlen);
+  freeaddrinfo(result);
+  return status == 0;
+}
+
+
+bool Win32Socket::Shutdown() {
+  if (IsValid()) {
+    // Shutdown socket for both read and write.
+    int status = shutdown(socket_, SD_BOTH);
+    closesocket(socket_);
+    socket_ = INVALID_SOCKET;
+    return status == SOCKET_ERROR;
+  }
+  return true;
+}
+
+
+int Win32Socket::Send(const char* data, int len) const {
+  int status = send(socket_, data, len, 0);
+  return status;
+}
+
+
+int Win32Socket::Receive(char* data, int len) const {
+  int status = recv(socket_, data, len, 0);
+  return status;
+}
+
+
+bool Win32Socket::SetReuseAddress(bool reuse_address) {
+  BOOL on = reuse_address ? TRUE : FALSE;
+  int status = setsockopt(socket_, SOL_SOCKET, SO_REUSEADDR,
+                          reinterpret_cast<char*>(&on), sizeof(on));
+  return status == SOCKET_ERROR;
+}
+
+
+bool Socket::Setup() {
+  // Initialize Winsock.
+  int err;
+  WSADATA winsock_data;
+  WORD version_requested = MAKEWORD(1, 0);
+  err = WSAStartup(version_requested, &winsock_data);
+  if (err != 0) {
+    PrintF("Unable to initialize Winsock, err = %d\n", Socket::LastError());
+  }
+
+  return err == 0;
+}
+
+
+int Socket::LastError() {
+  return WSAGetLastError();
+}
+
+
+uint16_t Socket::HToN(uint16_t value) {
+  return htons(value);
+}
+
+
+uint16_t Socket::NToH(uint16_t value) {
+  return ntohs(value);
+}
+
+
+uint32_t Socket::HToN(uint32_t value) {
+  return htonl(value);
+}
+
+
+uint32_t Socket::NToH(uint32_t value) {
+  return ntohl(value);
+}
+
+
+Socket* OS::CreateSocket() {
+  return new Win32Socket();
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+// ----------------------------------------------------------------------------
+// Win32 profiler support.
+//
+// On win32 we use a sampler thread with high priority to sample the program
+// counter for the profiled thread.
+
+class Sampler::PlatformData : public Malloced {
+ public:
+  explicit PlatformData(Sampler* sampler) {
+    sampler_ = sampler;
+    sampler_thread_ = INVALID_HANDLE_VALUE;
+    profiled_thread_ = INVALID_HANDLE_VALUE;
+  }
+
+  Sampler* sampler_;
+  HANDLE sampler_thread_;
+  HANDLE profiled_thread_;
+
+  // Sampler thread handler.
+  void Runner() {
+    // Context used for sampling the register state of the profiled thread.
+    CONTEXT context;
+    memset(&context, 0, sizeof(context));
+    // Loop until the sampler is disengaged.
+    while (sampler_->IsActive()) {
+      TickSample sample;
+
+      // If profiling, we record the pc and sp of the profiled thread.
+      if (sampler_->IsProfiling()
+          && SuspendThread(profiled_thread_) != (DWORD)-1) {
+        context.ContextFlags = CONTEXT_FULL;
+        if (GetThreadContext(profiled_thread_, &context) != 0) {
+#if V8_HOST_ARCH_X64
+          UNIMPLEMENTED();
+          sample.pc = context.Rip;
+          sample.sp = context.Rsp;
+          sample.fp = context.Rbp;
+#else
+          sample.pc = context.Eip;
+          sample.sp = context.Esp;
+          sample.fp = context.Ebp;
+#endif
+          sampler_->SampleStack(&sample);
+        }
+        ResumeThread(profiled_thread_);
+      }
+
+      // We always sample the VM state.
+      sample.state = Logger::state();
+      // Invoke tick handler with program counter and stack pointer.
+      sampler_->Tick(&sample);
+
+      // Wait until next sampling.
+      Sleep(sampler_->interval_);
+    }
+  }
+};
+
+
+// Entry point for sampler thread.
+static unsigned int __stdcall SamplerEntry(void* arg) {
+  Sampler::PlatformData* data =
+      reinterpret_cast<Sampler::PlatformData*>(arg);
+  data->Runner();
+  return 0;
+}
+
+
+// Initialize a profile sampler.
+Sampler::Sampler(int interval, bool profiling)
+    : interval_(interval), profiling_(profiling), active_(false) {
+  data_ = new PlatformData(this);
+}
+
+
+Sampler::~Sampler() {
+  delete data_;
+}
+
+
+// Start profiling.
+void Sampler::Start() {
+  // If we are profiling, we need to be able to access the calling
+  // thread.
+  if (IsProfiling()) {
+    // Get a handle to the calling thread. This is the thread that we are
+    // going to profile. We need to make a copy of the handle because we are
+    // going to use it in the sampler thread. Using GetThreadHandle() will
+    // not work in this case. We're using OpenThread because DuplicateHandle
+    // for some reason doesn't work in Chrome's sandbox.
+    data_->profiled_thread_ = OpenThread(THREAD_GET_CONTEXT |
+                                         THREAD_SUSPEND_RESUME |
+                                         THREAD_QUERY_INFORMATION,
+                                         FALSE,
+                                         GetCurrentThreadId());
+    BOOL ok = data_->profiled_thread_ != NULL;
+    if (!ok) return;
+  }
+
+  // Start sampler thread.
+  unsigned int tid;
+  active_ = true;
+  data_->sampler_thread_ = reinterpret_cast<HANDLE>(
+      _beginthreadex(NULL, 0, SamplerEntry, data_, 0, &tid));
+  // Set thread to high priority to increase sampling accuracy.
+  SetThreadPriority(data_->sampler_thread_, THREAD_PRIORITY_TIME_CRITICAL);
+}
+
+
+// Stop profiling.
+void Sampler::Stop() {
+  // Setting active to false triggers termination of the sampler
+  // thread.
+  active_ = false;
+
+  // Wait for sampler thread to terminate.
+  WaitForSingleObject(data_->sampler_thread_, INFINITE);
+
+  // Release the thread handles
+  CloseHandle(data_->sampler_thread_);
+  CloseHandle(data_->profiled_thread_);
+}
+
+
+#endif  // ENABLE_LOGGING_AND_PROFILING
+
+} }  // namespace v8::internal
diff --git a/src/platform.h b/src/platform.h
new file mode 100644
index 0000000..76bf891
--- /dev/null
+++ b/src/platform.h
@@ -0,0 +1,544 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This module contains the platform-specific code. This makes the rest of
+// the code less dependent on operating systems, compilers and runtime
+// libraries. This module specifically does not deal with differences between
+// processor architectures.
+// The platform classes have the same definition for all platforms. The
+// implementation for a particular platform is put in platform_<os>.cc.
+// The build system then uses the implementation for the target platform.
+//
+// This design has been chosen because it is simple and fast. Alternatively,
+// the platform-dependent classes could have been implemented using abstract
+// superclasses with virtual methods and specializations for each platform.
+// That design was rejected because it was more complicated and slower. It
+// would require factory methods for selecting the right implementation and
+// the overhead of virtual methods for performance-sensitive operations like
+// mutex locking/unlocking.
+
+#ifndef V8_PLATFORM_H_
+#define V8_PLATFORM_H_
+
+#define V8_INFINITY INFINITY
+
+// Windows specific stuff.
+#ifdef WIN32
+
+// Microsoft Visual C++ specific stuff.
+#ifdef _MSC_VER
+
+enum {
+  FP_NAN,
+  FP_INFINITE,
+  FP_ZERO,
+  FP_SUBNORMAL,
+  FP_NORMAL
+};
+
+#undef V8_INFINITY
+#define V8_INFINITY HUGE_VAL
+
+namespace v8 {
+namespace internal {
+int isfinite(double x);
+} }
+int isnan(double x);
+int isinf(double x);
+int isless(double x, double y);
+int isgreater(double x, double y);
+int fpclassify(double x);
+int signbit(double x);
+
+int strncasecmp(const char* s1, const char* s2, int n);
+
+#endif  // _MSC_VER
+
+// Random is missing on both Visual Studio and MinGW.
+int random();
+
+#endif  // WIN32
+
+// GCC specific stuff
+#ifdef __GNUC__
+
+// Needed for va_list on at least MinGW and Android.
+#include <stdarg.h>
+
+#define __GNUC_VERSION__ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100)
+
+// Unfortunately, the INFINITY macro cannot be used with the '-pedantic'
+// warning flag and certain versions of GCC due to a bug:
+// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11931
+// For now, we use the more involved template-based version from <limits>, but
+// only when compiling with GCC versions affected by the bug (2.96.x - 4.0.x).
+// __GNUC_PREREQ is not defined in GCC for Mac OS X, so we define our own macro.
+#if __GNUC_VERSION__ >= 29600 && __GNUC_VERSION__ < 40100
+#include <limits>
+#undef V8_INFINITY
+#define V8_INFINITY std::numeric_limits<double>::infinity()
+#endif
+
+#endif  // __GNUC__
+
+namespace v8 {
+namespace internal {
+
+class Semaphore;
+
+double ceiling(double x);
+
+// Forward declarations.
+class Socket;
+
+// ----------------------------------------------------------------------------
+// OS
+//
+// This class has static methods for the different platform specific
+// functions. Add methods here to cope with differences between the
+// supported platforms.
+
+class OS {
+ public:
+  // Initializes the platform OS support. Called once at VM startup.
+  static void Setup();
+
+  // Returns the accumulated user time for the thread. This routine
+  // can be used for profiling. The implementation should
+  // strive for high-precision timer resolution, preferably
+  // microsecond resolution.
+  static int GetUserTime(uint32_t* secs,  uint32_t* usecs);
+
+  // Get a tick counter normalized to one tick per microsecond.
+  // Used for calculating time intervals.
+  static int64_t Ticks();
+
+  // Returns current time as the number of milliseconds since
+  // 00:00:00 UTC, January 1, 1970.
+  static double TimeCurrentMillis();
+
+  // Returns a string identifying the current time zone. The
+  // timestamp is used for determining if DST is in effect.
+  static const char* LocalTimezone(double time);
+
+  // Returns the local time offset in milliseconds east of UTC without
+  // taking daylight savings time into account.
+  static double LocalTimeOffset();
+
+  // Returns the daylight savings offset for the given time.
+  static double DaylightSavingsOffset(double time);
+
+  static FILE* FOpen(const char* path, const char* mode);
+
+  // Log file open mode is platform-dependent due to line ending issues.
+  static const char* LogFileOpenMode;
+
+  // Print output to console. This is mostly used for debugging output.
+  // On platforms that have standard terminal output, the output
+  // should go to stdout.
+  static void Print(const char* format, ...);
+  static void VPrint(const char* format, va_list args);
+
+  // Print error output to console. This is mostly used for error message
+  // output. On platforms that have standard terminal output, the output
+  // should go to stderr.
+  static void PrintError(const char* format, ...);
+  static void VPrintError(const char* format, va_list args);
+
+  // Allocate/Free memory used by the JS heap. Pages are readable/writable,
+  // but they are not guaranteed to be executable unless 'is_executable' is
+  // true. Returns the address of allocated memory, or NULL if the allocation
+  // failed.
+  static void* Allocate(const size_t requested,
+                        size_t* allocated,
+                        bool is_executable);
+  static void Free(void* address, const size_t size);
+  // Get the alignment guaranteed by Allocate().
+  static size_t AllocateAlignment();
+
+#ifdef ENABLE_HEAP_PROTECTION
+  // Protect/unprotect a block of memory by marking it read-only/writable.
+  static void Protect(void* address, size_t size);
+  static void Unprotect(void* address, size_t size, bool is_executable);
+#endif
+
+  // Returns an indication of whether a pointer is in a space that
+  // has been allocated by Allocate().  This method may conservatively
+  // always return false, but giving more accurate information may
+  // improve the robustness of the stack dump code in the presence of
+  // heap corruption.
+  static bool IsOutsideAllocatedSpace(void* pointer);
+
+  // Sleep for a number of milliseconds.
+  static void Sleep(const int milliseconds);
+
+  // Abort the current process.
+  static void Abort();
+
+  // Debug break.
+  static void DebugBreak();
+
+  // Walk the stack.
+  static const int kStackWalkError = -1;
+  static const int kStackWalkMaxNameLen = 256;
+  static const int kStackWalkMaxTextLen = 256;
+  struct StackFrame {
+    void* address;
+    char text[kStackWalkMaxTextLen];
+  };
+
+  static int StackWalk(Vector<StackFrame> frames);
+
+  // Factory method for creating platform dependent Mutex.
+  // Please use delete to reclaim the storage for the returned Mutex.
+  static Mutex* CreateMutex();
+
+  // Factory method for creating platform dependent Semaphore.
+  // Please use delete to reclaim the storage for the returned Semaphore.
+  static Semaphore* CreateSemaphore(int count);
+
+  // Factory method for creating platform dependent Socket.
+  // Please use delete to reclaim the storage for the returned Socket.
+  static Socket* CreateSocket();
+
+  class MemoryMappedFile {
+   public:
+    static MemoryMappedFile* create(const char* name, int size, void* initial);
+    virtual ~MemoryMappedFile() { }
+    virtual void* memory() = 0;
+  };
+
+  // Safe formatting print. Ensures that str is always null-terminated.
+  // Returns the number of chars written, or -1 if output was truncated.
+  static int SNPrintF(Vector<char> str, const char* format, ...);
+  static int VSNPrintF(Vector<char> str,
+                       const char* format,
+                       va_list args);
+
+  static char* StrChr(char* str, int c);
+  static void StrNCpy(Vector<char> dest, const char* src, size_t n);
+
+  // Support for the profiler.  Can do nothing, in which case ticks
+  // occurring in shared libraries will not be properly accounted
+  // for.
+  static void LogSharedLibraryAddresses();
+
+  // Returns the double constant NAN
+  static double nan_value();
+
+  // Returns the activation frame alignment constraint or zero if
+  // the platform doesn't care. Guaranteed to be a power of two.
+  static int ActivationFrameAlignment();
+
+ private:
+  static const int msPerSecond = 1000;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
+};
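+
+// Illustrative usage sketch of the formatting helpers (the buffer size and
+// the format arguments below are arbitrary, not taken from the sources):
+//
+//   EmbeddedVector<char, 128> buffer;
+//   int written = OS::SNPrintF(buffer, "pid %d", 42);
+//   if (written < 0) {
+//     // Output was truncated; buffer is still null-terminated.
+//   }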
+
+
+class VirtualMemory {
+ public:
+  // Reserves virtual memory with size.
+  explicit VirtualMemory(size_t size);
+  ~VirtualMemory();
+
+  // Returns whether the memory has been reserved.
+  bool IsReserved();
+
+  // Returns the start address of the reserved memory.
+  void* address() {
+    ASSERT(IsReserved());
+    return address_;
+  }
+
+  // Returns the size of the reserved memory.
+  size_t size() { return size_; }
+
+  // Commits real memory. Returns whether the operation succeeded.
+  bool Commit(void* address, size_t size, bool is_executable);
+
+  // Uncommit real memory.  Returns whether the operation succeeded.
+  bool Uncommit(void* address, size_t size);
+
+ private:
+  void* address_;  // Start address of the virtual memory.
+  size_t size_;  // Size of the virtual memory.
+};
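+
+// Illustrative usage sketch: reserve address space, then commit one
+// allocation-aligned, non-executable chunk at its start. The reservation
+// size below is arbitrary, not taken from the sources.
+//
+//   VirtualMemory reservation(1024 * 1024);
+//   if (reservation.IsReserved()) {
+//     bool committed = reservation.Commit(reservation.address(),
+//                                         OS::AllocateAlignment(),
+//                                         false);  // Not executable.
+//   }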
+
+
+// ----------------------------------------------------------------------------
+// ThreadHandle
+//
+// A ThreadHandle represents an identifier for a thread. The ThreadHandle
+// does not own the underlying OS handle. Thread handles can be used for
+// referring to threads and testing equality.
+
+class ThreadHandle {
+ public:
+  enum Kind { SELF, INVALID };
+  explicit ThreadHandle(Kind kind);
+
+  // Destructor.
+  ~ThreadHandle();
+
+  // Test for thread running.
+  bool IsSelf() const;
+
+  // Test for valid thread handle.
+  bool IsValid() const;
+
+  // Get platform-specific data.
+  class PlatformData;
+  PlatformData* thread_handle_data() { return data_; }
+
+  // Initialize the handle to the given kind.
+  void Initialize(Kind kind);
+
+ private:
+  PlatformData* data_;  // Captures platform dependent data.
+};
+
+
+// ----------------------------------------------------------------------------
+// Thread
+//
+// Thread objects are used for creating and running threads. When the Start()
+// method is called, the new thread starts running the Run() method in that
+// thread. The Thread object should not be deallocated before the thread has
+// terminated.
+
+class Thread: public ThreadHandle {
+ public:
+  // Opaque data type for thread-local storage keys.
+  enum LocalStorageKey {};
+
+  // Create new thread.
+  Thread();
+  virtual ~Thread();
+
+  // Start new thread by calling the Run() method in the new thread.
+  void Start();
+
+  // Wait until thread terminates.
+  void Join();
+
+  // Abstract method for run handler.
+  virtual void Run() = 0;
+
+  // Thread-local storage.
+  static LocalStorageKey CreateThreadLocalKey();
+  static void DeleteThreadLocalKey(LocalStorageKey key);
+  static void* GetThreadLocal(LocalStorageKey key);
+  static int GetThreadLocalInt(LocalStorageKey key) {
+    return static_cast<int>(reinterpret_cast<intptr_t>(GetThreadLocal(key)));
+  }
+  static void SetThreadLocal(LocalStorageKey key, void* value);
+  static void SetThreadLocalInt(LocalStorageKey key, int value) {
+    SetThreadLocal(key, reinterpret_cast<void*>(static_cast<intptr_t>(value)));
+  }
+  static bool HasThreadLocal(LocalStorageKey key) {
+    return GetThreadLocal(key) != NULL;
+  }
+
+  // A hint to the scheduler to let another thread run.
+  static void YieldCPU();
+
+ private:
+  class PlatformData;
+  PlatformData* data_;
+  DISALLOW_COPY_AND_ASSIGN(Thread);
+};
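+
+// Illustrative usage sketch of a Thread subclass; the class name and body
+// are hypothetical, not taken from the sources.
+//
+//   class WorkerThread : public Thread {
+//    public:
+//     virtual void Run() {
+//       // Work executed on the new thread goes here.
+//     }
+//   };
+//
+//   WorkerThread worker;
+//   worker.Start();  // Run() now executes on the new thread.
+//   worker.Join();   // Wait for termination before 'worker' is destroyed.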
+
+
+// ----------------------------------------------------------------------------
+// Mutex
+//
+// Mutexes are used for serializing access to non-reentrant sections of code.
+// Mutex implementations should allow for nested/recursive locking.
+
+class Mutex {
+ public:
+  virtual ~Mutex() {}
+
+  // Locks the given mutex. If the mutex is currently unlocked, it becomes
+  // locked and owned by the calling thread, and the call returns
+  // immediately. If the mutex is already locked by another thread, the
+  // calling thread is suspended until the mutex is unlocked.
+  virtual int Lock() = 0;
+
+  // Unlocks the given mutex. The mutex is assumed to be locked and owned by
+  // the calling thread on entrance.
+  virtual int Unlock() = 0;
+};
+
+
+// ----------------------------------------------------------------------------
+// ScopedLock
+//
+// Stack-allocated ScopedLocks provide block-scoped locking and unlocking
+// of a mutex.
+class ScopedLock {
+ public:
+  explicit ScopedLock(Mutex* mutex): mutex_(mutex) {
+    mutex_->Lock();
+  }
+  ~ScopedLock() {
+    mutex_->Unlock();
+  }
+
+ private:
+  Mutex* mutex_;
+  DISALLOW_COPY_AND_ASSIGN(ScopedLock);
+};
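+
+// Illustrative usage sketch: guarding shared state with a mutex obtained
+// from OS::CreateMutex(). The names below are hypothetical.
+//
+//   static Mutex* entries_mutex = OS::CreateMutex();
+//
+//   void AddEntry(int entry) {
+//     ScopedLock lock(entries_mutex);  // Unlocks automatically at scope exit.
+//     // Mutate the shared state here.
+//   }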
+
+
+// ----------------------------------------------------------------------------
+// Semaphore
+//
+// A semaphore object is a synchronization object that maintains a count. The
+// count is decremented each time a thread completes a wait for the semaphore
+// object and incremented each time a thread signals the semaphore. When the
+// count reaches zero, threads waiting for the semaphore block until the
+// count becomes non-zero.
+
+class Semaphore {
+ public:
+  virtual ~Semaphore() {}
+
+  // Suspends the calling thread until the semaphore counter is non-zero
+  // and then decrements the semaphore counter.
+  virtual void Wait() = 0;
+
+  // Suspends the calling thread until the counter is non-zero or the timeout
+  // time has passed. If the timeout happens, the return value is false and
+  // the counter is unchanged. Otherwise the semaphore counter is decremented
+  // and true is returned. The timeout value is specified in microseconds.
+  virtual bool Wait(int timeout) = 0;
+
+  // Increments the semaphore counter.
+  virtual void Signal() = 0;
+};
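+
+// Illustrative usage sketch: a producer thread signals a consumer thread
+// when a work item is ready. The queue and item types are hypothetical.
+//
+//   static Semaphore* work_available = OS::CreateSemaphore(0);
+//
+//   void ProduceItem(Item* item) {
+//     queue.Enqueue(item);         // Hypothetical thread-safe queue.
+//     work_available->Signal();    // Increment the counter, wake a waiter.
+//   }
+//
+//   Item* ConsumeItem() {
+//     work_available->Wait();      // Blocks until the counter is non-zero.
+//     return queue.Dequeue();
+//   }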
+
+
+// ----------------------------------------------------------------------------
+// Socket
+//
+
+class Socket {
+ public:
+  virtual ~Socket() {}
+
+  // Server initialization.
+  virtual bool Bind(const int port) = 0;
+  virtual bool Listen(int backlog) const = 0;
+  virtual Socket* Accept() const = 0;
+
+  // Client initialization.
+  virtual bool Connect(const char* host, const char* port) = 0;
+
+  // Shutdown socket for both read and write. This causes blocking Send and
+  // Receive calls to exit. After Shutdown the Socket object cannot be used for
+  // any communication.
+  virtual bool Shutdown() = 0;
+
+  // Data transmission.
+  virtual int Send(const char* data, int len) const = 0;
+  virtual int Receive(char* data, int len) const = 0;
+
+  // Set the value of the SO_REUSEADDR socket option.
+  virtual bool SetReuseAddress(bool reuse_address) = 0;
+
+  virtual bool IsValid() const = 0;
+
+  static bool Setup();
+  static int LastError();
+  static uint16_t HToN(uint16_t value);
+  static uint16_t NToH(uint16_t value);
+  static uint32_t HToN(uint32_t value);
+  static uint32_t NToH(uint32_t value);
+};
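+
+// Illustrative usage sketch: a blocking accept-and-echo sequence. Error
+// handling is omitted, the port number is arbitrary, and Socket::Setup()
+// is assumed to have been called already.
+//
+//   Socket* server = OS::CreateSocket();
+//   if (server->Bind(5858) && server->Listen(1)) {
+//     Socket* client = server->Accept();  // Blocks until a client connects.
+//     char buffer[64];
+//     int received = client->Receive(buffer, sizeof(buffer));
+//     if (received > 0) client->Send(buffer, received);
+//     delete client;
+//   }
+//   delete server;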
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+// ----------------------------------------------------------------------------
+// Sampler
+//
+// A sampler periodically samples the state of the VM and optionally
+// (if used for profiling) the program counter and stack pointer for
+// the thread that created it.
+
+// TickSample captures the information collected for each sample.
+class TickSample {
+ public:
+  TickSample() : pc(0), sp(0), fp(0), state(OTHER), frames_count(0) {}
+  uintptr_t pc;  // Instruction pointer.
+  uintptr_t sp;  // Stack pointer.
+  uintptr_t fp;  // Frame pointer.
+  StateTag state;   // The state of the VM.
+  static const int kMaxFramesCount = 100;
+  EmbeddedVector<Address, kMaxFramesCount> stack;  // Call stack.
+  int frames_count;  // Number of captured frames.
+};
+
+class Sampler {
+ public:
+  // Initialize sampler.
+  explicit Sampler(int interval, bool profiling);
+  virtual ~Sampler();
+
+  // Performs stack sampling.
+  virtual void SampleStack(TickSample* sample) = 0;
+
+  // This method is called for each sampling period with the current
+  // program counter.
+  virtual void Tick(TickSample* sample) = 0;
+
+  // Start and stop sampler.
+  void Start();
+  void Stop();
+
+  // Is the sampler used for profiling.
+  inline bool IsProfiling() { return profiling_; }
+
+  // Whether the sampler is running (that is, consumes resources).
+  inline bool IsActive() { return active_; }
+
+  class PlatformData;
+
+ private:
+  const int interval_;
+  const bool profiling_;
+  bool active_;
+  PlatformData* data_;  // Platform specific data.
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
+};
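+
+// Illustrative usage sketch of a Sampler subclass that merely counts ticks;
+// the class is hypothetical and the interval is assumed to be milliseconds.
+//
+//   class TickCountingSampler : public Sampler {
+//    public:
+//     TickCountingSampler() : Sampler(1, false), ticks_(0) {}
+//     virtual void SampleStack(TickSample* sample) {}
+//     virtual void Tick(TickSample* sample) { ticks_++; }
+//    private:
+//     int ticks_;
+//   };
+//
+//   TickCountingSampler sampler;
+//   sampler.Start();
+//   // ... run the code to be observed ...
+//   sampler.Stop();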
+
+#endif  // ENABLE_LOGGING_AND_PROFILING
+
+} }  // namespace v8::internal
+
+#endif  // V8_PLATFORM_H_
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
new file mode 100644
index 0000000..bf66c4b
--- /dev/null
+++ b/src/prettyprinter.cc
@@ -0,0 +1,1094 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdarg.h>
+
+#include "v8.h"
+
+#include "prettyprinter.h"
+#include "scopes.h"
+#include "platform.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef DEBUG
+
+PrettyPrinter::PrettyPrinter() {
+  output_ = NULL;
+  size_ = 0;
+  pos_ = 0;
+}
+
+
+PrettyPrinter::~PrettyPrinter() {
+  DeleteArray(output_);
+}
+
+
+void PrettyPrinter::VisitBlock(Block* node) {
+  if (!node->is_initializer_block()) Print("{ ");
+  PrintStatements(node->statements());
+  if (node->statements()->length() > 0) Print(" ");
+  if (!node->is_initializer_block()) Print("}");
+}
+
+
+void PrettyPrinter::VisitDeclaration(Declaration* node) {
+  Print("var ");
+  PrintLiteral(node->proxy()->name(), false);
+  if (node->fun() != NULL) {
+    Print(" = ");
+    PrintFunctionLiteral(node->fun());
+  }
+  Print(";");
+}
+
+
+void PrettyPrinter::VisitExpressionStatement(ExpressionStatement* node) {
+  Visit(node->expression());
+  Print(";");
+}
+
+
+void PrettyPrinter::VisitEmptyStatement(EmptyStatement* node) {
+  Print(";");
+}
+
+
+void PrettyPrinter::VisitIfStatement(IfStatement* node) {
+  Print("if (");
+  Visit(node->condition());
+  Print(") ");
+  Visit(node->then_statement());
+  if (node->HasElseStatement()) {
+    Print(" else ");
+    Visit(node->else_statement());
+  }
+}
+
+
+void PrettyPrinter::VisitContinueStatement(ContinueStatement* node) {
+  Print("continue");
+  ZoneStringList* labels = node->target()->labels();
+  if (labels != NULL) {
+    Print(" ");
+    ASSERT(labels->length() > 0);  // guaranteed to have at least one entry
+    PrintLiteral(labels->at(0), false);  // any label from the list is fine
+  }
+  Print(";");
+}
+
+
+void PrettyPrinter::VisitBreakStatement(BreakStatement* node) {
+  Print("break");
+  ZoneStringList* labels = node->target()->labels();
+  if (labels != NULL) {
+    Print(" ");
+    ASSERT(labels->length() > 0);  // guaranteed to have at least one entry
+    PrintLiteral(labels->at(0), false);  // any label from the list is fine
+  }
+  Print(";");
+}
+
+
+void PrettyPrinter::VisitReturnStatement(ReturnStatement* node) {
+  Print("return ");
+  Visit(node->expression());
+  Print(";");
+}
+
+
+void PrettyPrinter::VisitWithEnterStatement(WithEnterStatement* node) {
+  Print("<enter with> (");
+  Visit(node->expression());
+  Print(") ");
+}
+
+
+void PrettyPrinter::VisitWithExitStatement(WithExitStatement* node) {
+  Print("<exit with>");
+}
+
+
+void PrettyPrinter::VisitSwitchStatement(SwitchStatement* node) {
+  PrintLabels(node->labels());
+  Print("switch (");
+  Visit(node->tag());
+  Print(") { ");
+  ZoneList<CaseClause*>* cases = node->cases();
+  for (int i = 0; i < cases->length(); i++)
+    PrintCaseClause(cases->at(i));
+  Print("}");
+}
+
+
+void PrettyPrinter::VisitLoopStatement(LoopStatement* node) {
+  PrintLabels(node->labels());
+  switch (node->type()) {
+    case LoopStatement::DO_LOOP:
+      ASSERT(node->init() == NULL);
+      ASSERT(node->next() == NULL);
+      Print("do ");
+      Visit(node->body());
+      Print(" while (");
+      Visit(node->cond());
+      Print(");");
+      break;
+
+    case LoopStatement::FOR_LOOP:
+      Print("for (");
+      if (node->init() != NULL) {
+        Visit(node->init());
+        Print(" ");
+      } else {
+        Print("; ");
+      }
+      if (node->cond() != NULL)
+        Visit(node->cond());
+      Print("; ");
+      if (node->next() != NULL)
+        Visit(node->next());  // prints extra ';', unfortunately
+      // to fix: should use Expression for next
+      Print(") ");
+      Visit(node->body());
+      break;
+
+    case LoopStatement::WHILE_LOOP:
+      ASSERT(node->init() == NULL);
+      ASSERT(node->next() == NULL);
+      Print("while (");
+      Visit(node->cond());
+      Print(") ");
+      Visit(node->body());
+      break;
+  }
+}
+
+
+void PrettyPrinter::VisitForInStatement(ForInStatement* node) {
+  PrintLabels(node->labels());
+  Print("for (");
+  Visit(node->each());
+  Print(" in ");
+  Visit(node->enumerable());
+  Print(") ");
+  Visit(node->body());
+}
+
+
+void PrettyPrinter::VisitTryCatch(TryCatch* node) {
+  Print("try ");
+  Visit(node->try_block());
+  Print(" catch (");
+  Visit(node->catch_var());
+  Print(") ");
+  Visit(node->catch_block());
+}
+
+
+void PrettyPrinter::VisitTryFinally(TryFinally* node) {
+  Print("try ");
+  Visit(node->try_block());
+  Print(" finally ");
+  Visit(node->finally_block());
+}
+
+
+void PrettyPrinter::VisitDebuggerStatement(DebuggerStatement* node) {
+  Print("debugger ");
+}
+
+
+void PrettyPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
+  Print("(");
+  PrintFunctionLiteral(node);
+  Print(")");
+}
+
+
+void PrettyPrinter::VisitFunctionBoilerplateLiteral(
+    FunctionBoilerplateLiteral* node) {
+  Print("(");
+  PrintLiteral(node->boilerplate(), true);
+  Print(")");
+}
+
+
+void PrettyPrinter::VisitConditional(Conditional* node) {
+  Visit(node->condition());
+  Print(" ? ");
+  Visit(node->then_expression());
+  Print(" : ");
+  Visit(node->else_expression());
+}
+
+
+void PrettyPrinter::VisitLiteral(Literal* node) {
+  PrintLiteral(node->handle(), true);
+}
+
+
+void PrettyPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
+  Print(" RegExp(");
+  PrintLiteral(node->pattern(), false);
+  Print(",");
+  PrintLiteral(node->flags(), false);
+  Print(") ");
+}
+
+
+void PrettyPrinter::VisitObjectLiteral(ObjectLiteral* node) {
+  Print("{ ");
+  for (int i = 0; i < node->properties()->length(); i++) {
+    if (i != 0) Print(",");
+    ObjectLiteral::Property* property = node->properties()->at(i);
+    Print(" ");
+    Visit(property->key());
+    Print(": ");
+    Visit(property->value());
+  }
+  Print(" }");
+}
+
+
+void PrettyPrinter::VisitArrayLiteral(ArrayLiteral* node) {
+  Print("[ ");
+  for (int i = 0; i < node->values()->length(); i++) {
+    if (i != 0) Print(",");
+    Visit(node->values()->at(i));
+  }
+  Print(" ]");
+}
+
+
+void PrettyPrinter::VisitCatchExtensionObject(CatchExtensionObject* node) {
+  Print("{ ");
+  Visit(node->key());
+  Print(": ");
+  Visit(node->value());
+  Print(" }");
+}
+
+
+void PrettyPrinter::VisitSlot(Slot* node) {
+  switch (node->type()) {
+    case Slot::PARAMETER:
+      Print("parameter[%d]", node->index());
+      break;
+    case Slot::LOCAL:
+      Print("frame[%d]", node->index());
+      break;
+    case Slot::CONTEXT:
+      Print(".context[%d]", node->index());
+      break;
+    case Slot::LOOKUP:
+      Print(".context[");
+      PrintLiteral(node->var()->name(), false);
+      Print("]");
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void PrettyPrinter::VisitVariableProxy(VariableProxy* node) {
+  PrintLiteral(node->name(), false);
+}
+
+
+void PrettyPrinter::VisitAssignment(Assignment* node) {
+  Visit(node->target());
+  Print(" %s ", Token::String(node->op()));
+  Visit(node->value());
+}
+
+
+void PrettyPrinter::VisitThrow(Throw* node) {
+  Print("throw ");
+  Visit(node->exception());
+}
+
+
+void PrettyPrinter::VisitProperty(Property* node) {
+  Expression* key = node->key();
+  Literal* literal = key->AsLiteral();
+  if (literal != NULL && literal->handle()->IsSymbol()) {
+    Print("(");
+    Visit(node->obj());
+    Print(").");
+    PrintLiteral(literal->handle(), false);
+  } else {
+    Visit(node->obj());
+    Print("[");
+    Visit(key);
+    Print("]");
+  }
+}
+
+
+void PrettyPrinter::VisitCall(Call* node) {
+  Visit(node->expression());
+  PrintArguments(node->arguments());
+}
+
+
+void PrettyPrinter::VisitCallNew(CallNew* node) {
+  Print("new (");
+  Visit(node->expression());
+  Print(")");
+  PrintArguments(node->arguments());
+}
+
+
+void PrettyPrinter::VisitCallRuntime(CallRuntime* node) {
+  Print("%%");
+  PrintLiteral(node->name(), false);
+  PrintArguments(node->arguments());
+}
+
+
+void PrettyPrinter::VisitUnaryOperation(UnaryOperation* node) {
+  Print("(%s", Token::String(node->op()));
+  Visit(node->expression());
+  Print(")");
+}
+
+
+void PrettyPrinter::VisitCountOperation(CountOperation* node) {
+  Print("(");
+  if (node->is_prefix()) Print("%s", Token::String(node->op()));
+  Visit(node->expression());
+  if (node->is_postfix()) Print("%s", Token::String(node->op()));
+  Print(")");
+}
+
+
+void PrettyPrinter::VisitBinaryOperation(BinaryOperation* node) {
+  Print("(");
+  Visit(node->left());
+  Print("%s", Token::String(node->op()));
+  Visit(node->right());
+  Print(")");
+}
+
+
+void PrettyPrinter::VisitCompareOperation(CompareOperation* node) {
+  Print("(");
+  Visit(node->left());
+  Print("%s", Token::String(node->op()));
+  Visit(node->right());
+  Print(")");
+}
+
+
+void PrettyPrinter::VisitThisFunction(ThisFunction* node) {
+  Print("<this-function>");
+}
+
+
+const char* PrettyPrinter::Print(AstNode* node) {
+  Init();
+  Visit(node);
+  return output_;
+}
+
+
+const char* PrettyPrinter::PrintExpression(FunctionLiteral* program) {
+  Init();
+  ExpressionStatement* statement =
+    program->body()->at(0)->AsExpressionStatement();
+  Visit(statement->expression());
+  return output_;
+}
+
+
+const char* PrettyPrinter::PrintProgram(FunctionLiteral* program) {
+  Init();
+  PrintStatements(program->body());
+  Print("\n");
+  return output_;
+}
+
+
+void PrettyPrinter::PrintOut(AstNode* node) {
+  PrettyPrinter printer;
+  PrintF("%s", printer.Print(node));
+}
+
+
+void PrettyPrinter::Init() {
+  if (size_ == 0) {
+    ASSERT(output_ == NULL);
+    const int initial_size = 256;
+    output_ = NewArray<char>(initial_size);
+    size_ = initial_size;
+  }
+  output_[0] = '\0';
+  pos_ = 0;
+}
+
+
+void PrettyPrinter::Print(const char* format, ...) {
+  for (;;) {
+    va_list arguments;
+    va_start(arguments, format);
+    int n = OS::VSNPrintF(Vector<char>(output_, size_) + pos_,
+                          format,
+                          arguments);
+    va_end(arguments);
+
+    if (n >= 0) {
+      // there was enough space - we are done
+      pos_ += n;
+      return;
+    } else {
+      // there was not enough space - allocate more and try again
+      const int slack = 32;
+      int new_size = size_ + (size_ >> 1) + slack;
+      char* new_output = NewArray<char>(new_size);
+      memcpy(new_output, output_, pos_);
+      DeleteArray(output_);
+      output_ = new_output;
+      size_ = new_size;
+    }
+  }
+}
+
+
+void PrettyPrinter::PrintStatements(ZoneList<Statement*>* statements) {
+  for (int i = 0; i < statements->length(); i++) {
+    if (i != 0) Print(" ");
+    Visit(statements->at(i));
+  }
+}
+
+
+void PrettyPrinter::PrintLabels(ZoneStringList* labels) {
+  if (labels != NULL) {
+    for (int i = 0; i < labels->length(); i++) {
+      PrintLiteral(labels->at(i), false);
+      Print(": ");
+    }
+  }
+}
+
+
+void PrettyPrinter::PrintArguments(ZoneList<Expression*>* arguments) {
+  Print("(");
+  for (int i = 0; i < arguments->length(); i++) {
+    if (i != 0) Print(", ");
+    Visit(arguments->at(i));
+  }
+  Print(")");
+}
+
+
+void PrettyPrinter::PrintLiteral(Handle<Object> value, bool quote) {
+  Object* object = *value;
+  if (object->IsString()) {
+    String* string = String::cast(object);
+    if (quote) Print("\"");
+    for (int i = 0; i < string->length(); i++) {
+      Print("%c", string->Get(i));
+    }
+    if (quote) Print("\"");
+  } else if (object == Heap::null_value()) {
+    Print("null");
+  } else if (object == Heap::true_value()) {
+    Print("true");
+  } else if (object == Heap::false_value()) {
+    Print("false");
+  } else if (object == Heap::undefined_value()) {
+    Print("undefined");
+  } else if (object->IsNumber()) {
+    Print("%g", object->Number());
+  } else if (object->IsJSObject()) {
+    // regular expression
+    if (object->IsJSFunction()) {
+      Print("JS-Function");
+    } else if (object->IsJSArray()) {
+      Print("JS-array[%u]", JSArray::cast(object)->length());
+    } else if (object->IsJSObject()) {
+      Print("JS-Object");
+    } else {
+      Print("?UNKNOWN?");
+    }
+  } else if (object->IsFixedArray()) {
+    Print("FixedArray");
+  } else {
+    Print("<unknown literal %p>", object);
+  }
+}
+
+
+void PrettyPrinter::PrintParameters(Scope* scope) {
+  Print("(");
+  for (int i = 0; i < scope->num_parameters(); i++) {
+    if (i > 0) Print(", ");
+    PrintLiteral(scope->parameter(i)->name(), false);
+  }
+  Print(")");
+}
+
+
+void PrettyPrinter::PrintDeclarations(ZoneList<Declaration*>* declarations) {
+  for (int i = 0; i < declarations->length(); i++) {
+    if (i > 0) Print(" ");
+    Visit(declarations->at(i));
+  }
+}
+
+
+void PrettyPrinter::PrintFunctionLiteral(FunctionLiteral* function) {
+  Print("function ");
+  PrintLiteral(function->name(), false);
+  PrintParameters(function->scope());
+  Print(" { ");
+  PrintDeclarations(function->scope()->declarations());
+  PrintStatements(function->body());
+  Print(" }");
+}
+
+
+void PrettyPrinter::PrintCaseClause(CaseClause* clause) {
+  if (clause->is_default()) {
+    Print("default");
+  } else {
+    Print("case ");
+    Visit(clause->label());
+  }
+  Print(": ");
+  PrintStatements(clause->statements());
+  if (clause->statements()->length() > 0)
+    Print(" ");
+}
+
+
+//-----------------------------------------------------------------------------
+
+class IndentedScope BASE_EMBEDDED {
+ public:
+  IndentedScope() {
+    ast_printer_->inc_indent();
+  }
+
+  explicit IndentedScope(const char* txt, SmiAnalysis* type = NULL) {
+    ast_printer_->PrintIndented(txt);
+    if ((type != NULL) && (type->IsKnown())) {
+      ast_printer_->Print(" (type = ");
+      ast_printer_->Print(SmiAnalysis::Type2String(type));
+      ast_printer_->Print(")");
+    }
+    ast_printer_->Print("\n");
+    ast_printer_->inc_indent();
+  }
+
+  virtual ~IndentedScope() {
+    ast_printer_->dec_indent();
+  }
+
+  static void SetAstPrinter(AstPrinter* a) { ast_printer_ = a; }
+
+ private:
+  static AstPrinter* ast_printer_;
+};
+
+
+AstPrinter* IndentedScope::ast_printer_ = NULL;
+
+
+//-----------------------------------------------------------------------------
+
+int AstPrinter::indent_ = 0;
+
+
+AstPrinter::AstPrinter() {
+  ASSERT(indent_ == 0);
+  IndentedScope::SetAstPrinter(this);
+}
+
+
+AstPrinter::~AstPrinter() {
+  ASSERT(indent_ == 0);
+  IndentedScope::SetAstPrinter(NULL);
+}
+
+
+void AstPrinter::PrintIndented(const char* txt) {
+  for (int i = 0; i < indent_; i++) {
+    Print(". ");
+  }
+  Print(txt);
+}
+
+
+void AstPrinter::PrintLiteralIndented(const char* info,
+                                      Handle<Object> value,
+                                      bool quote) {
+  PrintIndented(info);
+  Print(" ");
+  PrintLiteral(value, quote);
+  Print("\n");
+}
+
+
+void AstPrinter::PrintLiteralWithModeIndented(const char* info,
+                                              Variable* var,
+                                              Handle<Object> value,
+                                              SmiAnalysis* type) {
+  if (var == NULL) {
+    PrintLiteralIndented(info, value, true);
+  } else {
+    EmbeddedVector<char, 256> buf;
+    if (type->IsKnown()) {
+      OS::SNPrintF(buf, "%s (mode = %s, type = %s)", info,
+                   Variable::Mode2String(var->mode()),
+                   SmiAnalysis::Type2String(type));
+    } else {
+      OS::SNPrintF(buf, "%s (mode = %s)", info,
+                   Variable::Mode2String(var->mode()));
+    }
+    PrintLiteralIndented(buf.start(), value, true);
+  }
+}
+
+
+void AstPrinter::PrintLabelsIndented(const char* info, ZoneStringList* labels) {
+  if (labels != NULL && labels->length() > 0) {
+    if (info == NULL) {
+      PrintIndented("LABELS ");
+    } else {
+      PrintIndented(info);
+      Print(" ");
+    }
+    PrintLabels(labels);
+  } else if (info != NULL) {
+    PrintIndented(info);
+  }
+  Print("\n");
+}
+
+
+void AstPrinter::PrintIndentedVisit(const char* s, AstNode* node) {
+  IndentedScope indent(s);
+  Visit(node);
+}
+
+
+const char* AstPrinter::PrintProgram(FunctionLiteral* program) {
+  Init();
+  { IndentedScope indent("FUNC");
+    PrintLiteralIndented("NAME", program->name(), true);
+    PrintLiteralIndented("INFERRED NAME", program->inferred_name(), true);
+    PrintParameters(program->scope());
+    PrintDeclarations(program->scope()->declarations());
+    PrintStatements(program->body());
+  }
+  return Output();
+}
+
+
+void AstPrinter::PrintDeclarations(ZoneList<Declaration*>* declarations) {
+  if (declarations->length() > 0) {
+    IndentedScope indent("DECLS");
+    for (int i = 0; i < declarations->length(); i++) {
+      Visit(declarations->at(i));
+    }
+  }
+}
+
+
+void AstPrinter::PrintParameters(Scope* scope) {
+  if (scope->num_parameters() > 0) {
+    IndentedScope indent("PARAMS");
+    for (int i = 0; i < scope->num_parameters(); i++) {
+      PrintLiteralWithModeIndented("VAR", scope->parameter(i),
+                                   scope->parameter(i)->name(),
+                                   scope->parameter(i)->type());
+    }
+  }
+}
+
+
+void AstPrinter::PrintStatements(ZoneList<Statement*>* statements) {
+  for (int i = 0; i < statements->length(); i++) {
+    Visit(statements->at(i));
+  }
+}
+
+
+void AstPrinter::PrintArguments(ZoneList<Expression*>* arguments) {
+  for (int i = 0; i < arguments->length(); i++) {
+    Visit(arguments->at(i));
+  }
+}
+
+
+void AstPrinter::PrintCaseClause(CaseClause* clause) {
+  if (clause->is_default()) {
+    IndentedScope indent("DEFAULT");
+    PrintStatements(clause->statements());
+  } else {
+    IndentedScope indent("CASE");
+    Visit(clause->label());
+    PrintStatements(clause->statements());
+  }
+}
+
+
+void AstPrinter::VisitBlock(Block* node) {
+  const char* block_txt = node->is_initializer_block() ? "BLOCK INIT" : "BLOCK";
+  IndentedScope indent(block_txt);
+  PrintStatements(node->statements());
+}
+
+
+void AstPrinter::VisitDeclaration(Declaration* node) {
+  if (node->fun() == NULL) {
+    // var or const declarations
+    PrintLiteralWithModeIndented(Variable::Mode2String(node->mode()),
+                                 node->proxy()->AsVariable(),
+                                 node->proxy()->name(),
+                                 node->proxy()->AsVariable()->type());
+  } else {
+    // function declarations
+    PrintIndented("FUNCTION ");
+    PrintLiteral(node->proxy()->name(), true);
+    Print(" = function ");
+    PrintLiteral(node->fun()->name(), false);
+    Print("\n");
+  }
+}
+
+
+void AstPrinter::VisitExpressionStatement(ExpressionStatement* node) {
+  Visit(node->expression());
+}
+
+
+void AstPrinter::VisitEmptyStatement(EmptyStatement* node) {
+  PrintIndented("EMPTY\n");
+}
+
+
+void AstPrinter::VisitIfStatement(IfStatement* node) {
+  PrintIndentedVisit("IF", node->condition());
+  PrintIndentedVisit("THEN", node->then_statement());
+  if (node->HasElseStatement()) {
+    PrintIndentedVisit("ELSE", node->else_statement());
+  }
+}
+
+
+void AstPrinter::VisitContinueStatement(ContinueStatement* node) {
+  PrintLabelsIndented("CONTINUE", node->target()->labels());
+}
+
+
+void AstPrinter::VisitBreakStatement(BreakStatement* node) {
+  PrintLabelsIndented("BREAK", node->target()->labels());
+}
+
+
+void AstPrinter::VisitReturnStatement(ReturnStatement* node) {
+  PrintIndentedVisit("RETURN", node->expression());
+}
+
+
+void AstPrinter::VisitWithEnterStatement(WithEnterStatement* node) {
+  PrintIndentedVisit("WITH ENTER", node->expression());
+}
+
+
+void AstPrinter::VisitWithExitStatement(WithExitStatement* node) {
+  PrintIndented("WITH EXIT\n");
+}
+
+
+void AstPrinter::VisitSwitchStatement(SwitchStatement* node) {
+  IndentedScope indent("SWITCH");
+  PrintLabelsIndented(NULL, node->labels());
+  PrintIndentedVisit("TAG", node->tag());
+  for (int i = 0; i < node->cases()->length(); i++) {
+    PrintCaseClause(node->cases()->at(i));
+  }
+}
+
+
+void AstPrinter::VisitLoopStatement(LoopStatement* node) {
+  IndentedScope indent(node->OperatorString());
+  PrintLabelsIndented(NULL, node->labels());
+  if (node->init()) PrintIndentedVisit("INIT", node->init());
+  if (node->cond()) PrintIndentedVisit("COND", node->cond());
+  if (node->body()) PrintIndentedVisit("BODY", node->body());
+  if (node->next()) PrintIndentedVisit("NEXT", node->next());
+}
+
+
+void AstPrinter::VisitForInStatement(ForInStatement* node) {
+  IndentedScope indent("FOR IN");
+  PrintIndentedVisit("FOR", node->each());
+  PrintIndentedVisit("IN", node->enumerable());
+  PrintIndentedVisit("BODY", node->body());
+}
+
+
+void AstPrinter::VisitTryCatch(TryCatch* node) {
+  IndentedScope indent("TRY CATCH");
+  PrintIndentedVisit("TRY", node->try_block());
+  PrintIndentedVisit("CATCHVAR", node->catch_var());
+  PrintIndentedVisit("CATCH", node->catch_block());
+}
+
+
+void AstPrinter::VisitTryFinally(TryFinally* node) {
+  IndentedScope indent("TRY FINALLY");
+  PrintIndentedVisit("TRY", node->try_block());
+  PrintIndentedVisit("FINALLY", node->finally_block());
+}
+
+
+void AstPrinter::VisitDebuggerStatement(DebuggerStatement* node) {
+  IndentedScope indent("DEBUGGER");
+}
+
+
+void AstPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
+  IndentedScope indent("FUNC LITERAL");
+  PrintLiteralIndented("NAME", node->name(), false);
+  PrintLiteralIndented("INFERRED NAME", node->inferred_name(), false);
+  PrintParameters(node->scope());
+  // We don't want to see the function literal in this case: it
+  // will be printed via PrintProgram when the code for it is
+  // generated.
+  // PrintStatements(node->body());
+}
+
+
+void AstPrinter::VisitFunctionBoilerplateLiteral(
+    FunctionBoilerplateLiteral* node) {
+  IndentedScope indent("FUNC LITERAL");
+  PrintLiteralIndented("BOILERPLATE", node->boilerplate(), true);
+}
+
+
+void AstPrinter::VisitConditional(Conditional* node) {
+  IndentedScope indent("CONDITIONAL");
+  PrintIndentedVisit("?", node->condition());
+  PrintIndentedVisit("THEN", node->then_expression());
+  PrintIndentedVisit("ELSE", node->else_expression());
+}
+
+
+void AstPrinter::VisitLiteral(Literal* node) {
+  PrintLiteralIndented("LITERAL", node->handle(), true);
+}
+
+
+void AstPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
+  IndentedScope indent("REGEXP LITERAL");
+  PrintLiteralIndented("PATTERN", node->pattern(), false);
+  PrintLiteralIndented("FLAGS", node->flags(), false);
+}
+
+
+void AstPrinter::VisitObjectLiteral(ObjectLiteral* node) {
+  IndentedScope indent("OBJ LITERAL");
+  for (int i = 0; i < node->properties()->length(); i++) {
+    const char* prop_kind = NULL;
+    switch (node->properties()->at(i)->kind()) {
+      case ObjectLiteral::Property::CONSTANT:
+        prop_kind = "PROPERTY - CONSTANT";
+        break;
+      case ObjectLiteral::Property::COMPUTED:
+        prop_kind = "PROPERTY - COMPUTED";
+        break;
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+        prop_kind = "PROPERTY - MATERIALIZED_LITERAL";
+        break;
+      case ObjectLiteral::Property::PROTOTYPE:
+        prop_kind = "PROPERTY - PROTOTYPE";
+        break;
+      case ObjectLiteral::Property::GETTER:
+        prop_kind = "PROPERTY - GETTER";
+        break;
+      case ObjectLiteral::Property::SETTER:
+        prop_kind = "PROPERTY - SETTER";
+        break;
+      default:
+        UNREACHABLE();
+    }
+    IndentedScope prop(prop_kind);
+    PrintIndentedVisit("KEY", node->properties()->at(i)->key());
+    PrintIndentedVisit("VALUE", node->properties()->at(i)->value());
+  }
+}
+
+
+void AstPrinter::VisitArrayLiteral(ArrayLiteral* node) {
+  IndentedScope indent("ARRAY LITERAL");
+  if (node->values()->length() > 0) {
+    IndentedScope indent("VALUES");
+    for (int i = 0; i < node->values()->length(); i++) {
+      Visit(node->values()->at(i));
+    }
+  }
+}
+
+
+void AstPrinter::VisitCatchExtensionObject(CatchExtensionObject* node) {
+  IndentedScope indent("CatchExtensionObject");
+  PrintIndentedVisit("KEY", node->key());
+  PrintIndentedVisit("VALUE", node->value());
+}
+
+
+void AstPrinter::VisitSlot(Slot* node) {
+  PrintIndented("SLOT ");
+  switch (node->type()) {
+    case Slot::PARAMETER:
+      Print("parameter[%d]", node->index());
+      break;
+    case Slot::LOCAL:
+      Print("frame[%d]", node->index());
+      break;
+    case Slot::CONTEXT:
+      Print(".context[%d]", node->index());
+      break;
+    case Slot::LOOKUP:
+      Print(".context[");
+      PrintLiteral(node->var()->name(), false);
+      Print("]");
+      break;
+    default:
+      UNREACHABLE();
+  }
+  Print("\n");
+}
+
+
+void AstPrinter::VisitVariableProxy(VariableProxy* node) {
+  PrintLiteralWithModeIndented("VAR PROXY", node->AsVariable(), node->name(),
+                               node->type());
+  Variable* var = node->var();
+  if (var != NULL && var->rewrite() != NULL) {
+    IndentedScope indent;
+    Visit(var->rewrite());
+  }
+}
+
+
+void AstPrinter::VisitAssignment(Assignment* node) {
+  IndentedScope indent(Token::Name(node->op()), node->type());
+  Visit(node->target());
+  Visit(node->value());
+}
+
+
+void AstPrinter::VisitThrow(Throw* node) {
+  PrintIndentedVisit("THROW", node->exception());
+}
+
+
+void AstPrinter::VisitProperty(Property* node) {
+  IndentedScope indent("PROPERTY");
+  Visit(node->obj());
+  Literal* literal = node->key()->AsLiteral();
+  if (literal != NULL && literal->handle()->IsSymbol()) {
+    PrintLiteralIndented("NAME", literal->handle(), false);
+  } else {
+    PrintIndentedVisit("KEY", node->key());
+  }
+}
+
+
+void AstPrinter::VisitCall(Call* node) {
+  IndentedScope indent("CALL");
+  Visit(node->expression());
+  PrintArguments(node->arguments());
+}
+
+
+void AstPrinter::VisitCallNew(CallNew* node) {
+  IndentedScope indent("CALL NEW");
+  Visit(node->expression());
+  PrintArguments(node->arguments());
+}
+
+
+void AstPrinter::VisitCallRuntime(CallRuntime* node) {
+  PrintLiteralIndented("CALL RUNTIME ", node->name(), false);
+  IndentedScope indent;
+  PrintArguments(node->arguments());
+}
+
+
+void AstPrinter::VisitUnaryOperation(UnaryOperation* node) {
+  PrintIndentedVisit(Token::Name(node->op()), node->expression());
+}
+
+
+void AstPrinter::VisitCountOperation(CountOperation* node) {
+  EmbeddedVector<char, 128> buf;
+  if (node->type()->IsKnown()) {
+    OS::SNPrintF(buf, "%s %s (type = %s)",
+                 (node->is_prefix() ? "PRE" : "POST"),
+                 Token::Name(node->op()),
+                 SmiAnalysis::Type2String(node->type()));
+  } else {
+    OS::SNPrintF(buf, "%s %s", (node->is_prefix() ? "PRE" : "POST"),
+                 Token::Name(node->op()));
+  }
+  PrintIndentedVisit(buf.start(), node->expression());
+}
+
+
+void AstPrinter::VisitBinaryOperation(BinaryOperation* node) {
+  IndentedScope indent(Token::Name(node->op()), node->type());
+  Visit(node->left());
+  Visit(node->right());
+}
+
+
+void AstPrinter::VisitCompareOperation(CompareOperation* node) {
+  IndentedScope indent(Token::Name(node->op()), node->type());
+  Visit(node->left());
+  Visit(node->right());
+}
+
+
+void AstPrinter::VisitThisFunction(ThisFunction* node) {
+  IndentedScope indent("THIS-FUNCTION");
+}
+
+
+
+#endif  // DEBUG
+
+} }  // namespace v8::internal
diff --git a/src/prettyprinter.h b/src/prettyprinter.h
new file mode 100644
index 0000000..8a6d1fb
--- /dev/null
+++ b/src/prettyprinter.h
@@ -0,0 +1,119 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PRETTYPRINTER_H_
+#define V8_PRETTYPRINTER_H_
+
+#include "ast.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef DEBUG
+
+class PrettyPrinter: public AstVisitor {
+ public:
+  PrettyPrinter();
+  virtual ~PrettyPrinter();
+
+  // The following routines print a node into a string.
+  // The result string is alive as long as the PrettyPrinter is alive.
+  const char* Print(AstNode* node);
+  const char* PrintExpression(FunctionLiteral* program);
+  const char* PrintProgram(FunctionLiteral* program);
+
+  // Print a node to stdout.
+  static void PrintOut(AstNode* node);
+
+  // Individual nodes
+#define DEF_VISIT(type)                         \
+  virtual void Visit##type(type* node);
+  AST_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+ private:
+  char* output_;  // output string buffer
+  int size_;  // output_ size
+  int pos_;  // current printing position
+
+ protected:
+  void Init();
+  void Print(const char* format, ...);
+  const char* Output() const { return output_; }
+
+  virtual void PrintStatements(ZoneList<Statement*>* statements);
+  void PrintLabels(ZoneStringList* labels);
+  virtual void PrintArguments(ZoneList<Expression*>* arguments);
+  void PrintLiteral(Handle<Object> value, bool quote);
+  void PrintParameters(Scope* scope);
+  void PrintDeclarations(ZoneList<Declaration*>* declarations);
+  void PrintFunctionLiteral(FunctionLiteral* function);
+  void PrintCaseClause(CaseClause* clause);
+};
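+
+// Illustrative usage sketch (DEBUG builds only); 'program' and 'node' are
+// hypothetical pointers obtained from the parser.
+//
+//   PrettyPrinter printer;
+//   PrintF("%s", printer.PrintProgram(program));
+//
+//   // Or, for a single AST node:
+//   PrettyPrinter::PrintOut(node);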
+
+
+// Prints the AST structure
+class AstPrinter: public PrettyPrinter {
+ public:
+  AstPrinter();
+  virtual ~AstPrinter();
+
+  const char* PrintProgram(FunctionLiteral* program);
+
+  // Individual nodes
+#define DEF_VISIT(type)                         \
+  virtual void Visit##type(type* node);
+  AST_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+ private:
+  friend class IndentedScope;
+  void PrintIndented(const char* txt);
+  void PrintIndentedVisit(const char* s, AstNode* node);
+
+  void PrintStatements(ZoneList<Statement*>* statements);
+  void PrintDeclarations(ZoneList<Declaration*>* declarations);
+  void PrintParameters(Scope* scope);
+  void PrintArguments(ZoneList<Expression*>* arguments);
+  void PrintCaseClause(CaseClause* clause);
+  void PrintLiteralIndented(const char* info, Handle<Object> value, bool quote);
+  void PrintLiteralWithModeIndented(const char* info,
+                                    Variable* var,
+                                    Handle<Object> value,
+                                    SmiAnalysis* type);
+  void PrintLabelsIndented(const char* info, ZoneStringList* labels);
+
+  void inc_indent() { indent_++; }
+  void dec_indent() { indent_--; }
+
+  static int indent_;
+};
+
+#endif  // DEBUG
+
+} }  // namespace v8::internal
+
+#endif  // V8_PRETTYPRINTER_H_
diff --git a/src/property.cc b/src/property.cc
new file mode 100644
index 0000000..caa7397
--- /dev/null
+++ b/src/property.cc
@@ -0,0 +1,96 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+
+#ifdef DEBUG
+void LookupResult::Print() {
+  if (!IsValid()) {
+    PrintF("Not Found\n");
+    return;
+  }
+
+  PrintF("LookupResult:\n");
+  PrintF(" -cacheable = %s\n", IsCacheable() ? "true" : "false");
+  PrintF(" -attributes = %x\n", GetAttributes());
+  switch (type()) {
+    case NORMAL:
+      PrintF(" -type = normal\n");
+      PrintF(" -entry = %d", GetDictionaryEntry());
+      break;
+    case MAP_TRANSITION:
+      PrintF(" -type = map transition\n");
+      PrintF(" -map:\n");
+      GetTransitionMap()->Print();
+      PrintF("\n");
+      break;
+    case CONSTANT_FUNCTION:
+      PrintF(" -type = constant function\n");
+      PrintF(" -function:\n");
+      GetConstantFunction()->Print();
+      PrintF("\n");
+      break;
+    case FIELD:
+      PrintF(" -type = field\n");
+      PrintF(" -index = %d", GetFieldIndex());
+      PrintF("\n");
+      break;
+    case CALLBACKS:
+      PrintF(" -type = call backs\n");
+      PrintF(" -callback object:\n");
+      GetCallbackObject()->Print();
+      break;
+    case INTERCEPTOR:
+      PrintF(" -type = lookup interceptor\n");
+      break;
+    case CONSTANT_TRANSITION:
+      PrintF(" -type = constant property transition\n");
+      break;
+    case NULL_DESCRIPTOR:
+      PrintF(" =type = null descriptor\n");
+      break;
+  }
+}
+
+
+void Descriptor::Print() {
+  PrintF("Descriptor ");
+  GetKey()->ShortPrint();
+  PrintF(" @ ");
+  GetValue()->ShortPrint();
+  PrintF(" %d\n", GetDetails().index());
+}
+
+
+#endif
+
+
+} }  // namespace v8::internal
diff --git a/src/property.h b/src/property.h
new file mode 100644
index 0000000..1869719
--- /dev/null
+++ b/src/property.h
@@ -0,0 +1,327 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PROPERTY_H_
+#define V8_PROPERTY_H_
+
+namespace v8 {
+namespace internal {
+
+
+// Abstraction for elements in instance-descriptor arrays.
+//
+// Each descriptor has a key, property attributes, property type,
+// property index (in the actual instance-descriptor array) and
+// optionally a piece of data.
+//
+
+class Descriptor BASE_EMBEDDED {
+ public:
+  static int IndexFromValue(Object* value) {
+    return Smi::cast(value)->value();
+  }
+
+  Object* KeyToSymbol() {
+    if (!StringShape(key_).IsSymbol()) {
+      Object* result = Heap::LookupSymbol(key_);
+      if (result->IsFailure()) return result;
+      key_ = String::cast(result);
+    }
+    return key_;
+  }
+
+  String* GetKey() { return key_; }
+  Object* GetValue() { return value_; }
+  PropertyDetails GetDetails() { return details_; }
+
+#ifdef DEBUG
+  void Print();
+#endif
+
+  void SetEnumerationIndex(int index) {
+    ASSERT(PropertyDetails::IsValidIndex(index));
+    details_ = PropertyDetails(details_.attributes(), details_.type(), index);
+  }
+
+ private:
+  String* key_;
+  Object* value_;
+  PropertyDetails details_;
+
+ protected:
+  Descriptor() : details_(Smi::FromInt(0)) {}
+
+  void Init(String* key, Object* value, PropertyDetails details) {
+    key_ = key;
+    value_ = value;
+    details_ = details;
+  }
+
+  Descriptor(String* key, Object* value, PropertyDetails details)
+      : key_(key),
+        value_(value),
+        details_(details) { }
+
+  Descriptor(String* key,
+             Object* value,
+             PropertyAttributes attributes,
+             PropertyType type,
+             int index = 0)
+      : key_(key),
+        value_(value),
+        details_(attributes, type, index) { }
+
+  friend class DescriptorArray;
+};
+
+// A pointer from a map to the new map that is created by adding
+// a named property.  These are key to the speed and functioning of V8.
+// The two maps should always have the same prototype, since
+// MapSpace::CreateBackPointers depends on this.
+class MapTransitionDescriptor: public Descriptor {
+ public:
+  MapTransitionDescriptor(String* key, Map* map, PropertyAttributes attributes)
+      : Descriptor(key, map, attributes, MAP_TRANSITION) { }
+};
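+
+// Roughly: after var a = {}; a.x = 1; and var b = {}; b.x = 1; both
+// objects typically end up with the same map, because adding 'x' to the
+// empty-object map follows the recorded transition instead of creating
+// a fresh map each time.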
+
+// Marks a field name in a map so that adding the field is guaranteed
+// to create a FIELD descriptor in the new map.  Used after a constant
+// function has been added for the first time, creating a
+// CONSTANT_FUNCTION descriptor in the new map.  This avoids creating
+// multiple maps with the same CONSTANT_FUNCTION field.
+class ConstTransitionDescriptor: public Descriptor {
+ public:
+  explicit ConstTransitionDescriptor(String* key)
+      : Descriptor(key, Smi::FromInt(0), NONE, CONSTANT_TRANSITION) { }
+};
+
+
+class FieldDescriptor: public Descriptor {
+ public:
+  FieldDescriptor(String* key,
+                  int field_index,
+                  PropertyAttributes attributes,
+                  int index = 0)
+      : Descriptor(key, Smi::FromInt(field_index), attributes, FIELD, index) {}
+};
+
+
+class ConstantFunctionDescriptor: public Descriptor {
+ public:
+  ConstantFunctionDescriptor(String* key,
+                             JSFunction* function,
+                             PropertyAttributes attributes,
+                             int index = 0)
+      : Descriptor(key, function, attributes, CONSTANT_FUNCTION, index) {}
+};
+
+
+class CallbacksDescriptor: public Descriptor {
+ public:
+  CallbacksDescriptor(String* key,
+                      Object* proxy,
+                      PropertyAttributes attributes,
+                      int index = 0)
+      : Descriptor(key, proxy, attributes, CALLBACKS, index) {}
+};
+
+
+class LookupResult BASE_EMBEDDED {
+ public:
+  // Where did we find the result?
+  enum {
+    NOT_FOUND,
+    DESCRIPTOR_TYPE,
+    DICTIONARY_TYPE,
+    INTERCEPTOR_TYPE,
+    CONSTANT_TYPE
+  } lookup_type_;
+
+  LookupResult()
+      : lookup_type_(NOT_FOUND),
+        cacheable_(true),
+        details_(NONE, NORMAL) {}
+
+  void DescriptorResult(JSObject* holder, PropertyDetails details, int number) {
+    lookup_type_ = DESCRIPTOR_TYPE;
+    holder_ = holder;
+    details_ = details;
+    number_ = number;
+  }
+
+  void ConstantResult(JSObject* holder) {
+    lookup_type_ = CONSTANT_TYPE;
+    holder_ = holder;
+    details_ =
+        PropertyDetails(static_cast<PropertyAttributes>(DONT_ENUM |
+                                                        DONT_DELETE),
+                        CALLBACKS);
+    number_ = -1;
+  }
+
+  void DictionaryResult(JSObject* holder, int entry) {
+    lookup_type_ = DICTIONARY_TYPE;
+    holder_ = holder;
+    details_ = holder->property_dictionary()->DetailsAt(entry);
+    number_ = entry;
+  }
+
+  void InterceptorResult(JSObject* holder) {
+    lookup_type_ = INTERCEPTOR_TYPE;
+    holder_ = holder;
+    details_ = PropertyDetails(NONE, INTERCEPTOR);
+  }
+
+  void NotFound() {
+    lookup_type_ = NOT_FOUND;
+  }
+
+  JSObject* holder() {
+    ASSERT(IsValid());
+    return holder_;
+  }
+
+  PropertyType type() {
+    ASSERT(IsValid());
+    return details_.type();
+  }
+
+  bool IsTransitionType() {
+    PropertyType t = type();
+    if (t == MAP_TRANSITION || t == CONSTANT_TRANSITION) return true;
+    return false;
+  }
+
+  PropertyAttributes GetAttributes() {
+    ASSERT(IsValid());
+    return details_.attributes();
+  }
+
+  PropertyDetails GetPropertyDetails() {
+    return details_;
+  }
+
+  bool IsReadOnly() { return details_.IsReadOnly(); }
+  bool IsDontDelete() { return details_.IsDontDelete(); }
+  bool IsDontEnum() { return details_.IsDontEnum(); }
+  bool IsDeleted() { return details_.IsDeleted(); }
+
+  bool IsValid() { return lookup_type_ != NOT_FOUND; }
+  bool IsNotFound() { return lookup_type_ == NOT_FOUND; }
+
+  // Tells whether the result is a property.
+  // Excluding transitions and the null descriptor.
+  bool IsProperty() {
+    return IsValid() && type() < FIRST_PHANTOM_PROPERTY_TYPE;
+  }
+
+  bool IsCacheable() { return cacheable_; }
+  void DisallowCaching() { cacheable_ = false; }
+
+  // Tells whether the value needs to be loaded.
+  bool IsLoaded() {
+    if (lookup_type_ == DESCRIPTOR_TYPE || lookup_type_ == DICTIONARY_TYPE) {
+      Object* target = GetLazyValue();
+      return !target->IsJSObject() || JSObject::cast(target)->IsLoaded();
+    }
+    return true;
+  }
+
+  Object* GetLazyValue() {
+    switch (type()) {
+      case FIELD:
+        return holder()->FastPropertyAt(GetFieldIndex());
+      case NORMAL: {
+        Object* value;
+        value = holder()->property_dictionary()->ValueAt(GetDictionaryEntry());
+        if (holder()->IsGlobalObject()) {
+          value = JSGlobalPropertyCell::cast(value)->value();
+        }
+        return value;
+      }
+      case CONSTANT_FUNCTION:
+        return GetConstantFunction();
+      default:
+        return Smi::FromInt(0);
+    }
+  }
+
+  Map* GetTransitionMap() {
+    ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
+    ASSERT(type() == MAP_TRANSITION);
+    return Map::cast(GetValue());
+  }
+
+  int GetFieldIndex() {
+    ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
+    ASSERT(type() == FIELD);
+    return Descriptor::IndexFromValue(GetValue());
+  }
+
+  int GetDictionaryEntry() {
+    ASSERT(lookup_type_ == DICTIONARY_TYPE);
+    return number_;
+  }
+
+  JSFunction* GetConstantFunction() {
+    ASSERT(type() == CONSTANT_FUNCTION);
+    return JSFunction::cast(GetValue());
+  }
+
+  Object* GetCallbackObject() {
+    if (lookup_type_ == CONSTANT_TYPE) {
+      // For now __proto__ is the only constant type.
+      return Heap::prototype_accessors();
+    }
+    return GetValue();
+  }
+
+#ifdef DEBUG
+  void Print();
+#endif
+
+  Object* GetValue() {
+    if (lookup_type_ == DESCRIPTOR_TYPE) {
+      DescriptorArray* descriptors = holder()->map()->instance_descriptors();
+      return descriptors->GetValue(number_);
+    }
+    // In the dictionary case, the data is held in the value field.
+    ASSERT(lookup_type_ == DICTIONARY_TYPE);
+    return holder()->GetNormalizedProperty(this);
+  }
+
+ private:
+  JSObject* holder_;
+  int number_;
+  bool cacheable_;
+  PropertyDetails details_;
+};
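+
+// Illustrative use: a LookupResult is typically stack-allocated, filled
+// in by one of the lookup routines on JSObject, and then queried:
+//
+//   LookupResult result;
+//   obj->LocalLookup(*name, &result);  // assumed lookup entry point
+//   if (result.IsProperty() && !result.IsReadOnly()) {
+//     Object* value = result.GetLazyValue();
+//   }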
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_PROPERTY_H_
diff --git a/src/regexp-delay.js b/src/regexp-delay.js
new file mode 100644
index 0000000..14c3644
--- /dev/null
+++ b/src/regexp-delay.js
@@ -0,0 +1,412 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Expect $Object = global.Object;
+// Expect $Array = global.Array;
+
+const $RegExp = global.RegExp;
+
+// A recursive descent parser for Patterns according to the grammar of
+// ECMA-262 15.10.1, with deviations noted below.
+function DoConstructRegExp(object, pattern, flags, isConstructorCall) {
+  // RegExp : Called as constructor; see ECMA-262, section 15.10.4.
+  if (IS_REGEXP(pattern)) {
+    if (!IS_UNDEFINED(flags)) {
+      throw MakeTypeError('regexp_flags', []);
+    }
+    flags = (pattern.global ? 'g' : '')
+        + (pattern.ignoreCase ? 'i' : '')
+        + (pattern.multiline ? 'm' : '');
+    pattern = pattern.source;
+  }
+
+  pattern = IS_UNDEFINED(pattern) ? '' : ToString(pattern);
+  flags = IS_UNDEFINED(flags) ? '' : ToString(flags);
+
+  var global = false;
+  var ignoreCase = false;
+  var multiline = false;
+
+  for (var i = 0; i < flags.length; i++) {
+    var c = StringCharAt.call(flags, i);
+    switch (c) {
+      case 'g':
+        // Allow duplicate flags to be consistent with JSC and others.
+        global = true;
+        break;
+      case 'i':
+        ignoreCase = true;
+        break;
+      case 'm':
+        multiline = true;
+        break;
+      default:
+        // Ignore flags that have no meaning to be consistent with
+        // JSC.
+        break;
+    }
+  }
+
+  if (isConstructorCall) {
+    // ECMA-262, section 15.10.7.1.
+    %SetProperty(object, 'source', pattern,
+                 DONT_DELETE | READ_ONLY | DONT_ENUM);
+
+    // ECMA-262, section 15.10.7.2.
+    %SetProperty(object, 'global', global, DONT_DELETE | READ_ONLY | DONT_ENUM);
+
+    // ECMA-262, section 15.10.7.3.
+    %SetProperty(object, 'ignoreCase', ignoreCase,
+                 DONT_DELETE | READ_ONLY | DONT_ENUM);
+
+    // ECMA-262, section 15.10.7.4.
+    %SetProperty(object, 'multiline', multiline,
+                 DONT_DELETE | READ_ONLY | DONT_ENUM);
+
+    // ECMA-262, section 15.10.7.5.
+    %SetProperty(object, 'lastIndex', 0, DONT_DELETE | DONT_ENUM);
+  } else { // RegExp is being recompiled via RegExp.prototype.compile.
+    %IgnoreAttributesAndSetProperty(object, 'source', pattern);
+    %IgnoreAttributesAndSetProperty(object, 'global', global);
+    %IgnoreAttributesAndSetProperty(object, 'ignoreCase', ignoreCase);
+    %IgnoreAttributesAndSetProperty(object, 'multiline', multiline);
+    %IgnoreAttributesAndSetProperty(object, 'lastIndex', 0);
+  }
+
+  // Call internal function to compile the pattern.
+  %RegExpCompile(object, pattern, flags);
+}
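+
+// For illustration, roughly what the code above produces:
+//   new RegExp(/ab/gi)     // source copied from the regexp; flags
+//                          // rebuilt as 'gi' from its global/ignoreCase/
+//                          // multiline properties.
+//   new RegExp('a+', 'g')  // source 'a+', global true, lastIndex 0.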
+
+
+function RegExpConstructor(pattern, flags) {
+  if (%_IsConstructCall()) {
+    DoConstructRegExp(this, pattern, flags, true);
+  } else {
+    // RegExp : Called as function; see ECMA-262, section 15.10.3.1.
+    if (IS_REGEXP(pattern) && IS_UNDEFINED(flags)) {
+      return pattern;
+    }
+    return new $RegExp(pattern, flags);
+  }
+}
+
+
+// Deprecated RegExp.prototype.compile method.  We behave as if the
+// constructor were called again.  In SpiderMonkey, this method returns the
+// regexp object.  In JSC, it returns undefined.  For compatibility with JSC,
+// we match their behavior.
+function CompileRegExp(pattern, flags) {
+  // Both JSC and SpiderMonkey treat a missing pattern argument as the
+  // empty pattern string, and an actual undefined value passed as the
+  // pattern as the string 'undefined'.  Note that JSC is inconsistent
+  // here, treating undefined values differently in
+  // RegExp.prototype.compile and in the constructor, where they are
+  // treated as the empty string.  For compatibility with JSC, we match
+  // their behavior.
+  if (IS_UNDEFINED(pattern) && %_ArgumentsLength() != 0) {
+    DoConstructRegExp(this, 'undefined', flags, false);
+  } else {
+    DoConstructRegExp(this, pattern, flags, false);
+  }
+}
+
+
+function DoRegExpExec(regexp, string, index) {
+  return %RegExpExec(regexp, string, index, lastMatchInfo);
+}
+
+
+function DoRegExpExecGlobal(regexp, string) {
+  // Returns an array of arrays of substring indices.
+  return %RegExpExecGlobal(regexp, string, lastMatchInfo);
+}
+
+
+function RegExpExec(string) {
+  if (!IS_REGEXP(this)) {
+    throw MakeTypeError('method_called_on_incompatible',
+                        ['RegExp.prototype.exec', this]);
+  }
+  if (%_ArgumentsLength() == 0) {
+    var regExpInput = LAST_INPUT(lastMatchInfo);
+    if (IS_UNDEFINED(regExpInput)) {
+      throw MakeError('no_input_to_regexp', [this]);
+    }
+    string = regExpInput;
+  }
+  var s = ToString(string);
+  var length = s.length;
+  var lastIndex = this.lastIndex;
+  var i = this.global ? TO_INTEGER(lastIndex) : 0;
+
+  if (i < 0 || i > s.length) {
+    this.lastIndex = 0;
+    return null;
+  }
+
+  %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, s, lastIndex]);
+  // matchIndices is either null or the lastMatchInfo array.
+  var matchIndices = %RegExpExec(this, s, i, lastMatchInfo);
+
+  if (matchIndices == null) {
+    if (this.global) this.lastIndex = 0;
+    return matchIndices; // no match
+  }
+
+  var numResults = NUMBER_OF_CAPTURES(lastMatchInfo) >> 1;
+  var result = new $Array(numResults);
+  for (var i = 0; i < numResults; i++) {
+    var matchStart = lastMatchInfo[CAPTURE(i << 1)];
+    var matchEnd = lastMatchInfo[CAPTURE((i << 1) + 1)];
+    if (matchStart != -1 && matchEnd != -1) {
+      result[i] = SubString(s, matchStart, matchEnd);
+    } else {
+      // Make sure the element is present. Avoid reading the undefined
+      // property from the global object since this may change.
+      result[i] = void 0;
+    }
+  }
+
+  if (this.global)
+    this.lastIndex = lastMatchInfo[CAPTURE1];
+  result.index = lastMatchInfo[CAPTURE0];
+  result.input = s;
+  return result;
+}
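+
+// For illustration, /(\d+)/.exec('abc123') built this way returns
+// ['123', '123'] with result.index == 3 and result.input == 'abc123';
+// if the 'g' flag is set, lastIndex is also advanced to 6.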
+
+
+// Section 15.10.6.3 doesn't actually make sense, but the intention seems to
+// be that test is defined in terms of RegExp.prototype.exec.  However, it
+// probably means the original value of RegExp.prototype.exec, which is what
+// everybody else implements.
+function RegExpTest(string) {
+  if (!IS_REGEXP(this)) {
+    throw MakeTypeError('method_called_on_incompatible',
+                        ['RegExp.prototype.test', this]);
+  }
+  if (%_ArgumentsLength() == 0) {
+    var regExpInput = LAST_INPUT(lastMatchInfo);
+    if (IS_UNDEFINED(regExpInput)) {
+      throw MakeError('no_input_to_regexp', [this]);
+    }
+    string = regExpInput;
+  }
+  var s = ToString(string);
+  var length = s.length;
+  var lastIndex = this.lastIndex;
+  var i = this.global ? TO_INTEGER(lastIndex) : 0;
+
+  if (i < 0 || i > s.length) {
+    this.lastIndex = 0;
+    return false;
+  }
+
+  %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, s, lastIndex]);
+  // matchIndices is either null or the lastMatchInfo array.
+  var matchIndices = %RegExpExec(this, s, i, lastMatchInfo);
+
+  if (matchIndices == null) {
+    if (this.global) this.lastIndex = 0;
+    return false;
+  }
+
+  if (this.global) this.lastIndex = lastMatchInfo[CAPTURE1];
+  return true;
+}
+
+
+function RegExpToString() {
+  // If this.source is an empty string, output /(?:)/.
+  // http://bugzilla.mozilla.org/show_bug.cgi?id=225550
+  // ecma_2/RegExp/properties-001.js.
+  var src = this.source ? this.source : '(?:)';
+  var result = '/' + src + '/';
+  if (this.global)
+    result += 'g';
+  if (this.ignoreCase)
+    result += 'i';
+  if (this.multiline)
+    result += 'm';
+  return result;
+}
+
+
+// Getters for the static properties lastMatch, lastParen, leftContext, and
+// rightContext of the RegExp constructor.  The properties are computed based
+// on the captures array of the last successful match and the subject string
+// of the last successful match.
+function RegExpGetLastMatch() {
+  var regExpSubject = LAST_SUBJECT(lastMatchInfo);
+  return SubString(regExpSubject,
+                   lastMatchInfo[CAPTURE0],
+                   lastMatchInfo[CAPTURE1]);
+}
+
+
+function RegExpGetLastParen() {
+  var length = NUMBER_OF_CAPTURES(lastMatchInfo);
+  if (length <= 2) return '';  // There were no captures.
+  // We match the SpiderMonkey behavior: return the substring defined by the
+  // last pair (after the first pair) of elements of the capture array even if
+  // it is empty.
+  var regExpSubject = LAST_SUBJECT(lastMatchInfo);
+  var start = lastMatchInfo[CAPTURE(length - 2)];
+  var end = lastMatchInfo[CAPTURE(length - 1)];
+  if (start != -1 && end != -1) {
+    return SubString(regExpSubject, start, end);
+  }
+  return "";
+}
+
+
+function RegExpGetLeftContext() {
+  return SubString(LAST_SUBJECT(lastMatchInfo),
+                   0,
+                   lastMatchInfo[CAPTURE0]);
+}
+
+
+function RegExpGetRightContext() {
+  var subject = LAST_SUBJECT(lastMatchInfo);
+  return SubString(subject,
+                   lastMatchInfo[CAPTURE1],
+                   subject.length);
+}
+
+
+// The properties $1..$9 are the first nine capturing substrings of the last
+// successful match, or ''.  The function RegExpMakeCaptureGetter will be
+// called with indices from 1 to 9.
+function RegExpMakeCaptureGetter(n) {
+  return function() {
+    var index = n * 2;
+    if (index >= NUMBER_OF_CAPTURES(lastMatchInfo)) return '';
+    var matchStart = lastMatchInfo[CAPTURE(index)];
+    var matchEnd = lastMatchInfo[CAPTURE(index + 1)];
+    if (matchStart == -1 || matchEnd == -1) return '';
+    return SubString(LAST_SUBJECT(lastMatchInfo), matchStart, matchEnd);
+  };
+}
+
+
+// Property of the builtins object for recording the result of the last
+// regexp match.  The property lastMatchInfo includes the matchIndices
+// array of the last successful regexp match (an array of start/end index
+// pairs for the match and all the captured substrings); the invariant is
+// that there are at least two capture indices.  The array also contains
+// the subject string for the last successful match.
+var lastMatchInfo = [
+    2,                 // REGEXP_NUMBER_OF_CAPTURES
+    "",                // Last subject.
+    void 0,            // Last input - settable with RegExpSetInput.
+    0,                 // REGEXP_FIRST_CAPTURE + 0
+    0,                 // REGEXP_FIRST_CAPTURE + 1
+];
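+
+// Roughly, after /(\d+)/ matches 'abc123' the array holds 4 (the number
+// of capture indices: two start/end pairs), the subject 'abc123', the
+// last input, and the index pairs 3,6 for the whole match and 3,6 for
+// the first capture.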
+
+// -------------------------------------------------------------------
+
+function SetupRegExp() {
+  %FunctionSetInstanceClassName($RegExp, 'RegExp');
+  %FunctionSetPrototype($RegExp, new $Object());
+  %SetProperty($RegExp.prototype, 'constructor', $RegExp, DONT_ENUM);
+  %SetCode($RegExp, RegExpConstructor);
+
+  InstallFunctions($RegExp.prototype, DONT_ENUM, $Array(
+    "exec", RegExpExec,
+    "test", RegExpTest,
+    "toString", RegExpToString,
+    "compile", CompileRegExp
+  ));
+
+  // The length of compile is 1 in SpiderMonkey.
+  %FunctionSetLength($RegExp.prototype.compile, 1);
+
+  // The properties input, $input, and $_ are aliases for each other.
+  // When this value is set, the value it is set to is coerced to a string.
+  // Getter and setter for the input.
+  function RegExpGetInput() {
+    var regExpInput = LAST_INPUT(lastMatchInfo);
+    return IS_UNDEFINED(regExpInput) ? "" : regExpInput;
+  }
+  function RegExpSetInput(string) {
+    LAST_INPUT(lastMatchInfo) = ToString(string);
+  };
+
+  %DefineAccessor($RegExp, 'input', GETTER, RegExpGetInput, DONT_DELETE);
+  %DefineAccessor($RegExp, 'input', SETTER, RegExpSetInput, DONT_DELETE);
+  %DefineAccessor($RegExp, '$_', GETTER, RegExpGetInput, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$_', SETTER, RegExpSetInput, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$input', GETTER, RegExpGetInput, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$input', SETTER, RegExpSetInput, DONT_ENUM | DONT_DELETE);
+
+  // The properties multiline and $* are aliases for each other.  When this
+  // value is set in SpiderMonkey, the value it is set to is coerced to a
+  // boolean.  We mimic that behavior with a slight difference: in SpiderMonkey
+  // the value of the expression 'RegExp.multiline = null' (for instance) is the
+  // boolean false (i.e., the value after coercion), while in V8 it is the
+  // value null (i.e., the value before coercion).
+
+  // Getter and setter for multiline.
+  var multiline = false;
+  function RegExpGetMultiline() { return multiline; };
+  function RegExpSetMultiline(flag) { multiline = flag ? true : false; };
+
+  %DefineAccessor($RegExp, 'multiline', GETTER, RegExpGetMultiline, DONT_DELETE);
+  %DefineAccessor($RegExp, 'multiline', SETTER, RegExpSetMultiline, DONT_DELETE);
+  %DefineAccessor($RegExp, '$*', GETTER, RegExpGetMultiline, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$*', SETTER, RegExpSetMultiline, DONT_ENUM | DONT_DELETE);
+
+
+  function NoOpSetter(ignored) {}
+
+
+  // Static properties set by a successful match.
+  %DefineAccessor($RegExp, 'lastMatch', GETTER, RegExpGetLastMatch, DONT_DELETE);
+  %DefineAccessor($RegExp, 'lastMatch', SETTER, NoOpSetter, DONT_DELETE);
+  %DefineAccessor($RegExp, '$&', GETTER, RegExpGetLastMatch, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$&', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, 'lastParen', GETTER, RegExpGetLastParen, DONT_DELETE);
+  %DefineAccessor($RegExp, 'lastParen', SETTER, NoOpSetter, DONT_DELETE);
+  %DefineAccessor($RegExp, '$+', GETTER, RegExpGetLastParen, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$+', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, 'leftContext', GETTER, RegExpGetLeftContext, DONT_DELETE);
+  %DefineAccessor($RegExp, 'leftContext', SETTER, NoOpSetter, DONT_DELETE);
+  %DefineAccessor($RegExp, '$`', GETTER, RegExpGetLeftContext, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$`', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, 'rightContext', GETTER, RegExpGetRightContext, DONT_DELETE);
+  %DefineAccessor($RegExp, 'rightContext', SETTER, NoOpSetter, DONT_DELETE);
+  %DefineAccessor($RegExp, "$'", GETTER, RegExpGetRightContext, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, "$'", SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
+
+  for (var i = 1; i < 10; ++i) {
+    %DefineAccessor($RegExp, '$' + i, GETTER, RegExpMakeCaptureGetter(i), DONT_DELETE);
+    %DefineAccessor($RegExp, '$' + i, SETTER, NoOpSetter, DONT_DELETE);
+  }
+}
+
+
+SetupRegExp();
diff --git a/src/regexp-macro-assembler-irregexp-inl.h b/src/regexp-macro-assembler-irregexp-inl.h
new file mode 100644
index 0000000..b487468
--- /dev/null
+++ b/src/regexp-macro-assembler-irregexp-inl.h
@@ -0,0 +1,78 @@
+// Copyright 2008-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// A light-weight assembler for the Irregexp byte code.
+
+
+#include "v8.h"
+#include "ast.h"
+#include "bytecodes-irregexp.h"
+
+#ifndef V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
+#define V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
+
+namespace v8 {
+namespace internal {
+
+#ifndef V8_NATIVE_REGEXP
+
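+// Each instruction is packed into a 32-bit word: the bytecode number in
+// the low bits and its 24-bit argument shifted up by BYTECODE_SHIFT, as
+// the packing in Emit() below shows.  Wider operands follow as separate
+// 16- or 32-bit emits.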
+void RegExpMacroAssemblerIrregexp::Emit(uint32_t byte,
+                                        uint32_t twenty_four_bits) {
+  uint32_t word = ((twenty_four_bits << BYTECODE_SHIFT) | byte);
+  ASSERT(pc_ <= buffer_.length());
+  if (pc_ + 3 >= buffer_.length()) {
+    Expand();
+  }
+  *reinterpret_cast<uint32_t*>(buffer_.start() + pc_) = word;
+  pc_ += 4;
+}
+
+
+void RegExpMacroAssemblerIrregexp::Emit16(uint32_t word) {
+  ASSERT(pc_ <= buffer_.length());
+  if (pc_ + 1 >= buffer_.length()) {
+    Expand();
+  }
+  *reinterpret_cast<uint16_t*>(buffer_.start() + pc_) = word;
+  pc_ += 2;
+}
+
+
+void RegExpMacroAssemblerIrregexp::Emit32(uint32_t word) {
+  ASSERT(pc_ <= buffer_.length());
+  if (pc_ + 3 >= buffer_.length()) {
+    Expand();
+  }
+  *reinterpret_cast<uint32_t*>(buffer_.start() + pc_) = word;
+  pc_ += 4;
+}
+
+#endif  // ! V8_NATIVE_REGEXP
+
+} }  // namespace v8::internal
+
+#endif  // V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
diff --git a/src/regexp-macro-assembler-irregexp.cc b/src/regexp-macro-assembler-irregexp.cc
new file mode 100644
index 0000000..f9c7eee
--- /dev/null
+++ b/src/regexp-macro-assembler-irregexp.cc
@@ -0,0 +1,464 @@
+// Copyright 2008-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+#include "ast.h"
+#include "bytecodes-irregexp.h"
+#include "regexp-macro-assembler.h"
+#include "regexp-macro-assembler-irregexp.h"
+#include "regexp-macro-assembler-irregexp-inl.h"
+
+
+namespace v8 {
+namespace internal {
+
+#ifndef V8_NATIVE_REGEXP
+
+RegExpMacroAssemblerIrregexp::RegExpMacroAssemblerIrregexp(Vector<byte> buffer)
+    : buffer_(buffer),
+      pc_(0),
+      own_buffer_(false),
+      advance_current_end_(kInvalidPC) {
+}
+
+
+RegExpMacroAssemblerIrregexp::~RegExpMacroAssemblerIrregexp() {
+  if (backtrack_.is_linked()) backtrack_.Unuse();
+  if (own_buffer_) buffer_.Dispose();
+}
+
+
+RegExpMacroAssemblerIrregexp::IrregexpImplementation
+RegExpMacroAssemblerIrregexp::Implementation() {
+  return kBytecodeImplementation;
+}
+
+
+void RegExpMacroAssemblerIrregexp::Bind(Label* l) {
+  advance_current_end_ = kInvalidPC;
+  ASSERT(!l->is_bound());
+  if (l->is_linked()) {
+    int pos = l->pos();
+    while (pos != 0) {
+      int fixup = pos;
+      pos = *reinterpret_cast<int32_t*>(buffer_.start() + fixup);
+      *reinterpret_cast<uint32_t*>(buffer_.start() + fixup) = pc_;
+    }
+  }
+  l->bind_to(pc_);
+}
+
+
+void RegExpMacroAssemblerIrregexp::EmitOrLink(Label* l) {
+  if (l == NULL) l = &backtrack_;
+  if (l->is_bound()) {
+    Emit32(l->pos());
+  } else {
+    int pos = 0;
+    if (l->is_linked()) {
+      pos = l->pos();
+    }
+    l->link_to(pc_);
+    Emit32(pos);
+  }
+}
+
+
+void RegExpMacroAssemblerIrregexp::PopRegister(int register_index) {
+  ASSERT(register_index >= 0);
+  ASSERT(register_index <= kMaxRegister);
+  Emit(BC_POP_REGISTER, register_index);
+}
+
+
+void RegExpMacroAssemblerIrregexp::PushRegister(
+    int register_index,
+    StackCheckFlag check_stack_limit) {
+  ASSERT(register_index >= 0);
+  ASSERT(register_index <= kMaxRegister);
+  Emit(BC_PUSH_REGISTER, register_index);
+}
+
+
+void RegExpMacroAssemblerIrregexp::WriteCurrentPositionToRegister(
+    int register_index, int cp_offset) {
+  ASSERT(register_index >= 0);
+  ASSERT(register_index <= kMaxRegister);
+  Emit(BC_SET_REGISTER_TO_CP, register_index);
+  Emit32(cp_offset);  // Current position offset.
+}
+
+
+void RegExpMacroAssemblerIrregexp::ClearRegisters(int reg_from, int reg_to) {
+  ASSERT(reg_from <= reg_to);
+  for (int reg = reg_from; reg <= reg_to; reg++) {
+    SetRegister(reg, -1);
+  }
+}
+
+
+void RegExpMacroAssemblerIrregexp::ReadCurrentPositionFromRegister(
+    int register_index) {
+  ASSERT(register_index >= 0);
+  ASSERT(register_index <= kMaxRegister);
+  Emit(BC_SET_CP_TO_REGISTER, register_index);
+}
+
+
+void RegExpMacroAssemblerIrregexp::WriteStackPointerToRegister(
+    int register_index) {
+  ASSERT(register_index >= 0);
+  ASSERT(register_index <= kMaxRegister);
+  Emit(BC_SET_REGISTER_TO_SP, register_index);
+}
+
+
+void RegExpMacroAssemblerIrregexp::ReadStackPointerFromRegister(
+    int register_index) {
+  ASSERT(register_index >= 0);
+  ASSERT(register_index <= kMaxRegister);
+  Emit(BC_SET_SP_TO_REGISTER, register_index);
+}
+
+
+void RegExpMacroAssemblerIrregexp::SetRegister(int register_index, int to) {
+  ASSERT(register_index >= 0);
+  ASSERT(register_index <= kMaxRegister);
+  Emit(BC_SET_REGISTER, register_index);
+  Emit32(to);
+}
+
+
+void RegExpMacroAssemblerIrregexp::AdvanceRegister(int register_index, int by) {
+  ASSERT(register_index >= 0);
+  ASSERT(register_index <= kMaxRegister);
+  Emit(BC_ADVANCE_REGISTER, register_index);
+  Emit32(by);
+}
+
+
+void RegExpMacroAssemblerIrregexp::PopCurrentPosition() {
+  Emit(BC_POP_CP, 0);
+}
+
+
+void RegExpMacroAssemblerIrregexp::PushCurrentPosition() {
+  Emit(BC_PUSH_CP, 0);
+}
+
+
+void RegExpMacroAssemblerIrregexp::Backtrack() {
+  Emit(BC_POP_BT, 0);
+}
+
+
+void RegExpMacroAssemblerIrregexp::GoTo(Label* l) {
+  if (advance_current_end_ == pc_) {
+    // Combine advance current and goto.
+    pc_ = advance_current_start_;
+    Emit(BC_ADVANCE_CP_AND_GOTO, advance_current_offset_);
+    EmitOrLink(l);
+    advance_current_end_ = kInvalidPC;
+  } else {
+    // Regular goto.
+    Emit(BC_GOTO, 0);
+    EmitOrLink(l);
+  }
+}
+
+
+void RegExpMacroAssemblerIrregexp::PushBacktrack(Label* l) {
+  Emit(BC_PUSH_BT, 0);
+  EmitOrLink(l);
+}
+
+
+void RegExpMacroAssemblerIrregexp::Succeed() {
+  Emit(BC_SUCCEED, 0);
+}
+
+
+void RegExpMacroAssemblerIrregexp::Fail() {
+  Emit(BC_FAIL, 0);
+}
+
+
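+// AdvanceCurrentPosition() records the range it emitted so that an
+// immediately following GoTo() can rewind and fuse the two into a single
+// BC_ADVANCE_CP_AND_GOTO bytecode (see GoTo() above).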
+void RegExpMacroAssemblerIrregexp::AdvanceCurrentPosition(int by) {
+  ASSERT(by >= kMinCPOffset);
+  ASSERT(by <= kMaxCPOffset);
+  advance_current_start_ = pc_;
+  advance_current_offset_ = by;
+  Emit(BC_ADVANCE_CP, by);
+  advance_current_end_ = pc_;
+}
+
+
+void RegExpMacroAssemblerIrregexp::CheckGreedyLoop(
+      Label* on_tos_equals_current_position) {
+  Emit(BC_CHECK_GREEDY, 0);
+  EmitOrLink(on_tos_equals_current_position);
+}
+
+
+void RegExpMacroAssemblerIrregexp::LoadCurrentCharacter(int cp_offset,
+                                                        Label* on_failure,
+                                                        bool check_bounds,
+                                                        int characters) {
+  ASSERT(cp_offset >= kMinCPOffset);
+  ASSERT(cp_offset <= kMaxCPOffset);
+  int bytecode;
+  if (check_bounds) {
+    if (characters == 4) {
+      bytecode = BC_LOAD_4_CURRENT_CHARS;
+    } else if (characters == 2) {
+      bytecode = BC_LOAD_2_CURRENT_CHARS;
+    } else {
+      ASSERT(characters == 1);
+      bytecode = BC_LOAD_CURRENT_CHAR;
+    }
+  } else {
+    if (characters == 4) {
+      bytecode = BC_LOAD_4_CURRENT_CHARS_UNCHECKED;
+    } else if (characters == 2) {
+      bytecode = BC_LOAD_2_CURRENT_CHARS_UNCHECKED;
+    } else {
+      ASSERT(characters == 1);
+      bytecode = BC_LOAD_CURRENT_CHAR_UNCHECKED;
+    }
+  }
+  Emit(bytecode, cp_offset);
+  if (check_bounds) EmitOrLink(on_failure);
+}
+
+
+void RegExpMacroAssemblerIrregexp::CheckCharacterLT(uc16 limit,
+                                                    Label* on_less) {
+  Emit(BC_CHECK_LT, limit);
+  EmitOrLink(on_less);
+}
+
+
+void RegExpMacroAssemblerIrregexp::CheckCharacterGT(uc16 limit,
+                                                    Label* on_greater) {
+  Emit(BC_CHECK_GT, limit);
+  EmitOrLink(on_greater);
+}
+
+
+void RegExpMacroAssemblerIrregexp::CheckCharacter(uint32_t c, Label* on_equal) {
+  if (c > MAX_FIRST_ARG) {
+    Emit(BC_CHECK_4_CHARS, 0);
+    Emit32(c);
+  } else {
+    Emit(BC_CHECK_CHAR, c);
+  }
+  EmitOrLink(on_equal);
+}
+
+
+void RegExpMacroAssemblerIrregexp::CheckAtStart(Label* on_at_start) {
+  Emit(BC_CHECK_AT_START, 0);
+  EmitOrLink(on_at_start);
+}
+
+
+void RegExpMacroAssemblerIrregexp::CheckNotAtStart(Label* on_not_at_start) {
+  Emit(BC_CHECK_NOT_AT_START, 0);
+  EmitOrLink(on_not_at_start);
+}
+
+
+void RegExpMacroAssemblerIrregexp::CheckNotCharacter(uint32_t c,
+                                                     Label* on_not_equal) {
+  if (c > MAX_FIRST_ARG) {
+    Emit(BC_CHECK_NOT_4_CHARS, 0);
+    Emit32(c);
+  } else {
+    Emit(BC_CHECK_NOT_CHAR, c);
+  }
+  EmitOrLink(on_not_equal);
+}
+
+
+void RegExpMacroAssemblerIrregexp::CheckCharacterAfterAnd(
+    uint32_t c,
+    uint32_t mask,
+    Label* on_equal) {
+  if (c > MAX_FIRST_ARG) {
+    Emit(BC_AND_CHECK_4_CHARS, 0);
+    Emit32(c);
+  } else {
+    Emit(BC_AND_CHECK_CHAR, c);
+  }
+  Emit32(mask);
+  EmitOrLink(on_equal);
+}
+
+
+void RegExpMacroAssemblerIrregexp::CheckNotCharacterAfterAnd(
+    uint32_t c,
+    uint32_t mask,
+    Label* on_not_equal) {
+  if (c > MAX_FIRST_ARG) {
+    Emit(BC_AND_CHECK_NOT_4_CHARS, 0);
+    Emit32(c);
+  } else {
+    Emit(BC_AND_CHECK_NOT_CHAR, c);
+  }
+  Emit32(mask);
+  EmitOrLink(on_not_equal);
+}
+
+
+void RegExpMacroAssemblerIrregexp::CheckNotCharacterAfterMinusAnd(
+    uc16 c,
+    uc16 minus,
+    uc16 mask,
+    Label* on_not_equal) {
+  Emit(BC_MINUS_AND_CHECK_NOT_CHAR, c);
+  Emit16(minus);
+  Emit16(mask);
+  EmitOrLink(on_not_equal);
+}
+
+
+void RegExpMacroAssemblerIrregexp::CheckNotBackReference(int start_reg,
+                                                         Label* on_not_equal) {
+  ASSERT(start_reg >= 0);
+  ASSERT(start_reg <= kMaxRegister);
+  Emit(BC_CHECK_NOT_BACK_REF, start_reg);
+  EmitOrLink(on_not_equal);
+}
+
+
+void RegExpMacroAssemblerIrregexp::CheckNotBackReferenceIgnoreCase(
+    int start_reg,
+    Label* on_not_equal) {
+  ASSERT(start_reg >= 0);
+  ASSERT(start_reg <= kMaxRegister);
+  Emit(BC_CHECK_NOT_BACK_REF_NO_CASE, start_reg);
+  EmitOrLink(on_not_equal);
+}
+
+
+void RegExpMacroAssemblerIrregexp::CheckNotRegistersEqual(int reg1,
+                                                          int reg2,
+                                                          Label* on_not_equal) {
+  ASSERT(reg1 >= 0);
+  ASSERT(reg1 <= kMaxRegister);
+  Emit(BC_CHECK_NOT_REGS_EQUAL, reg1);
+  Emit32(reg2);
+  EmitOrLink(on_not_equal);
+}
+
+
+void RegExpMacroAssemblerIrregexp::CheckCharacters(
+    Vector<const uc16> str,
+    int cp_offset,
+    Label* on_failure,
+    bool check_end_of_string) {
+  ASSERT(cp_offset >= kMinCPOffset);
+  ASSERT(cp_offset + str.length() - 1 <= kMaxCPOffset);
+  // It is vital that this loop is backwards due to the unchecked character
+  // load below.
+  for (int i = str.length() - 1; i >= 0; i--) {
+    if (check_end_of_string && i == str.length() - 1) {
+      Emit(BC_LOAD_CURRENT_CHAR, cp_offset + i);
+      EmitOrLink(on_failure);
+    } else {
+      Emit(BC_LOAD_CURRENT_CHAR_UNCHECKED, cp_offset + i);
+    }
+    Emit(BC_CHECK_NOT_CHAR, str[i]);
+    EmitOrLink(on_failure);
+  }
+}
+
+
+void RegExpMacroAssemblerIrregexp::IfRegisterLT(int register_index,
+                                                int comparand,
+                                                Label* on_less_than) {
+  ASSERT(register_index >= 0);
+  ASSERT(register_index <= kMaxRegister);
+  Emit(BC_CHECK_REGISTER_LT, register_index);
+  Emit32(comparand);
+  EmitOrLink(on_less_than);
+}
+
+
+void RegExpMacroAssemblerIrregexp::IfRegisterGE(int register_index,
+                                                int comparand,
+                                                Label* on_greater_or_equal) {
+  ASSERT(register_index >= 0);
+  ASSERT(register_index <= kMaxRegister);
+  Emit(BC_CHECK_REGISTER_GE, register_index);
+  Emit32(comparand);
+  EmitOrLink(on_greater_or_equal);
+}
+
+
+void RegExpMacroAssemblerIrregexp::IfRegisterEqPos(int register_index,
+                                                   Label* on_eq) {
+  ASSERT(register_index >= 0);
+  ASSERT(register_index <= kMaxRegister);
+  Emit(BC_CHECK_REGISTER_EQ_POS, register_index);
+  EmitOrLink(on_eq);
+}
+
+
+Handle<Object> RegExpMacroAssemblerIrregexp::GetCode(Handle<String> source) {
+  Bind(&backtrack_);
+  Emit(BC_POP_BT, 0);
+  Handle<ByteArray> array = Factory::NewByteArray(length());
+  Copy(array->GetDataStartAddress());
+  return array;
+}
+
+
+int RegExpMacroAssemblerIrregexp::length() {
+  return pc_;
+}
+
+
+void RegExpMacroAssemblerIrregexp::Copy(Address a) {
+  memcpy(a, buffer_.start(), length());
+}
+
+
+void RegExpMacroAssemblerIrregexp::Expand() {
+  bool old_buffer_was_our_own = own_buffer_;
+  Vector<byte> old_buffer = buffer_;
+  buffer_ = Vector<byte>::New(old_buffer.length() * 2);
+  own_buffer_ = true;
+  memcpy(buffer_.start(), old_buffer.start(), old_buffer.length());
+  if (old_buffer_was_our_own) {
+    old_buffer.Dispose();
+  }
+}
+
+#endif  // !V8_NATIVE_REGEXP
+
+} }  // namespace v8::internal
diff --git a/src/regexp-macro-assembler-irregexp.h b/src/regexp-macro-assembler-irregexp.h
new file mode 100644
index 0000000..642a283
--- /dev/null
+++ b/src/regexp-macro-assembler-irregexp.h
@@ -0,0 +1,141 @@
+// Copyright 2008-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
+#define V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
+
+namespace v8 {
+namespace internal {
+
+#ifndef V8_NATIVE_REGEXP
+
+class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
+ public:
+  // Create an assembler that emits Irregexp bytecode into the given buffer.
+  // The assembler does not initially own the buffer; if the buffer turns out
+  // to be too small, it is replaced by a larger, assembler-owned copy (see
+  // Expand()), which is deallocated when the assembler is destroyed.
+  explicit RegExpMacroAssemblerIrregexp(Vector<byte> buffer);
+  virtual ~RegExpMacroAssemblerIrregexp();
+  // The byte-code interpreter checks on each push anyway.
+  virtual int stack_limit_slack() { return 1; }
+  virtual void Bind(Label* label);
+  virtual void AdvanceCurrentPosition(int by);  // Signed cp change.
+  virtual void PopCurrentPosition();
+  virtual void PushCurrentPosition();
+  virtual void Backtrack();
+  virtual void GoTo(Label* label);
+  virtual void PushBacktrack(Label* label);
+  virtual void Succeed();
+  virtual void Fail();
+  virtual void PopRegister(int register_index);
+  virtual void PushRegister(int register_index,
+                            StackCheckFlag check_stack_limit);
+  virtual void AdvanceRegister(int reg, int by);  // r[reg] += by.
+  virtual void SetRegister(int register_index, int to);
+  virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+  virtual void ClearRegisters(int reg_from, int reg_to);
+  virtual void ReadCurrentPositionFromRegister(int reg);
+  virtual void WriteStackPointerToRegister(int reg);
+  virtual void ReadStackPointerFromRegister(int reg);
+  virtual void LoadCurrentCharacter(int cp_offset,
+                                    Label* on_end_of_input,
+                                    bool check_bounds = true,
+                                    int characters = 1);
+  virtual void CheckCharacter(uint32_t c, Label* on_equal);
+  virtual void CheckCharacterAfterAnd(uint32_t c,
+                                      uint32_t mask,
+                                      Label* on_equal);
+  virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
+  virtual void CheckCharacterLT(uc16 limit, Label* on_less);
+  virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+  virtual void CheckAtStart(Label* on_at_start);
+  virtual void CheckNotAtStart(Label* on_not_at_start);
+  virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
+  virtual void CheckNotCharacterAfterAnd(uint32_t c,
+                                         uint32_t mask,
+                                         Label* on_not_equal);
+  virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
+                                              uc16 minus,
+                                              uc16 mask,
+                                              Label* on_not_equal);
+  virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+  virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+                                               Label* on_no_match);
+  virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
+  virtual void CheckCharacters(Vector<const uc16> str,
+                               int cp_offset,
+                               Label* on_failure,
+                               bool check_end_of_string);
+  virtual void IfRegisterLT(int register_index, int comparand, Label* if_lt);
+  virtual void IfRegisterGE(int register_index, int comparand, Label* if_ge);
+  virtual void IfRegisterEqPos(int register_index, Label* if_eq);
+
+  virtual IrregexpImplementation Implementation();
+  virtual Handle<Object> GetCode(Handle<String> source);
+ private:
+  void Expand();
+  // Code and bitmap emission.
+  inline void EmitOrLink(Label* label);
+  inline void Emit32(uint32_t x);
+  inline void Emit16(uint32_t x);
+  inline void Emit(uint32_t bc, uint32_t arg);
+  // Bytecode buffer.
+  int length();
+  void Copy(Address a);
+
+  // The buffer into which code and relocation info are generated.
+  Vector<byte> buffer_;
+  // The program counter.
+  int pc_;
+  // True if the assembler owns the buffer, false if buffer is external.
+  bool own_buffer_;
+  Label backtrack_;
+
+  int advance_current_start_;
+  int advance_current_offset_;
+  int advance_current_end_;
+
+  static const int kInvalidPC = -1;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpMacroAssemblerIrregexp);
+};
+
+#endif  // !V8_NATIVE_REGEXP
+
+} }  // namespace v8::internal
+
+#endif  // V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
diff --git a/src/regexp-macro-assembler-tracer.cc b/src/regexp-macro-assembler-tracer.cc
new file mode 100644
index 0000000..0aad337
--- /dev/null
+++ b/src/regexp-macro-assembler-tracer.cc
@@ -0,0 +1,363 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+#include "ast.h"
+#include "regexp-macro-assembler.h"
+#include "regexp-macro-assembler-tracer.h"
+
+namespace v8 {
+namespace internal {
+
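+// The tracer wraps another RegExpMacroAssembler and prints each call
+// before forwarding it to the wrapped assembler.  A rough usage sketch
+// (names illustrative only):
+//
+//   RegExpMacroAssemblerIrregexp raw(buffer);
+//   RegExpMacroAssemblerTracer traced(&raw);
+//   // Hand &traced to the regexp compiler instead of &raw.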
+RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer(
+    RegExpMacroAssembler* assembler)
+    : assembler_(assembler) {
+  unsigned int type = assembler->Implementation();
+  ASSERT(type < 3);
+  const char* impl_names[3] = {"IA32", "ARM", "Bytecode"};
+  PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
+}
+
+
+RegExpMacroAssemblerTracer::~RegExpMacroAssemblerTracer() {
+}
+
+
+void RegExpMacroAssemblerTracer::Bind(Label* label) {
+  PrintF("label[%08x]: (Bind)\n", label);
+  assembler_->Bind(label);
+}
+
+
+void RegExpMacroAssemblerTracer::AdvanceCurrentPosition(int by) {
+  PrintF(" AdvanceCurrentPosition(by=%d);\n", by);
+  assembler_->AdvanceCurrentPosition(by);
+}
+
+
+void RegExpMacroAssemblerTracer::CheckGreedyLoop(Label* label) {
+  PrintF(" CheckGreedyLoop(label[%08x]);\n\n", label);
+  assembler_->CheckGreedyLoop(label);
+}
+
+
+void RegExpMacroAssemblerTracer::PopCurrentPosition() {
+  PrintF(" PopCurrentPosition();\n");
+  assembler_->PopCurrentPosition();
+}
+
+
+void RegExpMacroAssemblerTracer::PushCurrentPosition() {
+  PrintF(" PushCurrentPosition();\n");
+  assembler_->PushCurrentPosition();
+}
+
+
+void RegExpMacroAssemblerTracer::Backtrack() {
+  PrintF(" Backtrack();\n");
+  assembler_->Backtrack();
+}
+
+
+void RegExpMacroAssemblerTracer::GoTo(Label* label) {
+  PrintF(" GoTo(label[%08x]);\n\n", label);
+  assembler_->GoTo(label);
+}
+
+
+void RegExpMacroAssemblerTracer::PushBacktrack(Label* label) {
+  PrintF(" PushBacktrack(label[%08x]);\n",
+         label);
+  assembler_->PushBacktrack(label);
+}
+
+
+void RegExpMacroAssemblerTracer::Succeed() {
+  PrintF(" Succeed();\n");
+  assembler_->Succeed();
+}
+
+
+void RegExpMacroAssemblerTracer::Fail() {
+  PrintF(" Fail();\n");
+  assembler_->Fail();
+}
+
+
+void RegExpMacroAssemblerTracer::PopRegister(int register_index) {
+  PrintF(" PopRegister(register=%d);\n", register_index);
+  assembler_->PopRegister(register_index);
+}
+
+
+void RegExpMacroAssemblerTracer::PushRegister(
+    int register_index,
+    StackCheckFlag check_stack_limit) {
+  PrintF(" PushRegister(register=%d, %s);\n",
+         register_index,
+         check_stack_limit ? "check stack limit" : "");
+  assembler_->PushRegister(register_index, check_stack_limit);
+}
+
+
+void RegExpMacroAssemblerTracer::AdvanceRegister(int reg, int by) {
+  PrintF(" AdvanceRegister(register=%d, by=%d);\n", reg, by);
+  assembler_->AdvanceRegister(reg, by);
+}
+
+
+void RegExpMacroAssemblerTracer::SetRegister(int register_index, int to) {
+  PrintF(" SetRegister(register=%d, to=%d);\n", register_index, to);
+  assembler_->SetRegister(register_index, to);
+}
+
+
+void RegExpMacroAssemblerTracer::WriteCurrentPositionToRegister(int reg,
+                                                                int cp_offset) {
+  PrintF(" WriteCurrentPositionToRegister(register=%d,cp_offset=%d);\n",
+         reg,
+         cp_offset);
+  assembler_->WriteCurrentPositionToRegister(reg, cp_offset);
+}
+
+
+void RegExpMacroAssemblerTracer::ClearRegisters(int reg_from, int reg_to) {
+  PrintF(" ClearRegisters(from=%d, to=%d);\n", reg_from, reg_to);
+  assembler_->ClearRegisters(reg_from, reg_to);
+}
+
+
+void RegExpMacroAssemblerTracer::ReadCurrentPositionFromRegister(int reg) {
+  PrintF(" ReadCurrentPositionFromRegister(register=%d);\n", reg);
+  assembler_->ReadCurrentPositionFromRegister(reg);
+}
+
+
+void RegExpMacroAssemblerTracer::WriteStackPointerToRegister(int reg) {
+  PrintF(" WriteStackPointerToRegister(register=%d);\n", reg);
+  assembler_->WriteStackPointerToRegister(reg);
+}
+
+
+void RegExpMacroAssemblerTracer::ReadStackPointerFromRegister(int reg) {
+  PrintF(" ReadStackPointerFromRegister(register=%d);\n", reg);
+  assembler_->ReadStackPointerFromRegister(reg);
+}
+
+
+void RegExpMacroAssemblerTracer::LoadCurrentCharacter(int cp_offset,
+                                                      Label* on_end_of_input,
+                                                      bool check_bounds,
+                                                      int characters) {
+  const char* check_msg = check_bounds ? "" : " (unchecked)";
+  PrintF(" LoadCurrentCharacter(cp_offset=%d, label[%08x]%s (%d chars));\n",
+         cp_offset,
+         on_end_of_input,
+         check_msg,
+         characters);
+  assembler_->LoadCurrentCharacter(cp_offset,
+                                   on_end_of_input,
+                                   check_bounds,
+                                   characters);
+}
+
+
+void RegExpMacroAssemblerTracer::CheckCharacterLT(uc16 limit, Label* on_less) {
+  PrintF(" CheckCharacterLT(c='u%04x', label[%08x]);\n", limit, on_less);
+  assembler_->CheckCharacterLT(limit, on_less);
+}
+
+
+void RegExpMacroAssemblerTracer::CheckCharacterGT(uc16 limit,
+                                                  Label* on_greater) {
+  PrintF(" CheckCharacterGT(c='u%04x', label[%08x]);\n", limit, on_greater);
+  assembler_->CheckCharacterGT(limit, on_greater);
+}
+
+
+void RegExpMacroAssemblerTracer::CheckCharacter(uint32_t c, Label* on_equal) {
+  PrintF(" CheckCharacter(c='u%04x', label[%08x]);\n", c, on_equal);
+  assembler_->CheckCharacter(c, on_equal);
+}
+
+
+void RegExpMacroAssemblerTracer::CheckAtStart(Label* on_at_start) {
+  PrintF(" CheckAtStart(label[%08x]);\n", on_at_start);
+  assembler_->CheckAtStart(on_at_start);
+}
+
+
+void RegExpMacroAssemblerTracer::CheckNotAtStart(Label* on_not_at_start) {
+  PrintF(" CheckNotAtStart(label[%08x]);\n", on_not_at_start);
+  assembler_->CheckNotAtStart(on_not_at_start);
+}
+
+
+void RegExpMacroAssemblerTracer::CheckNotCharacter(uint32_t c,
+                                                   Label* on_not_equal) {
+  PrintF(" CheckNotCharacter(c='u%04x', label[%08x]);\n", c, on_not_equal);
+  assembler_->CheckNotCharacter(c, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerTracer::CheckCharacterAfterAnd(
+    uint32_t c,
+    uint32_t mask,
+    Label* on_equal) {
+  PrintF(" CheckCharacterAfterAnd(c='u%04x', mask=0x%04x, label[%08x]);\n",
+         c,
+         mask,
+         on_equal);
+  assembler_->CheckCharacterAfterAnd(c, mask, on_equal);
+}
+
+
+void RegExpMacroAssemblerTracer::CheckNotCharacterAfterAnd(
+    uint32_t c,
+    uint32_t mask,
+    Label* on_not_equal) {
+  PrintF(" CheckNotCharacterAfterAnd(c='u%04x', mask=0x%04x, label[%08x]);\n",
+         c,
+         mask,
+         on_not_equal);
+  assembler_->CheckNotCharacterAfterAnd(c, mask, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerTracer::CheckNotCharacterAfterMinusAnd(
+    uc16 c,
+    uc16 minus,
+    uc16 mask,
+    Label* on_not_equal) {
+  PrintF(" CheckNotCharacterAfterMinusAnd(c='u%04x', minus=0x%04x, mask=0x%04x, "
+             "label[%08x]);\n",
+         c,
+         minus,
+         mask,
+         on_not_equal);
+  assembler_->CheckNotCharacterAfterMinusAnd(c, minus, mask, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerTracer::CheckNotBackReference(int start_reg,
+                                                       Label* on_no_match) {
+  PrintF(" CheckNotBackReference(register=%d, label[%08x]);\n", start_reg,
+         on_no_match);
+  assembler_->CheckNotBackReference(start_reg, on_no_match);
+}
+
+
+void RegExpMacroAssemblerTracer::CheckNotBackReferenceIgnoreCase(
+    int start_reg,
+    Label* on_no_match) {
+  PrintF(" CheckNotBackReferenceIgnoreCase(register=%d, label[%08x]);\n",
+         start_reg, on_no_match);
+  assembler_->CheckNotBackReferenceIgnoreCase(start_reg, on_no_match);
+}
+
+
+void RegExpMacroAssemblerTracer::CheckNotRegistersEqual(int reg1,
+                                                        int reg2,
+                                                        Label* on_not_equal) {
+  PrintF(" CheckNotRegistersEqual(reg1=%d, reg2=%d, label[%08x]);\n",
+         reg1,
+         reg2,
+         on_not_equal);
+  assembler_->CheckNotRegistersEqual(reg1, reg2, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerTracer::CheckCharacters(Vector<const uc16> str,
+                                                 int cp_offset,
+                                                 Label* on_failure,
+                                                 bool check_end_of_string) {
+  PrintF(" %s(str=\"",
+         check_end_of_string ? "CheckCharacters" : "CheckCharactersUnchecked");
+  for (int i = 0; i < str.length(); i++) {
+    PrintF("u%04x", str[i]);
+  }
+  PrintF("\", cp_offset=%d, label[%08x])\n", cp_offset, on_failure);
+  assembler_->CheckCharacters(str, cp_offset, on_failure, check_end_of_string);
+}
+
+
+bool RegExpMacroAssemblerTracer::CheckSpecialCharacterClass(
+    uc16 type,
+    int cp_offset,
+    bool check_offset,
+    Label* on_no_match) {
+  bool supported = assembler_->CheckSpecialCharacterClass(type,
+                                                          cp_offset,
+                                                          check_offset,
+                                                          on_no_match);
+  PrintF(" CheckSpecialCharacterClass(type='%c', offset=%d, "
+             "check_offset=%s, label[%08x]): %s;\n",
+         type,
+         cp_offset,
+         check_offset ? "true" : "false",
+         on_no_match,
+         supported ? "true" : "false");
+  return supported;
+}
+
+
+void RegExpMacroAssemblerTracer::IfRegisterLT(int register_index,
+                                              int comparand, Label* if_lt) {
+  PrintF(" IfRegisterLT(register=%d, number=%d, label[%08x]);\n",
+         register_index, comparand, if_lt);
+  assembler_->IfRegisterLT(register_index, comparand, if_lt);
+}
+
+
+void RegExpMacroAssemblerTracer::IfRegisterEqPos(int register_index,
+                                                 Label* if_eq) {
+  PrintF(" IfRegisterEqPos(register=%d, label[%08x]);\n",
+         register_index, if_eq);
+  assembler_->IfRegisterEqPos(register_index, if_eq);
+}
+
+
+void RegExpMacroAssemblerTracer::IfRegisterGE(int register_index,
+                                              int comparand, Label* if_ge) {
+  PrintF(" IfRegisterGE(register=%d, number=%d, label[%08x]);\n",
+         register_index, comparand, if_ge);
+  assembler_->IfRegisterGE(register_index, comparand, if_ge);
+}
+
+
+RegExpMacroAssembler::IrregexpImplementation
+    RegExpMacroAssemblerTracer::Implementation() {
+  return assembler_->Implementation();
+}
+
+
+Handle<Object> RegExpMacroAssemblerTracer::GetCode(Handle<String> source) {
+  PrintF(" GetCode(%s);\n", *(source->ToCString()));
+  return assembler_->GetCode(source);
+}
+
+}}  // namespace v8::internal
diff --git a/src/regexp-macro-assembler-tracer.h b/src/regexp-macro-assembler-tracer.h
new file mode 100644
index 0000000..28ca5f3
--- /dev/null
+++ b/src/regexp-macro-assembler-tracer.h
@@ -0,0 +1,105 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_REGEXP_MACRO_ASSEMBLER_TRACER_H_
+#define V8_REGEXP_MACRO_ASSEMBLER_TRACER_H_
+
+namespace v8 {
+namespace internal {
+
+// Decorator on a RegExpMacroAssembler that writes out all calls.
+class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
+ public:
+  explicit RegExpMacroAssemblerTracer(RegExpMacroAssembler* assembler);
+  virtual ~RegExpMacroAssemblerTracer();
+  virtual int stack_limit_slack() { return assembler_->stack_limit_slack(); }
+  virtual bool CanReadUnaligned() { return assembler_->CanReadUnaligned(); }
+  virtual void AdvanceCurrentPosition(int by);  // Signed cp change.
+  virtual void AdvanceRegister(int reg, int by);  // r[reg] += by.
+  virtual void Backtrack();
+  virtual void Bind(Label* label);
+  virtual void CheckAtStart(Label* on_at_start);
+  virtual void CheckCharacter(uint32_t c, Label* on_equal);
+  virtual void CheckCharacterAfterAnd(uint32_t c,
+                                      uint32_t and_with,
+                                      Label* on_equal);
+  virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
+  virtual void CheckCharacterLT(uc16 limit, Label* on_less);
+  virtual void CheckCharacters(
+      Vector<const uc16> str,
+      int cp_offset,
+      Label* on_failure,
+      bool check_end_of_string);
+  virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+  virtual void CheckNotAtStart(Label* on_not_at_start);
+  virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+  virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+                                               Label* on_no_match);
+  virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
+  virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
+  virtual void CheckNotCharacterAfterAnd(uint32_t c,
+                                         uint32_t and_with,
+                                         Label* on_not_equal);
+  virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
+                                              uc16 minus,
+                                              uc16 and_with,
+                                              Label* on_not_equal);
+  virtual bool CheckSpecialCharacterClass(uc16 type,
+                                          int cp_offset,
+                                          bool check_offset,
+                                          Label* on_no_match);
+  virtual void Fail();
+  virtual Handle<Object> GetCode(Handle<String> source);
+  virtual void GoTo(Label* label);
+  virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+  virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+  virtual void IfRegisterEqPos(int reg, Label* if_eq);
+  virtual IrregexpImplementation Implementation();
+  virtual void LoadCurrentCharacter(int cp_offset,
+                                    Label* on_end_of_input,
+                                    bool check_bounds = true,
+                                    int characters = 1);
+  virtual void PopCurrentPosition();
+  virtual void PopRegister(int register_index);
+  virtual void PushBacktrack(Label* label);
+  virtual void PushCurrentPosition();
+  virtual void PushRegister(int register_index,
+                            StackCheckFlag check_stack_limit);
+  virtual void ReadCurrentPositionFromRegister(int reg);
+  virtual void ReadStackPointerFromRegister(int reg);
+  virtual void SetRegister(int register_index, int to);
+  virtual void Succeed();
+  virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+  virtual void ClearRegisters(int reg_from, int reg_to);
+  virtual void WriteStackPointerToRegister(int reg);
+ private:
+  RegExpMacroAssembler* assembler_;
+};
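+
+// A minimal usage sketch (names such as `raw_assembler` are illustrative):
+// wrap a concrete assembler in the tracer and hand the tracer to the regexp
+// compiler, so every call is printed before being forwarded.
+//
+//   RegExpMacroAssemblerIrregexp raw_assembler(buffer);
+//   RegExpMacroAssemblerTracer traced_assembler(&raw_assembler);
+//   // Use &traced_assembler wherever a RegExpMacroAssembler* is expected.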
+
+}}  // namespace v8::internal
+
+#endif  // V8_REGEXP_MACRO_ASSEMBLER_TRACER_H_
diff --git a/src/regexp-macro-assembler.cc b/src/regexp-macro-assembler.cc
new file mode 100644
index 0000000..0d00cee
--- /dev/null
+++ b/src/regexp-macro-assembler.cc
@@ -0,0 +1,260 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+#include "ast.h"
+#include "assembler.h"
+#include "regexp-stack.h"
+#include "regexp-macro-assembler.h"
+#if V8_TARGET_ARCH_ARM
+#include "arm/simulator-arm.h"
+#elif V8_TARGET_ARCH_IA32
+#include "ia32/simulator-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/simulator-x64.h"
+#endif
+
+namespace v8 {
+namespace internal {
+
+RegExpMacroAssembler::RegExpMacroAssembler() {
+}
+
+
+RegExpMacroAssembler::~RegExpMacroAssembler() {
+}
+
+
+bool RegExpMacroAssembler::CanReadUnaligned() {
+#ifdef V8_HOST_CAN_READ_UNALIGNED
+  return true;
+#else
+  return false;
+#endif
+}
+
+
+#ifdef V8_NATIVE_REGEXP  // Avoid unused code, e.g., on ARM.
+
+NativeRegExpMacroAssembler::NativeRegExpMacroAssembler() {
+}
+
+
+NativeRegExpMacroAssembler::~NativeRegExpMacroAssembler() {
+}
+
+
+bool NativeRegExpMacroAssembler::CanReadUnaligned() {
+#ifdef V8_TARGET_CAN_READ_UNALIGNED
+  return true;
+#else
+  return false;
+#endif
+}
+
+const byte* NativeRegExpMacroAssembler::StringCharacterPosition(
+    String* subject,
+    int start_index) {
+  // Not just flat, but ultra flat.
+  ASSERT(subject->IsExternalString() || subject->IsSeqString());
+  ASSERT(start_index >= 0);
+  ASSERT(start_index <= subject->length());
+  if (subject->IsAsciiRepresentation()) {
+    const byte* address;
+    if (StringShape(subject).IsExternal()) {
+      const char* data = ExternalAsciiString::cast(subject)->resource()->data();
+      address = reinterpret_cast<const byte*>(data);
+    } else {
+      ASSERT(subject->IsSeqAsciiString());
+      char* data = SeqAsciiString::cast(subject)->GetChars();
+      address = reinterpret_cast<const byte*>(data);
+    }
+    return address + start_index;
+  }
+  const uc16* data;
+  if (StringShape(subject).IsExternal()) {
+    data = ExternalTwoByteString::cast(subject)->resource()->data();
+  } else {
+    ASSERT(subject->IsSeqTwoByteString());
+    data = SeqTwoByteString::cast(subject)->GetChars();
+  }
+  return reinterpret_cast<const byte*>(data + start_index);
+}
+
+
+NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
+    Handle<Code> regexp_code,
+    Handle<String> subject,
+    int* offsets_vector,
+    int offsets_vector_length,
+    int previous_index) {
+
+  ASSERT(subject->IsFlat());
+  ASSERT(previous_index >= 0);
+  ASSERT(previous_index <= subject->length());
+
+  // No allocations before calling the regexp, but we can't use
+  // AssertNoAllocation, since regexps might be preempted, and another thread
+  // might do allocation anyway.
+
+  String* subject_ptr = *subject;
+  // Character offsets into string.
+  int start_offset = previous_index;
+  int end_offset = subject_ptr->length();
+
+  bool is_ascii = subject->IsAsciiRepresentation();
+
+  if (StringShape(subject_ptr).IsCons()) {
+    subject_ptr = ConsString::cast(subject_ptr)->first();
+  } else if (StringShape(subject_ptr).IsSliced()) {
+    SlicedString* slice = SlicedString::cast(subject_ptr);
+    start_offset += slice->start();
+    end_offset += slice->start();
+    subject_ptr = slice->buffer();
+  }
+  // Ensure that an underlying string has the same ascii-ness.
+  ASSERT(subject_ptr->IsAsciiRepresentation() == is_ascii);
+  ASSERT(subject_ptr->IsExternalString() || subject_ptr->IsSeqString());
+  // The string is now either sequential or external.
+  int char_size_shift = is_ascii ? 0 : 1;
+  int char_length = end_offset - start_offset;
+
+  const byte* input_start =
+      StringCharacterPosition(subject_ptr, start_offset);
+  int byte_length = char_length << char_size_shift;
+  const byte* input_end = input_start + byte_length;
+  Result res = Execute(*regexp_code,
+                       subject_ptr,
+                       start_offset,
+                       input_start,
+                       input_end,
+                       offsets_vector,
+                       previous_index == 0);
+
+  if (res == SUCCESS) {
+    // Capture values are relative to start_offset only.
+    // Convert them to be relative to start of string.
+    for (int i = 0; i < offsets_vector_length; i++) {
+      if (offsets_vector[i] >= 0) {
+        offsets_vector[i] += previous_index;
+      }
+    }
+  }
+
+  return res;
+}
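+
+// Worked example of the offset adjustment above (values are illustrative):
+// with previous_index == 5, a capture the generated code reports as [2, 4]
+// relative to the search start ends up as [7, 9] relative to the start of
+// the subject string; negative entries (unused captures) are left alone.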
+
+
+NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
+    Code* code,
+    String* input,
+    int start_offset,
+    const byte* input_start,
+    const byte* input_end,
+    int* output,
+    bool at_start) {
+  typedef int (*matcher)(String*, int, const byte*,
+                         const byte*, int*, int, Address);
+  matcher matcher_func = FUNCTION_CAST<matcher>(code->entry());
+
+  int at_start_val = at_start ? 1 : 0;
+
+  // Ensure that the minimum stack has been allocated.
+  RegExpStack stack;
+  Address stack_base = RegExpStack::stack_base();
+
+  int result = CALL_GENERATED_REGEXP_CODE(matcher_func,
+                                          input,
+                                          start_offset,
+                                          input_start,
+                                          input_end,
+                                          output,
+                                          at_start_val,
+                                          stack_base);
+  ASSERT(result <= SUCCESS);
+  ASSERT(result >= RETRY);
+
+  if (result == EXCEPTION && !Top::has_pending_exception()) {
+    // We detected a stack overflow (on the backtrack stack) in RegExp code,
+    // but haven't created the exception yet.
+    Top::StackOverflow();
+  }
+  return static_cast<Result>(result);
+}
+
+
+static unibrow::Mapping<unibrow::Ecma262Canonicalize> canonicalize;
+
+int NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16(
+    Address byte_offset1,
+    Address byte_offset2,
+    size_t byte_length) {
+  // This function is not allowed to cause a garbage collection.
+  // A GC might move the calling generated code and invalidate the
+  // return address on the stack.
+  ASSERT(byte_length % 2 == 0);
+  uc16* substring1 = reinterpret_cast<uc16*>(byte_offset1);
+  uc16* substring2 = reinterpret_cast<uc16*>(byte_offset2);
+  size_t length = byte_length >> 1;
+
+  for (size_t i = 0; i < length; i++) {
+    unibrow::uchar c1 = substring1[i];
+    unibrow::uchar c2 = substring2[i];
+    if (c1 != c2) {
+      unibrow::uchar s1[1] = { c1 };
+      canonicalize.get(c1, '\0', s1);
+      if (s1[0] != c2) {
+        unibrow::uchar s2[1] = { c2 };
+        canonicalize.get(c2, '\0', s2);
+        if (s1[0] != s2[0]) {
+          return 0;
+        }
+      }
+    }
+  }
+  return 1;
+}
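+
+// Worked example (illustrative): for the UC16 strings "Foo" and "fOO"
+// (byte_length == 6) the loop visits three character pairs; 'F'/'f' and
+// 'o'/'O' differ but canonicalize to the same value, so the result is 1.
+// For "Foo" and "Bar" the first pair already fails and the result is 0.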
+
+
+Address NativeRegExpMacroAssembler::GrowStack(Address stack_pointer,
+                                              Address* stack_base) {
+  size_t size = RegExpStack::stack_capacity();
+  Address old_stack_base = RegExpStack::stack_base();
+  ASSERT(old_stack_base == *stack_base);
+  ASSERT(stack_pointer <= old_stack_base);
+  ASSERT(static_cast<size_t>(old_stack_base - stack_pointer) <= size);
+  Address new_stack_base = RegExpStack::EnsureCapacity(size * 2);
+  if (new_stack_base == NULL) {
+    return NULL;
+  }
+  *stack_base = new_stack_base;
+  intptr_t stack_content_size = old_stack_base - stack_pointer;
+  return new_stack_base - stack_content_size;
+}
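+
+// Growth example (illustrative): with a 1KB stack whose base is B and a
+// stack pointer that has reached B - 1KB, this doubles the capacity to 2KB,
+// copies the live 1KB to the top of the new block, updates *stack_base to
+// the new base B', and returns B' - 1KB, so the contents keep their offsets
+// from the base while 1KB of fresh space opens up below the pointer.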
+
+#endif  // V8_NATIVE_REGEXP
+} }  // namespace v8::internal
diff --git a/src/regexp-macro-assembler.h b/src/regexp-macro-assembler.h
new file mode 100644
index 0000000..26aab2c
--- /dev/null
+++ b/src/regexp-macro-assembler.h
@@ -0,0 +1,238 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_REGEXP_MACRO_ASSEMBLER_H_
+#define V8_REGEXP_MACRO_ASSEMBLER_H_
+
+namespace v8 {
+namespace internal {
+
+struct DisjunctDecisionRow {
+  RegExpCharacterClass cc;
+  Label* on_match;
+};
+
+
+class RegExpMacroAssembler {
+ public:
+  // The implementation must be able to handle at least:
+  static const int kMaxRegister = (1 << 16) - 1;
+  static const int kMaxCPOffset = (1 << 15) - 1;
+  static const int kMinCPOffset = -(1 << 15);
+  enum IrregexpImplementation {
+    kIA32Implementation,
+    kARMImplementation,
+    kX64Implementation,
+    kBytecodeImplementation
+  };
+
+  enum StackCheckFlag {
+    kNoStackLimitCheck = false,
+    kCheckStackLimit = true
+  };
+
+  RegExpMacroAssembler();
+  virtual ~RegExpMacroAssembler();
+  // The maximal number of pushes between stack checks. Users must supply
+  // the kCheckStackLimit flag to push operations (instead of
+  // kNoStackLimitCheck) at least once for every stack_limit_slack() pushes
+  // that are executed.
+  virtual int stack_limit_slack() = 0;
+  virtual bool CanReadUnaligned();
+  virtual void AdvanceCurrentPosition(int by) = 0;  // Signed cp change.
+  virtual void AdvanceRegister(int reg, int by) = 0;  // r[reg] += by.
+  // Continues execution from the position pushed on the top of the backtrack
+  // stack by an earlier PushBacktrack(Label*).
+  virtual void Backtrack() = 0;
+  virtual void Bind(Label* label) = 0;
+  virtual void CheckAtStart(Label* on_at_start) = 0;
+  // Check the current character against c; jump to on_equal if they match.
+  virtual void CheckCharacter(uint32_t c, Label* on_equal) = 0;
+  // Bitwise and the current character with the given constant and then
+  // check for a match with c.
+  virtual void CheckCharacterAfterAnd(uint32_t c,
+                                      uint32_t and_with,
+                                      Label* on_equal) = 0;
+  virtual void CheckCharacterGT(uc16 limit, Label* on_greater) = 0;
+  virtual void CheckCharacterLT(uc16 limit, Label* on_less) = 0;
+  // Check the current character for a match with a literal string.  If we
+  // fail to match then goto the on_failure label.  If check_eos is set then
+  // the end of input always fails.  If check_eos is clear then it is the
+  // caller's responsibility to ensure that the end of string is not hit.
+  // If the label is NULL then we should pop a backtrack address off
+  // the stack and go to that.
+  virtual void CheckCharacters(
+      Vector<const uc16> str,
+      int cp_offset,
+      Label* on_failure,
+      bool check_eos) = 0;
+  virtual void CheckGreedyLoop(Label* on_tos_equals_current_position) = 0;
+  virtual void CheckNotAtStart(Label* on_not_at_start) = 0;
+  virtual void CheckNotBackReference(int start_reg, Label* on_no_match) = 0;
+  virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+                                               Label* on_no_match) = 0;
+  // Check the current character for a match with a literal character.  If we
+  // fail to match then goto the on_failure label.  End of input always
+  // matches.  If the label is NULL then we should pop a backtrack address off
+  // the stack and go to that.
+  virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal) = 0;
+  virtual void CheckNotCharacterAfterAnd(uint32_t c,
+                                         uint32_t and_with,
+                                         Label* on_not_equal) = 0;
+  // Subtract a constant from the current character, then bitwise-and the
+  // result with the given constant and then check for a match with c.
+  virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
+                                              uc16 minus,
+                                              uc16 and_with,
+                                              Label* on_not_equal) = 0;
+  virtual void CheckNotRegistersEqual(int reg1,
+                                      int reg2,
+                                      Label* on_not_equal) = 0;
+
+  // Checks whether the given offset from the current position is before
+  // the end of the string.  May overwrite the current character.
+  virtual void CheckPosition(int cp_offset, Label* on_outside_input) {
+    LoadCurrentCharacter(cp_offset, on_outside_input, true);
+  }
+  // Check whether a standard/default character class matches the current
+  // character. Returns false if the type of special character class does
+  // not have custom support.
+  // May clobber the current loaded character.
+  virtual bool CheckSpecialCharacterClass(uc16 type,
+                                          int cp_offset,
+                                          bool check_offset,
+                                          Label* on_no_match) {
+    return false;
+  }
+  virtual void Fail() = 0;
+  virtual Handle<Object> GetCode(Handle<String> source) = 0;
+  virtual void GoTo(Label* label) = 0;
+  // Check whether a register is >= a given constant and go to a label if it
+  // is.  Backtracks instead if the label is NULL.
+  virtual void IfRegisterGE(int reg, int comparand, Label* if_ge) = 0;
+  // Check whether a register is < a given constant and go to a label if it is.
+  // Backtracks instead if the label is NULL.
+  virtual void IfRegisterLT(int reg, int comparand, Label* if_lt) = 0;
+  // Check whether a register is == to the current position and go to a
+  // label if it is.
+  virtual void IfRegisterEqPos(int reg, Label* if_eq) = 0;
+  virtual IrregexpImplementation Implementation() = 0;
+  virtual void LoadCurrentCharacter(int cp_offset,
+                                    Label* on_end_of_input,
+                                    bool check_bounds = true,
+                                    int characters = 1) = 0;
+  virtual void PopCurrentPosition() = 0;
+  virtual void PopRegister(int register_index) = 0;
+  // Pushes the label on the backtrack stack, so that a following Backtrack
+  // will go to this label. Always checks the backtrack stack limit.
+  virtual void PushBacktrack(Label* label) = 0;
+  virtual void PushCurrentPosition() = 0;
+  virtual void PushRegister(int register_index,
+                            StackCheckFlag check_stack_limit) = 0;
+  virtual void ReadCurrentPositionFromRegister(int reg) = 0;
+  virtual void ReadStackPointerFromRegister(int reg) = 0;
+  virtual void SetRegister(int register_index, int to) = 0;
+  virtual void Succeed() = 0;
+  virtual void WriteCurrentPositionToRegister(int reg, int cp_offset) = 0;
+  virtual void ClearRegisters(int reg_from, int reg_to) = 0;
+  virtual void WriteStackPointerToRegister(int reg) = 0;
+};
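+
+// Emission sketch (illustrative only, not actual compiler output): matching
+// the two-character literal "ab" at the current position can be expressed
+// through this interface roughly as follows, with `masm` being any concrete
+// RegExpMacroAssembler and `fail` a label bound to the failure code:
+//
+//   masm->LoadCurrentCharacter(0, &fail);   // load the character at cp + 0
+//   masm->CheckNotCharacter('a', &fail);    // jump to fail unless it is 'a'
+//   masm->LoadCurrentCharacter(1, &fail);   // load the character at cp + 1
+//   masm->CheckNotCharacter('b', &fail);    // jump to fail unless it is 'b'
+//   masm->AdvanceCurrentPosition(2);
+//   masm->Succeed();
+//   masm->Bind(&fail);
+//   masm->Fail();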
+
+
+#ifdef V8_NATIVE_REGEXP  // Avoid compiling unused code.
+
+class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
+ public:
+  // Type of input string to generate code for.
+  enum Mode { ASCII = 1, UC16 = 2 };
+
+  // Result of calling generated native RegExp code.
+  // RETRY: Something significant changed during execution, and the matching
+  //        should be retried from scratch.
+  // EXCEPTION: Something failed during execution. If no exception has been
+  //        thrown, it's an internal out-of-memory, and the caller should
+  //        throw the exception.
+  // FAILURE: Matching failed.
+  // SUCCESS: Matching succeeded, and the output array has been filled with
+  //        capture positions.
+  enum Result { RETRY = -2, EXCEPTION = -1, FAILURE = 0, SUCCESS = 1 };
+
+  NativeRegExpMacroAssembler();
+  virtual ~NativeRegExpMacroAssembler();
+  virtual bool CanReadUnaligned();
+
+  static Result Match(Handle<Code> regexp,
+                      Handle<String> subject,
+                      int* offsets_vector,
+                      int offsets_vector_length,
+                      int previous_index);
+
+  // Compares two-byte strings case insensitively.
+  // Called from generated RegExp code.
+  static int CaseInsensitiveCompareUC16(Address byte_offset1,
+                                        Address byte_offset2,
+                                        size_t byte_length);
+
+  // Called from RegExp if the backtrack stack limit is hit.
+  // Tries to expand the stack. Returns the new stack-pointer if
+  // successful, and updates the stack_top address, or returns 0 if unable
+  // to grow the stack.
+  // This function must not trigger a garbage collection.
+  static Address GrowStack(Address stack_pointer, Address* stack_top);
+
+  static const byte* StringCharacterPosition(String* subject, int start_index);
+
+  static Result Execute(Code* code,
+                        String* input,
+                        int start_offset,
+                        const byte* input_start,
+                        const byte* input_end,
+                        int* output,
+                        bool at_start);
+};
+
+
+// Enter C code from generated RegExp code in a way that allows
+// the C code to fix the return address in case of a GC.
+// Currently only needed on ARM.
+class RegExpCEntryStub: public CodeStub {
+ public:
+  RegExpCEntryStub() {}
+  virtual ~RegExpCEntryStub() {}
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Major MajorKey() { return RegExpCEntry; }
+  int MinorKey() { return 0; }
+  const char* GetName() { return "RegExpCEntryStub"; }
+};
+
+#endif  // V8_NATIVE_REGEXP
+
+} }  // namespace v8::internal
+
+#endif  // V8_REGEXP_MACRO_ASSEMBLER_H_
diff --git a/src/regexp-stack.cc b/src/regexp-stack.cc
new file mode 100644
index 0000000..87a674d
--- /dev/null
+++ b/src/regexp-stack.cc
@@ -0,0 +1,103 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+#include "top.h"
+#include "regexp-stack.h"
+
+namespace v8 {
+namespace internal {
+
+RegExpStack::RegExpStack() {
+  // Initialize, if not already initialized.
+  RegExpStack::EnsureCapacity(0);
+}
+
+
+RegExpStack::~RegExpStack() {
+  // Reset the buffer if it has grown.
+  RegExpStack::Reset();
+}
+
+
+char* RegExpStack::ArchiveStack(char* to) {
+  size_t size = sizeof(thread_local_);
+  memcpy(reinterpret_cast<void*>(to),
+         &thread_local_,
+         size);
+  thread_local_ = ThreadLocal();
+  return to + size;
+}
+
+
+char* RegExpStack::RestoreStack(char* from) {
+  size_t size = sizeof(thread_local_);
+  memcpy(&thread_local_, reinterpret_cast<void*>(from), size);
+  return from + size;
+}
+
+
+void RegExpStack::Reset() {
+  if (thread_local_.memory_size_ > kMinimumStackSize) {
+    DeleteArray(thread_local_.memory_);
+    thread_local_ = ThreadLocal();
+  }
+}
+
+
+void RegExpStack::ThreadLocal::Free() {
+  if (thread_local_.memory_size_ > 0) {
+    DeleteArray(thread_local_.memory_);
+    thread_local_ = ThreadLocal();
+  }
+}
+
+
+Address RegExpStack::EnsureCapacity(size_t size) {
+  if (size > kMaximumStackSize) return NULL;
+  if (size < kMinimumStackSize) size = kMinimumStackSize;
+  if (thread_local_.memory_size_ < size) {
+    Address new_memory = NewArray<byte>(size);
+    if (thread_local_.memory_size_ > 0) {
+      // Copy original memory into top of new memory.
+      memcpy(reinterpret_cast<void*>(
+          new_memory + size - thread_local_.memory_size_),
+             reinterpret_cast<void*>(thread_local_.memory_),
+             thread_local_.memory_size_);
+      DeleteArray(thread_local_.memory_);
+    }
+    thread_local_.memory_ = new_memory;
+    thread_local_.memory_size_ = size;
+    thread_local_.limit_ = new_memory + kStackLimitSlack * kPointerSize;
+  }
+  return thread_local_.memory_ + thread_local_.memory_size_;
+}
+
+
+RegExpStack::ThreadLocal RegExpStack::thread_local_;
+
+}}  // namespace v8::internal
diff --git a/src/regexp-stack.h b/src/regexp-stack.h
new file mode 100644
index 0000000..319ab28
--- /dev/null
+++ b/src/regexp-stack.h
@@ -0,0 +1,109 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_REGEXP_STACK_H_
+#define V8_REGEXP_STACK_H_
+
+namespace v8 {
+namespace internal {
+
+// Maintains a per-v8thread stack area that can be used by irregexp
+// implementation for its backtracking stack.
+// Since there is only one stack area, the Irregexp implementation is not
+// re-entrant. I.e., no regular expressions may be executed in the same thread
+// during a preempted Irregexp execution.
+class RegExpStack {
+ public:
+  // Number of allocated locations on the stack below the limit.
+  // No sequence of pushes may be longer than this without doing a
+  // stack-limit check.
+  static const int kStackLimitSlack = 32;
+
+  // Create and delete an instance to control the life-time of a growing stack.
+  RegExpStack();  // Initializes the stack memory area if necessary.
+  ~RegExpStack();  // Releases the stack if it has grown.
+
+  // Gives the top of the memory used as stack.
+  static Address stack_base() {
+    ASSERT(thread_local_.memory_size_ != 0);
+    return thread_local_.memory_ + thread_local_.memory_size_;
+  }
+
+  // The total size of the memory allocated for the stack.
+  static size_t stack_capacity() { return thread_local_.memory_size_; }
+
+  // If the stack pointer gets below the limit, we should react and
+  // either grow the stack or report an out-of-stack exception.
+  // There is only a limited number of locations below the stack limit,
+  // so users of the stack should check the stack limit during any
+  // sequence of pushes longer than this.
+  static Address* limit_address() { return &(thread_local_.limit_); }
+
+  // Ensures that there is a memory area with at least the specified size.
+  // If passing zero, the default/minimum size buffer is allocated.
+  static Address EnsureCapacity(size_t size);
+
+  // Thread local archiving.
+  static size_t ArchiveSpacePerThread() { return sizeof(thread_local_); }
+  static char* ArchiveStack(char* to);
+  static char* RestoreStack(char* from);
+  static void FreeThreadResources() { thread_local_.Free(); }
+
+ private:
+  // Artificial limit used when no memory has been allocated.
+  static const uintptr_t kMemoryTop = static_cast<uintptr_t>(-1);
+
+  // Minimal size of allocated stack area.
+  static const size_t kMinimumStackSize = 1 * KB;
+
+  // Maximal size of allocated stack area.
+  static const size_t kMaximumStackSize = 64 * MB;
+
+  // Structure holding the allocated memory, size and limit.
+  struct ThreadLocal {
+    ThreadLocal()
+        : memory_(NULL),
+          memory_size_(0),
+          limit_(reinterpret_cast<Address>(kMemoryTop)) {}
+    // If memory_size_ > 0 then memory_ must be non-NULL.
+    Address memory_;
+    size_t memory_size_;
+    Address limit_;
+    void Free();
+  };
+
+  // Resets the buffer if it has grown beyond the default/minimum size.
+  // After this, the buffer is either the default size, or it is empty, so
+  // you have to call EnsureCapacity before using it again.
+  static void Reset();
+
+  static ThreadLocal thread_local_;
+};
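+
+// Usage sketch (illustrative): code that is about to run generated regexp
+// code constructs a RegExpStack on the C++ stack to guarantee the backing
+// memory exists for the duration of the call, then reads the base address:
+//
+//   RegExpStack stack;                       // ensures the minimum capacity
+//   Address stack_base = RegExpStack::stack_base();
+//   // ... pass stack_base to the generated code as its backtrack stack ...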
+
+}}  // namespace v8::internal
+
+#endif  // V8_REGEXP_STACK_H_
diff --git a/src/register-allocator-inl.h b/src/register-allocator-inl.h
new file mode 100644
index 0000000..8fb498b
--- /dev/null
+++ b/src/register-allocator-inl.h
@@ -0,0 +1,74 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_REGISTER_ALLOCATOR_INL_H_
+#define V8_REGISTER_ALLOCATOR_INL_H_
+
+#include "codegen.h"
+#include "register-allocator.h"
+#include "virtual-frame.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/register-allocator-ia32-inl.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/register-allocator-x64-inl.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/register-allocator-arm-inl.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+
+namespace v8 {
+namespace internal {
+
+Result::~Result() {
+  if (is_register()) {
+    CodeGeneratorScope::Current()->allocator()->Unuse(reg());
+  }
+}
+
+
+void Result::Unuse() {
+  if (is_register()) {
+    CodeGeneratorScope::Current()->allocator()->Unuse(reg());
+  }
+  invalidate();
+}
+
+
+void Result::CopyTo(Result* destination) const {
+  destination->value_ = value_;
+  if (is_register()) {
+    CodeGeneratorScope::Current()->allocator()->Use(reg());
+  }
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_REGISTER_ALLOCATOR_INL_H_
diff --git a/src/register-allocator.cc b/src/register-allocator.cc
new file mode 100644
index 0000000..d55f949
--- /dev/null
+++ b/src/register-allocator.cc
@@ -0,0 +1,100 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "register-allocator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// Result implementation.
+
+
+Result::Result(Register reg) {
+  ASSERT(reg.is_valid() && !RegisterAllocator::IsReserved(reg));
+  CodeGeneratorScope::Current()->allocator()->Use(reg);
+  value_ = TypeField::encode(REGISTER) | DataField::encode(reg.code_);
+}
+
+
+Result::ZoneObjectList* Result::ConstantList() {
+  static ZoneObjectList list(10);
+  return &list;
+}
+
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+
+Result RegisterAllocator::AllocateWithoutSpilling() {
+  // Return the first free register, if any.
+  int num = registers_.ScanForFreeRegister();
+  if (num == RegisterAllocator::kInvalidRegister) {
+    return Result();
+  }
+  return Result(RegisterAllocator::ToRegister(num));
+}
+
+
+Result RegisterAllocator::Allocate() {
+  Result result = AllocateWithoutSpilling();
+  if (!result.is_valid()) {
+    // Ask the current frame to spill a register.
+    ASSERT(cgen_->has_valid_frame());
+    Register free_reg = cgen_->frame()->SpillAnyRegister();
+    if (free_reg.is_valid()) {
+      ASSERT(!is_used(free_reg));
+      return Result(free_reg);
+    }
+  }
+  return result;
+}
+
+
+Result RegisterAllocator::Allocate(Register target) {
+  // If the target is not referenced, it can simply be allocated.
+  if (!is_used(target)) {
+    return Result(target);
+  }
+  // If the target is only referenced in the frame, it can be spilled and
+  // then allocated.
+  ASSERT(cgen_->has_valid_frame());
+  if (cgen_->frame()->is_used(target) && count(target) == 1)  {
+    cgen_->frame()->Spill(target);
+    ASSERT(!is_used(target));
+    return Result(target);
+  }
+  // Otherwise (if it's referenced outside the frame) we cannot allocate it.
+  return Result();
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/register-allocator.h b/src/register-allocator.h
new file mode 100644
index 0000000..1765633
--- /dev/null
+++ b/src/register-allocator.h
@@ -0,0 +1,295 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_REGISTER_ALLOCATOR_H_
+#define V8_REGISTER_ALLOCATOR_H_
+
+#include "macro-assembler.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/register-allocator-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/register-allocator-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/register-allocator-arm.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+namespace v8 {
+namespace internal {
+
+
+// -------------------------------------------------------------------------
+// Results
+//
+// Results encapsulate the compile-time values manipulated by the code
+// generator.  They can represent registers or constants.
+
+class Result BASE_EMBEDDED {
+ public:
+  enum Type {
+    INVALID,
+    REGISTER,
+    CONSTANT
+  };
+
+  // Construct an invalid result.
+  Result() { invalidate(); }
+
+  // Construct a register Result.
+  explicit Result(Register reg);
+
+  // Construct a Result whose value is a compile-time constant.
+  explicit Result(Handle<Object> value) {
+    value_ = TypeField::encode(CONSTANT)
+        | DataField::encode(ConstantList()->length());
+    ConstantList()->Add(value);
+  }
+
+  // The copy constructor and assignment operators could each create a new
+  // register reference.
+  Result(const Result& other) {
+    other.CopyTo(this);
+  }
+
+  Result& operator=(const Result& other) {
+    if (this != &other) {
+      Unuse();
+      other.CopyTo(this);
+    }
+    return *this;
+  }
+
+  inline ~Result();
+
+  // Static indirection table for handles to constants.  If a Result
+  // represents a constant, the data contains an index into this table
+  // of handles to the actual constants.
+  typedef ZoneList<Handle<Object> > ZoneObjectList;
+
+  static ZoneObjectList* ConstantList();
+
+  // Clear the constants indirection table.
+  static void ClearConstantList() {
+    ConstantList()->Clear();
+  }
+
+  inline void Unuse();
+
+  Type type() const { return TypeField::decode(value_); }
+
+  void invalidate() { value_ = TypeField::encode(INVALID); }
+
+  bool is_valid() const { return type() != INVALID; }
+  bool is_register() const { return type() == REGISTER; }
+  bool is_constant() const { return type() == CONSTANT; }
+
+  Register reg() const {
+    ASSERT(is_register());
+    uint32_t reg = DataField::decode(value_);
+    Register result;
+    result.code_ = reg;
+    return result;
+  }
+
+  Handle<Object> handle() const {
+    ASSERT(type() == CONSTANT);
+    return ConstantList()->at(DataField::decode(value_));
+  }
+
+  // Move this result to an arbitrary register.  The register is not
+  // necessarily spilled from the frame or even singly-referenced outside
+  // it.
+  void ToRegister();
+
+  // Move this result to a specified register.  The register is spilled from
+  // the frame, and the register is singly-referenced (by this result)
+  // outside the frame.
+  void ToRegister(Register reg);
+
+ private:
+  uint32_t value_;
+
+  class TypeField: public BitField<Type, 0, 2> {};
+  class DataField: public BitField<uint32_t, 2, 32 - 3> {};
+
+  inline void CopyTo(Result* destination) const;
+
+  friend class CodeGeneratorScope;
+};
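+
+// Reference-counting sketch (illustrative, `allocator` names the current
+// RegisterAllocator): constructing a register Result records a use of the
+// register, copying it records another, and every destructor releases one:
+//
+//   Result a = allocator->Allocate();   // register refcount becomes 1
+//   Result b = a;                       // copy: refcount becomes 2
+//   a.Unuse();                          // back to 1; a is now invalid
+//   // b's destructor drops the last reference when it goes out of scope.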
+
+
+// -------------------------------------------------------------------------
+// Register file
+//
+// The register file tracks reference counts for the processor registers.
+// It is used by both the register allocator and the virtual frame.
+
+class RegisterFile BASE_EMBEDDED {
+ public:
+  RegisterFile() { Reset(); }
+
+  void Reset() {
+    for (int i = 0; i < kNumRegisters; i++) {
+      ref_counts_[i] = 0;
+    }
+  }
+
+  // Predicates and accessors for the reference counts.
+  bool is_used(int num) {
+    ASSERT(0 <= num && num < kNumRegisters);
+    return ref_counts_[num] > 0;
+  }
+
+  int count(int num) {
+    ASSERT(0 <= num && num < kNumRegisters);
+    return ref_counts_[num];
+  }
+
+  // Record a use of a register by incrementing its reference count.
+  void Use(int num) {
+    ASSERT(0 <= num && num < kNumRegisters);
+    ref_counts_[num]++;
+  }
+
+  // Record that a register will no longer be used by decrementing its
+  // reference count.
+  void Unuse(int num) {
+    ASSERT(is_used(num));
+    ref_counts_[num]--;
+  }
+
+  // Copy the reference counts from this register file to the other.
+  void CopyTo(RegisterFile* other) {
+    for (int i = 0; i < kNumRegisters; i++) {
+      other->ref_counts_[i] = ref_counts_[i];
+    }
+  }
+
+ private:
+  static const int kNumRegisters = RegisterAllocatorConstants::kNumRegisters;
+
+  int ref_counts_[kNumRegisters];
+
+  // Very fast inlined loop to find a free register.  Used in
+  // RegisterAllocator::AllocateWithoutSpilling.  Returns
+  // kInvalidRegister if no free register found.
+  int ScanForFreeRegister() {
+    for (int i = 0; i < RegisterAllocatorConstants::kNumRegisters; i++) {
+      if (!is_used(i)) return i;
+    }
+    return RegisterAllocatorConstants::kInvalidRegister;
+  }
+
+  friend class RegisterAllocator;
+};
+
+
+// -------------------------------------------------------------------------
+// Register allocator
+//
+
+class RegisterAllocator BASE_EMBEDDED {
+ public:
+  static const int kNumRegisters =
+      RegisterAllocatorConstants::kNumRegisters;
+  static const int kInvalidRegister =
+      RegisterAllocatorConstants::kInvalidRegister;
+
+  explicit RegisterAllocator(CodeGenerator* cgen) : cgen_(cgen) {}
+
+  // True if the register is reserved by the code generator, false if it
+  // can be freely used by the allocator.  Defined in the platform-specific
+  // XXX-inl.h files.
+  static inline bool IsReserved(Register reg);
+
+  // Convert between (unreserved) assembler registers and allocator
+  // numbers.  Defined in the platform-specific XXX-inl.h files.
+  static inline int ToNumber(Register reg);
+  static inline Register ToRegister(int num);
+
+  // Predicates and accessors for the registers' reference counts.
+  bool is_used(int num) { return registers_.is_used(num); }
+  bool is_used(Register reg) { return registers_.is_used(ToNumber(reg)); }
+
+  int count(int num) { return registers_.count(num); }
+  int count(Register reg) { return registers_.count(ToNumber(reg)); }
+
+  // Explicitly record a reference to a register.
+  void Use(int num) { registers_.Use(num); }
+  void Use(Register reg) { registers_.Use(ToNumber(reg)); }
+
+  // Explicitly record that a register will no longer be used.
+  void Unuse(int num) { registers_.Unuse(num); }
+  void Unuse(Register reg) { registers_.Unuse(ToNumber(reg)); }
+
+  // Reset the register reference counts to free all non-reserved registers.
+  void Reset() { registers_.Reset(); }
+
+  // Initialize the register allocator for entry to a JS function.  On
+  // entry, the (non-reserved) registers used by the JS calling
+  // convention are referenced and the other (non-reserved) registers
+  // are free.
+  inline void Initialize();
+
+  // Allocate a free register and return a register result if possible or
+  // fail and return an invalid result.
+  Result Allocate();
+
+  // Allocate a specific register if possible, spilling it from the
+  // current frame if necessary, or else fail and return an invalid
+  // result.
+  Result Allocate(Register target);
+
+  // Allocate a free register without spilling any from the current
+  // frame or fail and return an invalid result.
+  Result AllocateWithoutSpilling();
+
+  // Allocate a free byte register without spilling any from the current
+  // frame or fail and return an invalid result.
+  Result AllocateByteRegisterWithoutSpilling();
+
+  // Copy the internal state to a register file, to be restored later by
+  // RestoreFrom.
+  void SaveTo(RegisterFile* register_file) {
+    registers_.CopyTo(register_file);
+  }
+
+  // Restore the internal state.
+  void RestoreFrom(RegisterFile* register_file) {
+    register_file->CopyTo(&registers_);
+  }
+
+ private:
+  CodeGenerator* cgen_;
+  RegisterFile registers_;
+};
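+
+// Save/restore sketch (illustrative): a code generator can snapshot the
+// reference counts before emitting one arm of a control-flow split and
+// rewind them before emitting the other arm, so both arms start from the
+// same register state:
+//
+//   RegisterFile snapshot;
+//   allocator->SaveTo(&snapshot);        // capture the current counts
+//   // ... emit the "then" arm ...
+//   allocator->RestoreFrom(&snapshot);   // rewind for the "else" arm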
+
+} }  // namespace v8::internal
+
+#endif  // V8_REGISTER_ALLOCATOR_H_
diff --git a/src/rewriter.cc b/src/rewriter.cc
new file mode 100644
index 0000000..11fc071
--- /dev/null
+++ b/src/rewriter.cc
@@ -0,0 +1,831 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "ast.h"
+#include "func-name-inferrer.h"
+#include "scopes.h"
+#include "rewriter.h"
+
+namespace v8 {
+namespace internal {
+
+
+class AstOptimizer: public AstVisitor {
+ public:
+  explicit AstOptimizer() : has_function_literal_(false) {}
+  explicit AstOptimizer(Handle<String> enclosing_name)
+      : has_function_literal_(false) {
+    func_name_inferrer_.PushEnclosingName(enclosing_name);
+  }
+
+  void Optimize(ZoneList<Statement*>* statements);
+
+ private:
+  // Used for loop condition analysis.  Cleared before visiting a loop
+  // condition, set when a function literal is visited.
+  bool has_function_literal_;
+  // Helper object for function name inferring.
+  FuncNameInferrer func_name_inferrer_;
+
+  // Helpers
+  void OptimizeArguments(ZoneList<Expression*>* arguments);
+
+  // Node visitors.
+#define DEF_VISIT(type) \
+  virtual void Visit##type(type* node);
+  AST_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+  DISALLOW_COPY_AND_ASSIGN(AstOptimizer);
+};
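+
+// Usage sketch (illustrative): the rewriter runs the optimizer over a
+// function body, passing the enclosing name so anonymous functions can be
+// given inferred names:
+//
+//   AstOptimizer optimizer(function_name);
+//   optimizer.Optimize(function_literal->body());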
+
+
+void AstOptimizer::Optimize(ZoneList<Statement*>* statements) {
+  int len = statements->length();
+  for (int i = 0; i < len; i++) {
+    Visit(statements->at(i));
+  }
+}
+
+
+void AstOptimizer::OptimizeArguments(ZoneList<Expression*>* arguments) {
+  for (int i = 0; i < arguments->length(); i++) {
+    Visit(arguments->at(i));
+  }
+}
+
+
+void AstOptimizer::VisitBlock(Block* node) {
+  Optimize(node->statements());
+}
+
+
+void AstOptimizer::VisitExpressionStatement(ExpressionStatement* node) {
+  Visit(node->expression());
+}
+
+
+void AstOptimizer::VisitIfStatement(IfStatement* node) {
+  Visit(node->condition());
+  Visit(node->then_statement());
+  if (node->HasElseStatement()) {
+    Visit(node->else_statement());
+  }
+}
+
+
+void AstOptimizer::VisitLoopStatement(LoopStatement* node) {
+  if (node->init() != NULL) {
+    Visit(node->init());
+  }
+  if (node->cond() != NULL) {
+    has_function_literal_ = false;
+    Visit(node->cond());
+    node->may_have_function_literal_ = has_function_literal_;
+  }
+  if (node->body() != NULL) {
+    Visit(node->body());
+  }
+  if (node->next() != NULL) {
+    Visit(node->next());
+  }
+}
+
+
+void AstOptimizer::VisitForInStatement(ForInStatement* node) {
+  Visit(node->each());
+  Visit(node->enumerable());
+  Visit(node->body());
+}
+
+
+void AstOptimizer::VisitTryCatch(TryCatch* node) {
+  Visit(node->try_block());
+  Visit(node->catch_var());
+  Visit(node->catch_block());
+}
+
+
+void AstOptimizer::VisitTryFinally(TryFinally* node) {
+  Visit(node->try_block());
+  Visit(node->finally_block());
+}
+
+
+void AstOptimizer::VisitSwitchStatement(SwitchStatement* node) {
+  Visit(node->tag());
+  for (int i = 0; i < node->cases()->length(); i++) {
+    CaseClause* clause = node->cases()->at(i);
+    if (!clause->is_default()) {
+      Visit(clause->label());
+    }
+    Optimize(clause->statements());
+  }
+}
+
+
+void AstOptimizer::VisitContinueStatement(ContinueStatement* node) {
+  USE(node);
+}
+
+
+void AstOptimizer::VisitBreakStatement(BreakStatement* node) {
+  USE(node);
+}
+
+
+void AstOptimizer::VisitDeclaration(Declaration* node) {
+  // Will not be reached by the current optimizations.
+  USE(node);
+}
+
+
+void AstOptimizer::VisitEmptyStatement(EmptyStatement* node) {
+  USE(node);
+}
+
+
+void AstOptimizer::VisitReturnStatement(ReturnStatement* node) {
+  Visit(node->expression());
+}
+
+
+void AstOptimizer::VisitWithEnterStatement(WithEnterStatement* node) {
+  Visit(node->expression());
+}
+
+
+void AstOptimizer::VisitWithExitStatement(WithExitStatement* node) {
+  USE(node);
+}
+
+
+void AstOptimizer::VisitDebuggerStatement(DebuggerStatement* node) {
+  USE(node);
+}
+
+
+void AstOptimizer::VisitFunctionLiteral(FunctionLiteral* node) {
+  has_function_literal_ = true;
+
+  if (node->name()->length() == 0) {
+    // Anonymous function.
+    func_name_inferrer_.AddFunction(node);
+  }
+}
+
+
+void AstOptimizer::VisitFunctionBoilerplateLiteral(
+    FunctionBoilerplateLiteral* node) {
+  USE(node);
+}
+
+
+void AstOptimizer::VisitConditional(Conditional* node) {
+  Visit(node->condition());
+  Visit(node->then_expression());
+  Visit(node->else_expression());
+}
+
+
+void AstOptimizer::VisitSlot(Slot* node) {
+  USE(node);
+}
+
+
+void AstOptimizer::VisitVariableProxy(VariableProxy* node) {
+  Variable* var = node->AsVariable();
+  if (var != NULL) {
+    if (var->type()->IsKnown()) {
+      node->type()->CopyFrom(var->type());
+    } else if (node->type()->IsLikelySmi()) {
+      var->type()->SetAsLikelySmi();
+    }
+
+    if (!var->is_this() &&
+        !Heap::result_symbol()->Equals(*var->name())) {
+      func_name_inferrer_.PushName(var->name());
+    }
+  }
+}
+
+
+void AstOptimizer::VisitLiteral(Literal* node) {
+  Handle<Object> literal = node->handle();
+  if (literal->IsSmi()) {
+    node->type()->SetAsLikelySmi();
+  } else if (literal->IsString()) {
+    Handle<String> lit_str(Handle<String>::cast(literal));
+    if (!Heap::prototype_symbol()->Equals(*lit_str)) {
+      func_name_inferrer_.PushName(lit_str);
+    }
+  }
+}
+
+
+void AstOptimizer::VisitRegExpLiteral(RegExpLiteral* node) {
+  USE(node);
+}
+
+
+void AstOptimizer::VisitArrayLiteral(ArrayLiteral* node) {
+  for (int i = 0; i < node->values()->length(); i++) {
+    Visit(node->values()->at(i));
+  }
+}
+
+
+void AstOptimizer::VisitObjectLiteral(ObjectLiteral* node) {
+  for (int i = 0; i < node->properties()->length(); i++) {
+    ScopedFuncNameInferrer scoped_fni(&func_name_inferrer_);
+    scoped_fni.Enter();
+    Visit(node->properties()->at(i)->key());
+    Visit(node->properties()->at(i)->value());
+  }
+}
+
+
+void AstOptimizer::VisitCatchExtensionObject(CatchExtensionObject* node) {
+  Visit(node->key());
+  Visit(node->value());
+}
+
+
+void AstOptimizer::VisitAssignment(Assignment* node) {
+  ScopedFuncNameInferrer scoped_fni(&func_name_inferrer_);
+  switch (node->op()) {
+    case Token::INIT_VAR:
+    case Token::INIT_CONST:
+    case Token::ASSIGN:
+      // No type can be inferred from a general assignment.
+
+      // Don't infer if it is an "a = function(){...}();"-like expression.
+      if (node->value()->AsCall() == NULL) {
+        scoped_fni.Enter();
+      }
+      break;
+    case Token::ASSIGN_BIT_OR:
+    case Token::ASSIGN_BIT_XOR:
+    case Token::ASSIGN_BIT_AND:
+    case Token::ASSIGN_SHL:
+    case Token::ASSIGN_SAR:
+    case Token::ASSIGN_SHR:
+      node->type()->SetAsLikelySmiIfUnknown();
+      node->target()->type()->SetAsLikelySmiIfUnknown();
+      node->value()->type()->SetAsLikelySmiIfUnknown();
+      break;
+    case Token::ASSIGN_ADD:
+    case Token::ASSIGN_SUB:
+    case Token::ASSIGN_MUL:
+    case Token::ASSIGN_DIV:
+    case Token::ASSIGN_MOD:
+      if (node->type()->IsLikelySmi()) {
+        node->target()->type()->SetAsLikelySmiIfUnknown();
+        node->value()->type()->SetAsLikelySmiIfUnknown();
+      }
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+
+  Visit(node->target());
+  Visit(node->value());
+
+  switch (node->op()) {
+    case Token::INIT_VAR:
+    case Token::INIT_CONST:
+    case Token::ASSIGN:
+      // Pure assignment copies the type from the value.
+      node->type()->CopyFrom(node->value()->type());
+      break;
+    case Token::ASSIGN_BIT_OR:
+    case Token::ASSIGN_BIT_XOR:
+    case Token::ASSIGN_BIT_AND:
+    case Token::ASSIGN_SHL:
+    case Token::ASSIGN_SAR:
+    case Token::ASSIGN_SHR:
+      // Should have been set up above already.
+      break;
+    case Token::ASSIGN_ADD:
+    case Token::ASSIGN_SUB:
+    case Token::ASSIGN_MUL:
+    case Token::ASSIGN_DIV:
+    case Token::ASSIGN_MOD:
+      if (node->type()->IsUnknown()) {
+        if (node->target()->type()->IsLikelySmi() ||
+            node->value()->type()->IsLikelySmi()) {
+          node->type()->SetAsLikelySmi();
+        }
+      }
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+
+  // Since this is an assignment, we have to propagate this node's type to
+  // the variable.
+  VariableProxy* proxy = node->target()->AsVariableProxy();
+  if (proxy != NULL) {
+    Variable* var = proxy->AsVariable();
+    if (var != NULL) {
+      SmiAnalysis* var_type = var->type();
+      if (var_type->IsUnknown()) {
+        var_type->CopyFrom(node->type());
+      } else if (var_type->IsLikelySmi()) {
+        // We do not reset likely types to Unknown.
+      }
+    }
+  }
+}
+
+
+void AstOptimizer::VisitThrow(Throw* node) {
+  Visit(node->exception());
+}
+
+
+void AstOptimizer::VisitProperty(Property* node) {
+  Visit(node->obj());
+  Visit(node->key());
+}
+
+
+void AstOptimizer::VisitCall(Call* node) {
+  Visit(node->expression());
+  OptimizeArguments(node->arguments());
+}
+
+
+void AstOptimizer::VisitCallNew(CallNew* node) {
+  Visit(node->expression());
+  OptimizeArguments(node->arguments());
+}
+
+
+void AstOptimizer::VisitCallRuntime(CallRuntime* node) {
+  ScopedFuncNameInferrer scoped_fni(&func_name_inferrer_);
+  if (Factory::InitializeVarGlobal_symbol()->Equals(*node->name()) &&
+      node->arguments()->length() >= 2 &&
+      node->arguments()->at(1)->AsFunctionLiteral() != NULL) {
+    scoped_fni.Enter();
+  }
+  OptimizeArguments(node->arguments());
+}
+
+
+void AstOptimizer::VisitUnaryOperation(UnaryOperation* node) {
+  Visit(node->expression());
+}
+
+
+void AstOptimizer::VisitCountOperation(CountOperation* node) {
+  // Count operations assume that they work on Smis.
+  node->type()->SetAsLikelySmiIfUnknown();
+  node->expression()->type()->SetAsLikelySmiIfUnknown();
+  Visit(node->expression());
+}
+
+
+void AstOptimizer::VisitBinaryOperation(BinaryOperation* node) {
+  // Depending on the operation we can propagate this node's type down to
+  // its operand nodes.
+  switch (node->op()) {
+    case Token::COMMA:
+    case Token::OR:
+    case Token::AND:
+      break;
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND:
+    case Token::SHL:
+    case Token::SAR:
+    case Token::SHR:
+      node->type()->SetAsLikelySmiIfUnknown();
+      node->left()->type()->SetAsLikelySmiIfUnknown();
+      node->right()->type()->SetAsLikelySmiIfUnknown();
+      break;
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD:
+      if (node->type()->IsLikelySmi()) {
+        node->left()->type()->SetAsLikelySmiIfUnknown();
+        node->right()->type()->SetAsLikelySmiIfUnknown();
+      }
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+
+  Visit(node->left());
+  Visit(node->right());
+
+  // After visiting the operand nodes we have to check if this node's type
+  // can be updated. If it can, then we can push that information down
+  // towards the leaves again if the new information is an upgrade over the
+  // previous type of the operand nodes.
+  if (node->type()->IsUnknown()) {
+    if (node->left()->type()->IsLikelySmi() ||
+        node->right()->type()->IsLikelySmi()) {
+      node->type()->SetAsLikelySmi();
+    }
+    if (node->type()->IsLikelySmi()) {
+      // The type of this node changed to LIKELY_SMI. Propagate this knowledge
+      // down through the nodes.
+      if (node->left()->type()->IsUnknown()) {
+        node->left()->type()->SetAsLikelySmi();
+        Visit(node->left());
+      }
+      if (node->right()->type()->IsUnknown()) {
+        node->right()->type()->SetAsLikelySmi();
+        Visit(node->right());
+      }
+    }
+  }
+}
+
+
+void AstOptimizer::VisitCompareOperation(CompareOperation* node) {
+  if (node->type()->IsKnown()) {
+    // Propagate useful information down towards the leaves.
+    node->left()->type()->SetAsLikelySmiIfUnknown();
+    node->right()->type()->SetAsLikelySmiIfUnknown();
+  }
+
+  Visit(node->left());
+  Visit(node->right());
+
+  // After visiting the operand nodes we have to check if this node's type
+  // can be updated. If it can, then we can push that information down
+  // towards the leaves again if the new information is an upgrade over the
+  // previous type of the operand nodes.
+  if (node->type()->IsUnknown()) {
+    if (node->left()->type()->IsLikelySmi() ||
+        node->right()->type()->IsLikelySmi()) {
+      node->type()->SetAsLikelySmi();
+    }
+    if (node->type()->IsLikelySmi()) {
+      // The type of this node changed to LIKELY_SMI. Propagate this knowledge
+      // down through the nodes.
+      if (node->left()->type()->IsUnknown()) {
+        node->left()->type()->SetAsLikelySmi();
+        Visit(node->left());
+      }
+      if (node->right()->type()->IsUnknown()) {
+        node->right()->type()->SetAsLikelySmi();
+        Visit(node->right());
+      }
+    }
+  }
+}
+
+
+void AstOptimizer::VisitThisFunction(ThisFunction* node) {
+  USE(node);
+}
+
+
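+// Rewrites the statements of a function body so that the value of the last
+// executed expression statement is assigned to the hidden .result variable
+// (see rewriter.h).  Statements are processed in reverse order, so a later,
+// unconditional store to .result lets earlier stores be elided.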
+class Processor: public AstVisitor {
+ public:
+  explicit Processor(VariableProxy* result)
+      : result_(result),
+        result_assigned_(false),
+        is_set_(false),
+        in_try_(false) {
+  }
+
+  void Process(ZoneList<Statement*>* statements);
+  bool result_assigned() const  { return result_assigned_; }
+
+ private:
+  VariableProxy* result_;
+
+  // We are not tracking result usage via the result_'s use
+  // counts (we leave the accurate computation to the
+  // usage analyzer). Instead we simply remember if
+  // there was ever an assignment to result_.
+  bool result_assigned_;
+
+  // To avoid storing to .result all the time, we eliminate some of
+  // the stores by keeping track of whether or not we're sure .result
+  // will be overwritten anyway. This is a bit more tricky than what I
+  // was hoping for.
+  bool is_set_;
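+  // Stores made while inside a try block may be skipped by an exception, so
+  // they never mark .result as definitely set.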
+  bool in_try_;
+
+  Expression* SetResult(Expression* value) {
+    result_assigned_ = true;
+    return new Assignment(Token::ASSIGN, result_, value,
+                          RelocInfo::kNoPosition);
+  }
+
+  // Node visitors.
+#define DEF_VISIT(type) \
+  virtual void Visit##type(type* node);
+  AST_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+};
+
+
+void Processor::Process(ZoneList<Statement*>* statements) {
+  for (int i = statements->length() - 1; i >= 0; --i) {
+    Visit(statements->at(i));
+  }
+}
+
+
+void Processor::VisitBlock(Block* node) {
+  // An initializer block is the rewritten form of a variable declaration
+  // with initialization expressions. The initializer block contains the
+  // list of assignments corresponding to the initialization expressions.
+  // While unclear from the spec (ECMA-262, 3rd., 12.2), the value of
+  // a variable declaration with initialization expression is 'undefined'
+  // with some JS VMs: For instance, using smjs, print(eval('var x = 7'))
+  // returns 'undefined'. To obtain the same behavior with v8, we need
+  // to prevent rewriting in that case.
+  if (!node->is_initializer_block()) Process(node->statements());
+}
+
+
+void Processor::VisitExpressionStatement(ExpressionStatement* node) {
+  // Rewrite : <x>; -> .result = <x>;
+  if (!is_set_) {
+    node->set_expression(SetResult(node->expression()));
+    if (!in_try_) is_set_ = true;
+  }
+}
+
+
+void Processor::VisitIfStatement(IfStatement* node) {
+  // Rewrite both then and else parts (reversed).
+  bool save = is_set_;
+  Visit(node->else_statement());
+  bool set_after_then = is_set_;
+  is_set_ = save;
+  Visit(node->then_statement());
+  is_set_ = is_set_ && set_after_then;
+}
+
+
+void Processor::VisitLoopStatement(LoopStatement* node) {
+  // Rewrite loop body statement.
+  bool set_after_loop = is_set_;
+  Visit(node->body());
+  is_set_ = is_set_ && set_after_loop;
+}
+
+
+void Processor::VisitForInStatement(ForInStatement* node) {
+  // Rewrite for-in body statement.
+  bool set_after_for = is_set_;
+  Visit(node->body());
+  is_set_ = is_set_ && set_after_for;
+}
+
+
+void Processor::VisitTryCatch(TryCatch* node) {
+  // Rewrite both try and catch blocks (reversed order).
+  bool set_after_catch = is_set_;
+  Visit(node->catch_block());
+  is_set_ = is_set_ && set_after_catch;
+  bool save = in_try_;
+  in_try_ = true;
+  Visit(node->try_block());
+  in_try_ = save;
+}
+
+
+void Processor::VisitTryFinally(TryFinally* node) {
+  // Rewrite both try and finally blocks (reversed order).
+  Visit(node->finally_block());
+  bool save = in_try_;
+  in_try_ = true;
+  Visit(node->try_block());
+  in_try_ = save;
+}
+
+
+void Processor::VisitSwitchStatement(SwitchStatement* node) {
+  // Rewrite statements in all case clauses in reversed order.
+  ZoneList<CaseClause*>* clauses = node->cases();
+  bool set_after_switch = is_set_;
+  for (int i = clauses->length() - 1; i >= 0; --i) {
+    CaseClause* clause = clauses->at(i);
+    Process(clause->statements());
+  }
+  is_set_ = is_set_ && set_after_switch;
+}
+
+
+void Processor::VisitContinueStatement(ContinueStatement* node) {
+  is_set_ = false;
+}
+
+
+void Processor::VisitBreakStatement(BreakStatement* node) {
+  is_set_ = false;
+}
+
+
+// Do nothing:
+void Processor::VisitDeclaration(Declaration* node) {}
+void Processor::VisitEmptyStatement(EmptyStatement* node) {}
+void Processor::VisitReturnStatement(ReturnStatement* node) {}
+void Processor::VisitWithEnterStatement(WithEnterStatement* node) {}
+void Processor::VisitWithExitStatement(WithExitStatement* node) {}
+void Processor::VisitDebuggerStatement(DebuggerStatement* node) {}
+
+
+// Expressions are never visited yet; the processor only rewrites statements.
+void Processor::VisitFunctionLiteral(FunctionLiteral* node) {
+  USE(node);
+  UNREACHABLE();
+}
+
+
+void Processor::VisitFunctionBoilerplateLiteral(
+    FunctionBoilerplateLiteral* node) {
+  USE(node);
+  UNREACHABLE();
+}
+
+
+void Processor::VisitConditional(Conditional* node) {
+  USE(node);
+  UNREACHABLE();
+}
+
+
+void Processor::VisitSlot(Slot* node) {
+  USE(node);
+  UNREACHABLE();
+}
+
+
+void Processor::VisitVariableProxy(VariableProxy* node) {
+  USE(node);
+  UNREACHABLE();
+}
+
+
+void Processor::VisitLiteral(Literal* node) {
+  USE(node);
+  UNREACHABLE();
+}
+
+
+void Processor::VisitRegExpLiteral(RegExpLiteral* node) {
+  USE(node);
+  UNREACHABLE();
+}
+
+
+void Processor::VisitArrayLiteral(ArrayLiteral* node) {
+  USE(node);
+  UNREACHABLE();
+}
+
+
+void Processor::VisitObjectLiteral(ObjectLiteral* node) {
+  USE(node);
+  UNREACHABLE();
+}
+
+
+void Processor::VisitCatchExtensionObject(CatchExtensionObject* node) {
+  USE(node);
+  UNREACHABLE();
+}
+
+
+void Processor::VisitAssignment(Assignment* node) {
+  USE(node);
+  UNREACHABLE();
+}
+
+
+void Processor::VisitThrow(Throw* node) {
+  USE(node);
+  UNREACHABLE();
+}
+
+
+void Processor::VisitProperty(Property* node) {
+  USE(node);
+  UNREACHABLE();
+}
+
+
+void Processor::VisitCall(Call* node) {
+  USE(node);
+  UNREACHABLE();
+}
+
+
+void Processor::VisitCallNew(CallNew* node) {
+  USE(node);
+  UNREACHABLE();
+}
+
+
+void Processor::VisitCallRuntime(CallRuntime* node) {
+  USE(node);
+  UNREACHABLE();
+}
+
+
+void Processor::VisitUnaryOperation(UnaryOperation* node) {
+  USE(node);
+  UNREACHABLE();
+}
+
+
+void Processor::VisitCountOperation(CountOperation* node) {
+  USE(node);
+  UNREACHABLE();
+}
+
+
+void Processor::VisitBinaryOperation(BinaryOperation* node) {
+  USE(node);
+  UNREACHABLE();
+}
+
+
+void Processor::VisitCompareOperation(CompareOperation* node) {
+  USE(node);
+  UNREACHABLE();
+}
+
+
+void Processor::VisitThisFunction(ThisFunction* node) {
+  USE(node);
+  UNREACHABLE();
+}
+
+
+bool Rewriter::Process(FunctionLiteral* function) {
+  HistogramTimerScope timer(&Counters::rewriting);
+  Scope* scope = function->scope();
+  if (scope->is_function_scope()) return true;
+
+  ZoneList<Statement*>* body = function->body();
+  if (body->is_empty()) return true;
+
+  VariableProxy* result = scope->NewTemporary(Factory::result_symbol());
+  Processor processor(result);
+  processor.Process(body);
+  if (processor.HasStackOverflow()) return false;
+
+  if (processor.result_assigned()) body->Add(new ReturnStatement(result));
+  return true;
+}
+
+
+bool Rewriter::Optimize(FunctionLiteral* function) {
+  ZoneList<Statement*>* body = function->body();
+
+  if (FLAG_optimize_ast && !body->is_empty()) {
+    HistogramTimerScope timer(&Counters::ast_optimization);
+    AstOptimizer optimizer(function->name());
+    optimizer.Optimize(body);
+    if (optimizer.HasStackOverflow()) {
+      return false;
+    }
+  }
+  return true;
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/rewriter.h b/src/rewriter.h
new file mode 100644
index 0000000..8943e75
--- /dev/null
+++ b/src/rewriter.h
@@ -0,0 +1,54 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_REWRITER_H_
+#define V8_REWRITER_H_
+
+namespace v8 {
+namespace internal {
+
+
+// Currently, the rewriter takes function literals (only top-level)
+// and rewrites them to return the value of the last expression in
+// them.
+//
+// The rewriter adds a (hidden) variable, called .result, to the
+// activation, and tries to figure out where it needs to store into
+// this variable. If the variable is ever assigned, we conclude by adding
+// to the body of the given function a return statement that returns the
+// variable.
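+//
+// For example, a top-level program such as
+//
+//   if (x) { 1; } else { 2; }
+//
+// is conceptually rewritten to
+//
+//   if (x) { .result = 1; } else { .result = 2; }
+//   return .result;
+//
+// so that the completion value of the program can be returned.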
+
+class Rewriter {
+ public:
+  static bool Process(FunctionLiteral* function);
+  static bool Optimize(FunctionLiteral* function);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_REWRITER_H_
diff --git a/src/runtime.cc b/src/runtime.cc
new file mode 100644
index 0000000..4e1940d
--- /dev/null
+++ b/src/runtime.cc
@@ -0,0 +1,7731 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "accessors.h"
+#include "api.h"
+#include "arguments.h"
+#include "compiler.h"
+#include "cpu.h"
+#include "dateparser.h"
+#include "dateparser-inl.h"
+#include "debug.h"
+#include "execution.h"
+#include "jsregexp.h"
+#include "platform.h"
+#include "runtime.h"
+#include "scopeinfo.h"
+#include "v8threads.h"
+#include "smart-pointer.h"
+#include "parser.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define RUNTIME_ASSERT(value) \
+  if (!(value)) return Top::ThrowIllegalOperation();
+
+// Cast the given object to a value of the specified type and store
+// it in a variable with the given name.  If the object is not of the
+// expected type call IllegalOperation and return.
+#define CONVERT_CHECKED(Type, name, obj)                             \
+  RUNTIME_ASSERT(obj->Is##Type());                                   \
+  Type* name = Type::cast(obj);
+
+#define CONVERT_ARG_CHECKED(Type, name, index)                       \
+  RUNTIME_ASSERT(args[index]->Is##Type());                           \
+  Handle<Type> name = args.at<Type>(index);
+
+// Cast the given object to a boolean and store it in a variable with
+// the given name.  If the object is not a boolean call IllegalOperation
+// and return.
+#define CONVERT_BOOLEAN_CHECKED(name, obj)                            \
+  RUNTIME_ASSERT(obj->IsBoolean());                                   \
+  bool name = (obj)->IsTrue();
+
+// Cast the given object to a Smi and store its value in an int variable
+// with the given name.  If the object is not a Smi call IllegalOperation
+// and return.
+#define CONVERT_SMI_CHECKED(name, obj)                            \
+  RUNTIME_ASSERT(obj->IsSmi());                                   \
+  int name = Smi::cast(obj)->value();
+
+// Cast the given object to a double and store it in a variable with
+// the given name.  If the object is not a number (as opposed to
+// the number not-a-number) call IllegalOperation and return.
+#define CONVERT_DOUBLE_CHECKED(name, obj)                            \
+  RUNTIME_ASSERT(obj->IsNumber());                                   \
+  double name = (obj)->Number();
+
+// Call the specified converter on the object and store the result in
+// a variable of the specified type with the given name.  If the
+// object is not a Number call IllegalOperation and return.
+#define CONVERT_NUMBER_CHECKED(type, name, Type, obj)                \
+  RUNTIME_ASSERT(obj->IsNumber());                                   \
+  type name = NumberTo##Type(obj);
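+
+// Typical usage inside a runtime function (illustrative sketch only):
+//
+//   static Object* Runtime_Example(Arguments args) {
+//     ASSERT(args.length() == 2);
+//     CONVERT_CHECKED(String, key, args[0]);    // binds String* key
+//     CONVERT_DOUBLE_CHECKED(number, args[1]);  // binds double number
+//     ...
+//   }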
+
+// Non-reentrant string buffer for efficient general use in this file.
+static StaticResource<StringInputBuffer> runtime_string_input_buffer;
+
+
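+// Recursively deep-copies a literal boilerplate JSObject: any JSObject value
+// stored in its (fast or dictionary) properties or in its elements is copied
+// as well.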
+static Object* DeepCopyBoilerplate(JSObject* boilerplate) {
+  StackLimitCheck check;
+  if (check.HasOverflowed()) return Top::StackOverflow();
+
+  Object* result = Heap::CopyJSObject(boilerplate);
+  if (result->IsFailure()) return result;
+  JSObject* copy = JSObject::cast(result);
+
+  // Deep copy local properties.
+  if (copy->HasFastProperties()) {
+    FixedArray* properties = copy->properties();
+    WriteBarrierMode mode = properties->GetWriteBarrierMode();
+    for (int i = 0; i < properties->length(); i++) {
+      Object* value = properties->get(i);
+      if (value->IsJSObject()) {
+        JSObject* jsObject = JSObject::cast(value);
+        result = DeepCopyBoilerplate(jsObject);
+        if (result->IsFailure()) return result;
+        properties->set(i, result, mode);
+      }
+    }
+    mode = copy->GetWriteBarrierMode();
+    int nof = copy->map()->inobject_properties();
+    for (int i = 0; i < nof; i++) {
+      Object* value = copy->InObjectPropertyAt(i);
+      if (value->IsJSObject()) {
+        JSObject* jsObject = JSObject::cast(value);
+        result = DeepCopyBoilerplate(jsObject);
+        if (result->IsFailure()) return result;
+        copy->InObjectPropertyAtPut(i, result, mode);
+      }
+    }
+  } else {
+    result = Heap::AllocateFixedArray(copy->NumberOfLocalProperties(NONE));
+    if (result->IsFailure()) return result;
+    FixedArray* names = FixedArray::cast(result);
+    copy->GetLocalPropertyNames(names, 0);
+    for (int i = 0; i < names->length(); i++) {
+      ASSERT(names->get(i)->IsString());
+      String* keyString = String::cast(names->get(i));
+      PropertyAttributes attributes =
+        copy->GetLocalPropertyAttribute(keyString);
+      // Only deep copy fields from the object literal expression.
+      // In particular, don't try to copy the length attribute of
+      // an array.
+      if (attributes != NONE) continue;
+      Object* value = copy->GetProperty(keyString, &attributes);
+      ASSERT(!value->IsFailure());
+      if (value->IsJSObject()) {
+        JSObject* jsObject = JSObject::cast(value);
+        result = DeepCopyBoilerplate(jsObject);
+        if (result->IsFailure()) return result;
+        result = copy->SetProperty(keyString, result, NONE);
+        if (result->IsFailure()) return result;
+      }
+    }
+  }
+
+  // Deep copy local elements.
+  // Pixel elements cannot be created using an object literal.
+  ASSERT(!copy->HasPixelElements());
+  switch (copy->GetElementsKind()) {
+    case JSObject::FAST_ELEMENTS: {
+      FixedArray* elements = FixedArray::cast(copy->elements());
+      WriteBarrierMode mode = elements->GetWriteBarrierMode();
+      for (int i = 0; i < elements->length(); i++) {
+        Object* value = elements->get(i);
+        if (value->IsJSObject()) {
+          JSObject* jsObject = JSObject::cast(value);
+          result = DeepCopyBoilerplate(jsObject);
+          if (result->IsFailure()) return result;
+          elements->set(i, result, mode);
+        }
+      }
+      break;
+    }
+    case JSObject::DICTIONARY_ELEMENTS: {
+      NumberDictionary* element_dictionary = copy->element_dictionary();
+      int capacity = element_dictionary->Capacity();
+      for (int i = 0; i < capacity; i++) {
+        Object* k = element_dictionary->KeyAt(i);
+        if (element_dictionary->IsKey(k)) {
+          Object* value = element_dictionary->ValueAt(i);
+          if (value->IsJSObject()) {
+            JSObject* jsObject = JSObject::cast(value);
+            result = DeepCopyBoilerplate(jsObject);
+            if (result->IsFailure()) return result;
+            element_dictionary->ValueAtPut(i, result);
+          }
+        }
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+  return copy;
+}
+
+
+static Object* Runtime_CloneLiteralBoilerplate(Arguments args) {
+  CONVERT_CHECKED(JSObject, boilerplate, args[0]);
+  return DeepCopyBoilerplate(boilerplate);
+}
+
+
+static Object* Runtime_CloneShallowLiteralBoilerplate(Arguments args) {
+  CONVERT_CHECKED(JSObject, boilerplate, args[0]);
+  return Heap::CopyJSObject(boilerplate);
+}
+
+
+static Handle<Map> ComputeObjectLiteralMap(
+    Handle<Context> context,
+    Handle<FixedArray> constant_properties,
+    bool* is_result_from_cache) {
+  int number_of_properties = constant_properties->length() / 2;
+  if (FLAG_canonicalize_object_literal_maps) {
+    // First find prefix of consecutive symbol keys.
+    int number_of_symbol_keys = 0;
+    while ((number_of_symbol_keys < number_of_properties) &&
+           (constant_properties->get(number_of_symbol_keys*2)->IsSymbol())) {
+      number_of_symbol_keys++;
+    }
+    // Based on the number of prefix symbol keys we decide whether
+    // to use the map cache in the global context.
+    const int kMaxKeys = 10;
+    if ((number_of_symbol_keys == number_of_properties) &&
+        (number_of_symbol_keys < kMaxKeys)) {
+      // Create the fixed array with the keys.
+      Handle<FixedArray> keys = Factory::NewFixedArray(number_of_symbol_keys);
+      for (int i = 0; i < number_of_symbol_keys; i++) {
+        keys->set(i, constant_properties->get(i*2));
+      }
+      *is_result_from_cache = true;
+      return Factory::ObjectLiteralMapFromCache(context, keys);
+    }
+  }
+  *is_result_from_cache = false;
+  return Factory::CopyMap(
+      Handle<Map>(context->object_function()->initial_map()),
+      number_of_properties);
+}
+
+
+static Handle<Object> CreateLiteralBoilerplate(
+    Handle<FixedArray> literals,
+    Handle<FixedArray> constant_properties);
+
+
+static Handle<Object> CreateObjectLiteralBoilerplate(
+    Handle<FixedArray> literals,
+    Handle<FixedArray> constant_properties) {
+  // Get the global context from the literals array.  This is the
+  // context in which the function was created and we use the object
+  // function from this context to create the object literal.  We do
+  // not use the object function from the current global context
+  // because this might be the object function from another context
+  // which we should not have access to.
+  Handle<Context> context =
+      Handle<Context>(JSFunction::GlobalContextFromLiterals(*literals));
+
+  bool is_result_from_cache;
+  Handle<Map> map = ComputeObjectLiteralMap(context,
+                                            constant_properties,
+                                            &is_result_from_cache);
+
+  Handle<JSObject> boilerplate = Factory::NewJSObjectFromMap(map);
+  {  // Add the constant properties to the boilerplate.
+    int length = constant_properties->length();
+    OptimizedObjectForAddingMultipleProperties opt(boilerplate,
+                                                   length / 2,
+                                                   !is_result_from_cache);
+    for (int index = 0; index < length; index += 2) {
+      Handle<Object> key(constant_properties->get(index+0));
+      Handle<Object> value(constant_properties->get(index+1));
+      if (value->IsFixedArray()) {
+        // The value contains the constant_properties of a
+        // simple object literal.
+        Handle<FixedArray> array = Handle<FixedArray>::cast(value);
+        value = CreateLiteralBoilerplate(literals, array);
+        if (value.is_null()) return value;
+      }
+      Handle<Object> result;
+      uint32_t element_index = 0;
+      if (key->IsSymbol()) {
+        // If key is a symbol it is not an array element.
+        Handle<String> name(String::cast(*key));
+        ASSERT(!name->AsArrayIndex(&element_index));
+        result = SetProperty(boilerplate, name, value, NONE);
+      } else if (Array::IndexFromObject(*key, &element_index)) {
+        // Array index (uint32).
+        result = SetElement(boilerplate, element_index, value);
+      } else {
+        // Non-uint32 number.
+        ASSERT(key->IsNumber());
+        double num = key->Number();
+        char arr[100];
+        Vector<char> buffer(arr, ARRAY_SIZE(arr));
+        const char* str = DoubleToCString(num, buffer);
+        Handle<String> name = Factory::NewStringFromAscii(CStrVector(str));
+        result = SetProperty(boilerplate, name, value, NONE);
+      }
+      // If setting the property on the boilerplate throws an
+      // exception, the exception is converted to an empty handle in
+      // the handle based operations.  In that case, we need to
+      // convert back to an exception.
+      if (result.is_null()) return result;
+    }
+  }
+
+  return boilerplate;
+}
+
+
+static Handle<Object> CreateArrayLiteralBoilerplate(
+    Handle<FixedArray> literals,
+    Handle<FixedArray> elements) {
+  // Create the JSArray.
+  Handle<JSFunction> constructor(
+      JSFunction::GlobalContextFromLiterals(*literals)->array_function());
+  Handle<Object> object = Factory::NewJSObject(constructor);
+
+  Handle<Object> copied_elements = Factory::CopyFixedArray(elements);
+
+  Handle<FixedArray> content = Handle<FixedArray>::cast(copied_elements);
+  for (int i = 0; i < content->length(); i++) {
+    if (content->get(i)->IsFixedArray()) {
+      // The value contains the constant_properties of a
+      // simple object literal.
+      Handle<FixedArray> fa(FixedArray::cast(content->get(i)));
+      Handle<Object> result =
+        CreateLiteralBoilerplate(literals, fa);
+      if (result.is_null()) return result;
+      content->set(i, *result);
+    }
+  }
+
+  // Set the elements.
+  Handle<JSArray>::cast(object)->SetContent(*content);
+  return object;
+}
+
+
+static Handle<Object> CreateLiteralBoilerplate(
+    Handle<FixedArray> literals,
+    Handle<FixedArray> array) {
+  Handle<FixedArray> elements = CompileTimeValue::GetElements(array);
+  switch (CompileTimeValue::GetType(array)) {
+    case CompileTimeValue::OBJECT_LITERAL:
+      return CreateObjectLiteralBoilerplate(literals, elements);
+    case CompileTimeValue::ARRAY_LITERAL:
+      return CreateArrayLiteralBoilerplate(literals, elements);
+    default:
+      UNREACHABLE();
+      return Handle<Object>::null();
+  }
+}
+
+
+static Object* Runtime_CreateObjectLiteralBoilerplate(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 3);
+  // Copy the arguments.
+  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
+  CONVERT_SMI_CHECKED(literals_index, args[1]);
+  CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
+
+  Handle<Object> result =
+    CreateObjectLiteralBoilerplate(literals, constant_properties);
+
+  if (result.is_null()) return Failure::Exception();
+
+  // Update the function's literals array and return the boilerplate.
+  literals->set(literals_index, *result);
+
+  return *result;
+}
+
+
+static Object* Runtime_CreateArrayLiteralBoilerplate(Arguments args) {
+  // Takes a FixedArray of elements containing the literal elements of
+  // the array literal and produces a JSArray with those elements.
+  // Additionally takes the literals array of the surrounding function
+  // which contains the context from which to get the Array function
+  // to use for creating the array literal.
+  HandleScope scope;
+  ASSERT(args.length() == 3);
+  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
+  CONVERT_SMI_CHECKED(literals_index, args[1]);
+  CONVERT_ARG_CHECKED(FixedArray, elements, 2);
+
+  Handle<Object> object = CreateArrayLiteralBoilerplate(literals, elements);
+  if (object.is_null()) return Failure::Exception();
+
+  // Update the function's literals array and return the boilerplate.
+  literals->set(literals_index, *object);
+  return *object;
+}
+
+
+static Object* Runtime_CreateCatchExtensionObject(Arguments args) {
+  ASSERT(args.length() == 2);
+  CONVERT_CHECKED(String, key, args[0]);
+  Object* value = args[1];
+  // Create a catch context extension object.
+  JSFunction* constructor =
+      Top::context()->global_context()->context_extension_function();
+  Object* object = Heap::AllocateJSObject(constructor);
+  if (object->IsFailure()) return object;
+  // Assign the exception value to the catch variable and make sure
+  // that the catch variable is DontDelete.
+  value = JSObject::cast(object)->SetProperty(key, value, DONT_DELETE);
+  if (value->IsFailure()) return value;
+  return object;
+}
+
+
+static Object* Runtime_ClassOf(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+  Object* obj = args[0];
+  if (!obj->IsJSObject()) return Heap::null_value();
+  return JSObject::cast(obj)->class_name();
+}
+
+
+static Object* Runtime_IsInPrototypeChain(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+  // See ECMA-262, section 15.3.5.3, page 88 (steps 5 - 8).
+  Object* O = args[0];
+  Object* V = args[1];
+  while (true) {
+    Object* prototype = V->GetPrototype();
+    if (prototype->IsNull()) return Heap::false_value();
+    if (O == prototype) return Heap::true_value();
+    V = prototype;
+  }
+}
+
+
+// Inserts an object as the hidden prototype of another object.
+static Object* Runtime_SetHiddenPrototype(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+  CONVERT_CHECKED(JSObject, jsobject, args[0]);
+  CONVERT_CHECKED(JSObject, proto, args[1]);
+
+  // Sanity checks.  The old prototype (that we are replacing) could
+  // theoretically be null, but if it is not null then check that we
+  // didn't already install a hidden prototype here.
+  RUNTIME_ASSERT(!jsobject->GetPrototype()->IsHeapObject() ||
+    !HeapObject::cast(jsobject->GetPrototype())->map()->is_hidden_prototype());
+  RUNTIME_ASSERT(!proto->map()->is_hidden_prototype());
+
+  // Allocate up front before we start altering state in case we get a GC.
+  Object* map_or_failure = proto->map()->CopyDropTransitions();
+  if (map_or_failure->IsFailure()) return map_or_failure;
+  Map* new_proto_map = Map::cast(map_or_failure);
+
+  map_or_failure = jsobject->map()->CopyDropTransitions();
+  if (map_or_failure->IsFailure()) return map_or_failure;
+  Map* new_map = Map::cast(map_or_failure);
+
+  // Set proto's prototype to be the old prototype of the object.
+  new_proto_map->set_prototype(jsobject->GetPrototype());
+  proto->set_map(new_proto_map);
+  new_proto_map->set_is_hidden_prototype();
+
+  // Set the object's prototype to proto.
+  new_map->set_prototype(proto);
+  jsobject->set_map(new_map);
+
+  return Heap::undefined_value();
+}
+
+
+static Object* Runtime_IsConstructCall(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 0);
+  JavaScriptFrameIterator it;
+  return Heap::ToBoolean(it.frame()->IsConstructor());
+}
+
+
+static Object* Runtime_RegExpCompile(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 3);
+  CONVERT_ARG_CHECKED(JSRegExp, re, 0);
+  CONVERT_ARG_CHECKED(String, pattern, 1);
+  CONVERT_ARG_CHECKED(String, flags, 2);
+  Handle<Object> result = RegExpImpl::Compile(re, pattern, flags);
+  if (result.is_null()) return Failure::Exception();
+  return *result;
+}
+
+
+static Object* Runtime_CreateApiFunction(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(FunctionTemplateInfo, data, 0);
+  return *Factory::CreateApiFunction(data);
+}
+
+
+static Object* Runtime_IsTemplate(Arguments args) {
+  ASSERT(args.length() == 1);
+  Object* arg = args[0];
+  bool result = arg->IsObjectTemplateInfo() || arg->IsFunctionTemplateInfo();
+  return Heap::ToBoolean(result);
+}
+
+
+static Object* Runtime_GetTemplateField(Arguments args) {
+  ASSERT(args.length() == 2);
+  CONVERT_CHECKED(HeapObject, templ, args[0]);
+  CONVERT_CHECKED(Smi, field, args[1]);
+  int index = field->value();
+  int offset = index * kPointerSize + HeapObject::kHeaderSize;
+  InstanceType type = templ->map()->instance_type();
+  RUNTIME_ASSERT(type == FUNCTION_TEMPLATE_INFO_TYPE ||
+                 type == OBJECT_TEMPLATE_INFO_TYPE);
+  RUNTIME_ASSERT(offset > 0);
+  if (type == FUNCTION_TEMPLATE_INFO_TYPE) {
+    RUNTIME_ASSERT(offset < FunctionTemplateInfo::kSize);
+  } else {
+    RUNTIME_ASSERT(offset < ObjectTemplateInfo::kSize);
+  }
+  return *HeapObject::RawField(templ, offset);
+}
+
+
+static Object* Runtime_DisableAccessChecks(Arguments args) {
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(HeapObject, object, args[0]);
+  Map* old_map = object->map();
+  bool needs_access_checks = old_map->is_access_check_needed();
+  if (needs_access_checks) {
+    // Copy the map so it won't interfere with the constructor's initial map.
+    Object* new_map = old_map->CopyDropTransitions();
+    if (new_map->IsFailure()) return new_map;
+
+    Map::cast(new_map)->set_is_access_check_needed(false);
+    object->set_map(Map::cast(new_map));
+  }
+  return needs_access_checks ? Heap::true_value() : Heap::false_value();
+}
+
+
+static Object* Runtime_EnableAccessChecks(Arguments args) {
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(HeapObject, object, args[0]);
+  Map* old_map = object->map();
+  if (!old_map->is_access_check_needed()) {
+    // Copy the map so it won't interfere with the constructor's initial map.
+    Object* new_map = old_map->CopyDropTransitions();
+    if (new_map->IsFailure()) return new_map;
+
+    Map::cast(new_map)->set_is_access_check_needed(true);
+    object->set_map(Map::cast(new_map));
+  }
+  return Heap::undefined_value();
+}
+
+
+static Object* ThrowRedeclarationError(const char* type, Handle<String> name) {
+  HandleScope scope;
+  Handle<Object> type_handle = Factory::NewStringFromAscii(CStrVector(type));
+  Handle<Object> args[2] = { type_handle, name };
+  Handle<Object> error =
+      Factory::NewTypeError("redeclaration", HandleVector(args, 2));
+  return Top::Throw(*error);
+}
+
+
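+// Declares the global variables, functions and constants described by the
+// name/value pairs in args[0] on the global object, checking for conflicting
+// re-declarations of const and read-only properties.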
+static Object* Runtime_DeclareGlobals(Arguments args) {
+  HandleScope scope;
+  Handle<GlobalObject> global = Handle<GlobalObject>(Top::context()->global());
+
+  CONVERT_ARG_CHECKED(FixedArray, pairs, 0);
+  Handle<Context> context = args.at<Context>(1);
+  bool is_eval = Smi::cast(args[2])->value() == 1;
+
+  // Compute the property attributes. According to ECMA-262, section
+  // 13, page 71, the property must be read-only and
+  // non-deletable. However, neither SpiderMonkey nor KJS creates the
+  // property as read-only, so we don't either.
+  PropertyAttributes base = is_eval ? NONE : DONT_DELETE;
+
+  // Traverse the name/value pairs and set the properties.
+  int length = pairs->length();
+  for (int i = 0; i < length; i += 2) {
+    HandleScope scope;
+    Handle<String> name(String::cast(pairs->get(i)));
+    Handle<Object> value(pairs->get(i + 1));
+
+    // We have to declare a global const property. To capture the fact
+    // that we only assign to it when evaluating the assignment for
+    // "const x = <expr>", the initial value is the hole.
+    bool is_const_property = value->IsTheHole();
+
+    if (value->IsUndefined() || is_const_property) {
+      // Lookup the property in the global object, and don't set the
+      // value of the variable if the property is already there.
+      LookupResult lookup;
+      global->Lookup(*name, &lookup);
+      if (lookup.IsProperty()) {
+        // Determine if the property is local by comparing the holder
+        // against the global object. The information will be used to
+        // avoid throwing re-declaration errors when declaring
+        // variables or constants that exist in the prototype chain.
+        bool is_local = (*global == lookup.holder());
+        // Get the property attributes and determine if the property is
+        // read-only.
+        PropertyAttributes attributes = global->GetPropertyAttribute(*name);
+        bool is_read_only = (attributes & READ_ONLY) != 0;
+        if (lookup.type() == INTERCEPTOR) {
+          // If the interceptor says the property is there, we
+          // just return undefined without overwriting the property.
+          // Otherwise, we continue to setting the property.
+          if (attributes != ABSENT) {
+            // Check if the existing property conflicts with regards to const.
+            if (is_local && (is_read_only || is_const_property)) {
+              const char* type = (is_read_only) ? "const" : "var";
+              return ThrowRedeclarationError(type, name);
+            }
+            // The property already exists without conflicting: Go to
+            // the next declaration.
+            continue;
+          }
+          // Fall-through and introduce the absent property by using
+          // SetProperty.
+        } else {
+          if (is_local && (is_read_only || is_const_property)) {
+            const char* type = (is_read_only) ? "const" : "var";
+            return ThrowRedeclarationError(type, name);
+          }
+          // The property already exists without conflicting: Go to
+          // the next declaration.
+          continue;
+        }
+      }
+    } else {
+      // Copy the function and update its context. Use it as value.
+      Handle<JSFunction> boilerplate = Handle<JSFunction>::cast(value);
+      Handle<JSFunction> function =
+          Factory::NewFunctionFromBoilerplate(boilerplate, context);
+      value = function;
+    }
+
+    LookupResult lookup;
+    global->LocalLookup(*name, &lookup);
+
+    PropertyAttributes attributes = is_const_property
+        ? static_cast<PropertyAttributes>(base | READ_ONLY)
+        : base;
+
+    if (lookup.IsProperty()) {
+      // There's a local property that we need to overwrite because
+      // we're either declaring a function or there's an interceptor
+      // that claims the property is absent.
+
+      // Check for conflicting re-declarations. We cannot have
+      // conflicting types in case of intercepted properties because
+      // they are absent.
+      if (lookup.type() != INTERCEPTOR &&
+          (lookup.IsReadOnly() || is_const_property)) {
+        const char* type = (lookup.IsReadOnly()) ? "const" : "var";
+        return ThrowRedeclarationError(type, name);
+      }
+      SetProperty(global, name, value, attributes);
+    } else {
+      // If a property with this name does not already exist on the
+      // global object add the property locally.  We take special
+      // precautions to always add it as a local property even in case
+      // of callbacks in the prototype chain (this rules out using
+      // SetProperty).  Also, we must use the handle-based version to
+      // avoid GC issues.
+      IgnoreAttributesAndSetLocalProperty(global, name, value, attributes);
+    }
+  }
+
+  return Heap::undefined_value();
+}
+
+
+static Object* Runtime_DeclareContextSlot(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 4);
+
+  CONVERT_ARG_CHECKED(Context, context, 0);
+  Handle<String> name(String::cast(args[1]));
+  PropertyAttributes mode =
+      static_cast<PropertyAttributes>(Smi::cast(args[2])->value());
+  ASSERT(mode == READ_ONLY || mode == NONE);
+  Handle<Object> initial_value(args[3]);
+
+  // Declarations are always done in the function context.
+  context = Handle<Context>(context->fcontext());
+
+  int index;
+  PropertyAttributes attributes;
+  ContextLookupFlags flags = DONT_FOLLOW_CHAINS;
+  Handle<Object> holder =
+      context->Lookup(name, flags, &index, &attributes);
+
+  if (attributes != ABSENT) {
+    // The name was declared before; check for conflicting
+    // re-declarations: This is similar to the code in parser.cc in
+    // the AstBuildingParser::Declare function.
+    if (((attributes & READ_ONLY) != 0) || (mode == READ_ONLY)) {
+      // Functions are not read-only.
+      ASSERT(mode != READ_ONLY || initial_value->IsTheHole());
+      const char* type = ((attributes & READ_ONLY) != 0) ? "const" : "var";
+      return ThrowRedeclarationError(type, name);
+    }
+
+    // Initialize it if necessary.
+    if (*initial_value != NULL) {
+      if (index >= 0) {
+        // The variable or constant context slot should always be in
+        // the function context; not in any outer context nor in the
+        // arguments object.
+        ASSERT(holder.is_identical_to(context));
+        if (((attributes & READ_ONLY) == 0) ||
+            context->get(index)->IsTheHole()) {
+          context->set(index, *initial_value);
+        }
+      } else {
+        // Slow case: The property is not in the FixedArray part of the context.
+        Handle<JSObject> context_ext = Handle<JSObject>::cast(holder);
+        SetProperty(context_ext, name, initial_value, mode);
+      }
+    }
+
+  } else {
+    // The property is not in the function context. It needs to be
+    // "declared" in the function context's extension context, or in the
+    // global context.
+    Handle<JSObject> context_ext;
+    if (context->has_extension()) {
+      // The function context's extension context exists - use it.
+      context_ext = Handle<JSObject>(context->extension());
+    } else {
+      // The function context's extension context does not exist - allocate
+      // it.
+      context_ext = Factory::NewJSObject(Top::context_extension_function());
+      // And store it in the extension slot.
+      context->set_extension(*context_ext);
+    }
+    ASSERT(*context_ext != NULL);
+
+    // Declare the property by setting it to the initial value if provided,
+    // or undefined, and use the correct mode (e.g. READ_ONLY attribute for
+    // constant declarations).
+    ASSERT(!context_ext->HasLocalProperty(*name));
+    Handle<Object> value(Heap::undefined_value());
+    if (*initial_value != NULL) value = initial_value;
+    SetProperty(context_ext, name, value, mode);
+    ASSERT(context_ext->GetLocalPropertyAttribute(*name) == mode);
+  }
+
+  return Heap::undefined_value();
+}
+
+
+static Object* Runtime_InitializeVarGlobal(Arguments args) {
+  NoHandleAllocation nha;
+
+  // Determine if we need to assign to the variable if it already
+  // exists (based on the number of arguments).
+  RUNTIME_ASSERT(args.length() == 1 || args.length() == 2);
+  bool assign = args.length() == 2;
+
+  CONVERT_ARG_CHECKED(String, name, 0);
+  GlobalObject* global = Top::context()->global();
+
+  // According to ECMA-262, section 12.2, page 62, the property must
+  // not be deletable.
+  PropertyAttributes attributes = DONT_DELETE;
+
+  // Lookup the property locally in the global object. If it isn't
+  // there, there is a property with this name in the prototype chain.
+  // We follow Safari and Firefox behavior and only set the property
+  // locally if there is an explicit initialization value that we have
+  // to assign to the property. When adding the property we take
+  // special precautions to always add it as a local property even in
+  // case of callbacks in the prototype chain (this rules out using
+  // SetProperty).  We have IgnoreAttributesAndSetLocalProperty for
+  // this.
+  LookupResult lookup;
+  global->LocalLookup(*name, &lookup);
+  if (!lookup.IsProperty()) {
+    if (assign) {
+      return global->IgnoreAttributesAndSetLocalProperty(*name,
+                                                         args[1],
+                                                         attributes);
+    }
+    return Heap::undefined_value();
+  }
+
+  // Determine if this is a redeclaration of something read-only.
+  if (lookup.IsReadOnly()) {
+    return ThrowRedeclarationError("const", name);
+  }
+
+  // Determine if this is a redeclaration of an intercepted read-only
+  // property and figure out if the property exists at all.
+  bool found = true;
+  PropertyType type = lookup.type();
+  if (type == INTERCEPTOR) {
+    PropertyAttributes intercepted = global->GetPropertyAttribute(*name);
+    if (intercepted == ABSENT) {
+      // The interceptor claims the property isn't there. We need to
+      // make sure to introduce it.
+      found = false;
+    } else if ((intercepted & READ_ONLY) != 0) {
+      // The property is present, but read-only. Since we're trying to
+      // overwrite it with a variable declaration we must throw a
+      // re-declaration error.
+      return ThrowRedeclarationError("const", name);
+    }
+    // Restore global object from context (in case of GC).
+    global = Top::context()->global();
+  }
+
+  if (found && !assign) {
+    // The global property is there and we're not assigning any value
+    // to it. Just return.
+    return Heap::undefined_value();
+  }
+
+  // Assign the value (or undefined) to the property.
+  Object* value = (assign) ? args[1] : Heap::undefined_value();
+  return global->SetProperty(&lookup, *name, value, attributes);
+}
+
+
+static Object* Runtime_InitializeConstGlobal(Arguments args) {
+  // All constants are declared with an initial value. The name
+  // of the constant is the first argument and the initial value
+  // is the second.
+  RUNTIME_ASSERT(args.length() == 2);
+  CONVERT_ARG_CHECKED(String, name, 0);
+  Handle<Object> value = args.at<Object>(1);
+
+  // Get the current global object from top.
+  GlobalObject* global = Top::context()->global();
+
+  // According to ECMA-262, section 12.2, page 62, the property must
+  // not be deletable. Since it's a const, it must be READ_ONLY too.
+  PropertyAttributes attributes =
+      static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
+
+  // Lookup the property locally in the global object. If it isn't
+  // there, we add the property and take special precautions to always
+  // add it as a local property even in case of callbacks in the
+  // prototype chain (this rules out using SetProperty).
+  // We use IgnoreAttributesAndSetLocalProperty instead.
+  LookupResult lookup;
+  global->LocalLookup(*name, &lookup);
+  if (!lookup.IsProperty()) {
+    return global->IgnoreAttributesAndSetLocalProperty(*name,
+                                                       *value,
+                                                       attributes);
+  }
+
+  // Determine if this is a redeclaration of something not
+  // read-only. In case the result is hidden behind an interceptor we
+  // need to ask it for the property attributes.
+  if (!lookup.IsReadOnly()) {
+    if (lookup.type() != INTERCEPTOR) {
+      return ThrowRedeclarationError("var", name);
+    }
+
+    PropertyAttributes intercepted = global->GetPropertyAttribute(*name);
+
+    // Throw re-declaration error if the intercepted property is present
+    // but not read-only.
+    if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
+      return ThrowRedeclarationError("var", name);
+    }
+
+    // Restore global object from context (in case of GC) and continue
+    // with setting the value because the property is either absent or
+    // read-only. We also have to redo the lookup.
+    global = Top::context()->global();
+
+    // BUG 1213579: Handle the case where we have to set a read-only
+    // property through an interceptor and only do it if it's
+    // uninitialized, e.g. the hole. Nirk...
+    global->SetProperty(*name, *value, attributes);
+    return *value;
+  }
+
+  // Set the value, but only if we're assigning the initial value to a
+  // constant. For now, we determine this by checking if the
+  // current value is the hole.
+  PropertyType type = lookup.type();
+  if (type == FIELD) {
+    FixedArray* properties = global->properties();
+    int index = lookup.GetFieldIndex();
+    if (properties->get(index)->IsTheHole()) {
+      properties->set(index, *value);
+    }
+  } else if (type == NORMAL) {
+    if (global->GetNormalizedProperty(&lookup)->IsTheHole()) {
+      global->SetNormalizedProperty(&lookup, *value);
+    }
+  } else {
+    // Ignore re-initialization of constants that have already been
+    // assigned a function value.
+    ASSERT(lookup.IsReadOnly() && type == CONSTANT_FUNCTION);
+  }
+
+  // Use the set value as the result of the operation.
+  return *value;
+}
+
+
+static Object* Runtime_InitializeConstContextSlot(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 3);
+
+  Handle<Object> value(args[0]);
+  ASSERT(!value->IsTheHole());
+  CONVERT_ARG_CHECKED(Context, context, 1);
+  Handle<String> name(String::cast(args[2]));
+
+  // Initializations are always done in the function context.
+  context = Handle<Context>(context->fcontext());
+
+  int index;
+  PropertyAttributes attributes;
+  ContextLookupFlags flags = FOLLOW_CHAINS;
+  Handle<Object> holder =
+      context->Lookup(name, flags, &index, &attributes);
+
+  // In most situations, the property introduced by the const
+  // declaration should be present in the context extension object.
+  // However, because declaration and initialization are separate, the
+  // property might have been deleted (if it was introduced by eval)
+  // before we reach the initialization point.
+  //
+  // Example:
+  //
+  //    function f() { eval("delete x; const x;"); }
+  //
+  // In that case, the initialization behaves like a normal assignment
+  // to property 'x'.
+  if (index >= 0) {
+    // Property was found in a context.
+    if (holder->IsContext()) {
+      // The holder cannot be the function context.  If it is, there
+      // should have been a const redeclaration error when declaring
+      // the const property.
+      ASSERT(!holder.is_identical_to(context));
+      if ((attributes & READ_ONLY) == 0) {
+        Handle<Context>::cast(holder)->set(index, *value);
+      }
+    } else {
+      // The holder is an arguments object.
+      ASSERT((attributes & READ_ONLY) == 0);
+      Handle<JSObject>::cast(holder)->SetElement(index, *value);
+    }
+    return *value;
+  }
+
+  // The property could not be found, we introduce it in the global
+  // context.
+  if (attributes == ABSENT) {
+    Handle<JSObject> global = Handle<JSObject>(Top::context()->global());
+    SetProperty(global, name, value, NONE);
+    return *value;
+  }
+
+  // The property was present in a context extension object.
+  Handle<JSObject> context_ext = Handle<JSObject>::cast(holder);
+
+  if (*context_ext == context->extension()) {
+    // This is the property that was introduced by the const
+    // declaration.  Set it if it hasn't been set before.  NOTE: We
+    // cannot use GetProperty() to get the current value as it
+    // 'unholes' the value.
+    LookupResult lookup;
+    context_ext->LocalLookupRealNamedProperty(*name, &lookup);
+    ASSERT(lookup.IsProperty());  // the property was declared
+    ASSERT(lookup.IsReadOnly());  // and it was declared as read-only
+
+    PropertyType type = lookup.type();
+    if (type == FIELD) {
+      FixedArray* properties = context_ext->properties();
+      int index = lookup.GetFieldIndex();
+      if (properties->get(index)->IsTheHole()) {
+        properties->set(index, *value);
+      }
+    } else if (type == NORMAL) {
+      if (context_ext->GetNormalizedProperty(&lookup)->IsTheHole()) {
+        context_ext->SetNormalizedProperty(&lookup, *value);
+      }
+    } else {
+      // We should not reach here. Any real, named property should be
+      // either a field or a dictionary slot.
+      UNREACHABLE();
+    }
+  } else {
+    // The property was found in a different context extension object.
+    // Set it if it is not a read-only property.
+    if ((attributes & READ_ONLY) == 0) {
+      Handle<Object> set = SetProperty(context_ext, name, value, attributes);
+      // Setting a property might throw an exception.  Exceptions
+      // are converted to empty handles in handle operations.  We
+      // need to convert back to exceptions here.
+      if (set.is_null()) {
+        ASSERT(Top::has_pending_exception());
+        return Failure::Exception();
+      }
+    }
+  }
+
+  return *value;
+}
+
+
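+// Normalize an object's properties (switch them to dictionary mode) in
+// anticipation of adding multiple properties; the expected property count
+// is passed on to the normalization.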
+static Object* Runtime_OptimizeObjectForAddingMultipleProperties(
+    Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 2);
+  CONVERT_ARG_CHECKED(JSObject, object, 0);
+  CONVERT_SMI_CHECKED(properties, args[1]);
+  if (object->HasFastProperties()) {
+    NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, properties);
+  }
+  return *object;
+}
+
+
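+// Switch an object back to fast properties. Global objects are kept in
+// dictionary mode.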
+static Object* Runtime_TransformToFastProperties(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(JSObject, object, 0);
+  if (!object->HasFastProperties() && !object->IsGlobalObject()) {
+    TransformToFastProperties(object, 0);
+  }
+  return *object;
+}
+
+
+static Object* Runtime_RegExpExec(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 4);
+  CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
+  CONVERT_ARG_CHECKED(String, subject, 1);
+  // Due to the way the JS calls are constructed this must be less than the
+  // length of a string, i.e. it is always a Smi.  We check anyway for security.
+  CONVERT_SMI_CHECKED(index, args[2]);
+  CONVERT_ARG_CHECKED(JSArray, last_match_info, 3);
+  RUNTIME_ASSERT(last_match_info->HasFastElements());
+  RUNTIME_ASSERT(index >= 0);
+  RUNTIME_ASSERT(index <= subject->length());
+  Handle<Object> result = RegExpImpl::Exec(regexp,
+                                           subject,
+                                           index,
+                                           last_match_info);
+  if (result.is_null()) return Failure::Exception();
+  return *result;
+}
+
+
+static Object* Runtime_MaterializeRegExpLiteral(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 4);
+  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
+  int index = Smi::cast(args[1])->value();
+  Handle<String> pattern = args.at<String>(2);
+  Handle<String> flags = args.at<String>(3);
+
+  // Get the RegExp function from the context in the literals array.
+  // This is the RegExp function from the context in which the
+  // function was created.  We do not use the RegExp function from the
+  // current global context because this might be the RegExp function
+  // from another context which we should not have access to.
+  Handle<JSFunction> constructor =
+      Handle<JSFunction>(
+          JSFunction::GlobalContextFromLiterals(*literals)->regexp_function());
+  // Compute the regular expression literal.
+  bool has_pending_exception;
+  Handle<Object> regexp =
+      RegExpImpl::CreateRegExpLiteral(constructor, pattern, flags,
+                                      &has_pending_exception);
+  if (has_pending_exception) {
+    ASSERT(Top::has_pending_exception());
+    return Failure::Exception();
+  }
+  literals->set(index, *regexp);
+  return *regexp;
+}
+
+
+static Object* Runtime_FunctionGetName(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_CHECKED(JSFunction, f, args[0]);
+  return f->shared()->name();
+}
+
+
+static Object* Runtime_FunctionSetName(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_CHECKED(JSFunction, f, args[0]);
+  CONVERT_CHECKED(String, name, args[1]);
+  f->shared()->set_name(name);
+  return Heap::undefined_value();
+}
+
+
+static Object* Runtime_FunctionGetScript(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+
+  CONVERT_CHECKED(JSFunction, fun, args[0]);
+  Handle<Object> script = Handle<Object>(fun->shared()->script());
+  if (!script->IsScript()) return Heap::undefined_value();
+
+  return *GetScriptWrapper(Handle<Script>::cast(script));
+}
+
+
+static Object* Runtime_FunctionGetSourceCode(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_CHECKED(JSFunction, f, args[0]);
+  return f->shared()->GetSourceCode();
+}
+
+
+static Object* Runtime_FunctionGetScriptSourcePosition(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_CHECKED(JSFunction, fun, args[0]);
+  int pos = fun->shared()->start_position();
+  return Smi::FromInt(pos);
+}
+
+
+static Object* Runtime_FunctionGetPositionForOffset(Arguments args) {
+  ASSERT(args.length() == 2);
+
+  CONVERT_CHECKED(JSFunction, fun, args[0]);
+  CONVERT_NUMBER_CHECKED(int, offset, Int32, args[1]);
+
+  Code* code = fun->code();
+  RUNTIME_ASSERT(0 <= offset && offset < code->Size());
+
+  Address pc = code->address() + offset;
+  return Smi::FromInt(fun->code()->SourcePosition(pc));
+}
+
+
+static Object* Runtime_FunctionSetInstanceClassName(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_CHECKED(JSFunction, fun, args[0]);
+  CONVERT_CHECKED(String, name, args[1]);
+  fun->SetInstanceClassName(name);
+  return Heap::undefined_value();
+}
+
+
+static Object* Runtime_FunctionSetLength(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_CHECKED(JSFunction, fun, args[0]);
+  CONVERT_CHECKED(Smi, length, args[1]);
+  fun->shared()->set_length(length->value());
+  return length;
+}
+
+
+static Object* Runtime_FunctionSetPrototype(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_CHECKED(JSFunction, fun, args[0]);
+  Object* obj = Accessors::FunctionSetPrototype(fun, args[1], NULL);
+  if (obj->IsFailure()) return obj;
+  return args[0];  // return TOS
+}
+
+
+static Object* Runtime_FunctionIsAPIFunction(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_CHECKED(JSFunction, f, args[0]);
+  // The function_data field of the shared function info is used exclusively by
+  // the API.
+  return !f->shared()->function_data()->IsUndefined() ? Heap::true_value()
+                                                      : Heap::false_value();
+}
+
+
+static Object* Runtime_FunctionIsBuiltin(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_CHECKED(JSFunction, f, args[0]);
+  return f->IsBuiltin() ? Heap::true_value() : Heap::false_value();
+}
+
+
+static Object* Runtime_SetCode(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 2);
+
+  CONVERT_ARG_CHECKED(JSFunction, target, 0);
+  Handle<Object> code = args.at<Object>(1);
+
+  Handle<Context> context(target->context());
+
+  if (!code->IsNull()) {
+    RUNTIME_ASSERT(code->IsJSFunction());
+    Handle<JSFunction> fun = Handle<JSFunction>::cast(code);
+    SetExpectedNofProperties(target, fun->shared()->expected_nof_properties());
+    if (!fun->is_compiled() && !CompileLazy(fun, KEEP_EXCEPTION)) {
+      return Failure::Exception();
+    }
+    // Set the code, formal parameter count, and the length of the target
+    // function.
+    target->set_code(fun->code());
+    target->shared()->set_length(fun->shared()->length());
+    target->shared()->set_formal_parameter_count(
+        fun->shared()->formal_parameter_count());
+    // Set the source code of the target function to undefined.
+    // SetCode is only used for built-in constructors like String, Array,
+    // and Object, and some web code doesn't like seeing source code for
+    // constructors.
+    target->shared()->set_script(Heap::undefined_value());
+    // Clear the optimization hints related to the compiled code as these are no
+    // longer valid when the code is overwritten.
+    target->shared()->ClearThisPropertyAssignmentsInfo();
+    context = Handle<Context>(fun->context());
+
+    // Make sure we get a fresh copy of the literal vector to avoid
+    // cross context contamination.
+    int number_of_literals = fun->NumberOfLiterals();
+    Handle<FixedArray> literals =
+        Factory::NewFixedArray(number_of_literals, TENURED);
+    if (number_of_literals > 0) {
+      // Insert the object, regexp and array functions in the literals
+      // array prefix.  These are the functions that will be used when
+      // creating object, regexp and array literals.
+      literals->set(JSFunction::kLiteralGlobalContextIndex,
+                    context->global_context());
+    }
+    target->set_literals(*literals, SKIP_WRITE_BARRIER);
+  }
+
+  target->set_context(*context);
+  return *target;
+}
+
+
+static Object* CharCodeAt(String* subject, Object* index) {
+  uint32_t i = 0;
+  if (!Array::IndexFromObject(index, &i)) return Heap::nan_value();
+  // Flatten the string.  If someone wants to get a char at an index
+  // in a cons string, it is likely that more indices will be
+  // accessed.
+  subject->TryFlattenIfNotFlat();
+  if (i >= static_cast<uint32_t>(subject->length())) {
+    return Heap::nan_value();
+  }
+  return Smi::FromInt(subject->Get(i));
+}
+
+
+static Object* Runtime_StringCharCodeAt(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_CHECKED(String, subject, args[0]);
+  Object* index = args[1];
+  return CharCodeAt(subject, index);
+}
+
+
+static Object* Runtime_CharFromCode(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+  uint32_t code;
+  if (Array::IndexFromObject(args[0], &code)) {
+    if (code <= 0xffff) {
+      return Heap::LookupSingleCharacterStringFromCode(code);
+    }
+  }
+  return Heap::empty_string();
+}
+
+// Forward declarations.
+static const int kStringBuilderConcatHelperLengthBits = 11;
+static const int kStringBuilderConcatHelperPositionBits = 19;
+
+template <typename schar>
+static inline void StringBuilderConcatHelper(String*,
+                                             schar*,
+                                             FixedArray*,
+                                             int);
+
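+// A slice of the subject string is encoded in a single Smi: the low 11 bits
+// hold the slice length and the next 19 bits hold the start position (see
+// AddSubjectSlice below).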
+typedef BitField<int, 0, 11> StringBuilderSubstringLength;
+typedef BitField<int, 11, 19> StringBuilderSubstringPosition;
+
+class ReplacementStringBuilder {
+ public:
+  ReplacementStringBuilder(Handle<String> subject, int estimated_part_count)
+      : subject_(subject),
+        parts_(Factory::NewFixedArray(estimated_part_count)),
+        part_count_(0),
+        character_count_(0),
+        is_ascii_(subject->IsAsciiRepresentation()) {
+    // Require a non-zero initial size. Ensures that doubling the size to
+    // extend the array will work.
+    ASSERT(estimated_part_count > 0);
+  }
+
+  void EnsureCapacity(int elements) {
+    int length = parts_->length();
+    int required_length = part_count_ + elements;
+    if (length < required_length) {
+      int new_length = length;
+      do {
+        new_length *= 2;
+      } while (new_length < required_length);
+      Handle<FixedArray> extended_array =
+          Factory::NewFixedArray(new_length);
+      parts_->CopyTo(0, *extended_array, 0, part_count_);
+      parts_ = extended_array;
+    }
+  }
+
+  void AddSubjectSlice(int from, int to) {
+    ASSERT(from >= 0);
+    int length = to - from;
+    ASSERT(length > 0);
+    // Can we encode the slice in 11 bits for length and 19 bits for
+    // start position - as used by StringBuilderConcatHelper?
+    if (StringBuilderSubstringLength::is_valid(length) &&
+        StringBuilderSubstringPosition::is_valid(from)) {
+      int encoded_slice = StringBuilderSubstringLength::encode(length) |
+          StringBuilderSubstringPosition::encode(from);
+      AddElement(Smi::FromInt(encoded_slice));
+    } else {
+      Handle<String> slice = Factory::NewStringSlice(subject_, from, to);
+      AddElement(*slice);
+    }
+    IncrementCharacterCount(length);
+  }
+
+
+  void AddString(Handle<String> string) {
+    int length = string->length();
+    ASSERT(length > 0);
+    AddElement(*string);
+    if (!string->IsAsciiRepresentation()) {
+      is_ascii_ = false;
+    }
+    IncrementCharacterCount(length);
+  }
+
+
+  Handle<String> ToString() {
+    if (part_count_ == 0) {
+      return Factory::empty_string();
+    }
+
+    Handle<String> joined_string;
+    if (is_ascii_) {
+      joined_string = NewRawAsciiString(character_count_);
+      AssertNoAllocation no_alloc;
+      SeqAsciiString* seq = SeqAsciiString::cast(*joined_string);
+      char* char_buffer = seq->GetChars();
+      StringBuilderConcatHelper(*subject_,
+                                char_buffer,
+                                *parts_,
+                                part_count_);
+    } else {
+      // Non-ASCII.
+      joined_string = NewRawTwoByteString(character_count_);
+      AssertNoAllocation no_alloc;
+      SeqTwoByteString* seq = SeqTwoByteString::cast(*joined_string);
+      uc16* char_buffer = seq->GetChars();
+      StringBuilderConcatHelper(*subject_,
+                                char_buffer,
+                                *parts_,
+                                part_count_);
+    }
+    return joined_string;
+  }
+
+
+  void IncrementCharacterCount(int by) {
+    if (character_count_ > Smi::kMaxValue - by) {
+      V8::FatalProcessOutOfMemory("String.replace result too large.");
+    }
+    character_count_ += by;
+  }
+
+ private:
+
+  Handle<String> NewRawAsciiString(int size) {
+    CALL_HEAP_FUNCTION(Heap::AllocateRawAsciiString(size), String);
+  }
+
+
+  Handle<String> NewRawTwoByteString(int size) {
+    CALL_HEAP_FUNCTION(Heap::AllocateRawTwoByteString(size), String);
+  }
+
+
+  void AddElement(Object* element) {
+    ASSERT(element->IsSmi() || element->IsString());
+    ASSERT(parts_->length() > part_count_);
+    parts_->set(part_count_, element);
+    part_count_++;
+  }
+
+  Handle<String> subject_;
+  Handle<FixedArray> parts_;
+  int part_count_;
+  int character_count_;
+  bool is_ascii_;
+};
+
+
+class CompiledReplacement {
+ public:
+  CompiledReplacement()
+      : parts_(1), replacement_substrings_(0) {}
+
+  void Compile(Handle<String> replacement,
+               int capture_count,
+               int subject_length);
+
+  void Apply(ReplacementStringBuilder* builder,
+             int match_from,
+             int match_to,
+             Handle<JSArray> last_match_info);
+
+  // Number of distinct parts of the replacement pattern.
+  int parts() {
+    return parts_.length();
+  }
+ private:
+  enum PartType {
+    SUBJECT_PREFIX = 1,
+    SUBJECT_SUFFIX,
+    SUBJECT_CAPTURE,
+    REPLACEMENT_SUBSTRING,
+    REPLACEMENT_STRING,
+
+    NUMBER_OF_PART_TYPES
+  };
+
+  struct ReplacementPart {
+    static inline ReplacementPart SubjectMatch() {
+      return ReplacementPart(SUBJECT_CAPTURE, 0);
+    }
+    static inline ReplacementPart SubjectCapture(int capture_index) {
+      return ReplacementPart(SUBJECT_CAPTURE, capture_index);
+    }
+    static inline ReplacementPart SubjectPrefix() {
+      return ReplacementPart(SUBJECT_PREFIX, 0);
+    }
+    static inline ReplacementPart SubjectSuffix(int subject_length) {
+      return ReplacementPart(SUBJECT_SUFFIX, subject_length);
+    }
+    static inline ReplacementPart ReplacementString() {
+      return ReplacementPart(REPLACEMENT_STRING, 0);
+    }
+    static inline ReplacementPart ReplacementSubString(int from, int to) {
+      ASSERT(from >= 0);
+      ASSERT(to > from);
+      return ReplacementPart(-from, to);
+    }
+
+    // If tag <= 0 then it is the negation of a start index of a substring of
+    // the replacement pattern, otherwise it's a value from PartType.
+    ReplacementPart(int tag, int data)
+        : tag(tag), data(data) {
+      // Must be non-positive or a PartType value.
+      ASSERT(tag < NUMBER_OF_PART_TYPES);
+    }
+    // Either a value of PartType or a non-positive number that is
+    // the negation of an index into the replacement string.
+    int tag;
+    // The data value's interpretation depends on the value of tag:
+    // tag == SUBJECT_PREFIX ||
+    // tag == SUBJECT_SUFFIX:  data is unused.
+    // tag == SUBJECT_CAPTURE: data is the number of the capture.
+    // tag == REPLACEMENT_SUBSTRING ||
+    // tag == REPLACEMENT_STRING:    data is index into array of substrings
+    //                               of the replacement string.
+    // tag <= 0: Temporary representation of the substring of the replacement
+    //           string ranging over -tag .. data.
+    //           Is replaced by REPLACEMENT_{SUB,}STRING when we create the
+    //           substring objects.
+    int data;
+  };
+
+  template<typename Char>
+  static void ParseReplacementPattern(ZoneList<ReplacementPart>* parts,
+                                      Vector<Char> characters,
+                                      int capture_count,
+                                      int subject_length) {
+    int length = characters.length();
+    int last = 0;
+    for (int i = 0; i < length; i++) {
+      Char c = characters[i];
+      if (c == '$') {
+        int next_index = i + 1;
+        if (next_index == length) {  // No next character!
+          break;
+        }
+        Char c2 = characters[next_index];
+        switch (c2) {
+        case '$':
+          if (i > last) {
+            // There is a substring before. Include the first "$".
+            parts->Add(ReplacementPart::ReplacementSubString(last, next_index));
+            last = next_index + 1;  // Continue after the second "$".
+          } else {
+            // Let the next substring start with the second "$".
+            last = next_index;
+          }
+          i = next_index;
+          break;
+        case '`':
+          if (i > last) {
+            parts->Add(ReplacementPart::ReplacementSubString(last, i));
+          }
+          parts->Add(ReplacementPart::SubjectPrefix());
+          i = next_index;
+          last = i + 1;
+          break;
+        case '\'':
+          if (i > last) {
+            parts->Add(ReplacementPart::ReplacementSubString(last, i));
+          }
+          parts->Add(ReplacementPart::SubjectSuffix(subject_length));
+          i = next_index;
+          last = i + 1;
+          break;
+        case '&':
+          if (i > last) {
+            parts->Add(ReplacementPart::ReplacementSubString(last, i));
+          }
+          parts->Add(ReplacementPart::SubjectMatch());
+          i = next_index;
+          last = i + 1;
+          break;
+        case '0':
+        case '1':
+        case '2':
+        case '3':
+        case '4':
+        case '5':
+        case '6':
+        case '7':
+        case '8':
+        case '9': {
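+          // A '$' followed by one or two digits is a capture reference
+          // ($n or $nn); references that do not name an existing capture
+          // are left in the replacement text verbatim.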
+          int capture_ref = c2 - '0';
+          if (capture_ref > capture_count) {
+            i = next_index;
+            continue;
+          }
+          int second_digit_index = next_index + 1;
+          if (second_digit_index < length) {
+            // Peek ahead to see if we have two digits.
+            Char c3 = characters[second_digit_index];
+            if ('0' <= c3 && c3 <= '9') {  // Double digits.
+              int double_digit_ref = capture_ref * 10 + c3 - '0';
+              if (double_digit_ref <= capture_count) {
+                next_index = second_digit_index;
+                capture_ref = double_digit_ref;
+              }
+            }
+          }
+          if (capture_ref > 0) {
+            if (i > last) {
+              parts->Add(ReplacementPart::ReplacementSubString(last, i));
+            }
+            ASSERT(capture_ref <= capture_count);
+            parts->Add(ReplacementPart::SubjectCapture(capture_ref));
+            last = next_index + 1;
+          }
+          i = next_index;
+          break;
+        }
+        default:
+          i = next_index;
+          break;
+        }
+      }
+    }
+    if (length > last) {
+      if (last == 0) {
+        parts->Add(ReplacementPart::ReplacementString());
+      } else {
+        parts->Add(ReplacementPart::ReplacementSubString(last, length));
+      }
+    }
+  }
+
+  ZoneList<ReplacementPart> parts_;
+  ZoneList<Handle<String> > replacement_substrings_;
+};
+
+
+void CompiledReplacement::Compile(Handle<String> replacement,
+                                  int capture_count,
+                                  int subject_length) {
+  ASSERT(replacement->IsFlat());
+  if (replacement->IsAsciiRepresentation()) {
+    AssertNoAllocation no_alloc;
+    ParseReplacementPattern(&parts_,
+                            replacement->ToAsciiVector(),
+                            capture_count,
+                            subject_length);
+  } else {
+    ASSERT(replacement->IsTwoByteRepresentation());
+    AssertNoAllocation no_alloc;
+
+    ParseReplacementPattern(&parts_,
+                            replacement->ToUC16Vector(),
+                            capture_count,
+                            subject_length);
+  }
+  // Find substrings of the replacement string and create them as String
+  // objects.
+  int substring_index = 0;
+  for (int i = 0, n = parts_.length(); i < n; i++) {
+    int tag = parts_[i].tag;
+    if (tag <= 0) {  // A replacement string slice.
+      int from = -tag;
+      int to = parts_[i].data;
+      replacement_substrings_.Add(Factory::NewStringSlice(replacement,
+                                                          from,
+                                                          to));
+      parts_[i].tag = REPLACEMENT_SUBSTRING;
+      parts_[i].data = substring_index;
+      substring_index++;
+    } else if (tag == REPLACEMENT_STRING) {
+      replacement_substrings_.Add(replacement);
+      parts_[i].data = substring_index;
+      substring_index++;
+    }
+  }
+}
+
+
+void CompiledReplacement::Apply(ReplacementStringBuilder* builder,
+                                int match_from,
+                                int match_to,
+                                Handle<JSArray> last_match_info) {
+  for (int i = 0, n = parts_.length(); i < n; i++) {
+    ReplacementPart part = parts_[i];
+    switch (part.tag) {
+      case SUBJECT_PREFIX:
+        if (match_from > 0) builder->AddSubjectSlice(0, match_from);
+        break;
+      case SUBJECT_SUFFIX: {
+        int subject_length = part.data;
+        if (match_to < subject_length) {
+          builder->AddSubjectSlice(match_to, subject_length);
+        }
+        break;
+      }
+      case SUBJECT_CAPTURE: {
+        int capture = part.data;
+        FixedArray* match_info = FixedArray::cast(last_match_info->elements());
+        int from = RegExpImpl::GetCapture(match_info, capture * 2);
+        int to = RegExpImpl::GetCapture(match_info, capture * 2 + 1);
+        if (from >= 0 && to > from) {
+          builder->AddSubjectSlice(from, to);
+        }
+        break;
+      }
+      case REPLACEMENT_SUBSTRING:
+      case REPLACEMENT_STRING:
+        builder->AddString(replacement_substrings_[part.data]);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  }
+}
+
+
+static Object* StringReplaceRegExpWithString(String* subject,
+                                             JSRegExp* regexp,
+                                             String* replacement,
+                                             JSArray* last_match_info) {
+  ASSERT(subject->IsFlat());
+  ASSERT(replacement->IsFlat());
+
+  HandleScope handles;
+
+  int length = subject->length();
+  Handle<String> subject_handle(subject);
+  Handle<JSRegExp> regexp_handle(regexp);
+  Handle<String> replacement_handle(replacement);
+  Handle<JSArray> last_match_info_handle(last_match_info);
+  Handle<Object> match = RegExpImpl::Exec(regexp_handle,
+                                          subject_handle,
+                                          0,
+                                          last_match_info_handle);
+  if (match.is_null()) {
+    return Failure::Exception();
+  }
+  if (match->IsNull()) {
+    return *subject_handle;
+  }
+
+  int capture_count = regexp_handle->CaptureCount();
+
+  // CompiledReplacement uses zone allocation.
+  CompilationZoneScope zone(DELETE_ON_EXIT);
+  CompiledReplacement compiled_replacement;
+  compiled_replacement.Compile(replacement_handle,
+                               capture_count,
+                               length);
+
+  bool is_global = regexp_handle->GetFlags().is_global();
+
+  // Guessing the number of parts that the final result string is built
+  // from. Global regexps can match any number of times, so we guess
+  // conservatively.
+  int expected_parts =
+      (compiled_replacement.parts() + 1) * (is_global ? 4 : 1) + 1;
+  ReplacementStringBuilder builder(subject_handle, expected_parts);
+
+  // Index of end of last match.
+  int prev = 0;
+
+  // Number of parts added by the compiled replacement plus the preceding
+  // string and possibly a suffix after the last match.
+  const int parts_added_per_loop = compiled_replacement.parts() + 2;
+  bool matched = true;
+  do {
+    ASSERT(last_match_info_handle->HasFastElements());
+    // Increase the capacity of the builder before entering local handle-scope,
+    // so its internal buffer can safely allocate a new handle if it grows.
+    builder.EnsureCapacity(parts_added_per_loop);
+
+    HandleScope loop_scope;
+    int start, end;
+    {
+      AssertNoAllocation match_info_array_is_not_in_a_handle;
+      FixedArray* match_info_array =
+          FixedArray::cast(last_match_info_handle->elements());
+
+      ASSERT_EQ(capture_count * 2 + 2,
+                RegExpImpl::GetLastCaptureCount(match_info_array));
+      start = RegExpImpl::GetCapture(match_info_array, 0);
+      end = RegExpImpl::GetCapture(match_info_array, 1);
+    }
+
+    if (prev < start) {
+      builder.AddSubjectSlice(prev, start);
+    }
+    compiled_replacement.Apply(&builder,
+                               start,
+                               end,
+                               last_match_info_handle);
+    prev = end;
+
+    // Only continue checking for global regexps.
+    if (!is_global) break;
+
+    // Continue from where the match ended, unless it was an empty match.
+    int next = end;
+    if (start == end) {
+      next = end + 1;
+      if (next > length) break;
+    }
+
+    match = RegExpImpl::Exec(regexp_handle,
+                             subject_handle,
+                             next,
+                             last_match_info_handle);
+    if (match.is_null()) {
+      return Failure::Exception();
+    }
+    matched = !match->IsNull();
+  } while (matched);
+
+  if (prev < length) {
+    builder.AddSubjectSlice(prev, length);
+  }
+
+  return *(builder.ToString());
+}
+
+
+static Object* Runtime_StringReplaceRegExpWithString(Arguments args) {
+  ASSERT(args.length() == 4);
+
+  CONVERT_CHECKED(String, subject, args[0]);
+  if (!subject->IsFlat()) {
+    Object* flat_subject = subject->TryFlatten();
+    if (flat_subject->IsFailure()) {
+      return flat_subject;
+    }
+    subject = String::cast(flat_subject);
+  }
+
+  CONVERT_CHECKED(String, replacement, args[2]);
+  if (!replacement->IsFlat()) {
+    Object* flat_replacement = replacement->TryFlatten();
+    if (flat_replacement->IsFailure()) {
+      return flat_replacement;
+    }
+    replacement = String::cast(flat_replacement);
+  }
+
+  CONVERT_CHECKED(JSRegExp, regexp, args[1]);
+  CONVERT_CHECKED(JSArray, last_match_info, args[3]);
+
+  ASSERT(last_match_info->HasFastElements());
+
+  return StringReplaceRegExpWithString(subject,
+                                       regexp,
+                                       replacement,
+                                       last_match_info);
+}
+
+
+// Cap on the maximal shift in the Boyer-Moore implementation. By setting a
+// limit, we can fix the size of tables.
+static const int kBMMaxShift = 0xff;
+// Reduce alphabet to this size.
+static const int kBMAlphabetSize = 0x100;
+// For patterns below this length, the skip length of Boyer-Moore is too short
+// to compensate for the algorithmic overhead compared to simple brute force.
+static const int kBMMinPatternLength = 5;
+
+// Holds the two buffers used by Boyer-Moore string search's Good Suffix
+// shift. Only allows the last kBMMaxShift characters of the needle
+// to be indexed.
+class BMGoodSuffixBuffers {
+ public:
+  BMGoodSuffixBuffers() {}
+  inline void init(int needle_length) {
+    ASSERT(needle_length > 1);
+    int start = needle_length < kBMMaxShift ? 0 : needle_length - kBMMaxShift;
+    int len = needle_length - start;
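+    // Indices are positions in the needle; bias the buffer pointers so that
+    // indices in [start, needle_length] map into the fixed-size arrays.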
+    biased_suffixes_ = suffixes_ - start;
+    biased_good_suffix_shift_ = good_suffix_shift_ - start;
+    for (int i = 0; i <= len; i++) {
+      good_suffix_shift_[i] = len;
+    }
+  }
+  inline int& suffix(int index) {
+    ASSERT(biased_suffixes_ + index >= suffixes_);
+    return biased_suffixes_[index];
+  }
+  inline int& shift(int index) {
+    ASSERT(biased_good_suffix_shift_ + index >= good_suffix_shift_);
+    return biased_good_suffix_shift_[index];
+  }
+ private:
+  int suffixes_[kBMMaxShift + 1];
+  int good_suffix_shift_[kBMMaxShift + 1];
+  int* biased_suffixes_;
+  int* biased_good_suffix_shift_;
+  DISALLOW_COPY_AND_ASSIGN(BMGoodSuffixBuffers);
+};
+
+// Buffers reused by the Boyer-Moore string search functions below.
+static int bad_char_occurrence[kBMAlphabetSize];
+static BMGoodSuffixBuffers bmgs_buffers;
+
+// Compute the bad-char table for Boyer-Moore in the static buffer.
+template <typename pchar>
+static void BoyerMoorePopulateBadCharTable(Vector<const pchar> pattern,
+                                          int start) {
+  // Run forwards to populate the bad-char table, so that the *last*
+  // instance of a character equivalence class is the one registered.
+  // Notice: Doesn't include the last character.
+  int table_size = (sizeof(pchar) == 1) ? String::kMaxAsciiCharCode + 1
+                                        : kBMAlphabetSize;
+  if (start == 0) {  // All patterns less than kBMMaxShift in length.
+    memset(bad_char_occurrence, -1, table_size * sizeof(*bad_char_occurrence));
+  } else {
+    for (int i = 0; i < table_size; i++) {
+      bad_char_occurrence[i] = start - 1;
+    }
+  }
+  for (int i = start; i < pattern.length() - 1; i++) {
+    pchar c = pattern[i];
+    int bucket = (sizeof(pchar) == 1) ? c : c % kBMAlphabetSize;
+    bad_char_occurrence[bucket] = i;
+  }
+}
+
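+// Compute the good-suffix shift table for Boyer-Moore in the static buffers,
+// restricted to the last (at most kBMMaxShift) characters of the pattern;
+// 'start' is the index of the first pattern character taken into account.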
+template <typename pchar>
+static void BoyerMoorePopulateGoodSuffixTable(Vector<const pchar> pattern,
+                                              int start) {
+  int m = pattern.length();
+  int len = m - start;
+  // Compute Good Suffix tables.
+  bmgs_buffers.init(m);
+
+  bmgs_buffers.shift(m - 1) = 1;
+  bmgs_buffers.suffix(m) = m + 1;
+  pchar last_char = pattern[m - 1];
+  int suffix = m + 1;
+  for (int i = m; i > start;) {
+    for (pchar c = pattern[i - 1]; suffix <= m && c != pattern[suffix - 1];) {
+      if (bmgs_buffers.shift(suffix) == len) {
+        bmgs_buffers.shift(suffix) = suffix - i;
+      }
+      suffix = bmgs_buffers.suffix(suffix);
+    }
+    i--;
+    suffix--;
+    bmgs_buffers.suffix(i) = suffix;
+    if (suffix == m) {
+      // No suffix to extend, so we check against last_char only.
+      while (i > start && pattern[i - 1] != last_char) {
+        if (bmgs_buffers.shift(m) == len) {
+          bmgs_buffers.shift(m) = m - i;
+        }
+        i--;
+        bmgs_buffers.suffix(i) = m;
+      }
+      if (i > start) {
+        i--;
+        suffix--;
+        bmgs_buffers.suffix(i) = suffix;
+      }
+    }
+  }
+  if (suffix < m) {
+    for (int i = start; i <= m; i++) {
+      if (bmgs_buffers.shift(i) == len) {
+        bmgs_buffers.shift(i) = suffix - start;
+      }
+      if (i == suffix) {
+        suffix = bmgs_buffers.suffix(suffix);
+      }
+    }
+  }
+}
+
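+// Look up the bad-character table entry for a subject character, taking the
+// character widths of the subject (schar) and pattern (pchar) into account:
+// wide subject characters that cannot occur in an ASCII pattern yield -1,
+// and wide characters looked up against a wide pattern are reduced modulo
+// kBMAlphabetSize.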
+template <typename schar, typename pchar>
+static inline int CharOccurrence(int char_code) {
+  if (sizeof(schar) == 1) {
+    return bad_char_occurrence[char_code];
+  }
+  if (sizeof(pchar) == 1) {
+    if (char_code > String::kMaxAsciiCharCode) {
+      return -1;
+    }
+    return bad_char_occurrence[char_code];
+  }
+  return bad_char_occurrence[char_code % kBMAlphabetSize];
+}
+
+// Restricted simplified Boyer-Moore string matching.
+// Uses only the bad-shift table of Boyer-Moore and only uses it
+// for the character compared to the last character of the needle.
+template <typename schar, typename pchar>
+static int BoyerMooreHorspool(Vector<const schar> subject,
+                              Vector<const pchar> pattern,
+                              int start_index,
+                              bool* complete) {
+  int n = subject.length();
+  int m = pattern.length();
+  // Only preprocess at most kBMMaxShift last characters of pattern.
+  int start = m < kBMMaxShift ? 0 : m - kBMMaxShift;
+
+  BoyerMoorePopulateBadCharTable(pattern, start);
+
+  int badness = -m;  // How bad we are doing without a good-suffix table.
+  int idx;  // No matches found prior to this index.
+  pchar last_char = pattern[m - 1];
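+  // Shift to apply when the last pattern character matches but the rest of
+  // the pattern does not: the distance from the end of the pattern to the
+  // previous occurrence of its last character.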
+  int last_char_shift = m - 1 - CharOccurrence<schar, pchar>(last_char);
+  // Perform the search.
+  for (idx = start_index; idx <= n - m;) {
+    int j = m - 1;
+    int c;
+    while (last_char != (c = subject[idx + j])) {
+      int bc_occ = CharOccurrence<schar, pchar>(c);
+      int shift = j - bc_occ;
+      idx += shift;
+      badness += 1 - shift;  // at most zero, so badness cannot increase.
+      if (idx > n - m) {
+        *complete = true;
+        return -1;
+      }
+    }
+    j--;
+    while (j >= 0 && pattern[j] == (subject[idx + j])) j--;
+    if (j < 0) {
+      *complete = true;
+      return idx;
+    } else {
+      idx += last_char_shift;
+      // Badness increases by the number of characters we have
+      // checked, and decreases by the number of characters we
+      // can skip by shifting. It's a measure of how we are doing
+      // compared to reading each character exactly once.
+      badness += (m - j) - last_char_shift;
+      if (badness > 0) {
+        *complete = false;
+        return idx;
+      }
+    }
+  }
+  *complete = true;
+  return -1;
+}
+
+
+template <typename schar, typename pchar>
+static int BoyerMooreIndexOf(Vector<const schar> subject,
+                             Vector<const pchar> pattern,
+                             int idx) {
+  int n = subject.length();
+  int m = pattern.length();
+  // Only preprocess at most kBMMaxShift last characters of pattern.
+  int start = m < kBMMaxShift ? 0 : m - kBMMaxShift;
+
+  // Build the Good Suffix table and continue searching.
+  BoyerMoorePopulateGoodSuffixTable(pattern, start);
+  pchar last_char = pattern[m - 1];
+  // Continue the search from idx.
+  while (idx <= n - m) {
+    int j = m - 1;
+    schar c;
+    while (last_char != (c = subject[idx + j])) {
+      int shift = j - CharOccurrence<schar, pchar>(c);
+      idx += shift;
+      if (idx > n - m) {
+        return -1;
+      }
+    }
+    while (j >= 0 && pattern[j] == (c = subject[idx + j])) j--;
+    if (j < 0) {
+      return idx;
+    } else if (j < start) {
+      // We have matched more than our tables allow us to be smart about.
+      // Fall back on the Boyer-Moore-Horspool shift.
+      idx += m - 1 - CharOccurrence<schar, pchar>(last_char);
+    } else {
+      int gs_shift = bmgs_buffers.shift(j + 1);       // Good suffix shift.
+      int bc_occ = CharOccurrence<schar, pchar>(c);
+      int shift = j - bc_occ;                         // Bad-char shift.
+      if (gs_shift > shift) {
+        shift = gs_shift;
+      }
+      idx += shift;
+    }
+  }
+
+  return -1;
+}
+
+
+template <typename schar>
+static int SingleCharIndexOf(Vector<const schar> string,
+                             schar pattern_char,
+                             int start_index) {
+  for (int i = start_index, n = string.length(); i < n; i++) {
+    if (pattern_char == string[i]) {
+      return i;
+    }
+  }
+  return -1;
+}
+
+// Trivial string search for shorter strings.
+// On return, if "complete" is set to true, the return value is the
+// final result of searching for the pattern in the subject.
+// If "complete" is set to false, the return value is the index where
+// further checking should start, i.e., it's guaranteed that the pattern
+// does not occur at a position prior to the returned index.
+template <typename pchar, typename schar>
+static int SimpleIndexOf(Vector<const schar> subject,
+                         Vector<const pchar> pattern,
+                         int idx,
+                         bool* complete) {
+  // Badness is a count of how much work we have done.  When we have
+  // done enough work we decide it's probably worth switching to a better
+  // algorithm.
+  int badness = -10 - (pattern.length() << 2);
+  // We know our pattern is at least 2 characters; we cache the first so
+  // that the common case of the first character not matching is faster.
+  pchar pattern_first_char = pattern[0];
+
+  for (int i = idx, n = subject.length() - pattern.length(); i <= n; i++) {
+    badness++;
+    if (badness > 0) {
+      *complete = false;
+      return i;
+    }
+    if (subject[i] != pattern_first_char) continue;
+    int j = 1;
+    do {
+      if (pattern[j] != subject[i+j]) {
+        break;
+      }
+      j++;
+    } while (j < pattern.length());
+    if (j == pattern.length()) {
+      *complete = true;
+      return i;
+    }
+    badness += j;
+  }
+  *complete = true;
+  return -1;
+}
+
+// Simple indexOf that never bails out. For short patterns only.
+template <typename pchar, typename schar>
+static int SimpleIndexOf(Vector<const schar> subject,
+                         Vector<const pchar> pattern,
+                         int idx) {
+  pchar pattern_first_char = pattern[0];
+  for (int i = idx, n = subject.length() - pattern.length(); i <= n; i++) {
+    if (subject[i] != pattern_first_char) continue;
+    int j = 1;
+    do {
+      if (pattern[j] != subject[i+j]) {
+        break;
+      }
+      j++;
+    } while (j < pattern.length());
+    if (j == pattern.length()) {
+      return i;
+    }
+  }
+  return -1;
+}
+
+
+// Dispatch to different algorithms.
+template <typename schar, typename pchar>
+static int StringMatchStrategy(Vector<const schar> sub,
+                               Vector<const pchar> pat,
+                               int start_index) {
+  ASSERT(pat.length() > 1);
+
+  // If we have an ASCII haystack and a two-byte needle, check whether the
+  // needle really contains a non-ASCII character and bail out if it does,
+  // since such a needle can never occur in an ASCII subject.
+  if (sizeof(pchar) > 1 && sizeof(schar) == 1) {
+    for (int i = 0; i < pat.length(); i++) {
+      uc16 c = pat[i];
+      if (c > String::kMaxAsciiCharCode) {
+        return -1;
+      }
+    }
+  }
+  if (pat.length() < kBMMinPatternLength) {
+    // We don't believe fancy searching can ever be more efficient.
+    // The max shift of Boyer-Moore on a pattern of this length does
+    // not compensate for the overhead.
+    return SimpleIndexOf(sub, pat, start_index);
+  }
+  // Try algorithms in order of increasing setup cost and expected performance.
+  bool complete;
+  int idx = SimpleIndexOf(sub, pat, start_index, &complete);
+  if (complete) return idx;
+  idx = BoyerMooreHorspool(sub, pat, idx, &complete);
+  if (complete) return idx;
+  return BoyerMooreIndexOf(sub, pat, idx);
+}
+
+// Perform string match of pattern on subject, starting at start index.
+// Caller must ensure that 0 <= start_index <= sub->length(),
+// and should check that pat->length() + start_index <= sub->length()
+int Runtime::StringMatch(Handle<String> sub,
+                         Handle<String> pat,
+                         int start_index) {
+  ASSERT(0 <= start_index);
+  ASSERT(start_index <= sub->length());
+
+  int pattern_length = pat->length();
+  if (pattern_length == 0) return start_index;
+
+  int subject_length = sub->length();
+  if (start_index + pattern_length > subject_length) return -1;
+
+  if (!sub->IsFlat()) {
+    FlattenString(sub);
+  }
+  // Searching for one specific character is common.  For one
+  // character patterns linear search is necessary, so any smart
+  // algorithm is unnecessary overhead.
+  if (pattern_length == 1) {
+    AssertNoAllocation no_heap_allocation;  // ensure vectors stay valid
+    if (sub->IsAsciiRepresentation()) {
+      uc16 pchar = pat->Get(0);
+      if (pchar > String::kMaxAsciiCharCode) {
+        return -1;
+      }
+      Vector<const char> ascii_vector =
+        sub->ToAsciiVector().SubVector(start_index, subject_length);
+      const void* pos = memchr(ascii_vector.start(),
+                               static_cast<const char>(pchar),
+                               static_cast<size_t>(ascii_vector.length()));
+      if (pos == NULL) {
+        return -1;
+      }
+      return reinterpret_cast<const char*>(pos) - ascii_vector.start()
+          + start_index;
+    }
+    return SingleCharIndexOf(sub->ToUC16Vector(), pat->Get(0), start_index);
+  }
+
+  if (!pat->IsFlat()) {
+    FlattenString(pat);
+  }
+
+  AssertNoAllocation no_heap_allocation;  // ensure vectors stay valid
+  // Dispatch on the types of the strings.
+  if (pat->IsAsciiRepresentation()) {
+    Vector<const char> pat_vector = pat->ToAsciiVector();
+    if (sub->IsAsciiRepresentation()) {
+      return StringMatchStrategy(sub->ToAsciiVector(), pat_vector, start_index);
+    }
+    return StringMatchStrategy(sub->ToUC16Vector(), pat_vector, start_index);
+  }
+  Vector<const uc16> pat_vector = pat->ToUC16Vector();
+  if (sub->IsAsciiRepresentation()) {
+    return StringMatchStrategy(sub->ToAsciiVector(), pat_vector, start_index);
+  }
+  return StringMatchStrategy(sub->ToUC16Vector(), pat_vector, start_index);
+}
+
+
+static Object* Runtime_StringIndexOf(Arguments args) {
+  HandleScope scope;  // create a new handle scope
+  ASSERT(args.length() == 3);
+
+  CONVERT_ARG_CHECKED(String, sub, 0);
+  CONVERT_ARG_CHECKED(String, pat, 1);
+
+  Object* index = args[2];
+  uint32_t start_index;
+  if (!Array::IndexFromObject(index, &start_index)) return Smi::FromInt(-1);
+
+  RUNTIME_ASSERT(start_index <= static_cast<uint32_t>(sub->length()));
+  int position = Runtime::StringMatch(sub, pat, start_index);
+  return Smi::FromInt(position);
+}
+
+
+static Object* Runtime_StringLastIndexOf(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 3);
+
+  CONVERT_CHECKED(String, sub, args[0]);
+  CONVERT_CHECKED(String, pat, args[1]);
+  Object* index = args[2];
+
+  sub->TryFlattenIfNotFlat();
+  pat->TryFlattenIfNotFlat();
+
+  uint32_t start_index;
+  if (!Array::IndexFromObject(index, &start_index)) return Smi::FromInt(-1);
+
+  uint32_t pattern_length = pat->length();
+  uint32_t sub_length = sub->length();
+
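+  // Clamp the start index so that a match starting there can still fit
+  // within the subject.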
+  if (start_index + pattern_length > sub_length) {
+    start_index = sub_length - pattern_length;
+  }
+
+  for (int i = start_index; i >= 0; i--) {
+    bool found = true;
+    for (uint32_t j = 0; j < pattern_length; j++) {
+      if (sub->Get(i + j) != pat->Get(j)) {
+        found = false;
+        break;
+      }
+    }
+    if (found) return Smi::FromInt(i);
+  }
+
+  return Smi::FromInt(-1);
+}
+
+
+static Object* Runtime_StringLocaleCompare(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_CHECKED(String, str1, args[0]);
+  CONVERT_CHECKED(String, str2, args[1]);
+
+  if (str1 == str2) return Smi::FromInt(0);  // Equal.
+  int str1_length = str1->length();
+  int str2_length = str2->length();
+
+  // Decide trivial cases without flattening.
+  if (str1_length == 0) {
+    if (str2_length == 0) return Smi::FromInt(0);  // Equal.
+    return Smi::FromInt(-str2_length);
+  } else {
+    if (str2_length == 0) return Smi::FromInt(str1_length);
+  }
+
+  int end = str1_length < str2_length ? str1_length : str2_length;
+
+  // No need to flatten if we are going to find the answer on the first
+  // character.  At this point we know there is at least one character
+  // in each string, due to the trivial case handling above.
+  int d = str1->Get(0) - str2->Get(0);
+  if (d != 0) return Smi::FromInt(d);
+
+  str1->TryFlattenIfNotFlat();
+  str2->TryFlattenIfNotFlat();
+
+  static StringInputBuffer buf1;
+  static StringInputBuffer buf2;
+
+  buf1.Reset(str1);
+  buf2.Reset(str2);
+
+  for (int i = 0; i < end; i++) {
+    uint16_t char1 = buf1.GetNext();
+    uint16_t char2 = buf2.GetNext();
+    if (char1 != char2) return Smi::FromInt(char1 - char2);
+  }
+
+  return Smi::FromInt(str1_length - str2_length);
+}
+
+
+static Object* Runtime_StringSlice(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 3);
+
+  CONVERT_CHECKED(String, value, args[0]);
+  CONVERT_DOUBLE_CHECKED(from_number, args[1]);
+  CONVERT_DOUBLE_CHECKED(to_number, args[2]);
+
+  int start = FastD2I(from_number);
+  int end = FastD2I(to_number);
+
+  RUNTIME_ASSERT(end >= start);
+  RUNTIME_ASSERT(start >= 0);
+  RUNTIME_ASSERT(end <= value->length());
+  return value->Slice(start, end);
+}
+
+
+static Object* Runtime_StringMatch(Arguments args) {
+  ASSERT_EQ(3, args.length());
+
+  CONVERT_ARG_CHECKED(String, subject, 0);
+  CONVERT_ARG_CHECKED(JSRegExp, regexp, 1);
+  CONVERT_ARG_CHECKED(JSArray, regexp_info, 2);
+  HandleScope handles;
+
+  Handle<Object> match = RegExpImpl::Exec(regexp, subject, 0, regexp_info);
+
+  if (match.is_null()) {
+    return Failure::Exception();
+  }
+  if (match->IsNull()) {
+    return Heap::null_value();
+  }
+  int length = subject->length();
+
+  CompilationZoneScope zone_space(DELETE_ON_EXIT);
+  ZoneList<int> offsets(8);
+  do {
+    int start;
+    int end;
+    {
+      AssertNoAllocation no_alloc;
+      FixedArray* elements = FixedArray::cast(regexp_info->elements());
+      start = Smi::cast(elements->get(RegExpImpl::kFirstCapture))->value();
+      end = Smi::cast(elements->get(RegExpImpl::kFirstCapture + 1))->value();
+    }
+    offsets.Add(start);
+    offsets.Add(end);
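+    // If the match was empty, advance by one character to avoid finding the
+    // same empty match again and looping forever.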
+    int index = start < end ? end : end + 1;
+    if (index > length) break;
+    match = RegExpImpl::Exec(regexp, subject, index, regexp_info);
+    if (match.is_null()) {
+      return Failure::Exception();
+    }
+  } while (!match->IsNull());
+  int matches = offsets.length() / 2;
+  Handle<FixedArray> elements = Factory::NewFixedArray(matches);
+  for (int i = 0; i < matches ; i++) {
+    int from = offsets.at(i * 2);
+    int to = offsets.at(i * 2 + 1);
+    elements->set(i, *Factory::NewStringSlice(subject, from, to));
+  }
+  Handle<JSArray> result = Factory::NewJSArrayWithElements(elements);
+  result->set_length(Smi::FromInt(matches));
+  return *result;
+}
+
+
+static Object* Runtime_NumberToRadixString(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  // Fast case where the result is a one character string.
+  if (args[0]->IsSmi() && args[1]->IsSmi()) {
+    int value = Smi::cast(args[0])->value();
+    int radix = Smi::cast(args[1])->value();
+    if (value >= 0 && value < radix) {
+      RUNTIME_ASSERT(radix <= 36);
+      // Character array used for conversion.
+      static const char kCharTable[] = "0123456789abcdefghijklmnopqrstuvwxyz";
+      return Heap::LookupSingleCharacterStringFromCode(kCharTable[value]);
+    }
+  }
+
+  // Slow case.
+  CONVERT_DOUBLE_CHECKED(value, args[0]);
+  if (isnan(value)) {
+    return Heap::AllocateStringFromAscii(CStrVector("NaN"));
+  }
+  if (isinf(value)) {
+    if (value < 0) {
+      return Heap::AllocateStringFromAscii(CStrVector("-Infinity"));
+    }
+    return Heap::AllocateStringFromAscii(CStrVector("Infinity"));
+  }
+  CONVERT_DOUBLE_CHECKED(radix_number, args[1]);
+  int radix = FastD2I(radix_number);
+  RUNTIME_ASSERT(2 <= radix && radix <= 36);
+  char* str = DoubleToRadixCString(value, radix);
+  Object* result = Heap::AllocateStringFromAscii(CStrVector(str));
+  DeleteArray(str);
+  return result;
+}
+
+
+static Object* Runtime_NumberToFixed(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_DOUBLE_CHECKED(value, args[0]);
+  if (isnan(value)) {
+    return Heap::AllocateStringFromAscii(CStrVector("NaN"));
+  }
+  if (isinf(value)) {
+    if (value < 0) {
+      return Heap::AllocateStringFromAscii(CStrVector("-Infinity"));
+    }
+    return Heap::AllocateStringFromAscii(CStrVector("Infinity"));
+  }
+  CONVERT_DOUBLE_CHECKED(f_number, args[1]);
+  int f = FastD2I(f_number);
+  RUNTIME_ASSERT(f >= 0);
+  char* str = DoubleToFixedCString(value, f);
+  Object* res = Heap::AllocateStringFromAscii(CStrVector(str));
+  DeleteArray(str);
+  return res;
+}
+
+
+static Object* Runtime_NumberToExponential(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_DOUBLE_CHECKED(value, args[0]);
+  if (isnan(value)) {
+    return Heap::AllocateStringFromAscii(CStrVector("NaN"));
+  }
+  if (isinf(value)) {
+    if (value < 0) {
+      return Heap::AllocateStringFromAscii(CStrVector("-Infinity"));
+    }
+    return Heap::AllocateStringFromAscii(CStrVector("Infinity"));
+  }
+  CONVERT_DOUBLE_CHECKED(f_number, args[1]);
+  int f = FastD2I(f_number);
+  RUNTIME_ASSERT(f >= -1 && f <= 20);
+  char* str = DoubleToExponentialCString(value, f);
+  Object* res = Heap::AllocateStringFromAscii(CStrVector(str));
+  DeleteArray(str);
+  return res;
+}
+
+
+static Object* Runtime_NumberToPrecision(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_DOUBLE_CHECKED(value, args[0]);
+  if (isnan(value)) {
+    return Heap::AllocateStringFromAscii(CStrVector("NaN"));
+  }
+  if (isinf(value)) {
+    if (value < 0) {
+      return Heap::AllocateStringFromAscii(CStrVector("-Infinity"));
+    }
+    return Heap::AllocateStringFromAscii(CStrVector("Infinity"));
+  }
+  CONVERT_DOUBLE_CHECKED(f_number, args[1]);
+  int f = FastD2I(f_number);
+  RUNTIME_ASSERT(f >= 1 && f <= 21);
+  char* str = DoubleToPrecisionCString(value, f);
+  Object* res = Heap::AllocateStringFromAscii(CStrVector(str));
+  DeleteArray(str);
+  return res;
+}
+
+
+// Returns a single character string where first character equals
+// string->Get(index).
+static Handle<Object> GetCharAt(Handle<String> string, uint32_t index) {
+  if (index < static_cast<uint32_t>(string->length())) {
+    string->TryFlattenIfNotFlat();
+    return LookupSingleCharacterStringFromCode(
+        string->Get(index));
+  }
+  return Execution::CharAt(string, index);
+}
+
+
+Object* Runtime::GetElementOrCharAt(Handle<Object> object, uint32_t index) {
+  // Handle [] indexing on Strings
+  if (object->IsString()) {
+    Handle<Object> result = GetCharAt(Handle<String>::cast(object), index);
+    if (!result->IsUndefined()) return *result;
+  }
+
+  // Handle [] indexing on String objects
+  if (object->IsStringObjectWithCharacterAt(index)) {
+    Handle<JSValue> js_value = Handle<JSValue>::cast(object);
+    Handle<Object> result =
+        GetCharAt(Handle<String>(String::cast(js_value->value())), index);
+    if (!result->IsUndefined()) return *result;
+  }
+
+  if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
+    Handle<Object> prototype = GetPrototype(object);
+    return prototype->GetElement(index);
+  }
+
+  return object->GetElement(index);
+}
+
+
+Object* Runtime::GetObjectProperty(Handle<Object> object, Handle<Object> key) {
+  HandleScope scope;
+
+  if (object->IsUndefined() || object->IsNull()) {
+    Handle<Object> args[2] = { key, object };
+    Handle<Object> error =
+        Factory::NewTypeError("non_object_property_load",
+                              HandleVector(args, 2));
+    return Top::Throw(*error);
+  }
+
+  // Check if the given key is an array index.
+  uint32_t index;
+  if (Array::IndexFromObject(*key, &index)) {
+    return GetElementOrCharAt(object, index);
+  }
+
+  // Convert the key to a string - possibly by calling back into JavaScript.
+  Handle<String> name;
+  if (key->IsString()) {
+    name = Handle<String>::cast(key);
+  } else {
+    bool has_pending_exception = false;
+    Handle<Object> converted =
+        Execution::ToString(key, &has_pending_exception);
+    if (has_pending_exception) return Failure::Exception();
+    name = Handle<String>::cast(converted);
+  }
+
+  // Check if the name is trivially convertible to an index and get
+  // the element if so.
+  if (name->AsArrayIndex(&index)) {
+    return GetElementOrCharAt(object, index);
+  } else {
+    PropertyAttributes attr;
+    return object->GetProperty(*name, &attr);
+  }
+}
+
+
+static Object* Runtime_GetProperty(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  Handle<Object> object = args.at<Object>(0);
+  Handle<Object> key = args.at<Object>(1);
+
+  return Runtime::GetObjectProperty(object, key);
+}
+
+
+// KeyedStringGetProperty is called from KeyedLoadIC::GenerateGeneric.
+static Object* Runtime_KeyedGetProperty(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  // Fast cases for getting named properties of the receiver JSObject
+  // itself.
+  //
+  // The global proxy object has to be excluded since LocalLookup on
+  // the global proxy object can return a valid result even though the
+  // global proxy object never has properties.  This is the case
+  // because the global proxy object forwards everything to its hidden
+  // prototype including local lookups.
+  //
+  // Additionally, we need to make sure that we do not cache results
+  // for objects that require access checks.
+  if (args[0]->IsJSObject() &&
+      !args[0]->IsJSGlobalProxy() &&
+      !args[0]->IsAccessCheckNeeded() &&
+      args[1]->IsString()) {
+    JSObject* receiver = JSObject::cast(args[0]);
+    String* key = String::cast(args[1]);
+    if (receiver->HasFastProperties()) {
+      // Attempt to use lookup cache.
+      Map* receiver_map = receiver->map();
+      int offset = KeyedLookupCache::Lookup(receiver_map, key);
+      if (offset != -1) {
+        Object* value = receiver->FastPropertyAt(offset);
+        return value->IsTheHole() ? Heap::undefined_value() : value;
+      }
+      // Lookup cache miss.  Perform lookup and update the cache if appropriate.
+      LookupResult result;
+      receiver->LocalLookup(key, &result);
+      if (result.IsProperty() && result.IsLoaded() && result.type() == FIELD) {
+        int offset = result.GetFieldIndex();
+        KeyedLookupCache::Update(receiver_map, key, offset);
+        return receiver->FastPropertyAt(offset);
+      }
+    } else {
+      // Attempt dictionary lookup.
+      StringDictionary* dictionary = receiver->property_dictionary();
+      int entry = dictionary->FindEntry(key);
+      if ((entry != StringDictionary::kNotFound) &&
+          (dictionary->DetailsAt(entry).type() == NORMAL)) {
+        Object* value = dictionary->ValueAt(entry);
+        if (!receiver->IsGlobalObject()) return value;
+        value = JSGlobalPropertyCell::cast(value)->value();
+        if (!value->IsTheHole()) return value;
+        // If value is the hole do the general lookup.
+      }
+    }
+  }
+
+  // Fall back to GetObjectProperty.
+  return Runtime::GetObjectProperty(args.at<Object>(0),
+                                    args.at<Object>(1));
+}
+
+
+Object* Runtime::SetObjectProperty(Handle<Object> object,
+                                   Handle<Object> key,
+                                   Handle<Object> value,
+                                   PropertyAttributes attr) {
+  HandleScope scope;
+
+  if (object->IsUndefined() || object->IsNull()) {
+    Handle<Object> args[2] = { key, object };
+    Handle<Object> error =
+        Factory::NewTypeError("non_object_property_store",
+                              HandleVector(args, 2));
+    return Top::Throw(*error);
+  }
+
+  // If the object isn't a JavaScript object, we ignore the store.
+  if (!object->IsJSObject()) return *value;
+
+  Handle<JSObject> js_object = Handle<JSObject>::cast(object);
+
+  // Check if the given key is an array index.
+  uint32_t index;
+  if (Array::IndexFromObject(*key, &index)) {
+    ASSERT(attr == NONE);
+
+    // In Firefox/SpiderMonkey, Safari and Opera you can access the characters
+    // of a string using [] notation.  We need to support this too in
+    // JavaScript.
+    // In the case of a String object we just need to redirect the assignment to
+    // the underlying string if the index is in range.  Since the underlying
+    // string does nothing with the assignment, we can ignore such
+    // assignments.
+    if (js_object->IsStringObjectWithCharacterAt(index)) {
+      return *value;
+    }
+
+    Handle<Object> result = SetElement(js_object, index, value);
+    if (result.is_null()) return Failure::Exception();
+    return *value;
+  }
+
+  if (key->IsString()) {
+    Handle<Object> result;
+    if (Handle<String>::cast(key)->AsArrayIndex(&index)) {
+      ASSERT(attr == NONE);
+      result = SetElement(js_object, index, value);
+    } else {
+      Handle<String> key_string = Handle<String>::cast(key);
+      key_string->TryFlattenIfNotFlat();
+      result = SetProperty(js_object, key_string, value, attr);
+    }
+    if (result.is_null()) return Failure::Exception();
+    return *value;
+  }
+
+  // Call-back into JavaScript to convert the key to a string.
+  bool has_pending_exception = false;
+  Handle<Object> converted = Execution::ToString(key, &has_pending_exception);
+  if (has_pending_exception) return Failure::Exception();
+  Handle<String> name = Handle<String>::cast(converted);
+
+  if (name->AsArrayIndex(&index)) {
+    ASSERT(attr == NONE);
+    return js_object->SetElement(index, *value);
+  } else {
+    return js_object->SetProperty(*name, *value, attr);
+  }
+}
+
+
+Object* Runtime::ForceSetObjectProperty(Handle<JSObject> js_object,
+                                        Handle<Object> key,
+                                        Handle<Object> value,
+                                        PropertyAttributes attr) {
+  HandleScope scope;
+
+  // Check if the given key is an array index.
+  uint32_t index;
+  if (Array::IndexFromObject(*key, &index)) {
+    ASSERT(attr == NONE);
+
+    // In Firefox/SpiderMonkey, Safari and Opera you can access the characters
+    // of a string using [] notation.  We need to support this too in
+    // JavaScript.
+    // In the case of a String object we just need to redirect the assignment to
+    // the underlying string if the index is in range.  Since the underlying
+    // string does nothing with the assignment, we can ignore such
+    // assignments.
+    if (js_object->IsStringObjectWithCharacterAt(index)) {
+      return *value;
+    }
+
+    return js_object->SetElement(index, *value);
+  }
+
+  if (key->IsString()) {
+    if (Handle<String>::cast(key)->AsArrayIndex(&index)) {
+      ASSERT(attr == NONE);
+      return js_object->SetElement(index, *value);
+    } else {
+      Handle<String> key_string = Handle<String>::cast(key);
+      key_string->TryFlattenIfNotFlat();
+      return js_object->IgnoreAttributesAndSetLocalProperty(*key_string,
+                                                            *value,
+                                                            attr);
+    }
+  }
+
+  // Call-back into JavaScript to convert the key to a string.
+  bool has_pending_exception = false;
+  Handle<Object> converted = Execution::ToString(key, &has_pending_exception);
+  if (has_pending_exception) return Failure::Exception();
+  Handle<String> name = Handle<String>::cast(converted);
+
+  if (name->AsArrayIndex(&index)) {
+    ASSERT(attr == NONE);
+    return js_object->SetElement(index, *value);
+  } else {
+    return js_object->IgnoreAttributesAndSetLocalProperty(*name, *value, attr);
+  }
+}
+
+
+Object* Runtime::ForceDeleteObjectProperty(Handle<JSObject> js_object,
+                                           Handle<Object> key) {
+  HandleScope scope;
+
+  // Check if the given key is an array index.
+  uint32_t index;
+  if (Array::IndexFromObject(*key, &index)) {
+    // In Firefox/SpiderMonkey, Safari and Opera you can access the
+    // characters of a string using [] notation.  In the case of a
+    // String object we just need to redirect the deletion to the
+    // underlying string if the index is in range.  Since the
+    // underlying string does nothing with the deletion, we can ignore
+    // such deletions.
+    if (js_object->IsStringObjectWithCharacterAt(index)) {
+      return Heap::true_value();
+    }
+
+    return js_object->DeleteElement(index, JSObject::FORCE_DELETION);
+  }
+
+  Handle<String> key_string;
+  if (key->IsString()) {
+    key_string = Handle<String>::cast(key);
+  } else {
+    // Call-back into JavaScript to convert the key to a string.
+    bool has_pending_exception = false;
+    Handle<Object> converted = Execution::ToString(key, &has_pending_exception);
+    if (has_pending_exception) return Failure::Exception();
+    key_string = Handle<String>::cast(converted);
+  }
+
+  key_string->TryFlattenIfNotFlat();
+  return js_object->DeleteProperty(*key_string, JSObject::FORCE_DELETION);
+}
+
+
+static Object* Runtime_SetProperty(Arguments args) {
+  NoHandleAllocation ha;
+  RUNTIME_ASSERT(args.length() == 3 || args.length() == 4);
+
+  Handle<Object> object = args.at<Object>(0);
+  Handle<Object> key = args.at<Object>(1);
+  Handle<Object> value = args.at<Object>(2);
+
+  // Compute attributes.
+  PropertyAttributes attributes = NONE;
+  if (args.length() == 4) {
+    CONVERT_CHECKED(Smi, value_obj, args[3]);
+    int unchecked_value = value_obj->value();
+    // Only attribute bits should be set.
+    RUNTIME_ASSERT(
+        (unchecked_value & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
+    attributes = static_cast<PropertyAttributes>(unchecked_value);
+  }
+  return Runtime::SetObjectProperty(object, key, value, attributes);
+}
+
+
+// Set a local property, even if it is READ_ONLY.  If the property does not
+// exist, it will be added with attributes NONE.
+static Object* Runtime_IgnoreAttributesAndSetProperty(Arguments args) {
+  NoHandleAllocation ha;
+  RUNTIME_ASSERT(args.length() == 3 || args.length() == 4);
+  CONVERT_CHECKED(JSObject, object, args[0]);
+  CONVERT_CHECKED(String, name, args[1]);
+  // Compute attributes.
+  PropertyAttributes attributes = NONE;
+  if (args.length() == 4) {
+    CONVERT_CHECKED(Smi, value_obj, args[3]);
+    int unchecked_value = value_obj->value();
+    // Only attribute bits should be set.
+    RUNTIME_ASSERT(
+        (unchecked_value & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
+    attributes = static_cast<PropertyAttributes>(unchecked_value);
+  }
+
+  return object->
+      IgnoreAttributesAndSetLocalProperty(name, args[2], attributes);
+}
+
+
+static Object* Runtime_DeleteProperty(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_CHECKED(JSObject, object, args[0]);
+  CONVERT_CHECKED(String, key, args[1]);
+  return object->DeleteProperty(key, JSObject::NORMAL_DELETION);
+}
+
+
+static Object* HasLocalPropertyImplementation(Handle<JSObject> object,
+                                              Handle<String> key) {
+  if (object->HasLocalProperty(*key)) return Heap::true_value();
+  // Handle hidden prototypes.  If there's a hidden prototype above this thing
+  // then we have to check it for properties, because they are supposed to
+  // look like they are on this object.
+  Handle<Object> proto(object->GetPrototype());
+  if (proto->IsJSObject() &&
+      Handle<JSObject>::cast(proto)->map()->is_hidden_prototype()) {
+    return HasLocalPropertyImplementation(Handle<JSObject>::cast(proto), key);
+  }
+  return Heap::false_value();
+}
+
+
+static Object* Runtime_HasLocalProperty(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+  CONVERT_CHECKED(String, key, args[1]);
+
+  Object* obj = args[0];
+  // Only JS objects can have properties.
+  if (obj->IsJSObject()) {
+    JSObject* object = JSObject::cast(obj);
+    // Fast case - no interceptors.
+    if (object->HasRealNamedProperty(key)) return Heap::true_value();
+    // Slow case.  Either it's not there or we have an interceptor.  We should
+    // have handles for this kind of deal.
+    HandleScope scope;
+    return HasLocalPropertyImplementation(Handle<JSObject>(object),
+                                          Handle<String>(key));
+  } else if (obj->IsString()) {
+    // Well, there is one exception:  Handle [] on strings.
+    uint32_t index;
+    if (key->AsArrayIndex(&index)) {
+      String* string = String::cast(obj);
+      if (index < static_cast<uint32_t>(string->length()))
+        return Heap::true_value();
+    }
+  }
+  return Heap::false_value();
+}
+
+
+static Object* Runtime_HasProperty(Arguments args) {
+  NoHandleAllocation na;
+  ASSERT(args.length() == 2);
+
+  // Only JS objects can have properties.
+  if (args[0]->IsJSObject()) {
+    JSObject* object = JSObject::cast(args[0]);
+    CONVERT_CHECKED(String, key, args[1]);
+    if (object->HasProperty(key)) return Heap::true_value();
+  }
+  return Heap::false_value();
+}
+
+
+static Object* Runtime_HasElement(Arguments args) {
+  NoHandleAllocation na;
+  ASSERT(args.length() == 2);
+
+  // Only JS objects can have elements.
+  if (args[0]->IsJSObject()) {
+    JSObject* object = JSObject::cast(args[0]);
+    CONVERT_CHECKED(Smi, index_obj, args[1]);
+    uint32_t index = index_obj->value();
+    if (object->HasElement(index)) return Heap::true_value();
+  }
+  return Heap::false_value();
+}
+
+
+static Object* Runtime_IsPropertyEnumerable(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_CHECKED(JSObject, object, args[0]);
+  CONVERT_CHECKED(String, key, args[1]);
+
+  uint32_t index;
+  if (key->AsArrayIndex(&index)) {
+    return Heap::ToBoolean(object->HasElement(index));
+  }
+
+  PropertyAttributes att = object->GetLocalPropertyAttribute(key);
+  return Heap::ToBoolean(att != ABSENT && (att & DONT_ENUM) == 0);
+}
+
+
+static Object* Runtime_GetPropertyNames(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(JSObject, object, 0);
+  return *GetKeysFor(object);
+}
+
+
+// Returns either a FixedArray (as Runtime_GetPropertyNames does)
+// or, if the given object has an enum cache that contains
+// all enumerable properties of the object and its prototypes
+// have none, the map of the object. This is used to speed up
+// the check for deletions during a for-in.
+static Object* Runtime_GetPropertyNamesFast(Arguments args) {
+  ASSERT(args.length() == 1);
+
+  CONVERT_CHECKED(JSObject, raw_object, args[0]);
+
+  if (raw_object->IsSimpleEnum()) return raw_object->map();
+
+  HandleScope scope;
+  Handle<JSObject> object(raw_object);
+  Handle<FixedArray> content = GetKeysInFixedArrayFor(object,
+                                                      INCLUDE_PROTOS);
+
+  // Test again, since cache may have been built by preceding call.
+  if (object->IsSimpleEnum()) return object->map();
+
+  return *content;
+}
+
+
+static Object* Runtime_LocalKeys(Arguments args) {
+  ASSERT_EQ(args.length(), 1);
+  CONVERT_CHECKED(JSObject, raw_object, args[0]);
+  HandleScope scope;
+  Handle<JSObject> object(raw_object);
+  Handle<FixedArray> contents = GetKeysInFixedArrayFor(object,
+                                                       LOCAL_ONLY);
+  // Some fast paths through GetKeysInFixedArrayFor reuse a cached
+  // property array and since the result is mutable we have to create
+  // a fresh clone on each invocation.
+  int length = contents->length();
+  Handle<FixedArray> copy = Factory::NewFixedArray(length);
+  for (int i = 0; i < length; i++) {
+    Object* entry = contents->get(i);
+    if (entry->IsString()) {
+      copy->set(i, entry);
+    } else {
+      ASSERT(entry->IsNumber());
+      HandleScope scope;
+      Handle<Object> entry_handle(entry);
+      Handle<Object> entry_str = Factory::NumberToString(entry_handle);
+      copy->set(i, *entry_str);
+    }
+  }
+  return *Factory::NewJSArrayWithElements(copy);
+}
+
+
+static Object* Runtime_GetArgumentsProperty(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  // Compute the frame holding the arguments.
+  JavaScriptFrameIterator it;
+  it.AdvanceToArgumentsFrame();
+  JavaScriptFrame* frame = it.frame();
+
+  // Get the actual number of provided arguments.
+  const uint32_t n = frame->GetProvidedParametersCount();
+
+  // Try to convert the key to an index.  If successful and within
+  // range, return the argument from the frame.
+  uint32_t index;
+  if (Array::IndexFromObject(args[0], &index) && index < n) {
+    return frame->GetParameter(index);
+  }
+
+  // Convert the key to a string.
+  HandleScope scope;
+  bool exception = false;
+  Handle<Object> converted =
+      Execution::ToString(args.at<Object>(0), &exception);
+  if (exception) return Failure::Exception();
+  Handle<String> key = Handle<String>::cast(converted);
+
+  // Try to convert the string key into an array index.
+  if (key->AsArrayIndex(&index)) {
+    if (index < n) {
+      return frame->GetParameter(index);
+    } else {
+      return Top::initial_object_prototype()->GetElement(index);
+    }
+  }
+
+  // Handle special arguments properties.
+  if (key->Equals(Heap::length_symbol())) return Smi::FromInt(n);
+  if (key->Equals(Heap::callee_symbol())) return frame->function();
+
+  // Lookup in the initial Object.prototype object.
+  return Top::initial_object_prototype()->GetProperty(*key);
+}
+
+
+static Object* Runtime_ToFastProperties(Arguments args) {
+  ASSERT(args.length() == 1);
+  Handle<Object> object = args.at<Object>(0);
+  if (object->IsJSObject()) {
+    Handle<JSObject> js_object = Handle<JSObject>::cast(object);
+    js_object->TransformToFastProperties(0);
+  }
+  return *object;
+}
+
+
+static Object* Runtime_ToSlowProperties(Arguments args) {
+  ASSERT(args.length() == 1);
+  Handle<Object> object = args.at<Object>(0);
+  if (object->IsJSObject()) {
+    Handle<JSObject> js_object = Handle<JSObject>::cast(object);
+    js_object->NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
+  }
+  return *object;
+}
+
+
+static Object* Runtime_ToBool(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  return args[0]->ToBoolean();
+}
+
+
+// Returns the type string of a value; see ECMA-262, 11.4.3 (p 47).
+// Possible optimizations: put the type string into the oddballs.
+static Object* Runtime_Typeof(Arguments args) {
+  NoHandleAllocation ha;
+
+  Object* obj = args[0];
+  if (obj->IsNumber()) return Heap::number_symbol();
+  HeapObject* heap_obj = HeapObject::cast(obj);
+
+  // typeof an undetectable object is 'undefined'
+  if (heap_obj->map()->is_undetectable()) return Heap::undefined_symbol();
+
+  InstanceType instance_type = heap_obj->map()->instance_type();
+  if (instance_type < FIRST_NONSTRING_TYPE) {
+    return Heap::string_symbol();
+  }
+
+  switch (instance_type) {
+    case ODDBALL_TYPE:
+      if (heap_obj->IsTrue() || heap_obj->IsFalse()) {
+        return Heap::boolean_symbol();
+      }
+      if (heap_obj->IsNull()) {
+        return Heap::object_symbol();
+      }
+      ASSERT(heap_obj->IsUndefined());
+      return Heap::undefined_symbol();
+    case JS_FUNCTION_TYPE: case JS_REGEXP_TYPE:
+      return Heap::function_symbol();
+    default:
+      // For any kind of object not handled above, the spec rule for
+      // host objects allows us to return "object".
+      return Heap::object_symbol();
+  }
+}
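+// For example, with the cases above Typeof(null) yields "object" and the
+// typeof of a regular expression yields "function" (the JS_REGEXP_TYPE case).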
+
+
+static Object* Runtime_StringToNumber(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(String, subject, args[0]);
+  subject->TryFlattenIfNotFlat();
+  return Heap::NumberFromDouble(StringToDouble(subject, ALLOW_HEX));
+}
+
+
+static Object* Runtime_StringFromCharCodeArray(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_CHECKED(JSArray, codes, args[0]);
+  int length = Smi::cast(codes->length())->value();
+
+  // Check if the string can be ASCII.
+  int i;
+  for (i = 0; i < length; i++) {
+    Object* element = codes->GetElement(i);
+    CONVERT_NUMBER_CHECKED(int, chr, Int32, element);
+    if ((chr & 0xffff) > String::kMaxAsciiCharCode)
+      break;
+  }
+
+  Object* object = NULL;
+  if (i == length) {  // The string is ASCII.
+    object = Heap::AllocateRawAsciiString(length);
+  } else {  // The string is not ASCII.
+    object = Heap::AllocateRawTwoByteString(length);
+  }
+
+  if (object->IsFailure()) return object;
+  String* result = String::cast(object);
+  for (int i = 0; i < length; i++) {
+    Object* element = codes->GetElement(i);
+    CONVERT_NUMBER_CHECKED(int, chr, Int32, element);
+    result->Set(i, chr & 0xffff);
+  }
+  return result;
+}
+
+
+// kNotEscaped is generated by the following:
+//
+// #!/bin/perl
+// for (my $i = 0; $i < 256; $i++) {
+//   print "\n" if $i % 16 == 0;
+//   my $c = chr($i);
+//   my $escaped = 1;
+//   $escaped = 0 if $c =~ m#[A-Za-z0-9@*_+./-]#;
+//   print $escaped ? "0, " : "1, ";
+// }
+
+
+static bool IsNotEscaped(uint16_t character) {
+  // Only for 8-bit characters; the rest are always escaped (in a different way).
+  ASSERT(character < 256);
+  static const char kNotEscaped[256] = {
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,
+    0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  };
+  return kNotEscaped[character] != 0;
+}
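+// For example, the table marks 'a', '-' and '/' as safe, while ' ' is not,
+// so escaping "a b" produces "a%20b".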
+
+
+static Object* Runtime_URIEscape(Arguments args) {
+  const char hex_chars[] = "0123456789ABCDEF";
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(String, source, args[0]);
+
+  source->TryFlattenIfNotFlat();
+
+  int escaped_length = 0;
+  int length = source->length();
+  {
+    Access<StringInputBuffer> buffer(&runtime_string_input_buffer);
+    buffer->Reset(source);
+    while (buffer->has_more()) {
+      uint16_t character = buffer->GetNext();
+      if (character >= 256) {
+        escaped_length += 6;
+      } else if (IsNotEscaped(character)) {
+        escaped_length++;
+      } else {
+        escaped_length += 3;
+      }
+      // We don't allow strings that are longer than Smi range.
+      if (!Smi::IsValid(escaped_length)) {
+        Top::context()->mark_out_of_memory();
+        return Failure::OutOfMemoryException();
+      }
+    }
+  }
+  // No length change implies no change.  Return original string if no change.
+  if (escaped_length == length) {
+    return source;
+  }
+  Object* o = Heap::AllocateRawAsciiString(escaped_length);
+  if (o->IsFailure()) return o;
+  String* destination = String::cast(o);
+  int dest_position = 0;
+
+  Access<StringInputBuffer> buffer(&runtime_string_input_buffer);
+  buffer->Rewind();
+  while (buffer->has_more()) {
+    uint16_t chr = buffer->GetNext();
+    if (chr >= 256) {
+      destination->Set(dest_position, '%');
+      destination->Set(dest_position+1, 'u');
+      destination->Set(dest_position+2, hex_chars[chr >> 12]);
+      destination->Set(dest_position+3, hex_chars[(chr >> 8) & 0xf]);
+      destination->Set(dest_position+4, hex_chars[(chr >> 4) & 0xf]);
+      destination->Set(dest_position+5, hex_chars[chr & 0xf]);
+      dest_position += 6;
+    } else if (IsNotEscaped(chr)) {
+      destination->Set(dest_position, chr);
+      dest_position++;
+    } else {
+      destination->Set(dest_position, '%');
+      destination->Set(dest_position+1, hex_chars[chr >> 4]);
+      destination->Set(dest_position+2, hex_chars[chr & 0xf]);
+      dest_position += 3;
+    }
+  }
+  return destination;
+}
+
+
+static inline int TwoDigitHex(uint16_t character1, uint16_t character2) {
+  static const signed char kHexValue['g'] = {
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+    0,  1,  2,   3,  4,  5,  6,  7,  8,  9, -1, -1, -1, -1, -1, -1,
+    -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+    -1, 10, 11, 12, 13, 14, 15 };
+
+  if (character1 > 'f') return -1;
+  int hi = kHexValue[character1];
+  if (hi == -1) return -1;
+  if (character2 > 'f') return -1;
+  int lo = kHexValue[character2];
+  if (lo == -1) return -1;
+  return (hi << 4) + lo;
+}
+
+
+static inline int Unescape(String* source,
+                           int i,
+                           int length,
+                           int* step) {
+  uint16_t character = source->Get(i);
+  int32_t hi = 0;
+  int32_t lo = 0;
+  if (character == '%' &&
+      i <= length - 6 &&
+      source->Get(i + 1) == 'u' &&
+      (hi = TwoDigitHex(source->Get(i + 2),
+                        source->Get(i + 3))) != -1 &&
+      (lo = TwoDigitHex(source->Get(i + 4),
+                        source->Get(i + 5))) != -1) {
+    *step = 6;
+    return (hi << 8) + lo;
+  } else if (character == '%' &&
+      i <= length - 3 &&
+      (lo = TwoDigitHex(source->Get(i + 1),
+                        source->Get(i + 2))) != -1) {
+    *step = 3;
+    return lo;
+  } else {
+    *step = 1;
+    return character;
+  }
+}
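+// For example, for the source "%u0041%42x" Unescape yields 0x41 ('A') with
+// step 6 at index 0, 0x42 ('B') with step 3 at index 6, and 'x' with step 1
+// at index 9.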
+
+
+static Object* Runtime_URIUnescape(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(String, source, args[0]);
+
+  source->TryFlattenIfNotFlat();
+
+  bool ascii = true;
+  int length = source->length();
+
+  int unescaped_length = 0;
+  for (int i = 0; i < length; unescaped_length++) {
+    int step;
+    if (Unescape(source, i, length, &step) > String::kMaxAsciiCharCode) {
+      ascii = false;
+    }
+    i += step;
+  }
+
+  // No length change implies no change.  Return original string if no change.
+  if (unescaped_length == length)
+    return source;
+
+  Object* o = ascii ?
+              Heap::AllocateRawAsciiString(unescaped_length) :
+              Heap::AllocateRawTwoByteString(unescaped_length);
+  if (o->IsFailure()) return o;
+  String* destination = String::cast(o);
+
+  int dest_position = 0;
+  for (int i = 0; i < length; dest_position++) {
+    int step;
+    destination->Set(dest_position, Unescape(source, i, length, &step));
+    i += step;
+  }
+  return destination;
+}
+
+
+static Object* Runtime_StringParseInt(Arguments args) {
+  NoHandleAllocation ha;
+
+  CONVERT_CHECKED(String, s, args[0]);
+  CONVERT_DOUBLE_CHECKED(n, args[1]);
+  int radix = FastD2I(n);
+
+  s->TryFlattenIfNotFlat();
+
+  int len = s->length();
+  int i;
+
+  // Skip leading white space.
+  for (i = 0; i < len && Scanner::kIsWhiteSpace.get(s->Get(i)); i++) ;
+  if (i == len) return Heap::nan_value();
+
+  // Compute the sign (default to +).
+  int sign = 1;
+  if (s->Get(i) == '-') {
+    sign = -1;
+    i++;
+  } else if (s->Get(i) == '+') {
+    i++;
+  }
+
+  // Compute the radix if 0.
+  if (radix == 0) {
+    radix = 10;
+    if (i < len && s->Get(i) == '0') {
+      radix = 8;
+      if (i + 1 < len) {
+        int c = s->Get(i + 1);
+        if (c == 'x' || c == 'X') {
+          radix = 16;
+          i += 2;
+        }
+      }
+    }
+  } else if (radix == 16) {
+    // Allow 0x or 0X prefix if radix is 16.
+    if (i + 1 < len && s->Get(i) == '0') {
+      int c = s->Get(i + 1);
+      if (c == 'x' || c == 'X') i += 2;
+    }
+  }
+
+  RUNTIME_ASSERT(2 <= radix && radix <= 36);
+  double value;
+  int end_index = StringToInt(s, i, radix, &value);
+  if (end_index != i) {
+    return Heap::NumberFromDouble(sign * value);
+  }
+  return Heap::nan_value();
+}
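+// For example, with a radix argument of 0 the code above infers radix 16 for
+// "0x1A" (yielding 26) and radix 8 for "010" (yielding 8), which is the
+// legacy octal behaviour of parseInt.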
+
+
+static Object* Runtime_StringParseFloat(Arguments args) {
+  NoHandleAllocation ha;
+  CONVERT_CHECKED(String, str, args[0]);
+
+  // ECMA-262 section 15.1.2.3, empty string is NaN
+  double value = StringToDouble(str, ALLOW_TRAILING_JUNK, OS::nan_value());
+
+  // Create a number object from the value.
+  return Heap::NumberFromDouble(value);
+}
+
+
+static unibrow::Mapping<unibrow::ToUppercase, 128> to_upper_mapping;
+static unibrow::Mapping<unibrow::ToLowercase, 128> to_lower_mapping;
+
+
+template <class Converter>
+static Object* ConvertCaseHelper(String* s,
+                                 int length,
+                                 int input_string_length,
+                                 unibrow::Mapping<Converter, 128>* mapping) {
+  // We try this twice, once with the assumption that the result is no longer
+  // than the input and, if that assumption breaks, again with the exact
+  // length.  This may not be pretty, but it is nicer than what was here before
+  // and I hereby claim my vaffel-is.
+  //
+  // Allocate the resulting string.
+  //
+  // NOTE: This assumes that the upper/lower case of an ascii
+  // character is also ascii.  This is currently the case, but it
+  // might break in the future if we implement more context and locale
+  // dependent upper/lower conversions.
+  Object* o = s->IsAsciiRepresentation()
+      ? Heap::AllocateRawAsciiString(length)
+      : Heap::AllocateRawTwoByteString(length);
+  if (o->IsFailure()) return o;
+  String* result = String::cast(o);
+  bool has_changed_character = false;
+
+  // Convert all characters to upper or lower case (depending on the
+  // Converter), assuming that they will fit in the buffer.
+  Access<StringInputBuffer> buffer(&runtime_string_input_buffer);
+  buffer->Reset(s);
+  unibrow::uchar chars[Converter::kMaxWidth];
+  // We can assume that the string is not empty
+  uc32 current = buffer->GetNext();
+  for (int i = 0; i < length;) {
+    bool has_next = buffer->has_more();
+    uc32 next = has_next ? buffer->GetNext() : 0;
+    int char_length = mapping->get(current, next, chars);
+    if (char_length == 0) {
+      // The case conversion of this character is the character itself.
+      result->Set(i, current);
+      i++;
+    } else if (char_length == 1) {
+      // Common case: converting the letter resulted in one character.
+      ASSERT(static_cast<uc32>(chars[0]) != current);
+      result->Set(i, chars[0]);
+      has_changed_character = true;
+      i++;
+    } else if (length == input_string_length) {
+      // We've assumed that the result would be as long as the
+      // input but here is a character that converts to several
+      // characters.  No matter, we calculate the exact length
+      // of the result and try the whole thing again.
+      //
+      // Note that this leaves room for optimization.  We could just
+      // memcpy what we already have to the result string.  Also,
+      // the result string is the last object allocated we could
+      // "realloc" it and probably, in the vast majority of cases,
+      // extend the existing string to be able to hold the full
+      // result.
+      int next_length = 0;
+      if (has_next) {
+        next_length = mapping->get(next, 0, chars);
+        if (next_length == 0) next_length = 1;
+      }
+      int current_length = i + char_length + next_length;
+      while (buffer->has_more()) {
+        current = buffer->GetNext();
+        // NOTE: we use 0 as the next character here because, while
+        // the next character may affect what a character converts to,
+        // it does not in any case affect the length of what it converts
+        // to.
+        int char_length = mapping->get(current, 0, chars);
+        if (char_length == 0) char_length = 1;
+        current_length += char_length;
+        if (current_length > Smi::kMaxValue) {
+          Top::context()->mark_out_of_memory();
+          return Failure::OutOfMemoryException();
+        }
+      }
+      // Try again with the real length.
+      return Smi::FromInt(current_length);
+    } else {
+      for (int j = 0; j < char_length; j++) {
+        result->Set(i, chars[j]);
+        i++;
+      }
+      has_changed_character = true;
+    }
+    current = next;
+  }
+  if (has_changed_character) {
+    return result;
+  } else {
+    // If we didn't actually change anything in doing the conversion
+    // we simply return the original string and let the converted string
+    // become garbage; there is no reason to keep two identical strings
+    // alive.
+    return s;
+  }
+}
+
+
+template <class Converter>
+static Object* ConvertCase(Arguments args,
+                           unibrow::Mapping<Converter, 128>* mapping) {
+  NoHandleAllocation ha;
+
+  CONVERT_CHECKED(String, s, args[0]);
+  s->TryFlattenIfNotFlat();
+
+  int input_string_length = s->length();
+  // Assume that the string is not empty; we need this assumption later
+  if (input_string_length == 0) return s;
+  int length = input_string_length;
+
+  Object* answer = ConvertCaseHelper(s, length, length, mapping);
+  if (answer->IsSmi()) {
+    // Retry with correct length.
+    answer = ConvertCaseHelper(s, Smi::cast(answer)->value(), length, mapping);
+  }
+  return answer;  // This may be a failure.
+}
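+// For example, upper-casing a string containing 'ß' (which Unicode special
+// casing expands to "SS") produces a result longer than the input, so the
+// first ConvertCaseHelper pass returns the exact length as a Smi and we
+// retry with that length.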
+
+
+static Object* Runtime_StringToLowerCase(Arguments args) {
+  return ConvertCase<unibrow::ToLowercase>(args, &to_lower_mapping);
+}
+
+
+static Object* Runtime_StringToUpperCase(Arguments args) {
+  return ConvertCase<unibrow::ToUppercase>(args, &to_upper_mapping);
+}
+
+
+bool Runtime::IsUpperCaseChar(uint16_t ch) {
+  unibrow::uchar chars[unibrow::ToUppercase::kMaxWidth];
+  int char_length = to_upper_mapping.get(ch, 0, chars);
+  return char_length == 0;
+}
+
+
+static Object* Runtime_NumberToString(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  Object* number = args[0];
+  RUNTIME_ASSERT(number->IsNumber());
+
+  return Heap::NumberToString(number);
+}
+
+
+static Object* Runtime_NumberToInteger(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  Object* obj = args[0];
+  if (obj->IsSmi()) return obj;
+  CONVERT_DOUBLE_CHECKED(number, obj);
+  return Heap::NumberFromDouble(DoubleToInteger(number));
+}
+
+
+static Object* Runtime_NumberToJSUint32(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  Object* obj = args[0];
+  if (obj->IsSmi() && Smi::cast(obj)->value() >= 0) return obj;
+  CONVERT_NUMBER_CHECKED(int32_t, number, Uint32, obj);
+  return Heap::NumberFromUint32(number);
+}
+
+
+static Object* Runtime_NumberToJSInt32(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  Object* obj = args[0];
+  if (obj->IsSmi()) return obj;
+  CONVERT_DOUBLE_CHECKED(number, obj);
+  return Heap::NumberFromInt32(DoubleToInt32(number));
+}
+
+
+// Converts a Number to a Smi, if possible. Returns NaN if the number is not
+// a small integer.
+static Object* Runtime_NumberToSmi(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  Object* obj = args[0];
+  if (obj->IsSmi()) {
+    return obj;
+  }
+  if (obj->IsHeapNumber()) {
+    double value = HeapNumber::cast(obj)->value();
+    int int_value = FastD2I(value);
+    if (value == FastI2D(int_value) && Smi::IsValid(int_value)) {
+      return Smi::FromInt(int_value);
+    }
+  }
+  return Heap::nan_value();
+}
+
+
+static Object* Runtime_NumberAdd(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_CHECKED(y, args[1]);
+  return Heap::AllocateHeapNumber(x + y);
+}
+
+
+static Object* Runtime_NumberSub(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_CHECKED(y, args[1]);
+  return Heap::AllocateHeapNumber(x - y);
+}
+
+
+static Object* Runtime_NumberMul(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_CHECKED(y, args[1]);
+  return Heap::AllocateHeapNumber(x * y);
+}
+
+
+static Object* Runtime_NumberUnaryMinus(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  return Heap::AllocateHeapNumber(-x);
+}
+
+
+static Object* Runtime_NumberDiv(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_CHECKED(y, args[1]);
+  return Heap::NewNumberFromDouble(x / y);
+}
+
+
+static Object* Runtime_NumberMod(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_CHECKED(y, args[1]);
+
+#if defined WIN32 || defined _WIN64
+  // Workaround MS fmod bugs. ECMA-262 says:
+  // dividend is finite and divisor is an infinity => result equals dividend
+  // dividend is a zero and divisor is nonzero finite => result equals dividend
+  if (!(isfinite(x) && (!isfinite(y) && !isnan(y))) &&
+      !(x == 0 && (y != 0 && isfinite(y))))
+#endif
+  x = fmod(x, y);
+  // NewNumberFromDouble may return a Smi instead of a Number object
+  return Heap::NewNumberFromDouble(x);
+}
+
+
+static Object* Runtime_StringAdd(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+  CONVERT_CHECKED(String, str1, args[0]);
+  CONVERT_CHECKED(String, str2, args[1]);
+  return Heap::AllocateConsString(str1, str2);
+}
+
+
+template<typename sinkchar>
+static inline void StringBuilderConcatHelper(String* special,
+                                             sinkchar* sink,
+                                             FixedArray* fixed_array,
+                                             int array_length) {
+  int position = 0;
+  for (int i = 0; i < array_length; i++) {
+    Object* element = fixed_array->get(i);
+    if (element->IsSmi()) {
+      int encoded_slice = Smi::cast(element)->value();
+      int pos = StringBuilderSubstringPosition::decode(encoded_slice);
+      int len = StringBuilderSubstringLength::decode(encoded_slice);
+      String::WriteToFlat(special,
+                          sink + position,
+                          pos,
+                          pos + len);
+      position += len;
+    } else {
+      String* string = String::cast(element);
+      int element_length = string->length();
+      String::WriteToFlat(string, sink + position, 0, element_length);
+      position += element_length;
+    }
+  }
+}
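+// Smi elements of the fixed array encode substrings of the 'special' string:
+// the low 11 bits hold the substring length and the remaining bits hold the
+// start position, which is why the validation loop in
+// Runtime_StringBuilderConcat below decodes them with 'len & 0x7ff' and
+// 'len >> 11'.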
+
+
+static Object* Runtime_StringBuilderConcat(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+  CONVERT_CHECKED(JSArray, array, args[0]);
+  CONVERT_CHECKED(String, special, args[1]);
+  int special_length = special->length();
+  Object* smi_array_length = array->length();
+  if (!smi_array_length->IsSmi()) {
+    Top::context()->mark_out_of_memory();
+    return Failure::OutOfMemoryException();
+  }
+  int array_length = Smi::cast(smi_array_length)->value();
+  if (!array->HasFastElements()) {
+    return Top::Throw(Heap::illegal_argument_symbol());
+  }
+  FixedArray* fixed_array = FixedArray::cast(array->elements());
+  if (fixed_array->length() < array_length) {
+    array_length = fixed_array->length();
+  }
+
+  if (array_length == 0) {
+    return Heap::empty_string();
+  } else if (array_length == 1) {
+    Object* first = fixed_array->get(0);
+    if (first->IsString()) return first;
+  }
+
+  bool ascii = special->IsAsciiRepresentation();
+  int position = 0;
+  for (int i = 0; i < array_length; i++) {
+    Object* elt = fixed_array->get(i);
+    if (elt->IsSmi()) {
+      int len = Smi::cast(elt)->value();
+      int pos = len >> 11;
+      len &= 0x7ff;
+      if (pos + len > special_length) {
+        return Top::Throw(Heap::illegal_argument_symbol());
+      }
+      position += len;
+    } else if (elt->IsString()) {
+      String* element = String::cast(elt);
+      int element_length = element->length();
+      if (!Smi::IsValid(element_length + position)) {
+        Top::context()->mark_out_of_memory();
+        return Failure::OutOfMemoryException();
+      }
+      position += element_length;
+      if (ascii && !element->IsAsciiRepresentation()) {
+        ascii = false;
+      }
+    } else {
+      return Top::Throw(Heap::illegal_argument_symbol());
+    }
+  }
+
+  int length = position;
+  Object* object;
+
+  if (ascii) {
+    object = Heap::AllocateRawAsciiString(length);
+    if (object->IsFailure()) return object;
+    SeqAsciiString* answer = SeqAsciiString::cast(object);
+    StringBuilderConcatHelper(special,
+                              answer->GetChars(),
+                              fixed_array,
+                              array_length);
+    return answer;
+  } else {
+    object = Heap::AllocateRawTwoByteString(length);
+    if (object->IsFailure()) return object;
+    SeqTwoByteString* answer = SeqTwoByteString::cast(object);
+    StringBuilderConcatHelper(special,
+                              answer->GetChars(),
+                              fixed_array,
+                              array_length);
+    return answer;
+  }
+}
+
+
+static Object* Runtime_NumberOr(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
+  CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
+  return Heap::NumberFromInt32(x | y);
+}
+
+
+static Object* Runtime_NumberAnd(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
+  CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
+  return Heap::NumberFromInt32(x & y);
+}
+
+
+static Object* Runtime_NumberXor(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
+  CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
+  return Heap::NumberFromInt32(x ^ y);
+}
+
+
+static Object* Runtime_NumberNot(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
+  return Heap::NumberFromInt32(~x);
+}
+
+
+static Object* Runtime_NumberShl(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
+  CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
+  return Heap::NumberFromInt32(x << (y & 0x1f));
+}
+
+
+static Object* Runtime_NumberShr(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_NUMBER_CHECKED(uint32_t, x, Uint32, args[0]);
+  CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
+  return Heap::NumberFromUint32(x >> (y & 0x1f));
+}
+
+
+static Object* Runtime_NumberSar(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
+  CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
+  return Heap::NumberFromInt32(ArithmeticShiftRight(x, y & 0x1f));
+}
+
+
+static Object* Runtime_NumberEquals(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_CHECKED(y, args[1]);
+  if (isnan(x)) return Smi::FromInt(NOT_EQUAL);
+  if (isnan(y)) return Smi::FromInt(NOT_EQUAL);
+  if (x == y) return Smi::FromInt(EQUAL);
+  Object* result;
+  if ((fpclassify(x) == FP_ZERO) && (fpclassify(y) == FP_ZERO)) {
+    result = Smi::FromInt(EQUAL);
+  } else {
+    result = Smi::FromInt(NOT_EQUAL);
+  }
+  return result;
+}
+
+
+static Object* Runtime_StringEquals(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_CHECKED(String, x, args[0]);
+  CONVERT_CHECKED(String, y, args[1]);
+
+  bool not_equal = !x->Equals(y);
+  // This is slightly convoluted because the value that signifies
+  // equality is 0 and inequality is 1 so we have to negate the result
+  // from String::Equals.
+  ASSERT(not_equal == 0 || not_equal == 1);
+  STATIC_CHECK(EQUAL == 0);
+  STATIC_CHECK(NOT_EQUAL == 1);
+  return Smi::FromInt(not_equal);
+}
+
+
+static Object* Runtime_NumberCompare(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 3);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_CHECKED(y, args[1]);
+  if (isnan(x) || isnan(y)) return args[2];
+  if (x == y) return Smi::FromInt(EQUAL);
+  if (isless(x, y)) return Smi::FromInt(LESS);
+  return Smi::FromInt(GREATER);
+}
+
+
+// Compare two Smis as if they were converted to strings and then
+// compared lexicographically.
+static Object* Runtime_SmiLexicographicCompare(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  // Arrays for the individual characters of the two Smis.  Smis are
+  // 31 bit integers and 10 decimal digits are therefore enough.
+  static int x_elms[10];
+  static int y_elms[10];
+
+  // Extract the integer values from the Smis.
+  CONVERT_CHECKED(Smi, x, args[0]);
+  CONVERT_CHECKED(Smi, y, args[1]);
+  int x_value = x->value();
+  int y_value = y->value();
+
+  // If the integers are equal so are the string representations.
+  if (x_value == y_value) return Smi::FromInt(EQUAL);
+
+  // If one of the integers is zero, the normal integer order is the
+  // same as the lexicographic order of the string representations.
+  if (x_value == 0 || y_value == 0) return Smi::FromInt(x_value - y_value);
+
+  // If only one of the integers is negative the negative number is
+  // smallest because the char code of '-' is less than the char code
+  // of any digit.  Otherwise, we make both values positive.
+  if (x_value < 0 || y_value < 0) {
+    if (y_value >= 0) return Smi::FromInt(LESS);
+    if (x_value >= 0) return Smi::FromInt(GREATER);
+    x_value = -x_value;
+    y_value = -y_value;
+  }
+
+  // Convert the integers to arrays of their decimal digits.
+  int x_index = 0;
+  int y_index = 0;
+  while (x_value > 0) {
+    x_elms[x_index++] = x_value % 10;
+    x_value /= 10;
+  }
+  while (y_value > 0) {
+    y_elms[y_index++] = y_value % 10;
+    y_value /= 10;
+  }
+
+  // Loop through the arrays of decimal digits finding the first place
+  // where they differ.
+  while (--x_index >= 0 && --y_index >= 0) {
+    int diff = x_elms[x_index] - y_elms[y_index];
+    if (diff != 0) return Smi::FromInt(diff);
+  }
+
+  // If one array is a suffix of the other array, the longest array is
+  // the representation of the largest of the Smis in the
+  // lexicographic ordering.
+  return Smi::FromInt(x_index - y_index);
+}
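+// For example, comparing the Smis 9 and 10 yields a positive result because
+// "9" sorts after "10" lexicographically ('9' > '1'), even though 9 < 10
+// numerically.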
+
+
+static Object* Runtime_StringCompare(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_CHECKED(String, x, args[0]);
+  CONVERT_CHECKED(String, y, args[1]);
+
+  // A few fast case tests before we flatten.
+  if (x == y) return Smi::FromInt(EQUAL);
+  if (y->length() == 0) {
+    if (x->length() == 0) return Smi::FromInt(EQUAL);
+    return Smi::FromInt(GREATER);
+  } else if (x->length() == 0) {
+    return Smi::FromInt(LESS);
+  }
+
+  int d = x->Get(0) - y->Get(0);
+  if (d < 0) return Smi::FromInt(LESS);
+  else if (d > 0) return Smi::FromInt(GREATER);
+
+  x->TryFlattenIfNotFlat();
+  y->TryFlattenIfNotFlat();
+
+  static StringInputBuffer bufx;
+  static StringInputBuffer bufy;
+  bufx.Reset(x);
+  bufy.Reset(y);
+  while (bufx.has_more() && bufy.has_more()) {
+    int d = bufx.GetNext() - bufy.GetNext();
+    if (d < 0) return Smi::FromInt(LESS);
+    else if (d > 0) return Smi::FromInt(GREATER);
+  }
+
+  // x is a (non-trivial) prefix of y:
+  if (bufy.has_more()) return Smi::FromInt(LESS);
+  // y is a prefix of x:
+  return Smi::FromInt(bufx.has_more() ? GREATER : EQUAL);
+}
+
+
+static Object* Runtime_Math_abs(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  return Heap::AllocateHeapNumber(fabs(x));
+}
+
+
+static Object* Runtime_Math_acos(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  return TranscendentalCache::Get(TranscendentalCache::ACOS, x);
+}
+
+
+static Object* Runtime_Math_asin(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  return TranscendentalCache::Get(TranscendentalCache::ASIN, x);
+}
+
+
+static Object* Runtime_Math_atan(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  return TranscendentalCache::Get(TranscendentalCache::ATAN, x);
+}
+
+
+static Object* Runtime_Math_atan2(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_CHECKED(y, args[1]);
+  double result;
+  if (isinf(x) && isinf(y)) {
+    // Make sure that the result in case of two infinite arguments
+    // is a multiple of Pi / 4. The sign of the result is determined
+    // by the first argument (x) and the sign of the second argument
+    // determines the multiplier: one or three.
+    static double kPiDividedBy4 = 0.78539816339744830962;
+    int multiplier = (x < 0) ? -1 : 1;
+    if (y < 0) multiplier *= 3;
+    result = multiplier * kPiDividedBy4;
+  } else {
+    result = atan2(x, y);
+  }
+  return Heap::AllocateHeapNumber(result);
+}
+
+
+static Object* Runtime_Math_ceil(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  return Heap::NumberFromDouble(ceiling(x));
+}
+
+
+static Object* Runtime_Math_cos(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  return TranscendentalCache::Get(TranscendentalCache::COS, x);
+}
+
+
+static Object* Runtime_Math_exp(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  return TranscendentalCache::Get(TranscendentalCache::EXP, x);
+}
+
+
+static Object* Runtime_Math_floor(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  return Heap::NumberFromDouble(floor(x));
+}
+
+
+static Object* Runtime_Math_log(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  return TranscendentalCache::Get(TranscendentalCache::LOG, x);
+}
+
+
+// Helper function to compute x^y, where y is known to be an
+// integer. Uses binary decomposition to limit the number of
+// multiplications; see the discussion in "Hacker's Delight" by Henry
+// S. Warren, Jr., figure 11-6, page 213.
+static double powi(double x, int y) {
+  ASSERT(y != kMinInt);
+  unsigned n = (y < 0) ? -y : y;
+  double m = x;
+  double p = 1;
+  while (true) {
+    if ((n & 1) != 0) p *= m;
+    n >>= 1;
+    if (n == 0) {
+      if (y < 0) {
+        // Unfortunately, we have to be careful when p has reached
+        // infinity in the computation, because sometimes the higher
+        // internal precision in the pow() implementation would have
+        // given us a finite p. This happens very rarely.
+        double result = 1.0 / p;
+        return (result == 0 && isinf(p))
+            ? pow(x, static_cast<double>(y))  // Avoid pow(double, int).
+            : result;
+      } else {
+        return p;
+      }
+    }
+    m *= m;
+  }
+}
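+// For example, powi(x, 11) uses the binary expansion 11 = 8 + 2 + 1 and
+// accumulates x * x^2 * x^8 while repeatedly squaring m, so the number of
+// multiplications grows with log2(y) rather than with y.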
+
+
+static Object* Runtime_Math_pow(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+
+  // If the second argument is a smi, it is much faster to call the
+  // custom powi() function than the generic pow().
+  if (args[1]->IsSmi()) {
+    int y = Smi::cast(args[1])->value();
+    return Heap::AllocateHeapNumber(powi(x, y));
+  }
+
+  CONVERT_DOUBLE_CHECKED(y, args[1]);
+
+  if (!isinf(x)) {
+    if (y == 0.5) {
+      // It's not uncommon to use Math.pow(x, 0.5) to compute the
+      // square root of a number. To speed up such computations, we
+      // explicitly check for this case and use the sqrt() function
+      // which is faster than pow().
+      return Heap::AllocateHeapNumber(sqrt(x));
+    } else if (y == -0.5) {
+      // Optimized using Math.pow(x, -0.5) == 1 / Math.pow(x, 0.5).
+      return Heap::AllocateHeapNumber(1.0 / sqrt(x));
+    }
+  }
+
+  if (y == 0) {
+    return Smi::FromInt(1);
+  } else if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
+    return Heap::nan_value();
+  } else {
+    return Heap::AllocateHeapNumber(pow(x, y));
+  }
+}
+
+
+static Object* Runtime_Math_round(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  if (signbit(x) && x >= -0.5) return Heap::minus_zero_value();
+  return Heap::NumberFromDouble(floor(x + 0.5));
+}
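+// The signbit check above makes inputs in the range [-0.5, -0] round to -0,
+// as ECMA-262 requires for Math.round; floor(x + 0.5) alone would produce +0
+// for those values.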
+
+
+static Object* Runtime_Math_sin(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  return TranscendentalCache::Get(TranscendentalCache::SIN, x);
+}
+
+
+static Object* Runtime_Math_sqrt(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  return Heap::AllocateHeapNumber(sqrt(x));
+}
+
+
+static Object* Runtime_Math_tan(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  return TranscendentalCache::Get(TranscendentalCache::TAN, x);
+}
+
+
+// The NewArguments function is only used when constructing the
+// arguments array when calling non-functions from JavaScript in
+// runtime.js:CALL_NON_FUNCTION.
+static Object* Runtime_NewArguments(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  // ECMA-262, 3rd., 10.1.8, p.39
+  CONVERT_CHECKED(JSFunction, callee, args[0]);
+
+  // Compute the frame holding the arguments.
+  JavaScriptFrameIterator it;
+  it.AdvanceToArgumentsFrame();
+  JavaScriptFrame* frame = it.frame();
+
+  const int length = frame->GetProvidedParametersCount();
+  Object* result = Heap::AllocateArgumentsObject(callee, length);
+  if (result->IsFailure()) return result;
+  if (length > 0) {
+    Object* obj = Heap::AllocateFixedArray(length);
+    if (obj->IsFailure()) return obj;
+    FixedArray* array = FixedArray::cast(obj);
+    ASSERT(array->length() == length);
+    WriteBarrierMode mode = array->GetWriteBarrierMode();
+    for (int i = 0; i < length; i++) {
+      array->set(i, frame->GetParameter(i), mode);
+    }
+    JSObject::cast(result)->set_elements(array);
+  }
+  return result;
+}
+
+
+static Object* Runtime_NewArgumentsFast(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 3);
+
+  JSFunction* callee = JSFunction::cast(args[0]);
+  Object** parameters = reinterpret_cast<Object**>(args[1]);
+  const int length = Smi::cast(args[2])->value();
+
+  Object* result = Heap::AllocateArgumentsObject(callee, length);
+  if (result->IsFailure()) return result;
+  ASSERT(Heap::InNewSpace(result));
+
+  // Allocate the elements if needed.
+  if (length > 0) {
+    // Allocate the fixed array.
+    Object* obj = Heap::AllocateRawFixedArray(length);
+    if (obj->IsFailure()) return obj;
+    reinterpret_cast<Array*>(obj)->set_map(Heap::fixed_array_map());
+    FixedArray* array = FixedArray::cast(obj);
+    array->set_length(length);
+    WriteBarrierMode mode = array->GetWriteBarrierMode();
+    for (int i = 0; i < length; i++) {
+      array->set(i, *--parameters, mode);
+    }
+    JSObject::cast(result)->set_elements(FixedArray::cast(obj),
+                                         SKIP_WRITE_BARRIER);
+  }
+  return result;
+}
+
+
+static Object* Runtime_NewClosure(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 2);
+  CONVERT_ARG_CHECKED(JSFunction, boilerplate, 0);
+  CONVERT_ARG_CHECKED(Context, context, 1);
+
+  Handle<JSFunction> result =
+      Factory::NewFunctionFromBoilerplate(boilerplate, context);
+  return *result;
+}
+
+
+static Code* ComputeConstructStub(Handle<SharedFunctionInfo> shared) {
+  // TODO(385): Change this to create a construct stub specialized for
+  // the given map to make allocation of simple objects - and maybe
+  // arrays - much faster.
+  if (FLAG_inline_new
+      && shared->has_only_simple_this_property_assignments()) {
+    ConstructStubCompiler compiler;
+    Object* code = compiler.CompileConstructStub(*shared);
+    if (code->IsFailure()) {
+      return Builtins::builtin(Builtins::JSConstructStubGeneric);
+    }
+    return Code::cast(code);
+  }
+
+  return Builtins::builtin(Builtins::JSConstructStubGeneric);
+}
+
+
+static Object* Runtime_NewObject(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+
+  Handle<Object> constructor = args.at<Object>(0);
+
+  // If the constructor isn't a proper function we throw a type error.
+  if (!constructor->IsJSFunction()) {
+    Vector< Handle<Object> > arguments = HandleVector(&constructor, 1);
+    Handle<Object> type_error =
+        Factory::NewTypeError("not_constructor", arguments);
+    return Top::Throw(*type_error);
+  }
+
+  Handle<JSFunction> function = Handle<JSFunction>::cast(constructor);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Handle stepping into constructors if step into is active.
+  if (Debug::StepInActive()) {
+    Debug::HandleStepIn(function, Handle<Object>::null(), 0, true);
+  }
+#endif
+
+  if (function->has_initial_map()) {
+    if (function->initial_map()->instance_type() == JS_FUNCTION_TYPE) {
+      // The 'Function' function ignores the receiver object when
+      // called using 'new' and creates a new JSFunction object that
+      // is returned.  The receiver object is only used for error
+      // reporting if an error occurs when constructing the new
+      // JSFunction. Factory::NewJSObject() should not be used to
+      // allocate JSFunctions since it does not properly initialize
+      // the shared part of the function. Since the receiver is
+      // ignored anyway, we use the global object as the receiver
+      // instead of a new JSFunction object. This way, errors are
+      // reported the same way whether or not 'Function' is called
+      // using 'new'.
+      return Top::context()->global();
+    }
+  }
+
+  // The function should be compiled for the optimization hints to be available.
+  if (!function->shared()->is_compiled()) {
+    CompileLazyShared(Handle<SharedFunctionInfo>(function->shared()),
+                      CLEAR_EXCEPTION,
+                      0);
+  }
+
+  bool first_allocation = !function->has_initial_map();
+  Handle<JSObject> result = Factory::NewJSObject(function);
+  if (first_allocation) {
+    Handle<Map> map = Handle<Map>(function->initial_map());
+    Handle<Code> stub = Handle<Code>(
+        ComputeConstructStub(Handle<SharedFunctionInfo>(function->shared())));
+    function->shared()->set_construct_stub(*stub);
+  }
+
+  Counters::constructed_objects.Increment();
+  Counters::constructed_objects_runtime.Increment();
+
+  return *result;
+}
+
+
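+// Compile a function that has so far only been lazily compiled and return
+// its code.
+// args[0]: the function to compile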
+static Object* Runtime_LazyCompile(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+
+  Handle<JSFunction> function = args.at<JSFunction>(0);
+#ifdef DEBUG
+  if (FLAG_trace_lazy) {
+    PrintF("[lazy: ");
+    function->shared()->name()->Print();
+    PrintF("]\n");
+  }
+#endif
+
+  // Compile the target function.  Here we compile using CompileLazyInLoop in
+  // order to get the optimized version.  This helps code like delta-blue
+  // that calls performance-critical routines through constructors.  A
+  // constructor call doesn't use a CallIC, it uses a LoadIC followed by a
+  // direct call.  Since the in-loop tracking takes place through CallICs
+  // this means that things called through constructors are never known to
+  // be in loops.  We compile them as if they are in loops here just in case.
+  ASSERT(!function->is_compiled());
+  if (!CompileLazyInLoop(function, KEEP_EXCEPTION)) {
+    return Failure::Exception();
+  }
+
+  return function->code();
+}
+
+
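+// Find the function that was called by walking the stack past the JS-to-C
+// exit frame and the CALL_NON_FUNCTION activation frame. Takes no arguments.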
+static Object* Runtime_GetCalledFunction(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 0);
+  StackFrameIterator it;
+  // Get past the JS-to-C exit frame.
+  ASSERT(it.frame()->is_exit());
+  it.Advance();
+  // Get past the CALL_NON_FUNCTION activation frame.
+  ASSERT(it.frame()->is_java_script());
+  it.Advance();
+  // Argument adaptor frames do not copy the function; we have to skip
+  // past them to get to the real calling frame.
+  if (it.frame()->is_arguments_adaptor()) it.Advance();
+  // Get the function from the top of the expression stack of the
+  // calling frame.
+  StandardFrame* frame = StandardFrame::cast(it.frame());
+  int index = frame->ComputeExpressionsCount() - 1;
+  Object* result = frame->GetExpression(index);
+  return result;
+}
+
+
+static Object* Runtime_GetFunctionDelegate(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  RUNTIME_ASSERT(!args[0]->IsJSFunction());
+  return *Execution::GetFunctionDelegate(args.at<Object>(0));
+}
+
+
+static Object* Runtime_GetConstructorDelegate(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  RUNTIME_ASSERT(!args[0]->IsJSFunction());
+  return *Execution::GetConstructorDelegate(args.at<Object>(0));
+}
+
+
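+// Allocate a new function context for the given function and install it as
+// the current context.
+// args[0]: function whose code determines the number of context slots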
+static Object* Runtime_NewContext(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_CHECKED(JSFunction, function, args[0]);
+  int length = ScopeInfo<>::NumberOfContextSlots(function->code());
+  Object* result = Heap::AllocateFunctionContext(length, function);
+  if (result->IsFailure()) return result;
+
+  Top::set_context(Context::cast(result));
+
+  return result;  // non-failure
+}
+
+static Object* PushContextHelper(Object* object, bool is_catch_context) {
+  // Convert the object to a proper JavaScript object.
+  Object* js_object = object;
+  if (!js_object->IsJSObject()) {
+    js_object = js_object->ToObject();
+    if (js_object->IsFailure()) {
+      if (!Failure::cast(js_object)->IsInternalError()) return js_object;
+      HandleScope scope;
+      Handle<Object> handle(object);
+      Handle<Object> result =
+          Factory::NewTypeError("with_expression", HandleVector(&handle, 1));
+      return Top::Throw(*result);
+    }
+  }
+
+  Object* result =
+      Heap::AllocateWithContext(Top::context(),
+                                JSObject::cast(js_object),
+                                is_catch_context);
+  if (result->IsFailure()) return result;
+
+  Context* context = Context::cast(result);
+  Top::set_context(context);
+
+  return result;
+}
+
+
+static Object* Runtime_PushContext(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+  return PushContextHelper(args[0], false);
+}
+
+
+static Object* Runtime_PushCatchContext(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+  return PushContextHelper(args[0], true);
+}
+
+
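+// Look up a name in a context chain.
+// Returns the (JSObject) holder if the name resolves to a property of an
+// intermediate extension object, and the global object otherwise.
+// args[0]: context to start the lookup in
+// args[1]: name to look up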
+static Object* Runtime_LookupContext(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 2);
+
+  CONVERT_ARG_CHECKED(Context, context, 0);
+  CONVERT_ARG_CHECKED(String, name, 1);
+
+  int index;
+  PropertyAttributes attributes;
+  ContextLookupFlags flags = FOLLOW_CHAINS;
+  Handle<Object> holder =
+      context->Lookup(name, flags, &index, &attributes);
+
+  if (index < 0 && !holder.is_null()) {
+    ASSERT(holder->IsJSObject());
+    return *holder;
+  }
+
+  // No intermediate context found. Use global object by default.
+  return Top::context()->global();
+}
+
+
+// A mechanism to return a pair of Object pointers in registers (if possible).
+// How this is achieved is calling convention-dependent.
+// All currently supported x86 compilers use calling conventions that are cdecl
+// variants where a 64-bit value is returned in two 32-bit registers
+// (edx:eax on ia32, r1:r0 on ARM).
+// In AMD-64 calling convention a struct of two pointers is returned in rdx:rax.
+// In Win64 calling convention, a struct of two pointers is returned in memory,
+// allocated by the caller, and passed as a pointer in a hidden first parameter.
+#ifdef V8_HOST_ARCH_64_BIT
+struct ObjectPair {
+  Object* x;
+  Object* y;
+};
+
+static inline ObjectPair MakePair(Object* x, Object* y) {
+  ObjectPair result = {x, y};
+  // Pointers x and y are returned in rax and rdx in the AMD64 ABI.
+  // In Win64 they are assigned to a hidden first argument.
+  return result;
+}
+#else
+typedef uint64_t ObjectPair;
+static inline ObjectPair MakePair(Object* x, Object* y) {
+  return reinterpret_cast<uint32_t>(x) |
+      (reinterpret_cast<ObjectPair>(y) << 32);
+}
+#endif
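+// Illustration of the 32-bit packing above (with hypothetical pointer
+// values): MakePair(0x00001234, 0x00005678) yields the 64-bit value
+// 0x0000567800001234, so generated code reads x from the low half (eax on
+// ia32) and y from the high half (edx).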
+
+
+static inline Object* Unhole(Object* x, PropertyAttributes attributes) {
+  ASSERT(!x->IsTheHole() || (attributes & READ_ONLY) != 0);
+  USE(attributes);
+  return x->IsTheHole() ? Heap::undefined_value() : x;
+}
+
+
+static JSObject* ComputeReceiverForNonGlobal(JSObject* holder) {
+  ASSERT(!holder->IsGlobalObject());
+  Context* top = Top::context();
+  // Get the context extension function.
+  JSFunction* context_extension_function =
+      top->global_context()->context_extension_function();
+  // If the holder isn't a context extension object, we just return it
+  // as the receiver. This allows arguments objects to be used as
+  // receivers, but only if they are put in the context scope chain
+  // explicitly via a with-statement.
+  Object* constructor = holder->map()->constructor();
+  if (constructor != context_extension_function) return holder;
+  // Fall back to using the global object as the receiver if the
+  // property turns out to be a local variable allocated in a context
+  // extension object - introduced via eval.
+  return top->global()->global_receiver();
+}
+
+
+static ObjectPair LoadContextSlotHelper(Arguments args, bool throw_error) {
+  HandleScope scope;
+  ASSERT_EQ(2, args.length());
+
+  if (!args[0]->IsContext() || !args[1]->IsString()) {
+    return MakePair(Top::ThrowIllegalOperation(), NULL);
+  }
+  Handle<Context> context = args.at<Context>(0);
+  Handle<String> name = args.at<String>(1);
+
+  int index;
+  PropertyAttributes attributes;
+  ContextLookupFlags flags = FOLLOW_CHAINS;
+  Handle<Object> holder =
+      context->Lookup(name, flags, &index, &attributes);
+
+  // If the index is non-negative, the slot has been found in a local
+  // variable or a parameter. Read it from the context object or the
+  // arguments object.
+  if (index >= 0) {
+    // If the "property" we were looking for is a local variable or an
+    // argument in a context, the receiver is the global object; see
+    // ECMA-262, 3rd., 10.1.6 and 10.2.3.
+    JSObject* receiver = Top::context()->global()->global_receiver();
+    Object* value = (holder->IsContext())
+        ? Context::cast(*holder)->get(index)
+        : JSObject::cast(*holder)->GetElement(index);
+    return MakePair(Unhole(value, attributes), receiver);
+  }
+
+  // If the holder is found, we read the property from it.
+  if (!holder.is_null() && holder->IsJSObject()) {
+    ASSERT(Handle<JSObject>::cast(holder)->HasProperty(*name));
+    JSObject* object = JSObject::cast(*holder);
+    JSObject* receiver;
+    if (object->IsGlobalObject()) {
+      receiver = GlobalObject::cast(object)->global_receiver();
+    } else if (context->is_exception_holder(*holder)) {
+      receiver = Top::context()->global()->global_receiver();
+    } else {
+      receiver = ComputeReceiverForNonGlobal(object);
+    }
+    // No need to unhole the value here. This is taken care of by the
+    // GetProperty function.
+    Object* value = object->GetProperty(*name);
+    return MakePair(value, receiver);
+  }
+
+  if (throw_error) {
+    // The property doesn't exist - throw exception.
+    Handle<Object> reference_error =
+        Factory::NewReferenceError("not_defined", HandleVector(&name, 1));
+    return MakePair(Top::Throw(*reference_error), NULL);
+  } else {
+    // The property doesn't exist - return undefined.
+    return MakePair(Heap::undefined_value(), Heap::undefined_value());
+  }
+}
+
+
+static ObjectPair Runtime_LoadContextSlot(Arguments args) {
+  return LoadContextSlotHelper(args, true);
+}
+
+
+static ObjectPair Runtime_LoadContextSlotNoReferenceError(Arguments args) {
+  return LoadContextSlotHelper(args, false);
+}
+
+
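+// Store a value into a slot found by looking up a name in a context chain.
+// If the name does not resolve to a slot, the value is stored on a context
+// extension object (or on the global object if the name is not found at all).
+// args[0]: value to store
+// args[1]: context to start the lookup in
+// args[2]: name of the slot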
+static Object* Runtime_StoreContextSlot(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 3);
+
+  Handle<Object> value(args[0]);
+  CONVERT_ARG_CHECKED(Context, context, 1);
+  CONVERT_ARG_CHECKED(String, name, 2);
+
+  int index;
+  PropertyAttributes attributes;
+  ContextLookupFlags flags = FOLLOW_CHAINS;
+  Handle<Object> holder =
+      context->Lookup(name, flags, &index, &attributes);
+
+  if (index >= 0) {
+    if (holder->IsContext()) {
+      // Ignore the store if the variable is read-only.
+      if ((attributes & READ_ONLY) == 0) {
+        Handle<Context>::cast(holder)->set(index, *value);
+      }
+    } else {
+      ASSERT((attributes & READ_ONLY) == 0);
+      Object* result =
+          Handle<JSObject>::cast(holder)->SetElement(index, *value);
+      USE(result);
+      ASSERT(!result->IsFailure());
+    }
+    return *value;
+  }
+
+  // Slow case: The property is not in a FixedArray context.
+  // It is either in a JSObject extension context or it was not found.
+  Handle<JSObject> context_ext;
+
+  if (!holder.is_null()) {
+    // The property exists in the extension context.
+    context_ext = Handle<JSObject>::cast(holder);
+  } else {
+    // The property was not found. It needs to be stored in the global context.
+    ASSERT(attributes == ABSENT);
+    attributes = NONE;
+    context_ext = Handle<JSObject>(Top::context()->global());
+  }
+
+  // Set the property, but ignore the store if the variable is read-only on
+  // the context extension object itself.
+  if ((attributes & READ_ONLY) == 0 ||
+      (context_ext->GetLocalPropertyAttribute(*name) == ABSENT)) {
+    Handle<Object> set = SetProperty(context_ext, name, value, attributes);
+    if (set.is_null()) {
+      // Failure::Exception is converted to a null handle in the
+      // handle-based methods such as SetProperty.  We therefore need
+      // to convert null handles back to exceptions.
+      ASSERT(Top::has_pending_exception());
+      return Failure::Exception();
+    }
+  }
+  return *value;
+}
+
+
+static Object* Runtime_Throw(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+
+  return Top::Throw(args[0]);
+}
+
+
+static Object* Runtime_ReThrow(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+
+  return Top::ReThrow(args[0]);
+}
+
+
+static Object* Runtime_ThrowReferenceError(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+
+  Handle<Object> name(args[0]);
+  Handle<Object> reference_error =
+    Factory::NewReferenceError("not_defined", HandleVector(&name, 1));
+  return Top::Throw(*reference_error);
+}
+
+
+static Object* Runtime_StackOverflow(Arguments args) {
+  NoHandleAllocation na;
+  return Top::StackOverflow();
+}
+
+
+static Object* Runtime_StackGuard(Arguments args) {
+  ASSERT(args.length() == 1);
+
+  // First check if this is a real stack overflow.
+  if (StackGuard::IsStackOverflow()) {
+    return Runtime_StackOverflow(args);
+  }
+
+  return Execution::HandleStackGuardInterrupt();
+}
+
+
+// NOTE: These PrintXXX functions are defined for all builds (not just
+// DEBUG builds) because we may want to be able to trace function
+// calls in all modes.
+static void PrintString(String* str) {
+  // not uncommon to have empty strings
+  if (str->length() > 0) {
+    SmartPointer<char> s =
+        str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+    PrintF("%s", *s);
+  }
+}
+
+
+static void PrintObject(Object* obj) {
+  if (obj->IsSmi()) {
+    PrintF("%d", Smi::cast(obj)->value());
+  } else if (obj->IsString() || obj->IsSymbol()) {
+    PrintString(String::cast(obj));
+  } else if (obj->IsNumber()) {
+    PrintF("%g", obj->Number());
+  } else if (obj->IsFailure()) {
+    PrintF("<failure>");
+  } else if (obj->IsUndefined()) {
+    PrintF("<undefined>");
+  } else if (obj->IsNull()) {
+    PrintF("<null>");
+  } else if (obj->IsTrue()) {
+    PrintF("<true>");
+  } else if (obj->IsFalse()) {
+    PrintF("<false>");
+  } else {
+    PrintF("%p", obj);
+  }
+}
+
+
+static int StackSize() {
+  int n = 0;
+  for (JavaScriptFrameIterator it; !it.done(); it.Advance()) n++;
+  return n;
+}
+
+
+static void PrintTransition(Object* result) {
+  // indentation
+  { const int nmax = 80;
+    int n = StackSize();
+    if (n <= nmax)
+      PrintF("%4d:%*s", n, n, "");
+    else
+      PrintF("%4d:%*s", n, nmax, "...");
+  }
+
+  if (result == NULL) {
+    // constructor calls
+    JavaScriptFrameIterator it;
+    JavaScriptFrame* frame = it.frame();
+    if (frame->IsConstructor()) PrintF("new ");
+    // function name
+    Object* fun = frame->function();
+    if (fun->IsJSFunction()) {
+      PrintObject(JSFunction::cast(fun)->shared()->name());
+    } else {
+      PrintObject(fun);
+    }
+    // function arguments
+    // (we are intentionally only printing the actually
+    // supplied parameters, not all parameters required)
+    PrintF("(this=");
+    PrintObject(frame->receiver());
+    const int length = frame->GetProvidedParametersCount();
+    for (int i = 0; i < length; i++) {
+      PrintF(", ");
+      PrintObject(frame->GetParameter(i));
+    }
+    PrintF(") {\n");
+
+  } else {
+    // function result
+    PrintF("} -> ");
+    PrintObject(result);
+    PrintF("\n");
+  }
+}
+
+
+static Object* Runtime_TraceEnter(Arguments args) {
+  ASSERT(args.length() == 0);
+  NoHandleAllocation ha;
+  PrintTransition(NULL);
+  return Heap::undefined_value();
+}
+
+
+static Object* Runtime_TraceExit(Arguments args) {
+  NoHandleAllocation ha;
+  PrintTransition(args[0]);
+  return args[0];  // return TOS
+}
+
+
+static Object* Runtime_DebugPrint(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+#ifdef DEBUG
+  if (args[0]->IsString()) {
+    // If we have a string, assume it's a code "marker"
+    // and print some interesting cpu debugging info.
+    JavaScriptFrameIterator it;
+    JavaScriptFrame* frame = it.frame();
+    PrintF("fp = %p, sp = %p, caller_sp = %p: ",
+           frame->fp(), frame->sp(), frame->caller_sp());
+  } else {
+    PrintF("DebugPrint: ");
+  }
+  args[0]->Print();
+#else
+  // ShortPrint is available in release mode. Print is not.
+  args[0]->ShortPrint();
+#endif
+  PrintF("\n");
+  Flush();
+
+  return args[0];  // return TOS
+}
+
+
+static Object* Runtime_DebugTrace(Arguments args) {
+  ASSERT(args.length() == 0);
+  NoHandleAllocation ha;
+  Top::PrintStack();
+  return Heap::undefined_value();
+}
+
+
+static Object* Runtime_DateCurrentTime(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 0);
+
+  // According to ECMA-262, section 15.9.1, page 117, the precision of
+  // the number in a Date object representing a particular instant in
+  // time is milliseconds. Therefore, we floor the result of getting
+  // the OS time.
+  double millis = floor(OS::TimeCurrentMillis());
+  return Heap::NumberFromDouble(millis);
+}
+
+
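+// Parse a date string and fill in its components.
+// args[0]: string to parse
+// args[1]: JSArray with fast elements and room for DateParser::OUTPUT_SIZE
+//          entries; returns this array on success and null on failure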
+static Object* Runtime_DateParseString(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 2);
+
+  CONVERT_ARG_CHECKED(String, str, 0);
+  FlattenString(str);
+
+  CONVERT_ARG_CHECKED(JSArray, output, 1);
+  RUNTIME_ASSERT(output->HasFastElements());
+
+  AssertNoAllocation no_allocation;
+
+  FixedArray* output_array = FixedArray::cast(output->elements());
+  RUNTIME_ASSERT(output_array->length() >= DateParser::OUTPUT_SIZE);
+  bool result;
+  if (str->IsAsciiRepresentation()) {
+    result = DateParser::Parse(str->ToAsciiVector(), output_array);
+  } else {
+    ASSERT(str->IsTwoByteRepresentation());
+    result = DateParser::Parse(str->ToUC16Vector(), output_array);
+  }
+
+  if (result) {
+    return *output;
+  } else {
+    return Heap::null_value();
+  }
+}
+
+
+static Object* Runtime_DateLocalTimezone(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  const char* zone = OS::LocalTimezone(x);
+  return Heap::AllocateStringFromUtf8(CStrVector(zone));
+}
+
+
+static Object* Runtime_DateLocalTimeOffset(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 0);
+
+  return Heap::NumberFromDouble(OS::LocalTimeOffset());
+}
+
+
+static Object* Runtime_DateDaylightSavingsOffset(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  return Heap::NumberFromDouble(OS::DaylightSavingsOffset(x));
+}
+
+
+static Object* Runtime_NumberIsFinite(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_DOUBLE_CHECKED(value, args[0]);
+  Object* result;
+  if (isnan(value) || (fpclassify(value) == FP_INFINITE)) {
+    result = Heap::false_value();
+  } else {
+    result = Heap::true_value();
+  }
+  return result;
+}
+
+
+static Object* Runtime_GlobalReceiver(Arguments args) {
+  ASSERT(args.length() == 1);
+  Object* global = args[0];
+  if (!global->IsJSGlobalObject()) return Heap::null_value();
+  return JSGlobalObject::cast(global)->global_receiver();
+}
+
+
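+// Compile a source string in the global context and return the resulting
+// function.
+// args[0]: source string
+// args[1]: oddball; if true the source is validated as JSON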
+static Object* Runtime_CompileString(Arguments args) {
+  HandleScope scope;
+  ASSERT_EQ(2, args.length());
+  CONVERT_ARG_CHECKED(String, source, 0);
+  CONVERT_ARG_CHECKED(Oddball, is_json, 1);
+
+  // Compile source string in the global context.
+  Handle<Context> context(Top::context()->global_context());
+  Compiler::ValidationState validate = (is_json->IsTrue())
+    ? Compiler::VALIDATE_JSON : Compiler::DONT_VALIDATE_JSON;
+  Handle<JSFunction> boilerplate = Compiler::CompileEval(source,
+                                                         context,
+                                                         true,
+                                                         validate);
+  if (boilerplate.is_null()) return Failure::Exception();
+  Handle<JSFunction> fun =
+      Factory::NewFunctionFromBoilerplate(boilerplate, context);
+  return *fun;
+}
+
+
+static Handle<JSFunction> GetBuiltinFunction(String* name) {
+  LookupResult result;
+  Top::global_context()->builtins()->LocalLookup(name, &result);
+  return Handle<JSFunction>(JSFunction::cast(result.GetValue()));
+}
+
+
+static Object* CompileDirectEval(Handle<String> source) {
+  // Compute the eval context.
+  HandleScope scope;
+  StackFrameLocator locator;
+  JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
+  Handle<Context> context(Context::cast(frame->context()));
+  bool is_global = context->IsGlobalContext();
+
+  // Compile source string in the current context.
+  Handle<JSFunction> boilerplate = Compiler::CompileEval(
+      source,
+      context,
+      is_global,
+      Compiler::DONT_VALIDATE_JSON);
+  if (boilerplate.is_null()) return Failure::Exception();
+  Handle<JSFunction> fun =
+    Factory::NewFunctionFromBoilerplate(boilerplate, context);
+  return *fun;
+}
+
+
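+// Resolve a call that may be a direct call to eval. Returns a fixed array
+// with the function to call and the receiver to use.
+// args[0]: the function being called as 'eval'
+// args[1]: the first argument of the call (compiled if it is a string and
+//          this really is a direct eval)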
+static Object* Runtime_ResolvePossiblyDirectEval(Arguments args) {
+  ASSERT(args.length() == 2);
+
+  HandleScope scope;
+
+  CONVERT_ARG_CHECKED(JSFunction, callee, 0);
+
+  Handle<Object> receiver;
+
+  // Find where the 'eval' symbol is bound. It is unaliased only if
+  // it is bound in the global context.
+  StackFrameLocator locator;
+  JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
+  Handle<Context> context(Context::cast(frame->context()));
+  int index;
+  PropertyAttributes attributes;
+  while (!context.is_null()) {
+    receiver = context->Lookup(Factory::eval_symbol(), FOLLOW_PROTOTYPE_CHAIN,
+                               &index, &attributes);
+    // Stop search when eval is found or when the global context is
+    // reached.
+    if (attributes != ABSENT || context->IsGlobalContext()) break;
+    if (context->is_function_context()) {
+      context = Handle<Context>(Context::cast(context->closure()->context()));
+    } else {
+      context = Handle<Context>(context->previous());
+    }
+  }
+
+  // If eval could not be resolved, it has been deleted and we need to
+  // throw a reference error.
+  if (attributes == ABSENT) {
+    Handle<Object> name = Factory::eval_symbol();
+    Handle<Object> reference_error =
+        Factory::NewReferenceError("not_defined", HandleVector(&name, 1));
+    return Top::Throw(*reference_error);
+  }
+
+  if (context->IsGlobalContext()) {
+    // 'eval' is bound in the global context, but it may have been overwritten.
+    // Compare it to the builtin 'GlobalEval' function to make sure.
+    Handle<JSFunction> global_eval =
+      GetBuiltinFunction(Heap::global_eval_symbol());
+    if (global_eval.is_identical_to(callee)) {
+      // A direct eval call.
+      if (args[1]->IsString()) {
+        CONVERT_ARG_CHECKED(String, source, 1);
+        // A normal eval call on a string. Compile it and return the
+        // compiled function bound in the local context.
+        Object* compiled_source = CompileDirectEval(source);
+        if (compiled_source->IsFailure()) return compiled_source;
+        receiver = Handle<Object>(frame->receiver());
+        callee = Handle<JSFunction>(JSFunction::cast(compiled_source));
+      } else {
+        // An eval call that is not called on a string. Global eval
+        // deals better with this.
+        receiver = Handle<Object>(Top::global_context()->global());
+      }
+    } else {
+      // 'eval' is overwritten. Just call the function with the given arguments.
+      receiver = Handle<Object>(Top::global_context()->global());
+    }
+  } else {
+    // 'eval' is not bound in the global context. Just call the function
+    // with the given arguments. This is not necessarily the global eval.
+    if (receiver->IsContext()) {
+      context = Handle<Context>::cast(receiver);
+      receiver = Handle<Object>(context->get(index));
+    }
+  }
+
+  Handle<FixedArray> call = Factory::NewFixedArray(2);
+  call->set(0, *callee);
+  call->set(1, *receiver);
+  return *call;
+}
+
+
+static Object* Runtime_SetNewFunctionAttributes(Arguments args) {
+  // This utility adjusts the property attributes for a newly created Function
+  // object ("new Function(...)") by changing the map.
+  // All it does is change the prototype property to enumerable
+  // as specified in ECMA262, 15.3.5.2.
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(JSFunction, func, 0);
+  ASSERT(func->map()->instance_type() ==
+         Top::function_instance_map()->instance_type());
+  ASSERT(func->map()->instance_size() ==
+         Top::function_instance_map()->instance_size());
+  func->set_map(*Top::function_instance_map());
+  return *func;
+}
+
+
+// Push an array onto an array of arrays if it is not already in the
+// array.  Returns true if the element was pushed onto the array and
+// false otherwise.
+static Object* Runtime_PushIfAbsent(Arguments args) {
+  ASSERT(args.length() == 2);
+  CONVERT_CHECKED(JSArray, array, args[0]);
+  CONVERT_CHECKED(JSArray, element, args[1]);
+  RUNTIME_ASSERT(array->HasFastElements());
+  int length = Smi::cast(array->length())->value();
+  FixedArray* elements = FixedArray::cast(array->elements());
+  for (int i = 0; i < length; i++) {
+    if (elements->get(i) == element) return Heap::false_value();
+  }
+  Object* obj = array->SetFastElement(length, element);
+  if (obj->IsFailure()) return obj;
+  return Heap::true_value();
+}
+
+
+/**
+ * A simple visitor that visits every element of an array.
+ * The backing storage can be a fixed array in the fast-elements case,
+ * or a dictionary for sparse arrays. Since Dictionary is a subtype
+ * of FixedArray, the class can be used in both the fast and the slow case.
+ * The second parameter of the constructor, fast_elements, specifies
+ * whether the storage is a FixedArray or a Dictionary.
+ *
+ * An index limit is used to deal with the situation where a result array
+ * length overflows a 32-bit non-negative integer.
+ */
+class ArrayConcatVisitor {
+ public:
+  ArrayConcatVisitor(Handle<FixedArray> storage,
+                     uint32_t index_limit,
+                     bool fast_elements) :
+      storage_(storage), index_limit_(index_limit),
+      fast_elements_(fast_elements), index_offset_(0) { }
+
+  void visit(uint32_t i, Handle<Object> elm) {
+    uint32_t index = i + index_offset_;
+    if (index >= index_limit_) return;
+
+    if (fast_elements_) {
+      ASSERT(index < static_cast<uint32_t>(storage_->length()));
+      storage_->set(index, *elm);
+
+    } else {
+      Handle<NumberDictionary> dict = Handle<NumberDictionary>::cast(storage_);
+      Handle<NumberDictionary> result =
+          Factory::DictionaryAtNumberPut(dict, index, elm);
+      if (!result.is_identical_to(dict))
+        storage_ = result;
+    }
+  }
+
+  void increase_index_offset(uint32_t delta) {
+    index_offset_ += delta;
+  }
+
+ private:
+  Handle<FixedArray> storage_;
+  uint32_t index_limit_;
+  bool fast_elements_;
+  uint32_t index_offset_;
+};
+
+
+/**
+ * A helper function that visits the elements of a JSObject. Only elements
+ * whose indices are between 0 and range (exclusive) are visited.
+ *
+ * If the third parameter, visitor, is not NULL, the visitor is called with
+ * two parameters: 'visitor_index_offset + element index' and the element.
+ *
+ * It returns the number of visited elements.
+ */
+static uint32_t IterateElements(Handle<JSObject> receiver,
+                                uint32_t range,
+                                ArrayConcatVisitor* visitor) {
+  uint32_t num_of_elements = 0;
+
+  switch (receiver->GetElementsKind()) {
+    case JSObject::FAST_ELEMENTS: {
+      Handle<FixedArray> elements(FixedArray::cast(receiver->elements()));
+      uint32_t len = elements->length();
+      if (range < len) {
+        len = range;
+      }
+
+      for (uint32_t j = 0; j < len; j++) {
+        Handle<Object> e(elements->get(j));
+        if (!e->IsTheHole()) {
+          num_of_elements++;
+          if (visitor) {
+            visitor->visit(j, e);
+          }
+        }
+      }
+      break;
+    }
+    case JSObject::PIXEL_ELEMENTS: {
+      Handle<PixelArray> pixels(PixelArray::cast(receiver->elements()));
+      uint32_t len = pixels->length();
+      if (range < len) {
+        len = range;
+      }
+
+      for (uint32_t j = 0; j < len; j++) {
+        num_of_elements++;
+        if (visitor != NULL) {
+          Handle<Smi> e(Smi::FromInt(pixels->get(j)));
+          visitor->visit(j, e);
+        }
+      }
+      break;
+    }
+    case JSObject::DICTIONARY_ELEMENTS: {
+      Handle<NumberDictionary> dict(receiver->element_dictionary());
+      uint32_t capacity = dict->Capacity();
+      for (uint32_t j = 0; j < capacity; j++) {
+        Handle<Object> k(dict->KeyAt(j));
+        if (dict->IsKey(*k)) {
+          ASSERT(k->IsNumber());
+          uint32_t index = static_cast<uint32_t>(k->Number());
+          if (index < range) {
+            num_of_elements++;
+            if (visitor) {
+              visitor->visit(index, Handle<Object>(dict->ValueAt(j)));
+            }
+          }
+        }
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+
+  return num_of_elements;
+}
+
+
+/**
+ * A helper function that visits the elements of an Array object, and the
+ * elements on its prototypes.
+ *
+ * Elements on prototypes are visited first, and only elements whose indices
+ * are less than the Array length are visited.
+ *
+ * If an ArrayConcatVisitor object is given, the visitor is called with two
+ * parameters: the element's index + visitor_index_offset and the element.
+ */
+static uint32_t IterateArrayAndPrototypeElements(Handle<JSArray> array,
+                                                 ArrayConcatVisitor* visitor) {
+  uint32_t range = static_cast<uint32_t>(array->length()->Number());
+  Handle<Object> obj = array;
+
+  static const int kEstimatedPrototypes = 3;
+  List< Handle<JSObject> > objects(kEstimatedPrototypes);
+
+  // Visit prototype first. If an element on the prototype is shadowed by
+  // the inheritor using the same index, the ArrayConcatVisitor visits
+  // the prototype element before the shadowing element.
+  // The visitor can simply overwrite the old value by new value using
+  // the same index.  This follows Array::concat semantics.
+  while (!obj->IsNull()) {
+    objects.Add(Handle<JSObject>::cast(obj));
+    obj = Handle<Object>(obj->GetPrototype());
+  }
+
+  uint32_t nof_elements = 0;
+  for (int i = objects.length() - 1; i >= 0; i--) {
+    Handle<JSObject> obj = objects[i];
+    nof_elements +=
+        IterateElements(Handle<JSObject>::cast(obj), range, visitor);
+  }
+
+  return nof_elements;
+}
+
+
+/**
+ * A helper function of Runtime_ArrayConcat.
+ *
+ * The first argument is an Array of arrays and objects. It is the
+ * same as the arguments array of the Array::concat JS function.
+ *
+ * If an argument is an Array object, the function visits the array's
+ * elements.  If an argument is not an Array object, the function
+ * visits the object as if it were a one-element array.
+ *
+ * If the result array index overflows a 32-bit integer, the index wraps
+ * around and the resulting non-negative number is used as the new length.
+ * For example, if one array's length is 2^32 - 1 and the second array's
+ * length is 1, the concatenated array's length is 0.
+ */
+static uint32_t IterateArguments(Handle<JSArray> arguments,
+                                 ArrayConcatVisitor* visitor) {
+  uint32_t visited_elements = 0;
+  uint32_t num_of_args = static_cast<uint32_t>(arguments->length()->Number());
+
+  for (uint32_t i = 0; i < num_of_args; i++) {
+    Handle<Object> obj(arguments->GetElement(i));
+    if (obj->IsJSArray()) {
+      Handle<JSArray> array = Handle<JSArray>::cast(obj);
+      uint32_t len = static_cast<uint32_t>(array->length()->Number());
+      uint32_t nof_elements =
+          IterateArrayAndPrototypeElements(array, visitor);
+      // Total elements of array and its prototype chain can be more than
+      // the array length, but ArrayConcat can only concatenate at most
+      // the array length number of elements.
+      visited_elements += (nof_elements > len) ? len : nof_elements;
+      if (visitor) visitor->increase_index_offset(len);
+
+    } else {
+      if (visitor) {
+        visitor->visit(0, obj);
+        visitor->increase_index_offset(1);
+      }
+      visited_elements++;
+    }
+  }
+  return visited_elements;
+}
+
+
+/**
+ * Array::concat implementation.
+ * See ECMAScript 262, 15.4.4.4.
+ */
+static Object* Runtime_ArrayConcat(Arguments args) {
+  ASSERT(args.length() == 1);
+  HandleScope handle_scope;
+
+  CONVERT_CHECKED(JSArray, arg_arrays, args[0]);
+  Handle<JSArray> arguments(arg_arrays);
+
+  // Pass 1: estimate the number of elements of the result
+  // (it could be more than the real number of elements if the prototype has
+  // elements).
+  uint32_t result_length = 0;
+  uint32_t num_of_args = static_cast<uint32_t>(arguments->length()->Number());
+
+  { AssertNoAllocation nogc;
+    for (uint32_t i = 0; i < num_of_args; i++) {
+      Object* obj = arguments->GetElement(i);
+      if (obj->IsJSArray()) {
+        result_length +=
+            static_cast<uint32_t>(JSArray::cast(obj)->length()->Number());
+      } else {
+        result_length++;
+      }
+    }
+  }
+
+  // Allocate an empty array, will set length and content later.
+  Handle<JSArray> result = Factory::NewJSArray(0);
+
+  uint32_t estimate_nof_elements = IterateArguments(arguments, NULL);
+  // If the estimated number of elements is more than half of the length, a
+  // fixed array (fast case) is more time- and space-efficient than a
+  // dictionary.
+  bool fast_case = (estimate_nof_elements * 2) >= result_length;
+
+  Handle<FixedArray> storage;
+  if (fast_case) {
+    // The backing storage array must have non-existing elements to
+    // preserve holes across concat operations.
+    storage = Factory::NewFixedArrayWithHoles(result_length);
+
+  } else {
+    // TODO(126): move 25% pre-allocation logic into Dictionary::Allocate
+    uint32_t at_least_space_for = estimate_nof_elements +
+                                  (estimate_nof_elements >> 2);
+    storage = Handle<FixedArray>::cast(
+                  Factory::NewNumberDictionary(at_least_space_for));
+  }
+
+  Handle<Object> len = Factory::NewNumber(static_cast<double>(result_length));
+
+  ArrayConcatVisitor visitor(storage, result_length, fast_case);
+
+  IterateArguments(arguments, &visitor);
+
+  result->set_length(*len);
+  result->set_elements(*storage);
+
+  return *result;
+}
+
+
+// This will not allocate (flatten the string), but it may run
+// very slowly for very deeply nested ConsStrings.  For debugging use only.
+static Object* Runtime_GlobalPrint(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_CHECKED(String, string, args[0]);
+  StringInputBuffer buffer(string);
+  while (buffer.has_more()) {
+    uint16_t character = buffer.GetNext();
+    PrintF("%c", character);
+  }
+  return string;
+}
+
+// Moves all own elements of an object that are below a limit to positions
+// starting at zero. All undefined values are placed after non-undefined
+// values, and are followed by non-existing elements. Does not change the
+// length property.
+// Returns the number of non-undefined elements collected.
+static Object* Runtime_RemoveArrayHoles(Arguments args) {
+  ASSERT(args.length() == 2);
+  CONVERT_CHECKED(JSObject, object, args[0]);
+  CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
+  return object->PrepareElementsForSort(limit);
+}
+
+
+// Move the contents of argument 0 (an array) to argument 1 (an array).
+static Object* Runtime_MoveArrayContents(Arguments args) {
+  ASSERT(args.length() == 2);
+  CONVERT_CHECKED(JSArray, from, args[0]);
+  CONVERT_CHECKED(JSArray, to, args[1]);
+  to->SetContent(FixedArray::cast(from->elements()));
+  to->set_length(from->length());
+  from->SetContent(Heap::empty_fixed_array());
+  from->set_length(0);
+  return to;
+}
+
+
+// How many elements does this array have?
+static Object* Runtime_EstimateNumberOfElements(Arguments args) {
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(JSArray, array, args[0]);
+  HeapObject* elements = array->elements();
+  if (elements->IsDictionary()) {
+    return Smi::FromInt(NumberDictionary::cast(elements)->NumberOfElements());
+  } else {
+    return array->length();
+  }
+}
+
+
+// Returns an array that tells you where in the [0, length) interval an array
+// might have elements.  Can either return keys or intervals.  Keys can have
+// gaps in them (undefined).  Intervals can also span over some undefined keys.
+static Object* Runtime_GetArrayKeys(Arguments args) {
+  ASSERT(args.length() == 2);
+  HandleScope scope;
+  CONVERT_ARG_CHECKED(JSObject, array, 0);
+  CONVERT_NUMBER_CHECKED(uint32_t, length, Uint32, args[1]);
+  if (array->elements()->IsDictionary()) {
+    // Create an array and get all the keys into it, then remove all the
+    // keys that are not integers in the range 0 to length-1.
+    Handle<FixedArray> keys = GetKeysInFixedArrayFor(array, INCLUDE_PROTOS);
+    int keys_length = keys->length();
+    for (int i = 0; i < keys_length; i++) {
+      Object* key = keys->get(i);
+      uint32_t index;
+      if (!Array::IndexFromObject(key, &index) || index >= length) {
+        // Zap invalid keys.
+        keys->set_undefined(i);
+      }
+    }
+    return *Factory::NewJSArrayWithElements(keys);
+  } else {
+    Handle<FixedArray> single_interval = Factory::NewFixedArray(2);
+    // -1 means start of array.
+    single_interval->set(0,
+                         Smi::FromInt(-1),
+                         SKIP_WRITE_BARRIER);
+    uint32_t actual_length = static_cast<uint32_t>(array->elements()->length());
+    uint32_t min_length = actual_length < length ? actual_length : length;
+    Handle<Object> length_object =
+        Factory::NewNumber(static_cast<double>(min_length));
+    single_interval->set(1, *length_object);
+    return *Factory::NewJSArrayWithElements(single_interval);
+  }
+}
+
+
+// DefineAccessor takes an optional final argument which is the
+// property attributes (e.g. DONT_ENUM, DONT_DELETE).  IMPORTANT: due
+// to the way accessors are implemented, it is set for both the getter
+// and setter on the first call to DefineAccessor and ignored on
+// subsequent calls.
+static Object* Runtime_DefineAccessor(Arguments args) {
+  RUNTIME_ASSERT(args.length() == 4 || args.length() == 5);
+  // Compute attributes.
+  PropertyAttributes attributes = NONE;
+  if (args.length() == 5) {
+    CONVERT_CHECKED(Smi, attrs, args[4]);
+    int value = attrs->value();
+    // Only attribute bits should be set.
+    ASSERT((value & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
+    attributes = static_cast<PropertyAttributes>(value);
+  }
+
+  CONVERT_CHECKED(JSObject, obj, args[0]);
+  CONVERT_CHECKED(String, name, args[1]);
+  CONVERT_CHECKED(Smi, flag, args[2]);
+  CONVERT_CHECKED(JSFunction, fun, args[3]);
+  return obj->DefineAccessor(name, flag->value() == 0, fun, attributes);
+}
+
+
+static Object* Runtime_LookupAccessor(Arguments args) {
+  ASSERT(args.length() == 3);
+  CONVERT_CHECKED(JSObject, obj, args[0]);
+  CONVERT_CHECKED(String, name, args[1]);
+  CONVERT_CHECKED(Smi, flag, args[2]);
+  return obj->LookupAccessor(name, flag->value() == 0);
+}
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+static Object* Runtime_DebugBreak(Arguments args) {
+  ASSERT(args.length() == 0);
+  return Execution::DebugBreakHelper();
+}
+
+
+// Helper functions for wrapping and unwrapping stack frame ids.
+static Smi* WrapFrameId(StackFrame::Id id) {
+  ASSERT(IsAligned(OffsetFrom(id), static_cast<intptr_t>(4)));
+  return Smi::FromInt(id >> 2);
+}
+
+
+static StackFrame::Id UnwrapFrameId(Smi* wrapped) {
+  return static_cast<StackFrame::Id>(wrapped->value() << 2);
+}
+
+
+// Adds a JavaScript function as a debug event listener.
+// args[0]: debug event listener function to set or null or undefined for
+//          clearing the event listener function
+// args[1]: object supplied during callback
+static Object* Runtime_SetDebugEventListener(Arguments args) {
+  ASSERT(args.length() == 2);
+  RUNTIME_ASSERT(args[0]->IsJSFunction() ||
+                 args[0]->IsUndefined() ||
+                 args[0]->IsNull());
+  Handle<Object> callback = args.at<Object>(0);
+  Handle<Object> data = args.at<Object>(1);
+  Debugger::SetEventListener(callback, data);
+
+  return Heap::undefined_value();
+}
+
+
+static Object* Runtime_Break(Arguments args) {
+  ASSERT(args.length() == 0);
+  StackGuard::DebugBreak();
+  return Heap::undefined_value();
+}
+
+
+// Find the length of the prototype chain that is to be handled as one. If a
+// prototype object is hidden it is to be viewed as part of the object it
+// is the prototype for.
+static int LocalPrototypeChainLength(JSObject* obj) {
+  int count = 1;
+  Object* proto = obj->GetPrototype();
+  while (proto->IsJSObject() &&
+         JSObject::cast(proto)->map()->is_hidden_prototype()) {
+    count++;
+    proto = JSObject::cast(proto)->GetPrototype();
+  }
+  return count;
+}
+
+
+static Object* DebugLookupResultValue(Object* receiver, String* name,
+                                      LookupResult* result,
+                                      bool* caught_exception) {
+  Object* value;
+  switch (result->type()) {
+    case NORMAL:
+      value = result->holder()->GetNormalizedProperty(result);
+      if (value->IsTheHole()) {
+        return Heap::undefined_value();
+      }
+      return value;
+    case FIELD:
+      value =
+          JSObject::cast(
+              result->holder())->FastPropertyAt(result->GetFieldIndex());
+      if (value->IsTheHole()) {
+        return Heap::undefined_value();
+      }
+      return value;
+    case CONSTANT_FUNCTION:
+      return result->GetConstantFunction();
+    case CALLBACKS: {
+      Object* structure = result->GetCallbackObject();
+      if (structure->IsProxy() || structure->IsAccessorInfo()) {
+        value = receiver->GetPropertyWithCallback(
+            receiver, structure, name, result->holder());
+        if (value->IsException()) {
+          value = Top::pending_exception();
+          Top::clear_pending_exception();
+          if (caught_exception != NULL) {
+            *caught_exception = true;
+          }
+        }
+        return value;
+      } else {
+        return Heap::undefined_value();
+      }
+    }
+    case INTERCEPTOR:
+    case MAP_TRANSITION:
+    case CONSTANT_TRANSITION:
+    case NULL_DESCRIPTOR:
+      return Heap::undefined_value();
+    default:
+      UNREACHABLE();
+  }
+  UNREACHABLE();
+  return Heap::undefined_value();
+}
+
+
+// Get debugger related details for an object property.
+// args[0]: object holding property
+// args[1]: name of the property
+//
+// The array returned contains the following information:
+// 0: Property value
+// 1: Property details
+// 2: Property value is exception
+// 3: Getter function if defined
+// 4: Setter function if defined
+// Items 2-4 are only filled if the property has either a getter or a setter
+// defined through __defineGetter__ and/or __defineSetter__.
+static Object* Runtime_DebugGetPropertyDetails(Arguments args) {
+  HandleScope scope;
+
+  ASSERT(args.length() == 2);
+
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_CHECKED(String, name, 1);
+
+  // Make sure to set the current context to the context before the debugger was
+  // entered (if the debugger is entered). The reason for switching context here
+  // is that for some property lookups (accessors and interceptors) callbacks
+  // into the embedding application can occur, and the embedding application
+  // could have the assumption that its own global context is the current
+  // context and not some internal debugger context.
+  SaveContext save;
+  if (Debug::InDebugger()) {
+    Top::set_context(*Debug::debugger_entry()->GetContext());
+  }
+
+  // Skip the global proxy as it has no properties and always delegates to the
+  // real global object.
+  if (obj->IsJSGlobalProxy()) {
+    obj = Handle<JSObject>(JSObject::cast(obj->GetPrototype()));
+  }
+
+
+  // Check if the name is trivially convertible to an index and get the element
+  // if so.
+  uint32_t index;
+  if (name->AsArrayIndex(&index)) {
+    Handle<FixedArray> details = Factory::NewFixedArray(2);
+    details->set(0, Runtime::GetElementOrCharAt(obj, index));
+    details->set(1, PropertyDetails(NONE, NORMAL).AsSmi());
+    return *Factory::NewJSArrayWithElements(details);
+  }
+
+  // Find the number of objects making up this.
+  int length = LocalPrototypeChainLength(*obj);
+
+  // Try local lookup on each of the objects.
+  Handle<JSObject> jsproto = obj;
+  for (int i = 0; i < length; i++) {
+    LookupResult result;
+    jsproto->LocalLookup(*name, &result);
+    if (result.IsProperty()) {
+      // LookupResult is not GC safe as it holds raw object pointers.
+      // GC can happen later in this code so put the required fields into
+      // local variables using handles when required for later use.
+      PropertyType result_type = result.type();
+      Handle<Object> result_callback_obj;
+      if (result_type == CALLBACKS) {
+        result_callback_obj = Handle<Object>(result.GetCallbackObject());
+      }
+      Smi* property_details = result.GetPropertyDetails().AsSmi();
+      // DebugLookupResultValue can cause GC so details from LookupResult needs
+      // to be copied to handles before this.
+      bool caught_exception = false;
+      Object* raw_value = DebugLookupResultValue(*obj, *name, &result,
+                                                 &caught_exception);
+      if (raw_value->IsFailure()) return raw_value;
+      Handle<Object> value(raw_value);
+
+      // If the callback object is a fixed array then it contains JavaScript
+      // getter and/or setter.
+      bool hasJavaScriptAccessors = result_type == CALLBACKS &&
+                                    result_callback_obj->IsFixedArray();
+      Handle<FixedArray> details =
+          Factory::NewFixedArray(hasJavaScriptAccessors ? 5 : 2);
+      details->set(0, *value);
+      details->set(1, property_details);
+      if (hasJavaScriptAccessors) {
+        details->set(2,
+                     caught_exception ? Heap::true_value()
+                                      : Heap::false_value());
+        details->set(3, FixedArray::cast(*result_callback_obj)->get(0));
+        details->set(4, FixedArray::cast(*result_callback_obj)->get(1));
+      }
+
+      return *Factory::NewJSArrayWithElements(details);
+    }
+    if (i < length - 1) {
+      jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
+    }
+  }
+
+  return Heap::undefined_value();
+}
+
+
+static Object* Runtime_DebugGetProperty(Arguments args) {
+  HandleScope scope;
+
+  ASSERT(args.length() == 2);
+
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_CHECKED(String, name, 1);
+
+  LookupResult result;
+  obj->Lookup(*name, &result);
+  if (result.IsProperty()) {
+    return DebugLookupResultValue(*obj, *name, &result, NULL);
+  }
+  return Heap::undefined_value();
+}
+
+
+// Return the names of the local named properties.
+// args[0]: object
+static Object* Runtime_DebugLocalPropertyNames(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  if (!args[0]->IsJSObject()) {
+    return Heap::undefined_value();
+  }
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+
+  // Skip the global proxy as it has no properties and always delegates to the
+  // real global object.
+  if (obj->IsJSGlobalProxy()) {
+    obj = Handle<JSObject>(JSObject::cast(obj->GetPrototype()));
+  }
+
+  // Find the number of objects making up this.
+  int length = LocalPrototypeChainLength(*obj);
+
+  // Find the number of local properties for each of the objects.
+  int* local_property_count = NewArray<int>(length);
+  int total_property_count = 0;
+  Handle<JSObject> jsproto = obj;
+  for (int i = 0; i < length; i++) {
+    int n;
+    n = jsproto->NumberOfLocalProperties(static_cast<PropertyAttributes>(NONE));
+    local_property_count[i] = n;
+    total_property_count += n;
+    if (i < length - 1) {
+      jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
+    }
+  }
+
+  // Allocate an array with storage for all the property names.
+  Handle<FixedArray> names = Factory::NewFixedArray(total_property_count);
+
+  // Get the property names.
+  jsproto = obj;
+  for (int i = 0; i < length; i++) {
+    jsproto->GetLocalPropertyNames(*names,
+                                   i == 0 ? 0 : local_property_count[i - 1]);
+    if (i < length - 1) {
+      jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
+    }
+  }
+
+  DeleteArray(local_property_count);
+  return *Factory::NewJSArrayWithElements(names);
+}
+
+
+// Return the names of the local indexed properties.
+// args[0]: object
+static Object* Runtime_DebugLocalElementNames(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  if (!args[0]->IsJSObject()) {
+    return Heap::undefined_value();
+  }
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+
+  int n = obj->NumberOfLocalElements(static_cast<PropertyAttributes>(NONE));
+  Handle<FixedArray> names = Factory::NewFixedArray(n);
+  obj->GetLocalElementKeys(*names, static_cast<PropertyAttributes>(NONE));
+  return *Factory::NewJSArrayWithElements(names);
+}
+
+
+// Return the property type calculated from the property details.
+// args[0]: smi with property details.
+static Object* Runtime_DebugPropertyTypeFromDetails(Arguments args) {
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(Smi, details, args[0]);
+  PropertyType type = PropertyDetails(details).type();
+  return Smi::FromInt(static_cast<int>(type));
+}
+
+
+// Return the property attribute calculated from the property details.
+// args[0]: smi with property details.
+static Object* Runtime_DebugPropertyAttributesFromDetails(Arguments args) {
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(Smi, details, args[0]);
+  PropertyAttributes attributes = PropertyDetails(details).attributes();
+  return Smi::FromInt(static_cast<int>(attributes));
+}
+
+
+// Return the property insertion index calculated from the property details.
+// args[0]: smi with property details.
+static Object* Runtime_DebugPropertyIndexFromDetails(Arguments args) {
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(Smi, details, args[0]);
+  int index = PropertyDetails(details).index();
+  return Smi::FromInt(index);
+}
+
+
+// Return information on whether an object has a named or indexed interceptor.
+// args[0]: object
+static Object* Runtime_DebugInterceptorInfo(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  if (!args[0]->IsJSObject()) {
+    return Smi::FromInt(0);
+  }
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+
+  int result = 0;
+  if (obj->HasNamedInterceptor()) result |= 2;
+  if (obj->HasIndexedInterceptor()) result |= 1;
+
+  return Smi::FromInt(result);
+}
+
+
+// Return property names from named interceptor.
+// args[0]: object
+static Object* Runtime_DebugNamedInterceptorPropertyNames(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+
+  if (obj->HasNamedInterceptor()) {
+    v8::Handle<v8::Array> result = GetKeysForNamedInterceptor(obj, obj);
+    if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
+  }
+  return Heap::undefined_value();
+}
+
+
+// Return element names from indexed interceptor.
+// args[0]: object
+static Object* Runtime_DebugIndexedInterceptorElementNames(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+
+  if (obj->HasIndexedInterceptor()) {
+    v8::Handle<v8::Array> result = GetKeysForIndexedInterceptor(obj, obj);
+    if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
+  }
+  return Heap::undefined_value();
+}
+
+
+// Return property value from named interceptor.
+// args[0]: object
+// args[1]: property name
+static Object* Runtime_DebugNamedInterceptorPropertyValue(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 2);
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  RUNTIME_ASSERT(obj->HasNamedInterceptor());
+  CONVERT_ARG_CHECKED(String, name, 1);
+
+  PropertyAttributes attributes;
+  return obj->GetPropertyWithInterceptor(*obj, *name, &attributes);
+}
+
+
+// Return element value from indexed interceptor.
+// args[0]: object
+// args[1]: index
+static Object* Runtime_DebugIndexedInterceptorElementValue(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 2);
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  RUNTIME_ASSERT(obj->HasIndexedInterceptor());
+  CONVERT_NUMBER_CHECKED(uint32_t, index, Uint32, args[1]);
+
+  return obj->GetElementWithInterceptor(*obj, index);
+}
+
+
+static Object* Runtime_CheckExecutionState(Arguments args) {
+  ASSERT(args.length() >= 1);
+  CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
+  // Check that the break id is valid.
+  if (Debug::break_id() == 0 || break_id != Debug::break_id()) {
+    return Top::Throw(Heap::illegal_execution_state_symbol());
+  }
+
+  return Heap::true_value();
+}
+
+
+static Object* Runtime_GetFrameCount(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+
+  // Check arguments.
+  Object* result = Runtime_CheckExecutionState(args);
+  if (result->IsFailure()) return result;
+
+  // Count all frames which are relevant to debugging stack trace.
+  int n = 0;
+  StackFrame::Id id = Debug::break_frame_id();
+  if (id == StackFrame::NO_ID) {
+    // If there is no JavaScript stack frame, the count is 0.
+    return Smi::FromInt(0);
+  }
+  for (JavaScriptFrameIterator it(id); !it.done(); it.Advance()) n++;
+  return Smi::FromInt(n);
+}
+
+
+static const int kFrameDetailsFrameIdIndex = 0;
+static const int kFrameDetailsReceiverIndex = 1;
+static const int kFrameDetailsFunctionIndex = 2;
+static const int kFrameDetailsArgumentCountIndex = 3;
+static const int kFrameDetailsLocalCountIndex = 4;
+static const int kFrameDetailsSourcePositionIndex = 5;
+static const int kFrameDetailsConstructCallIndex = 6;
+static const int kFrameDetailsDebuggerFrameIndex = 7;
+static const int kFrameDetailsFirstDynamicIndex = 8;
+
+// Return an array with frame details
+// args[0]: number: break id
+// args[1]: number: frame index
+//
+// The array returned contains the following information:
+// 0: Frame id
+// 1: Receiver
+// 2: Function
+// 3: Argument count
+// 4: Local count
+// 5: Source position
+// 6: Constructor call
+// 7: Debugger frame
+// Arguments name, value
+// Locals name, value
+static Object* Runtime_GetFrameDetails(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 2);
+
+  // Check arguments.
+  Object* check = Runtime_CheckExecutionState(args);
+  if (check->IsFailure()) return check;
+  CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
+
+  // Find the relevant frame with the requested index.
+  StackFrame::Id id = Debug::break_frame_id();
+  if (id == StackFrame::NO_ID) {
+    // If there are no JavaScript stack frames, return undefined.
+    return Heap::undefined_value();
+  }
+  int count = 0;
+  JavaScriptFrameIterator it(id);
+  for (; !it.done(); it.Advance()) {
+    if (count == index) break;
+    count++;
+  }
+  if (it.done()) return Heap::undefined_value();
+
+  // Traverse the saved contexts chain to find the active context for the
+  // selected frame.
+  SaveContext* save = Top::save_context();
+  while (save != NULL && !save->below(it.frame())) {
+    save = save->prev();
+  }
+  ASSERT(save != NULL);
+
+  // Get the frame id.
+  Handle<Object> frame_id(WrapFrameId(it.frame()->id()));
+
+  // Find source position.
+  int position = it.frame()->code()->SourcePosition(it.frame()->pc());
+
+  // Check for constructor frame.
+  bool constructor = it.frame()->IsConstructor();
+
+  // Get code and read scope info from it for local variable information.
+  Handle<Code> code(it.frame()->code());
+  ScopeInfo<> info(*code);
+
+  // Get the context.
+  Handle<Context> context(Context::cast(it.frame()->context()));
+
+  // Get the locals names and values into a temporary array.
+  //
+  // TODO(1240907): Hide compiler-introduced stack variables
+  // (e.g. .result)?  For users of the debugger, they will probably be
+  // confusing.
+  Handle<FixedArray> locals = Factory::NewFixedArray(info.NumberOfLocals() * 2);
+  for (int i = 0; i < info.NumberOfLocals(); i++) {
+    // Name of the local.
+    locals->set(i * 2, *info.LocalName(i));
+
+    // Fetch the value of the local - either from the stack or from a
+    // heap-allocated context.
+    if (i < info.number_of_stack_slots()) {
+      locals->set(i * 2 + 1, it.frame()->GetExpression(i));
+    } else {
+      Handle<String> name = info.LocalName(i);
+      // Traverse the context chain to the function context as all local
+      // variables stored in the context will be on the function context.
+      while (!context->is_function_context()) {
+        context = Handle<Context>(context->previous());
+      }
+      ASSERT(context->is_function_context());
+      locals->set(i * 2 + 1,
+                  context->get(ScopeInfo<>::ContextSlotIndex(*code, *name,
+                                                             NULL)));
+    }
+  }
+
+  // Now advance to the arguments adapter frame (if any). It contains all
+  // the provided parameters whereas the function frame always has the number
+  // of arguments matching the function's parameters. The rest of the
+  // information (except for what is collected above) is the same.
+  it.AdvanceToArgumentsFrame();
+
+  // Find the number of arguments to fill. At least fill the number of
+  // parameters for the function and fill more if more parameters are provided.
+  int argument_count = info.number_of_parameters();
+  if (argument_count < it.frame()->GetProvidedParametersCount()) {
+    argument_count = it.frame()->GetProvidedParametersCount();
+  }
+
+  // Calculate the size of the result.
+  int details_size = kFrameDetailsFirstDynamicIndex +
+                     2 * (argument_count + info.NumberOfLocals());
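+  // For example, a frame with 2 arguments and 1 local needs
+  // 8 + 2 * (2 + 1) = 14 entries.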
+  Handle<FixedArray> details = Factory::NewFixedArray(details_size);
+
+  // Add the frame id.
+  details->set(kFrameDetailsFrameIdIndex, *frame_id);
+
+  // Add the function (same as in function frame).
+  details->set(kFrameDetailsFunctionIndex, it.frame()->function());
+
+  // Add the arguments count.
+  details->set(kFrameDetailsArgumentCountIndex, Smi::FromInt(argument_count));
+
+  // Add the locals count
+  details->set(kFrameDetailsLocalCountIndex,
+               Smi::FromInt(info.NumberOfLocals()));
+
+  // Add the source position.
+  if (position != RelocInfo::kNoPosition) {
+    details->set(kFrameDetailsSourcePositionIndex, Smi::FromInt(position));
+  } else {
+    details->set(kFrameDetailsSourcePositionIndex, Heap::undefined_value());
+  }
+
+  // Add the constructor information.
+  details->set(kFrameDetailsConstructCallIndex, Heap::ToBoolean(constructor));
+
+  // Add information on whether this frame is invoked in the debugger context.
+  details->set(kFrameDetailsDebuggerFrameIndex,
+               Heap::ToBoolean(*save->context() == *Debug::debug_context()));
+
+  // Fill the dynamic part.
+  int details_index = kFrameDetailsFirstDynamicIndex;
+
+  // Add arguments name and value.
+  for (int i = 0; i < argument_count; i++) {
+    // Name of the argument.
+    if (i < info.number_of_parameters()) {
+      details->set(details_index++, *info.parameter_name(i));
+    } else {
+      details->set(details_index++, Heap::undefined_value());
+    }
+
+    // Parameter value.
+    if (i < it.frame()->GetProvidedParametersCount()) {
+      details->set(details_index++, it.frame()->GetParameter(i));
+    } else {
+      details->set(details_index++, Heap::undefined_value());
+    }
+  }
+
+  // Add locals name and value from the temporary copy from the function frame.
+  for (int i = 0; i < info.NumberOfLocals() * 2; i++) {
+    details->set(details_index++, locals->get(i));
+  }
+
+  // Add the receiver (same as in function frame).
+  // THIS MUST BE DONE LAST SINCE WE MIGHT ADVANCE
+  // THE FRAME ITERATOR TO WRAP THE RECEIVER.
+  Handle<Object> receiver(it.frame()->receiver());
+  if (!receiver->IsJSObject()) {
+    // If the receiver is NOT a JSObject we have hit an optimization
+    // where a value object is not converted into a wrapped JS object.
+    // To hide this optimization from the debugger, we wrap the receiver
+    // by creating correct wrapper object based on the calling frame's
+    // global context.
+    it.Advance();
+    Handle<Context> calling_frames_global_context(
+        Context::cast(Context::cast(it.frame()->context())->global_context()));
+    receiver = Factory::ToObject(receiver, calling_frames_global_context);
+  }
+  details->set(kFrameDetailsReceiverIndex, *receiver);
+
+  ASSERT_EQ(details_size, details_index);
+  return *Factory::NewJSArrayWithElements(details);
+}
+
+
+// Copy all the context locals into an object used to materialize a scope.
+static void CopyContextLocalsToScopeObject(Handle<Code> code,
+                                           ScopeInfo<>& scope_info,
+                                           Handle<Context> context,
+                                           Handle<JSObject> scope_object) {
+  // Fill all context locals to the context extension.
+  for (int i = Context::MIN_CONTEXT_SLOTS;
+       i < scope_info.number_of_context_slots();
+       i++) {
+    int context_index =
+        ScopeInfo<>::ContextSlotIndex(*code,
+                                      *scope_info.context_slot_name(i),
+                                      NULL);
+
+    // Don't include the arguments shadow (.arguments) context variable.
+    if (*scope_info.context_slot_name(i) != Heap::arguments_shadow_symbol()) {
+      SetProperty(scope_object,
+                  scope_info.context_slot_name(i),
+                  Handle<Object>(context->get(context_index)), NONE);
+    }
+  }
+}
+
+
+// Create a plain JSObject which materializes the local scope for the specified
+// frame.
+static Handle<JSObject> MaterializeLocalScope(JavaScriptFrame* frame) {
+  Handle<JSFunction> function(JSFunction::cast(frame->function()));
+  Handle<Code> code(function->code());
+  ScopeInfo<> scope_info(*code);
+
+  // Allocate and initialize a JSObject with all the arguments, stack locals
+  // heap locals and extension properties of the debugged function.
+  Handle<JSObject> local_scope = Factory::NewJSObject(Top::object_function());
+
+  // First fill all parameters.
+  for (int i = 0; i < scope_info.number_of_parameters(); ++i) {
+    SetProperty(local_scope,
+                scope_info.parameter_name(i),
+                Handle<Object>(frame->GetParameter(i)), NONE);
+  }
+
+  // Second fill all stack locals.
+  for (int i = 0; i < scope_info.number_of_stack_slots(); i++) {
+    SetProperty(local_scope,
+                scope_info.stack_slot_name(i),
+                Handle<Object>(frame->GetExpression(i)), NONE);
+  }
+
+  // Third fill all context locals.
+  Handle<Context> frame_context(Context::cast(frame->context()));
+  Handle<Context> function_context(frame_context->fcontext());
+  CopyContextLocalsToScopeObject(code, scope_info,
+                                 function_context, local_scope);
+
+  // Finally copy any properties from the function context extension. This will
+  // be variables introduced by eval.
+  if (function_context->closure() == *function) {
+    if (function_context->has_extension() &&
+        !function_context->IsGlobalContext()) {
+      Handle<JSObject> ext(JSObject::cast(function_context->extension()));
+      Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS);
+      for (int i = 0; i < keys->length(); i++) {
+        // Names of variables introduced by eval are strings.
+        ASSERT(keys->get(i)->IsString());
+        Handle<String> key(String::cast(keys->get(i)));
+        SetProperty(local_scope, key, GetProperty(ext, key), NONE);
+      }
+    }
+  }
+  return local_scope;
+}
+
+
+// Create a plain JSObject which materializes the closure content for the
+// context.
+static Handle<JSObject> MaterializeClosure(Handle<Context> context) {
+  ASSERT(context->is_function_context());
+
+  Handle<Code> code(context->closure()->code());
+  ScopeInfo<> scope_info(*code);
+
+  // Allocate and initialize a JSObject with all the content of this function
+  // closure.
+  Handle<JSObject> closure_scope = Factory::NewJSObject(Top::object_function());
+
+  // Check whether the arguments shadow object exists.
+  int arguments_shadow_index =
+      ScopeInfo<>::ContextSlotIndex(*code,
+                                    Heap::arguments_shadow_symbol(),
+                                    NULL);
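+  // If there is no arguments shadow slot the parameter values are not
+  // available from the context and are left out of the scope object.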
+  if (arguments_shadow_index >= 0) {
+    // In this case all the arguments are available in the arguments shadow
+    // object.
+    Handle<JSObject> arguments_shadow(
+        JSObject::cast(context->get(arguments_shadow_index)));
+    for (int i = 0; i < scope_info.number_of_parameters(); ++i) {
+      SetProperty(closure_scope,
+                  scope_info.parameter_name(i),
+                  Handle<Object>(arguments_shadow->GetElement(i)), NONE);
+    }
+  }
+
+  // Fill all context locals to the context extension.
+  CopyContextLocalsToScopeObject(code, scope_info, context, closure_scope);
+
+  // Finally copy any properties from the function context extension. This will
+  // be variables introduced by eval.
+  if (context->has_extension()) {
+    Handle<JSObject> ext(JSObject::cast(context->extension()));
+    Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS);
+    for (int i = 0; i < keys->length(); i++) {
+      // Names of variables introduced by eval are strings.
+      ASSERT(keys->get(i)->IsString());
+      Handle<String> key(String::cast(keys->get(i)));
+      SetProperty(closure_scope, key, GetProperty(ext, key), NONE);
+    }
+  }
+
+  return closure_scope;
+}
+
+
+// Iterate over the actual scopes visible from a stack frame. All scopes are
+// backed by an actual context except the local scope, which is inserted
+// "artifically" in the context chain.
+class ScopeIterator {
+ public:
+  enum ScopeType {
+    ScopeTypeGlobal = 0,
+    ScopeTypeLocal,
+    ScopeTypeWith,
+    ScopeTypeClosure,
+    // Every catch block contains an implicit with block (its parameter is
+    // a JSContextExtensionObject) that extends the current scope with a
+    // variable holding the exception object. Such with blocks are treated as
+    // scopes of their own type.
+    ScopeTypeCatch
+  };
+
+  explicit ScopeIterator(JavaScriptFrame* frame)
+    : frame_(frame),
+      function_(JSFunction::cast(frame->function())),
+      context_(Context::cast(frame->context())),
+      local_done_(false),
+      at_local_(false) {
+
+    // Check whether the first scope is actually a local scope.
+    if (context_->IsGlobalContext()) {
+      // If there is a stack slot for .result then this local scope has been
+      // created for evaluating top level code and it is not a real local scope.
+      // Checking for the existence of .result seems fragile, but the scope info
+      // saved with the code object does not otherwise have that information.
+      Handle<Code> code(function_->code());
+      int index = ScopeInfo<>::StackSlotIndex(*code, Heap::result_symbol());
+      at_local_ = index < 0;
+    } else if (context_->is_function_context()) {
+      at_local_ = true;
+    }
+  }
+
+  // More scopes?
+  bool Done() { return context_.is_null(); }
+
+  // Move to the next scope.
+  void Next() {
+    // If at a local scope mark the local scope as passed.
+    if (at_local_) {
+      at_local_ = false;
+      local_done_ = true;
+
+      // If the current context is not associated with the local scope the
+      // current context is the next real scope, so don't move to the next
+      // context in this case.
+      if (context_->closure() != *function_) {
+        return;
+      }
+    }
+
+    // The global scope is always the last in the chain.
+    if (context_->IsGlobalContext()) {
+      context_ = Handle<Context>();
+      return;
+    }
+
+    // Move to the next context.
+    if (context_->is_function_context()) {
+      context_ = Handle<Context>(Context::cast(context_->closure()->context()));
+    } else {
+      context_ = Handle<Context>(context_->previous());
+    }
+
+    // If passing the local scope indicate that the current scope is now the
+    // local scope.
+    if (!local_done_ &&
+        (context_->IsGlobalContext() || (context_->is_function_context()))) {
+      at_local_ = true;
+    }
+  }
+
+  // Return the type of the current scope.
+  int Type() {
+    if (at_local_) {
+      return ScopeTypeLocal;
+    }
+    if (context_->IsGlobalContext()) {
+      ASSERT(context_->global()->IsGlobalObject());
+      return ScopeTypeGlobal;
+    }
+    if (context_->is_function_context()) {
+      return ScopeTypeClosure;
+    }
+    ASSERT(context_->has_extension());
+    // Current scope is either an explicit with statement or a with statement
+    // implicitly generated for a catch block.
+    // If the extension object here is a JSContextExtensionObject then
+    // current with statement is one from a catch block otherwise it's a
+    // regular with statement.
+    if (context_->extension()->IsJSContextExtensionObject()) {
+      return ScopeTypeCatch;
+    }
+    return ScopeTypeWith;
+  }
+
+  // Return the JavaScript object with the content of the current scope.
+  Handle<JSObject> ScopeObject() {
+    switch (Type()) {
+      case ScopeIterator::ScopeTypeGlobal:
+        return Handle<JSObject>(CurrentContext()->global());
+        break;
+      case ScopeIterator::ScopeTypeLocal:
+        // Materialize the content of the local scope into a JSObject.
+        return MaterializeLocalScope(frame_);
+        break;
+      case ScopeIterator::ScopeTypeWith:
+      case ScopeIterator::ScopeTypeCatch:
+        // Return the with object.
+        return Handle<JSObject>(CurrentContext()->extension());
+        break;
+      case ScopeIterator::ScopeTypeClosure:
+        // Materialize the content of the closure scope into a JSObject.
+        return MaterializeClosure(CurrentContext());
+        break;
+    }
+    UNREACHABLE();
+    return Handle<JSObject>();
+  }
+
+  // Return the context for this scope. For the local context there might not
+  // be an actual context.
+  Handle<Context> CurrentContext() {
+    if (at_local_ && context_->closure() != *function_) {
+      return Handle<Context>();
+    }
+    return context_;
+  }
+
+#ifdef DEBUG
+  // Debug print of the content of the current scope.
+  void DebugPrint() {
+    switch (Type()) {
+      case ScopeIterator::ScopeTypeGlobal:
+        PrintF("Global:\n");
+        CurrentContext()->Print();
+        break;
+
+      case ScopeIterator::ScopeTypeLocal: {
+        PrintF("Local:\n");
+        Handle<Code> code(function_->code());
+        ScopeInfo<> scope_info(*code);
+        scope_info.Print();
+        if (!CurrentContext().is_null()) {
+          CurrentContext()->Print();
+          if (CurrentContext()->has_extension()) {
+            Handle<JSObject> extension =
+                Handle<JSObject>(CurrentContext()->extension());
+            if (extension->IsJSContextExtensionObject()) {
+              extension->Print();
+            }
+          }
+        }
+        break;
+      }
+
+      case ScopeIterator::ScopeTypeWith: {
+        PrintF("With:\n");
+        Handle<JSObject> extension =
+            Handle<JSObject>(CurrentContext()->extension());
+        extension->Print();
+        break;
+      }
+
+      case ScopeIterator::ScopeTypeCatch: {
+        PrintF("Catch:\n");
+        Handle<JSObject> extension =
+            Handle<JSObject>(CurrentContext()->extension());
+        extension->Print();
+        break;
+      }
+
+      case ScopeIterator::ScopeTypeClosure: {
+        PrintF("Closure:\n");
+        CurrentContext()->Print();
+        if (CurrentContext()->has_extension()) {
+          Handle<JSObject> extension =
+              Handle<JSObject>(CurrentContext()->extension());
+          if (extension->IsJSContextExtensionObject()) {
+            extension->Print();
+          }
+        }
+        break;
+      }
+
+      default:
+        UNREACHABLE();
+    }
+    PrintF("\n");
+  }
+#endif
+
+ private:
+  JavaScriptFrame* frame_;
+  Handle<JSFunction> function_;
+  Handle<Context> context_;
+  bool local_done_;
+  bool at_local_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ScopeIterator);
+};
+
+
+static Object* Runtime_GetScopeCount(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 2);
+
+  // Check arguments.
+  Object* check = Runtime_CheckExecutionState(args);
+  if (check->IsFailure()) return check;
+  CONVERT_CHECKED(Smi, wrapped_id, args[1]);
+
+  // Get the frame where the debugging is performed.
+  StackFrame::Id id = UnwrapFrameId(wrapped_id);
+  JavaScriptFrameIterator it(id);
+  JavaScriptFrame* frame = it.frame();
+
+  // Count the visible scopes.
+  int n = 0;
+  for (ScopeIterator it(frame); !it.Done(); it.Next()) {
+    n++;
+  }
+
+  return Smi::FromInt(n);
+}
+
+
+static const int kScopeDetailsTypeIndex = 0;
+static const int kScopeDetailsObjectIndex = 1;
+static const int kScopeDetailsSize = 2;
+
+// Return an array with scope details
+// args[0]: number: break id
+// args[1]: number: frame index
+// args[2]: number: scope index
+//
+// The array returned contains the following information:
+// 0: Scope type
+// 1: Scope object
+static Object* Runtime_GetScopeDetails(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 3);
+
+  // Check arguments.
+  Object* check = Runtime_CheckExecutionState(args);
+  if (check->IsFailure()) return check;
+  CONVERT_CHECKED(Smi, wrapped_id, args[1]);
+  CONVERT_NUMBER_CHECKED(int, index, Int32, args[2]);
+
+  // Get the frame where the debugging is performed.
+  StackFrame::Id id = UnwrapFrameId(wrapped_id);
+  JavaScriptFrameIterator frame_it(id);
+  JavaScriptFrame* frame = frame_it.frame();
+
+  // Find the requested scope.
+  int n = 0;
+  ScopeIterator it(frame);
+  for (; !it.Done() && n < index; it.Next()) {
+    n++;
+  }
+  if (it.Done()) {
+    return Heap::undefined_value();
+  }
+
+  // Calculate the size of the result.
+  int details_size = kScopeDetailsSize;
+  Handle<FixedArray> details = Factory::NewFixedArray(details_size);
+
+  // Fill in scope details.
+  details->set(kScopeDetailsTypeIndex, Smi::FromInt(it.Type()));
+  details->set(kScopeDetailsObjectIndex, *it.ScopeObject());
+
+  return *Factory::NewJSArrayWithElements(details);
+}
+
+
+static Object* Runtime_DebugPrintScopes(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 0);
+
+#ifdef DEBUG
+  // Print the scopes for the top frame.
+  StackFrameLocator locator;
+  JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
+  for (ScopeIterator it(frame); !it.Done(); it.Next()) {
+    it.DebugPrint();
+  }
+#endif
+  return Heap::undefined_value();
+}
+
+
+static Object* Runtime_GetCFrames(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  Object* result = Runtime_CheckExecutionState(args);
+  if (result->IsFailure()) return result;
+
+#if V8_HOST_ARCH_64_BIT
+  UNIMPLEMENTED();
+  return Heap::undefined_value();
+#else
+
+  static const int kMaxCFramesSize = 200;
+  ScopedVector<OS::StackFrame> frames(kMaxCFramesSize);
+  int frames_count = OS::StackWalk(frames);
+  if (frames_count == OS::kStackWalkError) {
+    return Heap::undefined_value();
+  }
+
+  Handle<String> address_str = Factory::LookupAsciiSymbol("address");
+  Handle<String> text_str = Factory::LookupAsciiSymbol("text");
+  Handle<FixedArray> frames_array = Factory::NewFixedArray(frames_count);
+  for (int i = 0; i < frames_count; i++) {
+    Handle<JSObject> frame_value = Factory::NewJSObject(Top::object_function());
+    frame_value->SetProperty(
+        *address_str,
+        *Factory::NewNumberFromInt(reinterpret_cast<int>(frames[i].address)),
+        NONE);
+
+    // Get the stack walk text for this frame.
+    Handle<String> frame_text;
+    if (strlen(frames[i].text) > 0) {
+      Vector<const char> str(frames[i].text, strlen(frames[i].text));
+      frame_text = Factory::NewStringFromAscii(str);
+    }
+
+    if (!frame_text.is_null()) {
+      frame_value->SetProperty(*text_str, *frame_text, NONE);
+    }
+
+    frames_array->set(i, *frame_value);
+  }
+  return *Factory::NewJSArrayWithElements(frames_array);
+#endif  // V8_HOST_ARCH_64_BIT
+}
+
+
+static Object* Runtime_GetThreadCount(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+
+  // Check arguments.
+  Object* result = Runtime_CheckExecutionState(args);
+  if (result->IsFailure()) return result;
+
+  // Count all archived V8 threads.
+  int n = 0;
+  for (ThreadState* thread = ThreadState::FirstInUse();
+       thread != NULL;
+       thread = thread->Next()) {
+    n++;
+  }
+
+  // Total number of threads is current thread and archived threads.
+  return Smi::FromInt(n + 1);
+}
+
+
+static const int kThreadDetailsCurrentThreadIndex = 0;
+static const int kThreadDetailsThreadIdIndex = 1;
+static const int kThreadDetailsSize = 2;
+
+// Return an array with thread details
+// args[0]: number: break id
+// args[1]: number: thread index
+//
+// The array returned contains the following information:
+// 0: Is current thread?
+// 1: Thread id
+static Object* Runtime_GetThreadDetails(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 2);
+
+  // Check arguments.
+  Object* check = Runtime_CheckExecutionState(args);
+  if (check->IsFailure()) return check;
+  CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
+
+  // Allocate array for result.
+  Handle<FixedArray> details = Factory::NewFixedArray(kThreadDetailsSize);
+
+  // Thread index 0 is current thread.
+  if (index == 0) {
+    // Fill the details.
+    details->set(kThreadDetailsCurrentThreadIndex, Heap::true_value());
+    details->set(kThreadDetailsThreadIdIndex,
+                 Smi::FromInt(ThreadManager::CurrentId()));
+  } else {
+    // Find the thread with the requested index.
+    int n = 1;
+    ThreadState* thread = ThreadState::FirstInUse();
+    while (index != n && thread != NULL) {
+      thread = thread->Next();
+      n++;
+    }
+    if (thread == NULL) {
+      return Heap::undefined_value();
+    }
+
+    // Fill the details.
+    details->set(kThreadDetailsCurrentThreadIndex, Heap::false_value());
+    details->set(kThreadDetailsThreadIdIndex, Smi::FromInt(thread->id()));
+  }
+
+  // Convert to JS array and return.
+  return *Factory::NewJSArrayWithElements(details);
+}
+
+
+static Object* Runtime_GetBreakLocations(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+
+  CONVERT_ARG_CHECKED(JSFunction, fun, 0);
+  Handle<SharedFunctionInfo> shared(fun->shared());
+  // Find the source break locations.
+  Handle<Object> break_locations = Debug::GetSourceBreakLocations(shared);
+  if (break_locations->IsUndefined()) return Heap::undefined_value();
+  // Return array as JS array
+  return *Factory::NewJSArrayWithElements(
+      Handle<FixedArray>::cast(break_locations));
+}
+
+
+// Set a break point in a function
+// args[0]: function
+// args[1]: number: break source position (within the function source)
+// args[2]: number: break point object
+static Object* Runtime_SetFunctionBreakPoint(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 3);
+  CONVERT_ARG_CHECKED(JSFunction, fun, 0);
+  Handle<SharedFunctionInfo> shared(fun->shared());
+  CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
+  RUNTIME_ASSERT(source_position >= 0);
+  Handle<Object> break_point_object_arg = args.at<Object>(2);
+
+  // Set break point.
+  Debug::SetBreakPoint(shared, source_position, break_point_object_arg);
+
+  return Heap::undefined_value();
+}
+
+
+Object* Runtime::FindSharedFunctionInfoInScript(Handle<Script> script,
+                                                int position) {
+  // Iterate the heap looking for SharedFunctionInfo generated from the
+  // script. The inner most SharedFunctionInfo containing the source position
+  // for the requested break point is found.
+  // NOTE: This might require several heap iterations. If the
+  // SharedFunctionInfo found is not compiled, it is compiled and the heap is
+  // iterated again, as the compilation might create inner functions from the
+  // newly compiled function and the actual requested break point might be in
+  // one of these functions.
+  bool done = false;
+  // The current candidate for the source position:
+  int target_start_position = RelocInfo::kNoPosition;
+  Handle<SharedFunctionInfo> target;
+  // The current candidate for the last function in script:
+  Handle<SharedFunctionInfo> last;
+  while (!done) {
+    HeapIterator iterator;
+    while (iterator.has_next()) {
+      HeapObject* obj = iterator.next();
+      ASSERT(obj != NULL);
+      if (obj->IsSharedFunctionInfo()) {
+        Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(obj));
+        if (shared->script() == *script) {
+          // If the SharedFunctionInfo found has the requested script data and
+          // contains the source position it is a candidate.
+          int start_position = shared->function_token_position();
+          if (start_position == RelocInfo::kNoPosition) {
+            start_position = shared->start_position();
+          }
+          if (start_position <= position &&
+              position <= shared->end_position()) {
+            // If there is no candidate or this function is within the current
+            // candidate this is the new candidate.
+            if (target.is_null()) {
+              target_start_position = start_position;
+              target = shared;
+            } else {
+              if (target_start_position == start_position &&
+                  shared->end_position() == target->end_position()) {
+                  // If a top-level function contains only one function
+                  // declaration the source for the top-level and the function
+                  // is the same. In that case prefer the non top-level function.
+                if (!shared->is_toplevel()) {
+                  target_start_position = start_position;
+                  target = shared;
+                }
+              } else if (target_start_position <= start_position &&
+                         shared->end_position() <= target->end_position()) {
+                // This containment check includes equality as a function inside
+                // a top-level function can share either start or end position
+                // with the top-level function.
+                target_start_position = start_position;
+                target = shared;
+              }
+            }
+          }
+
+          // Keep track of the last function in the script.
+          if (last.is_null() ||
+              shared->end_position() > last->start_position()) {
+            last = shared;
+          }
+        }
+      }
+    }
+
+    // Make sure some candidate is selected.
+    if (target.is_null()) {
+      if (!last.is_null()) {
+        // Position after the last function - use last.
+        target = last;
+      } else {
+        // Unable to find function - possibly script without any function.
+        return Heap::undefined_value();
+      }
+    }
+
+    // If the candidate found is compiled we are done. NOTE: when lazy
+    // compilation of inner functions is introduced some additional checking
+    // needs to be done here to compile inner functions.
+    done = target->is_compiled();
+    if (!done) {
+      // If the candidate is not compiled compile it to reveal any inner
+      // functions which might contain the requested source position.
+      CompileLazyShared(target, KEEP_EXCEPTION, 0);
+    }
+  }
+
+  return *target;
+}
+
+
+// Change the state of a break point in a script. NOTE: Regarding performance
+// see the NOTE for GetScriptFromScriptData.
+// args[0]: script to set break point in
+// args[1]: number: break source position (within the script source)
+// args[2]: number: break point object
+static Object* Runtime_SetScriptBreakPoint(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 3);
+  CONVERT_ARG_CHECKED(JSValue, wrapper, 0);
+  CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
+  RUNTIME_ASSERT(source_position >= 0);
+  Handle<Object> break_point_object_arg = args.at<Object>(2);
+
+  // Get the script from the script wrapper.
+  RUNTIME_ASSERT(wrapper->value()->IsScript());
+  Handle<Script> script(Script::cast(wrapper->value()));
+
+  Object* result = Runtime::FindSharedFunctionInfoInScript(
+      script, source_position);
+  if (!result->IsUndefined()) {
+    Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result));
+    // Find position within function. The script position might be before the
+    // source position of the first function.
+    int position;
+    if (shared->start_position() > source_position) {
+      position = 0;
+    } else {
+      position = source_position - shared->start_position();
+    }
+    Debug::SetBreakPoint(shared, position, break_point_object_arg);
+  }
+  return Heap::undefined_value();
+}
+
+
+// Clear a break point
+// args[0]: number: break point object
+static Object* Runtime_ClearBreakPoint(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  Handle<Object> break_point_object_arg = args.at<Object>(0);
+
+  // Clear break point.
+  Debug::ClearBreakPoint(break_point_object_arg);
+
+  return Heap::undefined_value();
+}
+
+
+// Change the state of break on exceptions
+// args[0]: boolean indicating uncaught exceptions
+// args[1]: boolean indicating on/off
+static Object* Runtime_ChangeBreakOnException(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 2);
+  ASSERT(args[0]->IsNumber());
+  ASSERT(args[1]->IsBoolean());
+
+  // Update break point state
+  ExceptionBreakType type =
+      static_cast<ExceptionBreakType>(NumberToUint32(args[0]));
+  bool enable = args[1]->ToBoolean()->IsTrue();
+  Debug::ChangeBreakOnException(type, enable);
+  return Heap::undefined_value();
+}
+
+
+// Prepare for stepping
+// args[0]: break id for checking execution state
+// args[1]: step action from the enumeration StepAction
+// args[2]: number of times to perform the step, for step out it is the number
+//          of frames to step down.
+static Object* Runtime_PrepareStep(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 3);
+  // Check arguments.
+  Object* check = Runtime_CheckExecutionState(args);
+  if (check->IsFailure()) return check;
+  if (!args[1]->IsNumber() || !args[2]->IsNumber()) {
+    return Top::Throw(Heap::illegal_argument_symbol());
+  }
+
+  // Get the step action and check validity.
+  StepAction step_action = static_cast<StepAction>(NumberToInt32(args[1]));
+  if (step_action != StepIn &&
+      step_action != StepNext &&
+      step_action != StepOut &&
+      step_action != StepInMin &&
+      step_action != StepMin) {
+    return Top::Throw(Heap::illegal_argument_symbol());
+  }
+
+  // Get the number of steps.
+  int step_count = NumberToInt32(args[2]);
+  if (step_count < 1) {
+    return Top::Throw(Heap::illegal_argument_symbol());
+  }
+
+  // Clear all current stepping setup.
+  Debug::ClearStepping();
+
+  // Prepare step.
+  Debug::PrepareStep(static_cast<StepAction>(step_action), step_count);
+  return Heap::undefined_value();
+}
+
+
+// Clear all stepping set by PrepareStep.
+static Object* Runtime_ClearStepping(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 0);
+  Debug::ClearStepping();
+  return Heap::undefined_value();
+}
+
+
+// Creates a copy of the with context chain. The copy of the context chain
+// is linked to the function context supplied.
+static Handle<Context> CopyWithContextChain(Handle<Context> context_chain,
+                                            Handle<Context> function_context) {
+  // At the bottom of the chain. Return the function context to link to.
+  if (context_chain->is_function_context()) {
+    return function_context;
+  }
+
+  // Recursively copy the with contexts.
+  Handle<Context> previous(context_chain->previous());
+  Handle<JSObject> extension(JSObject::cast(context_chain->extension()));
+  return Factory::NewWithContext(
+      CopyWithContextChain(previous, function_context),
+      extension,
+      context_chain->IsCatchContext());
+}
+
+
+// Helper function to find or create the arguments object for
+// Runtime_DebugEvaluate.
+static Handle<Object> GetArgumentsObject(JavaScriptFrame* frame,
+                                         Handle<JSFunction> function,
+                                         Handle<Code> code,
+                                         const ScopeInfo<>* sinfo,
+                                         Handle<Context> function_context) {
+  // Try to find the value of 'arguments' to pass as parameter. If it is not
+  // found (that is the debugged function does not reference 'arguments' and
+  // does not support eval) then create an 'arguments' object.
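+  // The lookup order is: a stack slot named 'arguments', then a context slot,
+  // and finally a fresh arguments object built from the frame's actual
+  // parameters.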
+  int index;
+  if (sinfo->number_of_stack_slots() > 0) {
+    index = ScopeInfo<>::StackSlotIndex(*code, Heap::arguments_symbol());
+    if (index != -1) {
+      return Handle<Object>(frame->GetExpression(index));
+    }
+  }
+
+  if (sinfo->number_of_context_slots() > Context::MIN_CONTEXT_SLOTS) {
+    index = ScopeInfo<>::ContextSlotIndex(*code, Heap::arguments_symbol(),
+                                          NULL);
+    if (index != -1) {
+      return Handle<Object>(function_context->get(index));
+    }
+  }
+
+  const int length = frame->GetProvidedParametersCount();
+  Handle<JSObject> arguments = Factory::NewArgumentsObject(function, length);
+  Handle<FixedArray> array = Factory::NewFixedArray(length);
+  WriteBarrierMode mode = array->GetWriteBarrierMode();
+  for (int i = 0; i < length; i++) {
+    array->set(i, frame->GetParameter(i), mode);
+  }
+  arguments->set_elements(*array);
+  return arguments;
+}
+
+
+// Evaluate a piece of JavaScript in the context of a stack frame for
+// debugging. This is accomplished by creating a new context which in its
+// extension part has all the parameters and locals of the function on the
+// stack frame. A function which calls eval with the code to evaluate is then
+// compiled in this context and called in this context. As this context
+// replaces the context of the function on the stack frame a new (empty)
+// function is created as well to be used as the closure for the context.
+// This function and the context act as replacements for the function on the
+// stack frame presenting the same view of the values of parameters and
+// local variables as if the piece of JavaScript was evaluated at the point
+// where the function on the stack frame is currently stopped.
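+//
+// In effect the evaluation behaves roughly like the following sketch (not
+// actual code):
+//   (function(arguments, __source__) { return eval(__source__); })
+//       .call(receiver, arguments_object, source)
+// with the materialized local scope installed as the context extension.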
+static Object* Runtime_DebugEvaluate(Arguments args) {
+  HandleScope scope;
+
+  // Check the execution state and decode arguments frame and source to be
+  // evaluated.
+  ASSERT(args.length() == 4);
+  Object* check_result = Runtime_CheckExecutionState(args);
+  if (check_result->IsFailure()) return check_result;
+  CONVERT_CHECKED(Smi, wrapped_id, args[1]);
+  CONVERT_ARG_CHECKED(String, source, 2);
+  CONVERT_BOOLEAN_CHECKED(disable_break, args[3]);
+
+  // Handle the processing of break.
+  DisableBreak disable_break_save(disable_break);
+
+  // Get the frame where the debugging is performed.
+  StackFrame::Id id = UnwrapFrameId(wrapped_id);
+  JavaScriptFrameIterator it(id);
+  JavaScriptFrame* frame = it.frame();
+  Handle<JSFunction> function(JSFunction::cast(frame->function()));
+  Handle<Code> code(function->code());
+  ScopeInfo<> sinfo(*code);
+
+  // Traverse the saved contexts chain to find the active context for the
+  // selected frame.
+  SaveContext* save = Top::save_context();
+  while (save != NULL && !save->below(frame)) {
+    save = save->prev();
+  }
+  ASSERT(save != NULL);
+  SaveContext savex;
+  Top::set_context(*(save->context()));
+
+  // Create the (empty) function replacing the function on the stack frame for
+  // the purpose of evaluating in the context created below. It is important
+  // that this function does not describe any parameters and local variables
+  // in the context. If it does then this will cause problems with the lookup
+  // in Context::Lookup, where context slots for parameters and local variables
+  // are looked at before the extension object.
+  Handle<JSFunction> go_between =
+      Factory::NewFunction(Factory::empty_string(), Factory::undefined_value());
+  go_between->set_context(function->context());
+#ifdef DEBUG
+  ScopeInfo<> go_between_sinfo(go_between->shared()->code());
+  ASSERT(go_between_sinfo.number_of_parameters() == 0);
+  ASSERT(go_between_sinfo.number_of_context_slots() == 0);
+#endif
+
+  // Materialize the content of the local scope into a JSObject.
+  Handle<JSObject> local_scope = MaterializeLocalScope(frame);
+
+  // Allocate a new context for the debug evaluation and set the extension
+  // object to the materialized local scope built above.
+  Handle<Context> context =
+      Factory::NewFunctionContext(Context::MIN_CONTEXT_SLOTS, go_between);
+  context->set_extension(*local_scope);
+  // Copy any with contexts present and chain them in front of this context.
+  Handle<Context> frame_context(Context::cast(frame->context()));
+  Handle<Context> function_context(frame_context->fcontext());
+  context = CopyWithContextChain(frame_context, context);
+
+  // Wrap the evaluation statement in a new function compiled in the newly
+  // created context. The function has one parameter which has to be called
+  // 'arguments'. This is to have access to what would have been 'arguments' in
+  // the function being debugged.
+  // function(arguments,__source__) {return eval(__source__);}
+  static const char* source_str =
+      "(function(arguments,__source__){return eval(__source__);})";
+  static const int source_str_length = strlen(source_str);
+  Handle<String> function_source =
+      Factory::NewStringFromAscii(Vector<const char>(source_str,
+                                                     source_str_length));
+  Handle<JSFunction> boilerplate =
+      Compiler::CompileEval(function_source,
+                            context,
+                            context->IsGlobalContext(),
+                            Compiler::DONT_VALIDATE_JSON);
+  if (boilerplate.is_null()) return Failure::Exception();
+  Handle<JSFunction> compiled_function =
+      Factory::NewFunctionFromBoilerplate(boilerplate, context);
+
+  // Invoke the result of the compilation to get the evaluation function.
+  bool has_pending_exception;
+  Handle<Object> receiver(frame->receiver());
+  Handle<Object> evaluation_function =
+      Execution::Call(compiled_function, receiver, 0, NULL,
+                      &has_pending_exception);
+  if (has_pending_exception) return Failure::Exception();
+
+  Handle<Object> arguments = GetArgumentsObject(frame, function, code, &sinfo,
+                                                function_context);
+
+  // Invoke the evaluation function and return the result.
+  const int argc = 2;
+  Object** argv[argc] = { arguments.location(),
+                          Handle<Object>::cast(source).location() };
+  Handle<Object> result =
+      Execution::Call(Handle<JSFunction>::cast(evaluation_function), receiver,
+                      argc, argv, &has_pending_exception);
+  if (has_pending_exception) return Failure::Exception();
+
+  // Skip the global proxy as it has no properties and always delegates to the
+  // real global object.
+  if (result->IsJSGlobalProxy()) {
+    result = Handle<JSObject>(JSObject::cast(result->GetPrototype()));
+  }
+
+  return *result;
+}
+
+
+static Object* Runtime_DebugEvaluateGlobal(Arguments args) {
+  HandleScope scope;
+
+  // Check the execution state and decode arguments frame and source to be
+  // evaluated.
+  ASSERT(args.length() == 3);
+  Object* check_result = Runtime_CheckExecutionState(args);
+  if (check_result->IsFailure()) return check_result;
+  CONVERT_ARG_CHECKED(String, source, 1);
+  CONVERT_BOOLEAN_CHECKED(disable_break, args[2]);
+
+  // Handle the processing of break.
+  DisableBreak disable_break_save(disable_break);
+
+  // Enter the top context from before the debugger was invoked.
+  SaveContext save;
+  SaveContext* top = &save;
+  while (top != NULL && *top->context() == *Debug::debug_context()) {
+    top = top->prev();
+  }
+  if (top != NULL) {
+    Top::set_context(*top->context());
+  }
+
+  // Get the global context now set to the top context from before the
+  // debugger was invoked.
+  Handle<Context> context = Top::global_context();
+
+  // Compile the source to be evaluated.
+  Handle<JSFunction> boilerplate =
+      Handle<JSFunction>(Compiler::CompileEval(source,
+                                               context,
+                                               true,
+                                               Compiler::DONT_VALIDATE_JSON));
+  if (boilerplate.is_null()) return Failure::Exception();
+  Handle<JSFunction> compiled_function =
+      Handle<JSFunction>(Factory::NewFunctionFromBoilerplate(boilerplate,
+                                                             context));
+
+  // Invoke the result of the compilation to get the evaluation function.
+  bool has_pending_exception;
+  Handle<Object> receiver = Top::global();
+  Handle<Object> result =
+    Execution::Call(compiled_function, receiver, 0, NULL,
+                    &has_pending_exception);
+  if (has_pending_exception) return Failure::Exception();
+  return *result;
+}
+
+
+static Object* Runtime_DebugGetLoadedScripts(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 0);
+
+  // Fill the script objects.
+  Handle<FixedArray> instances = Debug::GetLoadedScripts();
+
+  // Convert the script objects to proper JS objects.
+  for (int i = 0; i < instances->length(); i++) {
+    Handle<Script> script = Handle<Script>(Script::cast(instances->get(i)));
+    // Get the script wrapper in a local handle before setting it in the
+    // instances array, because using
+    //   instances->set(i, *GetScriptWrapper(script))
+    // is unsafe as GetScriptWrapper might call GC and the C++ compiler might
+    // already have dereferenced the instances handle.
+    Handle<JSValue> wrapper = GetScriptWrapper(script);
+    instances->set(i, *wrapper);
+  }
+
+  // Return result as a JS array.
+  Handle<JSObject> result = Factory::NewJSObject(Top::array_function());
+  Handle<JSArray>::cast(result)->SetContent(*instances);
+  return *result;
+}
+
+
+// Helper function used by Runtime_DebugReferencedBy below.
+static int DebugReferencedBy(JSObject* target,
+                             Object* instance_filter, int max_references,
+                             FixedArray* instances, int instances_size,
+                             JSFunction* arguments_function) {
+  NoHandleAllocation ha;
+  AssertNoAllocation no_alloc;
+
+  // Iterate the heap.
+  int count = 0;
+  JSObject* last = NULL;
+  HeapIterator iterator;
+  while (iterator.has_next() &&
+         (max_references == 0 || count < max_references)) {
+    // Only look at JSObjects.
+    HeapObject* heap_obj = iterator.next();
+    if (heap_obj->IsJSObject()) {
+      // Skip context extension objects and argument arrays as these are
+      // checked in the context of functions using them.
+      JSObject* obj = JSObject::cast(heap_obj);
+      if (obj->IsJSContextExtensionObject() ||
+          obj->map()->constructor() == arguments_function) {
+        continue;
+      }
+
+      // Check if the JS object has a reference to the object looked for.
+      if (obj->ReferencesObject(target)) {
+        // Check instance filter if supplied. This is normally used to avoid
+        // references from mirror objects (see Runtime_IsInPrototypeChain).
+        if (!instance_filter->IsUndefined()) {
+          Object* V = obj;
+          while (true) {
+            Object* prototype = V->GetPrototype();
+            if (prototype->IsNull()) {
+              break;
+            }
+            if (instance_filter == prototype) {
+              obj = NULL;  // Don't add this object.
+              break;
+            }
+            V = prototype;
+          }
+        }
+
+        if (obj != NULL) {
+          // Valid reference found. Add it to the instances array if one was
+          // supplied and update the count.
+          if (instances != NULL && count < instances_size) {
+            instances->set(count, obj);
+          }
+          last = obj;
+          count++;
+        }
+      }
+    }
+  }
+
+  // Check for circular reference only. This can happen when the object is only
+  // referenced from mirrors and has a circular reference in which case the
+  // object is not really alive and would have been garbage collected if not
+  // referenced from the mirror.
+  if (count == 1 && last == target) {
+    count = 0;
+  }
+
+  // Return the number of referencing objects found.
+  return count;
+}
+
+
+// Scan the heap for objects with direct references to an object
+// args[0]: the object to find references to
+// args[1]: constructor function for instances to exclude (Mirror)
+// args[2]: the maximum number of objects to return
+static Object* Runtime_DebugReferencedBy(Arguments args) {
+  ASSERT(args.length() == 3);
+
+  // First perform a full GC in order to avoid references from dead objects.
+  Heap::CollectAllGarbage(false);
+
+  // Check parameters.
+  CONVERT_CHECKED(JSObject, target, args[0]);
+  Object* instance_filter = args[1];
+  RUNTIME_ASSERT(instance_filter->IsUndefined() ||
+                 instance_filter->IsJSObject());
+  CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[2]);
+  RUNTIME_ASSERT(max_references >= 0);
+
+  // Get the constructor function for context extension and arguments array.
+  JSObject* arguments_boilerplate =
+      Top::context()->global_context()->arguments_boilerplate();
+  JSFunction* arguments_function =
+      JSFunction::cast(arguments_boilerplate->map()->constructor());
+
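+  // DebugReferencedBy is called twice: first with a NULL array just to count
+  // the matching objects, then again to fill an array allocated to that size.
+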
+  // Get the number of referencing objects.
+  int count;
+  count = DebugReferencedBy(target, instance_filter, max_references,
+                            NULL, 0, arguments_function);
+
+  // Allocate an array to hold the result.
+  Object* object = Heap::AllocateFixedArray(count);
+  if (object->IsFailure()) return object;
+  FixedArray* instances = FixedArray::cast(object);
+
+  // Fill the referencing objects.
+  count = DebugReferencedBy(target, instance_filter, max_references,
+                            instances, count, arguments_function);
+
+  // Return result as JS array.
+  Object* result =
+      Heap::AllocateJSObject(
+          Top::context()->global_context()->array_function());
+  if (!result->IsFailure()) JSArray::cast(result)->SetContent(instances);
+  return result;
+}
+
+
+// Helper function used by Runtime_DebugConstructedBy below.
+static int DebugConstructedBy(JSFunction* constructor, int max_references,
+                              FixedArray* instances, int instances_size) {
+  AssertNoAllocation no_alloc;
+
+  // Iterate the heap.
+  int count = 0;
+  HeapIterator iterator;
+  while (iterator.has_next() &&
+         (max_references == 0 || count < max_references)) {
+    // Only look at JSObjects.
+    HeapObject* heap_obj = iterator.next();
+    if (heap_obj->IsJSObject()) {
+      JSObject* obj = JSObject::cast(heap_obj);
+      if (obj->map()->constructor() == constructor) {
+        // Valid reference found. Add it to the instances array if one was
+        // supplied and update the count.
+        if (instances != NULL && count < instances_size) {
+          instances->set(count, obj);
+        }
+        count++;
+      }
+    }
+  }
+
+  // Return the number of referencing objects found.
+  return count;
+}
+
+
+// Scan the heap for objects constructed by a specific function.
+// args[0]: the constructor to find instances of
+// args[1]: the maximum number of objects to return
+static Object* Runtime_DebugConstructedBy(Arguments args) {
+  ASSERT(args.length() == 2);
+
+  // First perform a full GC in order to avoid dead objects.
+  Heap::CollectAllGarbage(false);
+
+  // Check parameters.
+  CONVERT_CHECKED(JSFunction, constructor, args[0]);
+  CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[1]);
+  RUNTIME_ASSERT(max_references >= 0);
+
+  // Get the number of referencing objects.
+  int count;
+  count = DebugConstructedBy(constructor, max_references, NULL, 0);
+
+  // Allocate an array to hold the result.
+  Object* object = Heap::AllocateFixedArray(count);
+  if (object->IsFailure()) return object;
+  FixedArray* instances = FixedArray::cast(object);
+
+  // Fill the referencing objects.
+  count = DebugConstructedBy(constructor, max_references, instances, count);
+
+  // Return result as JS array.
+  Object* result =
+      Heap::AllocateJSObject(
+          Top::context()->global_context()->array_function());
+  if (!result->IsFailure()) JSArray::cast(result)->SetContent(instances);
+  return result;
+}
+
+
+// Find the effective prototype object as returned by __proto__.
+// args[0]: the object to find the prototype for.
+static Object* Runtime_DebugGetPrototype(Arguments args) {
+  ASSERT(args.length() == 1);
+
+  CONVERT_CHECKED(JSObject, obj, args[0]);
+
+  // Use the __proto__ accessor.
+  return Accessors::ObjectPrototype.getter(obj, NULL);
+}
+
+
+static Object* Runtime_SystemBreak(Arguments args) {
+  ASSERT(args.length() == 0);
+  CPU::DebugBreak();
+  return Heap::undefined_value();
+}
+
+
+static Object* Runtime_DebugDisassembleFunction(Arguments args) {
+#ifdef DEBUG
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  // Get the function and make sure it is compiled.
+  CONVERT_ARG_CHECKED(JSFunction, func, 0);
+  if (!func->is_compiled() && !CompileLazy(func, KEEP_EXCEPTION)) {
+    return Failure::Exception();
+  }
+  func->code()->PrintLn();
+#endif  // DEBUG
+  return Heap::undefined_value();
+}
+
+
+static Object* Runtime_DebugDisassembleConstructor(Arguments args) {
+#ifdef DEBUG
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  // Get the function and make sure it is compiled.
+  CONVERT_ARG_CHECKED(JSFunction, func, 0);
+  if (!func->is_compiled() && !CompileLazy(func, KEEP_EXCEPTION)) {
+    return Failure::Exception();
+  }
+  func->shared()->construct_stub()->PrintLn();
+#endif  // DEBUG
+  return Heap::undefined_value();
+}
+
+
+static Object* Runtime_FunctionGetInferredName(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_CHECKED(JSFunction, f, args[0]);
+  return f->shared()->inferred_name();
+}
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
+
+// Finds the script object from the script data. NOTE: This operation uses
+// heap traversal to find the function generated for the source position
+// for the requested break point. For lazily compiled functions several heap
+// traversals might be required rendering this operation as a rather slow
+// operation. However for setting break points which is normally done through
+// some kind of user interaction the performance is not crucial.
+static Handle<Object> Runtime_GetScriptFromScriptName(
+    Handle<String> script_name) {
+  // Scan the heap for Script objects to find the script with the requested
+  // script data.
+  Handle<Script> script;
+  HeapIterator iterator;
+  while (script.is_null() && iterator.has_next()) {
+    HeapObject* obj = iterator.next();
+    // If a script is found check if it has the script data requested.
+    if (obj->IsScript()) {
+      if (Script::cast(obj)->name()->IsString()) {
+        if (String::cast(Script::cast(obj)->name())->Equals(*script_name)) {
+          script = Handle<Script>(Script::cast(obj));
+        }
+      }
+    }
+  }
+
+  // If no script with the requested script data is found return undefined.
+  if (script.is_null()) return Factory::undefined_value();
+
+  // Return the script found.
+  return GetScriptWrapper(script);
+}
+
+
+// Get the script object from script data. NOTE: Regarding performance
+// see the NOTE for GetScriptFromScriptData.
+// args[0]: script data for the script to find the source for
+static Object* Runtime_GetScript(Arguments args) {
+  HandleScope scope;
+
+  ASSERT(args.length() == 1);
+
+  CONVERT_CHECKED(String, script_name, args[0]);
+
+  // Find the requested script.
+  Handle<Object> result =
+      Runtime_GetScriptFromScriptName(Handle<String>(script_name));
+  return *result;
+}
+
+
+// Determines whether the given stack frame should be displayed in
+// a stack trace.  The caller is the error constructor that asked
+// for the stack trace to be collected.  The first time a construct
+// call to this function is encountered it is skipped.  The seen_caller
+// in/out parameter is used to remember if the caller has been seen
+// yet.
+static bool ShowFrameInStackTrace(StackFrame* raw_frame, Object* caller,
+    bool* seen_caller) {
+  // Only display JS frames.
+  if (!raw_frame->is_java_script())
+    return false;
+  JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
+  Object* raw_fun = frame->function();
+  // Not sure when this can happen but skip it just in case.
+  if (!raw_fun->IsJSFunction())
+    return false;
+  if ((raw_fun == caller) && !(*seen_caller)) {
+    *seen_caller = true;
+    return false;
+  }
+  // Skip all frames until we've seen the caller.  Also, skip the most
+  // obvious builtin calls.  Some builtin calls (such as Number.ADD
+  // which is invoked using 'call') are very difficult to recognize
+  // so we're leaving them in for now.
+  return *seen_caller && !frame->receiver()->IsJSBuiltinsObject();
+}
+
+
+// Collect the raw data for a stack trace.  Returns an array of three
+// element segments each containing a receiver, function and native
+// code offset.
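+// For example, two collected frames produce
+// [recv0, fun0, offset0, recv1, fun1, offset1].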
+static Object* Runtime_CollectStackTrace(Arguments args) {
+  ASSERT_EQ(args.length(), 2);
+  Handle<Object> caller = args.at<Object>(0);
+  CONVERT_NUMBER_CHECKED(int32_t, limit, Int32, args[1]);
+
+  HandleScope scope;
+
+  int initial_size = limit < 10 ? limit : 10;
+  Handle<JSArray> result = Factory::NewJSArray(initial_size * 3);
+
+  StackFrameIterator iter;
+  // If the caller parameter is a function we skip frames until we're
+  // under it before starting to collect.
+  bool seen_caller = !caller->IsJSFunction();
+  int cursor = 0;
+  int frames_seen = 0;
+  while (!iter.done() && frames_seen < limit) {
+    StackFrame* raw_frame = iter.frame();
+    if (ShowFrameInStackTrace(raw_frame, *caller, &seen_caller)) {
+      frames_seen++;
+      JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
+      Object* recv = frame->receiver();
+      Object* fun = frame->function();
+      Address pc = frame->pc();
+      Address start = frame->code()->address();
+      Smi* offset = Smi::FromInt(pc - start);
+      FixedArray* elements = FixedArray::cast(result->elements());
+      if (cursor + 2 < elements->length()) {
+        elements->set(cursor++, recv);
+        elements->set(cursor++, fun);
+        elements->set(cursor++, offset, SKIP_WRITE_BARRIER);
+      } else {
+        HandleScope scope;
+        Handle<Object> recv_handle(recv);
+        Handle<Object> fun_handle(fun);
+        SetElement(result, cursor++, recv_handle);
+        SetElement(result, cursor++, fun_handle);
+        SetElement(result, cursor++, Handle<Smi>(offset));
+      }
+    }
+    iter.Advance();
+  }
+
+  result->set_length(Smi::FromInt(cursor), SKIP_WRITE_BARRIER);
+
+  return *result;
+}
+
+
+static Object* Runtime_Abort(Arguments args) {
+  ASSERT(args.length() == 2);
+  OS::PrintError("abort: %s\n", reinterpret_cast<char*>(args[0]) +
+                                    Smi::cast(args[1])->value());
+  Top::PrintStack();
+  OS::Abort();
+  UNREACHABLE();
+  return NULL;
+}
+
+
+#ifdef DEBUG
+// ListNatives is ONLY used by fuzz-natives.js in debug mode.
+// Exclude the code in release mode.
+static Object* Runtime_ListNatives(Arguments args) {
+  ASSERT(args.length() == 0);
+  HandleScope scope;
+  Handle<JSArray> result = Factory::NewJSArray(0);
+  int index = 0;
+#define ADD_ENTRY(Name, argc, ressize)                                       \
+  {                                                                          \
+    HandleScope inner;                                                       \
+    Handle<String> name =                                                    \
+      Factory::NewStringFromAscii(Vector<const char>(#Name, strlen(#Name))); \
+    Handle<JSArray> pair = Factory::NewJSArray(0);                           \
+    SetElement(pair, 0, name);                                               \
+    SetElement(pair, 1, Handle<Smi>(Smi::FromInt(argc)));                    \
+    SetElement(result, index++, pair);                                       \
+  }
+  RUNTIME_FUNCTION_LIST(ADD_ENTRY)
+#undef ADD_ENTRY
+  return *result;
+}
+#endif
+
+
+static Object* Runtime_Log(Arguments args) {
+  ASSERT(args.length() == 2);
+  CONVERT_CHECKED(String, format, args[0]);
+  CONVERT_CHECKED(JSArray, elms, args[1]);
+  Vector<const char> chars = format->ToAsciiVector();
+  Logger::LogRuntime(chars, elms);
+  return Heap::undefined_value();
+}
+
+
+static Object* Runtime_IS_VAR(Arguments args) {
+  UNREACHABLE();  // implemented as macro in the parser
+  return NULL;
+}
+
+
+// ----------------------------------------------------------------------------
+// Implementation of Runtime
+
+#define F(name, nargs, ressize)                                           \
+  { #name, "RuntimeStub_" #name, FUNCTION_ADDR(Runtime_##name), nargs, \
+    static_cast<int>(Runtime::k##name), ressize },
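+// For example, F(GetProperty, 2, 1) expands to the table entry
+// { "GetProperty", "RuntimeStub_GetProperty", FUNCTION_ADDR(Runtime_GetProperty),
+//   2, static_cast<int>(Runtime::kGetProperty), 1 },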
+
+static Runtime::Function Runtime_functions[] = {
+  RUNTIME_FUNCTION_LIST(F)
+  { NULL, NULL, NULL, 0, -1, 0 }
+};
+
+#undef F
+
+
+Runtime::Function* Runtime::FunctionForId(FunctionId fid) {
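+  // The FunctionId enum and the Runtime_functions table are both generated
+  // from RUNTIME_FUNCTION_LIST in the same order, so the id is also an index.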
+  ASSERT(0 <= fid && fid < kNofFunctions);
+  return &Runtime_functions[fid];
+}
+
+
+Runtime::Function* Runtime::FunctionForName(const char* name) {
+  for (Function* f = Runtime_functions; f->name != NULL; f++) {
+    if (strcmp(f->name, name) == 0) {
+      return f;
+    }
+  }
+  return NULL;
+}
+
+
+void Runtime::PerformGC(Object* result) {
+  Failure* failure = Failure::cast(result);
+  if (failure->IsRetryAfterGC()) {
+    // Try to do a garbage collection; ignore it if it fails. The C
+    // entry stub will throw an out-of-memory exception in that case.
+    Heap::CollectGarbage(failure->requested(), failure->allocation_space());
+  } else {
+    // Handle last resort GC and make sure to allow future allocations
+    // to grow the heap without causing GCs (if possible).
+    Counters::gc_last_resort_from_js.Increment();
+    Heap::CollectAllGarbage(false);
+  }
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/runtime.h b/src/runtime.h
new file mode 100644
index 0000000..afa278b
--- /dev/null
+++ b/src/runtime.h
@@ -0,0 +1,411 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_RUNTIME_H_
+#define V8_RUNTIME_H_
+
+namespace v8 {
+namespace internal {
+
+// The interface to C++ runtime functions.
+
+// ----------------------------------------------------------------------------
+// RUNTIME_FUNCTION_LIST_ALWAYS defines runtime calls available in both
+// release and debug mode.
+// This macro should only be used by the macro RUNTIME_FUNCTION_LIST.
+
+// WARNING: RUNTIME_FUNCTION_LIST_ALWAYS_* is a very large macro that caused
+// MSVC Intellisense to crash.  It was broken into two macros to work around
+// this problem. Please avoid large recursive macros whenever possible.
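+//
+// Each entry F(name, nargs, ressize) gives the runtime function's name, its
+// argument count (negative when the count is variable) and its result size.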
+#define RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
+  /* Property access */ \
+  F(GetProperty, 2, 1) \
+  F(KeyedGetProperty, 2, 1) \
+  F(DeleteProperty, 2, 1) \
+  F(HasLocalProperty, 2, 1) \
+  F(HasProperty, 2, 1) \
+  F(HasElement, 2, 1) \
+  F(IsPropertyEnumerable, 2, 1) \
+  F(GetPropertyNames, 1, 1) \
+  F(GetPropertyNamesFast, 1, 1) \
+  F(GetArgumentsProperty, 1, 1) \
+  F(ToFastProperties, 1, 1) \
+  F(ToSlowProperties, 1, 1) \
+  \
+  F(IsInPrototypeChain, 2, 1) \
+  F(SetHiddenPrototype, 2, 1) \
+  \
+  F(IsConstructCall, 0, 1) \
+  \
+  /* Utilities */ \
+  F(GetCalledFunction, 0, 1) \
+  F(GetFunctionDelegate, 1, 1) \
+  F(GetConstructorDelegate, 1, 1) \
+  F(NewArguments, 1, 1) \
+  F(NewArgumentsFast, 3, 1) \
+  F(LazyCompile, 1, 1) \
+  F(SetNewFunctionAttributes, 1, 1) \
+  \
+  /* Array join support */ \
+  F(PushIfAbsent, 2, 1) \
+  F(ArrayConcat, 1, 1) \
+  \
+  /* Conversions */ \
+  F(ToBool, 1, 1) \
+  F(Typeof, 1, 1) \
+  \
+  F(StringToNumber, 1, 1) \
+  F(StringFromCharCodeArray, 1, 1) \
+  F(StringParseInt, 2, 1) \
+  F(StringParseFloat, 1, 1) \
+  F(StringToLowerCase, 1, 1) \
+  F(StringToUpperCase, 1, 1) \
+  F(CharFromCode, 1, 1) \
+  F(URIEscape, 1, 1) \
+  F(URIUnescape, 1, 1) \
+  \
+  F(NumberToString, 1, 1) \
+  F(NumberToInteger, 1, 1) \
+  F(NumberToJSUint32, 1, 1) \
+  F(NumberToJSInt32, 1, 1) \
+  F(NumberToSmi, 1, 1) \
+  \
+  /* Arithmetic operations */ \
+  F(NumberAdd, 2, 1) \
+  F(NumberSub, 2, 1) \
+  F(NumberMul, 2, 1) \
+  F(NumberDiv, 2, 1) \
+  F(NumberMod, 2, 1) \
+  F(NumberUnaryMinus, 1, 1) \
+  \
+  F(StringAdd, 2, 1) \
+  F(StringBuilderConcat, 2, 1) \
+  \
+  /* Bit operations */ \
+  F(NumberOr, 2, 1) \
+  F(NumberAnd, 2, 1) \
+  F(NumberXor, 2, 1) \
+  F(NumberNot, 1, 1) \
+  \
+  F(NumberShl, 2, 1) \
+  F(NumberShr, 2, 1) \
+  F(NumberSar, 2, 1) \
+  \
+  /* Comparisons */ \
+  F(NumberEquals, 2, 1) \
+  F(StringEquals, 2, 1) \
+  \
+  F(NumberCompare, 3, 1) \
+  F(SmiLexicographicCompare, 2, 1) \
+  F(StringCompare, 2, 1) \
+  \
+  /* Math */ \
+  F(Math_abs, 1, 1) \
+  F(Math_acos, 1, 1) \
+  F(Math_asin, 1, 1) \
+  F(Math_atan, 1, 1) \
+  F(Math_atan2, 2, 1) \
+  F(Math_ceil, 1, 1) \
+  F(Math_cos, 1, 1) \
+  F(Math_exp, 1, 1) \
+  F(Math_floor, 1, 1) \
+  F(Math_log, 1, 1) \
+  F(Math_pow, 2, 1) \
+  F(Math_round, 1, 1) \
+  F(Math_sin, 1, 1) \
+  F(Math_sqrt, 1, 1) \
+  F(Math_tan, 1, 1) \
+  \
+  /* Regular expressions */ \
+  F(RegExpCompile, 3, 1) \
+  F(RegExpExec, 4, 1) \
+  \
+  /* Strings */ \
+  F(StringCharCodeAt, 2, 1) \
+  F(StringIndexOf, 3, 1) \
+  F(StringLastIndexOf, 3, 1) \
+  F(StringLocaleCompare, 2, 1) \
+  F(StringSlice, 3, 1) \
+  F(StringReplaceRegExpWithString, 4, 1) \
+  F(StringMatch, 3, 1) \
+  \
+  /* Numbers */ \
+  F(NumberToRadixString, 2, 1) \
+  F(NumberToFixed, 2, 1) \
+  F(NumberToExponential, 2, 1) \
+  F(NumberToPrecision, 2, 1)
+
+#define RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
+  /* Reflection */ \
+  F(FunctionSetInstanceClassName, 2, 1) \
+  F(FunctionSetLength, 2, 1) \
+  F(FunctionSetPrototype, 2, 1) \
+  F(FunctionGetName, 1, 1) \
+  F(FunctionSetName, 2, 1) \
+  F(FunctionGetSourceCode, 1, 1) \
+  F(FunctionGetScript, 1, 1) \
+  F(FunctionGetScriptSourcePosition, 1, 1) \
+  F(FunctionGetPositionForOffset, 2, 1) \
+  F(FunctionIsAPIFunction, 1, 1) \
+  F(FunctionIsBuiltin, 1, 1) \
+  F(GetScript, 1, 1) \
+  F(CollectStackTrace, 2, 1) \
+  \
+  F(ClassOf, 1, 1) \
+  F(SetCode, 2, 1) \
+  \
+  F(CreateApiFunction, 1, 1) \
+  F(IsTemplate, 1, 1) \
+  F(GetTemplateField, 2, 1) \
+  F(DisableAccessChecks, 1, 1) \
+  F(EnableAccessChecks, 1, 1) \
+  \
+  /* Dates */ \
+  F(DateCurrentTime, 0, 1) \
+  F(DateParseString, 2, 1) \
+  F(DateLocalTimezone, 1, 1) \
+  F(DateLocalTimeOffset, 0, 1) \
+  F(DateDaylightSavingsOffset, 1, 1) \
+  \
+  /* Numbers */ \
+  F(NumberIsFinite, 1, 1) \
+  \
+  /* Globals */ \
+  F(CompileString, 2, 1) \
+  F(GlobalPrint, 1, 1) \
+  \
+  /* Eval */ \
+  F(GlobalReceiver, 1, 1) \
+  F(ResolvePossiblyDirectEval, 2, 1) \
+  \
+  F(SetProperty, -1 /* 3 or 4 */, 1) \
+  F(IgnoreAttributesAndSetProperty, -1 /* 3 or 4 */, 1) \
+  \
+  /* Arrays */ \
+  F(RemoveArrayHoles, 2, 1) \
+  F(GetArrayKeys, 2, 1) \
+  F(MoveArrayContents, 2, 1) \
+  F(EstimateNumberOfElements, 1, 1) \
+  \
+  /* Getters and Setters */ \
+  F(DefineAccessor, -1 /* 4 or 5 */, 1) \
+  F(LookupAccessor, 3, 1) \
+  \
+  /* Literals */ \
+  F(MaterializeRegExpLiteral, 4, 1)\
+  F(CreateArrayLiteralBoilerplate, 3, 1) \
+  F(CreateObjectLiteralBoilerplate, 3, 1) \
+  F(CloneLiteralBoilerplate, 1, 1) \
+  F(CloneShallowLiteralBoilerplate, 1, 1) \
+  \
+  /* Catch context extension objects */ \
+  F(CreateCatchExtensionObject, 2, 1) \
+  \
+  /* Statements */ \
+  F(NewClosure, 2, 1) \
+  F(NewObject, 1, 1) \
+  F(Throw, 1, 1) \
+  F(ReThrow, 1, 1) \
+  F(ThrowReferenceError, 1, 1) \
+  F(StackGuard, 1, 1) \
+  \
+  /* Contexts */ \
+  F(NewContext, 1, 1) \
+  F(PushContext, 1, 1) \
+  F(PushCatchContext, 1, 1) \
+  F(LookupContext, 2, 1) \
+  F(LoadContextSlot, 2, 2) \
+  F(LoadContextSlotNoReferenceError, 2, 2) \
+  F(StoreContextSlot, 3, 1) \
+  \
+  /* Declarations and initialization */ \
+  F(DeclareGlobals, 3, 1) \
+  F(DeclareContextSlot, 4, 1) \
+  F(InitializeVarGlobal, -1 /* 1 or 2 */, 1) \
+  F(InitializeConstGlobal, 2, 1) \
+  F(InitializeConstContextSlot, 3, 1) \
+  F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
+  F(TransformToFastProperties, 1, 1) \
+  \
+  /* Debugging */ \
+  F(DebugPrint, 1, 1) \
+  F(DebugTrace, 0, 1) \
+  F(TraceEnter, 0, 1) \
+  F(TraceExit, 1, 1) \
+  F(Abort, 2, 1) \
+  /* Logging */ \
+  F(Log, 2, 1) \
+  /* ES5 */ \
+  F(LocalKeys, 1, 1) \
+  \
+  /* Pseudo functions - handled as macros by parser */ \
+  F(IS_VAR, 1, 1)
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+#define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F) \
+  /* Debugger support */ \
+  F(DebugBreak, 0, 1) \
+  F(SetDebugEventListener, 2, 1) \
+  F(Break, 0, 1) \
+  F(DebugGetPropertyDetails, 2, 1) \
+  F(DebugGetProperty, 2, 1) \
+  F(DebugLocalPropertyNames, 1, 1) \
+  F(DebugLocalElementNames, 1, 1) \
+  F(DebugPropertyTypeFromDetails, 1, 1) \
+  F(DebugPropertyAttributesFromDetails, 1, 1) \
+  F(DebugPropertyIndexFromDetails, 1, 1) \
+  F(DebugInterceptorInfo, 1, 1) \
+  F(DebugNamedInterceptorPropertyNames, 1, 1) \
+  F(DebugIndexedInterceptorElementNames, 1, 1) \
+  F(DebugNamedInterceptorPropertyValue, 2, 1) \
+  F(DebugIndexedInterceptorElementValue, 2, 1) \
+  F(CheckExecutionState, 1, 1) \
+  F(GetFrameCount, 1, 1) \
+  F(GetFrameDetails, 2, 1) \
+  F(GetScopeCount, 2, 1) \
+  F(GetScopeDetails, 3, 1) \
+  F(DebugPrintScopes, 0, 1) \
+  F(GetCFrames, 1, 1) \
+  F(GetThreadCount, 1, 1) \
+  F(GetThreadDetails, 2, 1) \
+  F(GetBreakLocations, 1, 1) \
+  F(SetFunctionBreakPoint, 3, 1) \
+  F(SetScriptBreakPoint, 3, 1) \
+  F(ClearBreakPoint, 1, 1) \
+  F(ChangeBreakOnException, 2, 1) \
+  F(PrepareStep, 3, 1) \
+  F(ClearStepping, 0, 1) \
+  F(DebugEvaluate, 4, 1) \
+  F(DebugEvaluateGlobal, 3, 1) \
+  F(DebugGetLoadedScripts, 0, 1) \
+  F(DebugReferencedBy, 3, 1) \
+  F(DebugConstructedBy, 2, 1) \
+  F(DebugGetPrototype, 1, 1) \
+  F(SystemBreak, 0, 1) \
+  F(DebugDisassembleFunction, 1, 1) \
+  F(DebugDisassembleConstructor, 1, 1) \
+  F(FunctionGetInferredName, 1, 1)
+#else
+#define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
+#endif
+
+#ifdef DEBUG
+#define RUNTIME_FUNCTION_LIST_DEBUG(F) \
+  /* Testing */ \
+  F(ListNatives, 0, 1)
+#else
+#define RUNTIME_FUNCTION_LIST_DEBUG(F)
+#endif
+
+
+// ----------------------------------------------------------------------------
+// RUNTIME_FUNCTION_LIST defines all runtime functions accessed
+// either directly by id (via the code generator), or indirectly
+// via a native call by name (from within JS code).
+
+#define RUNTIME_FUNCTION_LIST(F) \
+  RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
+  RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
+  RUNTIME_FUNCTION_LIST_DEBUG(F) \
+  RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
+
+// ----------------------------------------------------------------------------
+// Runtime provides access to all C++ runtime functions.
+
+class Runtime : public AllStatic {
+ public:
+  enum FunctionId {
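+    // One k<Name> enumerator is generated per entry in RUNTIME_FUNCTION_LIST
+    // (e.g. kGetProperty); kNofFunctions is the total number of functions.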
+#define F(name, nargs, ressize) k##name,
+    RUNTIME_FUNCTION_LIST(F)
+    kNofFunctions
+#undef F
+  };
+
+  // Runtime function descriptor.
+  struct Function {
+    // The JS name of the function.
+    const char* name;
+
+    // The name of the stub that calls the runtime function.
+    const char* stub_name;
+
+    // The C++ (native) entry point.
+    byte* entry;
+
+    // The number of arguments expected; nargs < 0 if variable no. of
+    // arguments.
+    int nargs;
+    int stub_id;
+    // Size of the result in pointers: 1 for a single value, larger for
+    // functions that return more than one value (e.g. LoadContextSlot).
+    int result_size;
+  };
+
+  // Get the runtime function with the given function id.
+  static Function* FunctionForId(FunctionId fid);
+
+  // Get the runtime function with the given name.
+  static Function* FunctionForName(const char* name);
+
+  static int StringMatch(Handle<String> sub, Handle<String> pat, int index);
+
+  static bool IsUpperCaseChar(uint16_t ch);
+
+  // TODO(1240886): The following three methods are *not* handle safe,
+  // but accept handle arguments. This seems fragile.
+
+  // Support getting the characters in a string using [] notation as
+  // in Firefox/SpiderMonkey, Safari and Opera.
+  static Object* GetElementOrCharAt(Handle<Object> object, uint32_t index);
+
+  static Object* SetObjectProperty(Handle<Object> object,
+                                   Handle<Object> key,
+                                   Handle<Object> value,
+                                   PropertyAttributes attr);
+
+  static Object* ForceSetObjectProperty(Handle<JSObject> object,
+                                        Handle<Object> key,
+                                        Handle<Object> value,
+                                        PropertyAttributes attr);
+
+  static Object* ForceDeleteObjectProperty(Handle<JSObject> object,
+                                           Handle<Object> key);
+
+  static Object* GetObjectProperty(Handle<Object> object, Handle<Object> key);
+
+  // This function is used in FunctionNameUsing* tests.
+  static Object* FindSharedFunctionInfoInScript(Handle<Script> script,
+                                                int position);
+
+  // Helper functions used by stubs.
+  static void PerformGC(Object* result);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_RUNTIME_H_
diff --git a/src/runtime.js b/src/runtime.js
new file mode 100644
index 0000000..789bfdb
--- /dev/null
+++ b/src/runtime.js
@@ -0,0 +1,603 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file contains runtime support implemented in JavaScript.
+
+// CAUTION: Some of the functions specified in this file are called
+// directly from compiled code. These are the functions with names in
+// ALL CAPS. The compiled code passes the first argument in 'this' and
+// it does not push the function onto the stack. This means that you
+// cannot use contexts in all these functions.
+
+
+/* -----------------------------------
+   - - -   C o m p a r i s o n   - - -
+   -----------------------------------
+*/
+
+// The following const declarations are shared with other native JS files.
+// They are all declared at this one spot to avoid const redeclaration errors.
+const $Object = global.Object;
+const $Array = global.Array;
+const $String = global.String;
+const $Number = global.Number;
+const $Function = global.Function;
+const $Boolean = global.Boolean;
+const $NaN = 0/0;
+
+
+// ECMA-262, section 11.9.1, page 55.
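+// Like STRICT_EQUALS below, EQUALS returns a number rather than a boolean:
+// 0 when the operands are equal and 1 when they are not.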
+function EQUALS(y) {
+  if (IS_STRING(this) && IS_STRING(y)) return %StringEquals(this, y);
+  var x = this;
+
+  // NOTE: We use iteration instead of recursion, because it is
+  // difficult to call EQUALS with the correct setting of 'this' in
+  // an efficient way.
+  while (true) {
+    if (IS_NUMBER(x)) {
+      if (y == null) return 1;  // not equal
+      return %NumberEquals(x, %ToNumber(y));
+    } else if (IS_STRING(x)) {
+      if (IS_STRING(y)) return %StringEquals(x, y);
+      if (IS_NUMBER(y)) return %NumberEquals(%ToNumber(x), y);
+      if (IS_BOOLEAN(y)) return %NumberEquals(%ToNumber(x), %ToNumber(y));
+      if (y == null) return 1;  // not equal
+      y = %ToPrimitive(y, NO_HINT);
+    } else if (IS_BOOLEAN(x)) {
+      if (IS_BOOLEAN(y)) {
+        return %_ObjectEquals(x, y) ? 0 : 1;
+      }
+      if (y == null) return 1;  // not equal
+      return %NumberEquals(%ToNumber(x), %ToNumber(y));
+    } else if (x == null) {
+      // NOTE: This checks for both null and undefined.
+      return (y == null) ? 0 : 1;
+    } else {
+      // x is not a number, boolean, null or undefined.
+      if (y == null) return 1;  // not equal
+      if (IS_OBJECT(y)) {
+        return %_ObjectEquals(x, y) ? 0 : 1;
+      }
+      if (IS_FUNCTION(y)) {
+        return %_ObjectEquals(x, y) ? 0 : 1;
+      }
+
+      x = %ToPrimitive(x, NO_HINT);
+    }
+  }
+}
+
+// ECMA-262, section 11.9.4, page 56.
+function STRICT_EQUALS(x) {
+  if (IS_STRING(this)) {
+    if (!IS_STRING(x)) return 1;  // not equal
+    return %StringEquals(this, x);
+  }
+
+  if (IS_NUMBER(this)) {
+    if (!IS_NUMBER(x)) return 1;  // not equal
+    return %NumberEquals(this, x);
+  }
+
+  // If anything else gets here, we just do a simple identity check.
+  // Objects (including functions), null, undefined and booleans were
+  // checked in the CompareStub, so there should be nothing left.
+  return %_ObjectEquals(this, x) ? 0 : 1;
+}
+
+
+// ECMA-262, section 11.8.5, page 53. The 'ncr' parameter is used as
+// the result when either (or both) the operands are NaN.
+function COMPARE(x, ncr) {
+  // Fast case for numbers and strings.
+  if (IS_NUMBER(this) && IS_NUMBER(x)) {
+    return %NumberCompare(this, x, ncr);
+  }
+  if (IS_STRING(this) && IS_STRING(x)) {
+    return %StringCompare(this, x);
+  }
+
+  // Default implementation.
+  var a = %ToPrimitive(this, NUMBER_HINT);
+  var b = %ToPrimitive(x, NUMBER_HINT);
+  if (IS_STRING(a) && IS_STRING(b)) {
+    return %StringCompare(a, b);
+  } else {
+    return %NumberCompare(%ToNumber(a), %ToNumber(b), ncr);
+  }
+}
+
+
+
+/* -----------------------------------
+   - - -   A r i t h m e t i c   - - -
+   -----------------------------------
+*/
+
+// ECMA-262, section 11.6.1, page 50.
+function ADD(x) {
+  // Fast case: Check for number operands and do the addition.
+  if (IS_NUMBER(this) && IS_NUMBER(x)) return %NumberAdd(this, x);
+  if (IS_STRING(this) && IS_STRING(x)) return %StringAdd(this, x);
+
+  // Default implementation.
+  var a = %ToPrimitive(this, NO_HINT);
+  var b = %ToPrimitive(x, NO_HINT);
+
+  if (IS_STRING(a)) {
+    return %StringAdd(a, %ToString(b));
+  } else if (IS_STRING(b)) {
+    return %StringAdd(%ToString(a), b);
+  } else {
+    return %NumberAdd(%ToNumber(a), %ToNumber(b));
+  }
+}
+
+
+// Left operand (this) is already a string.
+function STRING_ADD_LEFT(y) {
+  if (!IS_STRING(y)) {
+    if (IS_STRING_WRAPPER(y)) {
+      y = %_ValueOf(y);
+    } else {
+      y = IS_NUMBER(y)
+          ? %NumberToString(y)
+          : %ToString(%ToPrimitive(y, NO_HINT));
+    }
+  }
+  return %StringAdd(this, y);
+}
+
+
+// Right operand (y) is already a string.
+function STRING_ADD_RIGHT(y) {
+  var x = this;
+  if (!IS_STRING(x)) {
+    if (IS_STRING_WRAPPER(x)) {
+      x = %_ValueOf(x);
+    } else {
+      x = IS_NUMBER(x)
+          ? %NumberToString(x)
+          : %ToString(%ToPrimitive(x, NO_HINT));
+    }
+  }
+  return %StringAdd(x, y);
+}
+
+
+// ECMA-262, section 11.6.2, page 50.
+function SUB(y) {
+  var x = IS_NUMBER(this) ? this : %ToNumber(this);
+  if (!IS_NUMBER(y)) y = %ToNumber(y);
+  return %NumberSub(x, y);
+}
+
+
+// ECMA-262, section 11.5.1, page 48.
+function MUL(y) {
+  var x = IS_NUMBER(this) ? this : %ToNumber(this);
+  if (!IS_NUMBER(y)) y = %ToNumber(y);
+  return %NumberMul(x, y);
+}
+
+
+// ECMA-262, section 11.5.2, page 49.
+function DIV(y) {
+  var x = IS_NUMBER(this) ? this : %ToNumber(this);
+  if (!IS_NUMBER(y)) y = %ToNumber(y);
+  return %NumberDiv(x, y);
+}
+
+
+// ECMA-262, section 11.5.3, page 49.
+function MOD(y) {
+  var x = IS_NUMBER(this) ? this : %ToNumber(this);
+  if (!IS_NUMBER(y)) y = %ToNumber(y);
+  return %NumberMod(x, y);
+}
+
+
+
+/* -------------------------------------------
+   - - -   B i t   o p e r a t i o n s   - - -
+   -------------------------------------------
+*/
+
+// ECMA-262, section 11.10, page 57.
+function BIT_OR(y) {
+  var x = IS_NUMBER(this) ? this : %ToNumber(this);
+  if (!IS_NUMBER(y)) y = %ToNumber(y);
+  return %NumberOr(x, y);
+}
+
+
+// ECMA-262, section 11.10, page 57.
+function BIT_AND(y) {
+  var x;
+  if (IS_NUMBER(this)) {
+    x = this;
+    if (!IS_NUMBER(y)) y = %ToNumber(y);
+  } else {
+    x = %ToNumber(this);
+    // Make sure to convert the right operand to a number before
+    // bailing out in the fast case, but after converting the
+    // left operand. This ensures that valueOf methods on the right
+    // operand are always executed.
+    if (!IS_NUMBER(y)) y = %ToNumber(y);
+    // Optimize for the case where we end up AND'ing a value
+    // that doesn't convert to a number. This is common in
+    // certain benchmarks.
+    if (NUMBER_IS_NAN(x)) return 0;
+  }
+  return %NumberAnd(x, y);
+}
+
+
+// ECMA-262, section 11.10, page 57.
+function BIT_XOR(y) {
+  var x = IS_NUMBER(this) ? this : %ToNumber(this);
+  if (!IS_NUMBER(y)) y = %ToNumber(y);
+  return %NumberXor(x, y);
+}
+
+
+// ECMA-262, section 11.4.7, page 47.
+function UNARY_MINUS() {
+  var x = IS_NUMBER(this) ? this : %ToNumber(this);
+  return %NumberUnaryMinus(x);
+}
+
+
+// ECMA-262, section 11.4.8, page 48.
+function BIT_NOT() {
+  var x = IS_NUMBER(this) ? this : %ToNumber(this);
+  return %NumberNot(x);
+}
+
+
+// ECMA-262, section 11.7.1, page 51.
+function SHL(y) {
+  var x = IS_NUMBER(this) ? this : %ToNumber(this);
+  if (!IS_NUMBER(y)) y = %ToNumber(y);
+  return %NumberShl(x, y);
+}
+
+
+// ECMA-262, section 11.7.2, page 51.
+function SAR(y) {
+  var x;
+  if (IS_NUMBER(this)) {
+    x = this;
+    if (!IS_NUMBER(y)) y = %ToNumber(y);
+  } else {
+    x = %ToNumber(this);
+    // Make sure to convert the right operand to a number before
+    // bailing out in the fast case, but after converting the
+    // left operand. This ensures that valueOf methods on the right
+    // operand are always executed.
+    if (!IS_NUMBER(y)) y = %ToNumber(y);
+    // Optimize for the case where we end up shifting a value
+    // that doesn't convert to a number. This is common in
+    // certain benchmarks.
+    if (NUMBER_IS_NAN(x)) return 0;
+  }
+  return %NumberSar(x, y);
+}
+
+
+// ECMA-262, section 11.7.3, page 52.
+function SHR(y) {
+  var x = IS_NUMBER(this) ? this : %ToNumber(this);
+  if (!IS_NUMBER(y)) y = %ToNumber(y);
+  return %NumberShr(x, y);
+}
+
+
+
+/* -----------------------------
+   - - -   H e l p e r s   - - -
+   -----------------------------
+*/
+
+// ECMA-262, section 11.4.1, page 46.
+function DELETE(key) {
+  return %DeleteProperty(%ToObject(this), %ToString(key));
+}
+
+
+// ECMA-262, section 11.8.7, page 54.
+function IN(x) {
+  if (x == null || (!IS_OBJECT(x) && !IS_FUNCTION(x))) {
+    throw %MakeTypeError('invalid_in_operator_use', [this, x]);
+  }
+  return %_IsNonNegativeSmi(this) ? %HasElement(x, this) : %HasProperty(x, %ToString(this));
+}
+
+
+// ECMA-262, section 11.8.6, page 54. To make the implementation more
+// efficient, the return value should be zero if the 'this' is an
+// instance of F, and non-zero if not. This makes it possible to avoid
+// an expensive ToBoolean conversion in the generated code.
+function INSTANCE_OF(F) {
+  var V = this;
+  if (!IS_FUNCTION(F)) {
+    throw %MakeTypeError('instanceof_function_expected', [V]);
+  }
+
+  // If V is not an object, return false.
+  if (IS_NULL(V) || (!IS_OBJECT(V) && !IS_FUNCTION(V))) {
+    return 1;
+  }
+
+  // Get the prototype of F; if it is not an object, throw an error.
+  var O = F.prototype;
+  if (IS_NULL(O) || (!IS_OBJECT(O) && !IS_FUNCTION(O))) {
+    throw %MakeTypeError('instanceof_nonobject_proto', [O]);
+  }
+
+  // Return whether or not O is in the prototype chain of V.
+  return %IsInPrototypeChain(O, V) ? 0 : 1;
+}
+
+
+// Get an array of property keys for the given object. Used in
+// for-in statements.
+function GET_KEYS() {
+  return %GetPropertyNames(this);
+}
+
+
+// Filter a given key against an object by checking if the object
+// has a property with the given key; return the key as a string if
+// it does, and null otherwise. Used in for-in statements.
+function FILTER_KEY(key) {
+  var string = %ToString(key);
+  if (%HasProperty(this, string)) return string;
+  return null;
+}
+
+
+function CALL_NON_FUNCTION() {
+  var callee = %GetCalledFunction();
+  var delegate = %GetFunctionDelegate(callee);
+  if (!IS_FUNCTION(delegate)) {
+    throw %MakeTypeError('called_non_callable', [typeof callee]);
+  }
+
+  var parameters = %NewArguments(delegate);
+  return delegate.apply(callee, parameters);
+}
+
+
+function CALL_NON_FUNCTION_AS_CONSTRUCTOR() {
+  var callee = %GetCalledFunction();
+  var delegate = %GetConstructorDelegate(callee);
+  if (!IS_FUNCTION(delegate)) {
+    throw %MakeTypeError('called_non_callable', [typeof callee]);
+  }
+
+  var parameters = %NewArguments(delegate);
+  return delegate.apply(callee, parameters);
+}
+
+
+function APPLY_PREPARE(args) {
+  var length;
+  // First check whether length is a non-negative Smi and args is an
+  // array. This is the fast case. If this fails, we do the slow case
+  // that takes care of more eventualities.
+  if (IS_ARRAY(args)) {
+    length = args.length;
+    if (%_IsSmi(length) && length >= 0 && length < 0x800000 && IS_FUNCTION(this)) {
+      return length;
+    }
+  }
+
+  length = (args == null) ? 0 : %ToUint32(args.length);
+
+  // We can handle any number of apply arguments if the stack is
+  // big enough, but sanity check the value to avoid overflow when
+  // multiplying with pointer size.
+  if (length > 0x800000) {
+    throw %MakeRangeError('apply_overflow', [length]);
+  }
+
+  if (!IS_FUNCTION(this)) {
+    throw %MakeTypeError('apply_non_function', [ %ToString(this), typeof this ]);
+  }
+
+  // Make sure the arguments list has the right type.
+  if (args != null && !IS_ARRAY(args) && !IS_ARGUMENTS(args)) {
+    throw %MakeTypeError('apply_wrong_args', []);
+  }
+
+  // Return the length which is the number of arguments to copy to the
+  // stack. It is guaranteed to be a small integer at this point.
+  return length;
+}
+
+
+function APPLY_OVERFLOW(length) {
+  throw %MakeRangeError('apply_overflow', [length]);
+}
+
+
+// Convert the receiver to an object - forward to ToObject.
+function TO_OBJECT() {
+  return %ToObject(this);
+}
+
+
+// Convert the receiver to a number - forward to ToNumber.
+function TO_NUMBER() {
+  return %ToNumber(this);
+}
+
+
+// Convert the receiver to a string - forward to ToString.
+function TO_STRING() {
+  return %ToString(this);
+}
+
+
+/* -------------------------------------
+   - - -   C o n v e r s i o n s   - - -
+   -------------------------------------
+*/
+
+// ECMA-262, section 9.1, page 30. Use null/undefined for no hint,
+// (1) for number hint, and (2) for string hint.
+function ToPrimitive(x, hint) {
+  // Fast case check.
+  if (IS_STRING(x)) return x;
+  // Normal behavior.
+  if (!IS_OBJECT(x) && !IS_FUNCTION(x)) return x;
+  if (x == null) return x;  // check for null, undefined
+  if (hint == NO_HINT) hint = (IS_DATE(x)) ? STRING_HINT : NUMBER_HINT;
+  return (hint == NUMBER_HINT) ? %DefaultNumber(x) : %DefaultString(x);
+}
+
+
+// ECMA-262, section 9.3, page 31.
+function ToNumber(x) {
+  if (IS_NUMBER(x)) return x;
+  if (IS_STRING(x)) return %StringToNumber(x);
+  if (IS_BOOLEAN(x)) return x ? 1 : 0;
+  if (IS_UNDEFINED(x)) return $NaN;
+  return (IS_NULL(x)) ? 0 : ToNumber(%DefaultNumber(x));
+}
+
+
+// ECMA-262, section 9.8, page 35.
+function ToString(x) {
+  if (IS_STRING(x)) return x;
+  if (IS_NUMBER(x)) return %NumberToString(x);
+  if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
+  if (IS_UNDEFINED(x)) return 'undefined';
+  return (IS_NULL(x)) ? 'null' : %ToString(%DefaultString(x));
+}
+
+
+// ECMA-262, section 9.2.
+function ToBoolean(x) {
+  if (IS_BOOLEAN(x)) return x;
+  if (IS_STRING(x)) return x.length != 0;
+  if (x == null) return false;
+  if (IS_NUMBER(x)) return !((x == 0) || NUMBER_IS_NAN(x));
+  return true;
+}
+
+
+// ECMA-262, section 9.9, page 36.
+function ToObject(x) {
+  if (IS_STRING(x)) return new $String(x);
+  if (IS_NUMBER(x)) return new $Number(x);
+  if (IS_BOOLEAN(x)) return new $Boolean(x);
+  if (x == null) throw %MakeTypeError('null_to_object', []);
+  return x;
+}
+
+
+// ECMA-262, section 9.4, page 34.
+function ToInteger(x) {
+  if (%_IsSmi(x)) return x;
+  return %NumberToInteger(ToNumber(x));
+}
+
+
+// ECMA-262, section 9.6, page 34.
+function ToUint32(x) {
+  if (%_IsSmi(x) && x >= 0) return x;
+  return %NumberToJSUint32(ToNumber(x));
+}
+
+
+// ECMA-262, section 9.5, page 34
+function ToInt32(x) {
+  if (%_IsSmi(x)) return x;
+  return %NumberToJSInt32(ToNumber(x));
+}
+
+
+
+/* ---------------------------------
+   - - -   U t i l i t i e s   - - -
+   ---------------------------------
+*/
+
+// Returns whether the given x is a primitive value - not an object or a
+// function.
+function IsPrimitive(x) {
+  if (!IS_OBJECT(x) && !IS_FUNCTION(x)) {
+    return true;
+  } else {
+    // Even though the type of null is "object", null is still
+    // considered a primitive value.
+    return IS_NULL(x);
+  }
+}
+
+
+// ECMA-262, section 8.6.2.6, page 28.
+function DefaultNumber(x) {
+  if (IS_FUNCTION(x.valueOf)) {
+    var v = x.valueOf();
+    if (%IsPrimitive(v)) return v;
+  }
+
+  if (IS_FUNCTION(x.toString)) {
+    var s = x.toString();
+    if (%IsPrimitive(s)) return s;
+  }
+
+  throw %MakeTypeError('cannot_convert_to_primitive', []);
+}
+
+
+// ECMA-262, section 8.6.2.6, page 28.
+function DefaultString(x) {
+  if (IS_FUNCTION(x.toString)) {
+    var s = x.toString();
+    if (%IsPrimitive(s)) return s;
+  }
+
+  if (IS_FUNCTION(x.valueOf)) {
+    var v = x.valueOf();
+    if (%IsPrimitive(v)) return v;
+  }
+
+  throw %MakeTypeError('cannot_convert_to_primitive', []);
+}
+
+
+// NOTE: Setting the prototype for Array must take place as early as
+// possible due to code generation for array literals.  When
+// generating code for an array literal a boilerplate array is created
+// that is cloned when running the code.  It is essential that the
+// boilerplate gets the right prototype.
+%FunctionSetPrototype($Array, new $Array(0));
diff --git a/src/scanner.cc b/src/scanner.cc
new file mode 100644
index 0000000..3dae414
--- /dev/null
+++ b/src/scanner.cc
@@ -0,0 +1,975 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "ast.h"
+#include "scanner.h"
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Character predicates
+
+
+unibrow::Predicate<IdentifierStart, 128> Scanner::kIsIdentifierStart;
+unibrow::Predicate<IdentifierPart, 128> Scanner::kIsIdentifierPart;
+unibrow::Predicate<unibrow::LineTerminator, 128> Scanner::kIsLineTerminator;
+unibrow::Predicate<unibrow::WhiteSpace, 128> Scanner::kIsWhiteSpace;
+
+
+StaticResource<Scanner::Utf8Decoder> Scanner::utf8_decoder_;
+
+
+// ----------------------------------------------------------------------------
+// UTF8Buffer
+
+UTF8Buffer::UTF8Buffer() {
+  static const int kInitialCapacity = 1 * KB;
+  data_ = NewArray<char>(kInitialCapacity);
+  limit_ = ComputeLimit(data_, kInitialCapacity);
+  Reset();
+  ASSERT(Capacity() == kInitialCapacity && pos() == 0);
+}
+
+
+UTF8Buffer::~UTF8Buffer() {
+  DeleteArray(data_);
+}
+
+
+void UTF8Buffer::AddCharSlow(uc32 c) {
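+  // Grow the buffer by doubling its capacity until a single growth step would
+  // exceed kCapacityGrowthLimit, then grow linearly.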
+  static const int kCapacityGrowthLimit = 1 * MB;
+  if (cursor_ > limit_) {
+    int old_capacity = Capacity();
+    int old_position = pos();
+    int new_capacity =
+        Min(old_capacity * 2, old_capacity + kCapacityGrowthLimit);
+    char* new_data = NewArray<char>(new_capacity);
+    memcpy(new_data, data_, old_position);
+    DeleteArray(data_);
+    data_ = new_data;
+    cursor_ = new_data + old_position;
+    limit_ = ComputeLimit(new_data, new_capacity);
+    ASSERT(Capacity() == new_capacity && pos() == old_position);
+  }
+  if (static_cast<unsigned>(c) <= unibrow::Utf8::kMaxOneByteChar) {
+    *cursor_++ = c;  // Common case: 7-bit ASCII.
+  } else {
+    cursor_ += unibrow::Utf8::Encode(cursor_, c);
+  }
+  ASSERT(pos() <= Capacity());
+}
+
+
+// ----------------------------------------------------------------------------
+// UTF16Buffer
+
+
+UTF16Buffer::UTF16Buffer()
+    : pos_(0), size_(0) { }
+
+
+Handle<String> UTF16Buffer::SubString(int start, int end) {
+  return internal::SubString(data_, start, end);
+}
+
+
+// CharacterStreamUTF16Buffer
+CharacterStreamUTF16Buffer::CharacterStreamUTF16Buffer()
+    : pushback_buffer_(0), last_(0), stream_(NULL) { }
+
+
+void CharacterStreamUTF16Buffer::Initialize(Handle<String> data,
+                                            unibrow::CharacterStream* input) {
+  data_ = data;
+  pos_ = 0;
+  stream_ = input;
+}
+
+
+void CharacterStreamUTF16Buffer::PushBack(uc32 ch) {
+  pushback_buffer()->Add(last_);
+  last_ = ch;
+  pos_--;
+}
+
+
+uc32 CharacterStreamUTF16Buffer::Advance() {
+  // NOTE: It is of importance to Persian / Farsi resources that we do
+  // *not* strip format control characters in the scanner; see
+  //
+  //    https://bugzilla.mozilla.org/show_bug.cgi?id=274152
+  //
+  // So, even though ECMA-262, section 7.1, page 11, dictates that we
+  // must remove Unicode format-control characters, we do not. This is
+  // in line with how IE and SpiderMonkey handles it.
+  if (!pushback_buffer()->is_empty()) {
+    pos_++;
+    return last_ = pushback_buffer()->RemoveLast();
+  } else if (stream_->has_more()) {
+    pos_++;
+    uc32 next = stream_->GetNext();
+    return last_ = next;
+  } else {
+    // Note: currently the following increment is necessary to avoid a
+    // test-parser problem!
+    pos_++;
+    return last_ = static_cast<uc32>(-1);
+  }
+}
+
+
+void CharacterStreamUTF16Buffer::SeekForward(int pos) {
+  pos_ = pos;
+  ASSERT(pushback_buffer()->is_empty());
+  stream_->Seek(pos);
+}
+
+
+// TwoByteStringUTF16Buffer
+TwoByteStringUTF16Buffer::TwoByteStringUTF16Buffer()
+    : raw_data_(NULL) { }
+
+
+void TwoByteStringUTF16Buffer::Initialize(
+     Handle<ExternalTwoByteString> data) {
+  ASSERT(!data.is_null());
+
+  data_ = data;
+  pos_ = 0;
+
+  raw_data_ = data->resource()->data();
+  size_ = data->length();
+}
+
+
+uc32 TwoByteStringUTF16Buffer::Advance() {
+  if (pos_ < size_) {
+    return raw_data_[pos_++];
+  } else {
+    // Note: currently the following increment is necessary to avoid a
+    // test-parser problem!
+    pos_++;
+    return static_cast<uc32>(-1);
+  }
+}
+
+
+void TwoByteStringUTF16Buffer::PushBack(uc32 ch) {
+  pos_--;
+  ASSERT(pos_ >= Scanner::kCharacterLookaheadBufferSize);
+  ASSERT(raw_data_[pos_ - Scanner::kCharacterLookaheadBufferSize] == ch);
+}
+
+
+void TwoByteStringUTF16Buffer::SeekForward(int pos) {
+  pos_ = pos;
+}
+
+
+// ----------------------------------------------------------------------------
+// Scanner
+
+Scanner::Scanner(bool pre) : stack_overflow_(false), is_pre_parsing_(pre) {
+  Token::Initialize();
+}
+
+
+void Scanner::Init(Handle<String> source, unibrow::CharacterStream* stream,
+    int position) {
+  // Initialize the source buffer.
+  if (!source.is_null() && StringShape(*source).IsExternalTwoByte()) {
+    two_byte_string_buffer_.Initialize(
+        Handle<ExternalTwoByteString>::cast(source));
+    source_ = &two_byte_string_buffer_;
+  } else {
+    char_stream_buffer_.Initialize(source, stream);
+    source_ = &char_stream_buffer_;
+  }
+
+  position_ = position;
+
+  // Reset literals buffer
+  literals_.Reset();
+
+  // Set c0_ (one character ahead)
+  ASSERT(kCharacterLookaheadBufferSize == 1);
+  Advance();
+
+  // Skip initial whitespace allowing HTML comment ends just like
+  // after a newline and scan first token.
+  has_line_terminator_before_next_ = true;
+  SkipWhiteSpace();
+  Scan();
+}
+
+
+Handle<String> Scanner::SubString(int start, int end) {
+  return source_->SubString(start - position_, end - position_);
+}
+
+
+Token::Value Scanner::Next() {
+  // BUG 1215673: Find a thread safe way to set a stack limit in
+  // pre-parse mode. Otherwise, we cannot safely pre-parse from other
+  // threads.
+  current_ = next_;
+  // Check for stack-overflow before returning any tokens.
+  StackLimitCheck check;
+  if (check.HasOverflowed()) {
+    stack_overflow_ = true;
+    next_.token = Token::ILLEGAL;
+  } else {
+    Scan();
+  }
+  return current_.token;
+}
+
+
+void Scanner::StartLiteral() {
+  next_.literal_pos = literals_.pos();
+}
+
+
+void Scanner::AddChar(uc32 c) {
+  literals_.AddChar(c);
+}
+
+
+void Scanner::TerminateLiteral() {
+  next_.literal_end = literals_.pos();
+  AddChar(0);
+}
+
+
+void Scanner::AddCharAdvance() {
+  AddChar(c0_);
+  Advance();
+}
+
+
+static inline bool IsByteOrderMark(uc32 c) {
+  // The Unicode value U+FFFE is guaranteed never to be assigned as a
+  // Unicode character; this implies that in a Unicode context the
+  // 0xFF, 0xFE byte pattern can only be interpreted as the U+FEFF
+  // character expressed in little-endian byte order (since it could
+  // not be a U+FFFE character expressed in big-endian byte
+  // order). Nevertheless, we check for it to be compatible with
+  // Spidermonkey.
+  return c == 0xFEFF || c == 0xFFFE;
+}
+
+
+bool Scanner::SkipWhiteSpace() {
+  int start_position = source_pos();
+
+  while (true) {
+    // We treat byte-order marks (BOMs) as whitespace for better
+    // compatibility with Spidermonkey and other JavaScript engines.
+    while (kIsWhiteSpace.get(c0_) || IsByteOrderMark(c0_)) {
+      // IsWhiteSpace() includes line terminators!
+      if (kIsLineTerminator.get(c0_)) {
+        // Ignore line terminators, but remember them. This is necessary
+        // for automatic semicolon insertion.
+        has_line_terminator_before_next_ = true;
+      }
+      Advance();
+    }
+
+    // If there is an HTML comment end '-->' at the beginning of a
+    // line (with only whitespace in front of it), we treat the rest
+    // of the line as a comment. This is in line with the way
+    // SpiderMonkey handles it.
+    if (c0_ == '-' && has_line_terminator_before_next_) {
+      Advance();
+      if (c0_ == '-') {
+        Advance();
+        if (c0_ == '>') {
+          // Treat the rest of the line as a comment.
+          SkipSingleLineComment();
+          // Continue skipping white space after the comment.
+          continue;
+        }
+        PushBack('-');  // undo Advance()
+      }
+      PushBack('-');  // undo Advance()
+    }
+    // Return whether or not we skipped any characters.
+    return source_pos() != start_position;
+  }
+}
+
+
+Token::Value Scanner::SkipSingleLineComment() {
+  Advance();
+
+  // The line terminator at the end of the line is not considered
+  // to be part of the single-line comment; it is recognized
+  // separately by the lexical grammar and becomes part of the
+  // stream of input elements for the syntactic grammar (see
+  // ECMA-262, section 7.4, page 12).
+  while (c0_ >= 0 && !kIsLineTerminator.get(c0_)) {
+    Advance();
+  }
+
+  return Token::WHITESPACE;
+}
+
+
+Token::Value Scanner::SkipMultiLineComment() {
+  ASSERT(c0_ == '*');
+  Advance();
+
+  while (c0_ >= 0) {
+    char ch = c0_;
+    Advance();
+    // If we have reached the end of the multi-line comment, we
+    // consume the '/' and insert a whitespace. This way all
+    // multi-line comments are treated as whitespace - even the ones
+    // containing line terminators. This contradicts ECMA-262, section
+    // 7.4, page 12, that says that multi-line comments containing
+    // line terminators should be treated as a line terminator, but it
+    // matches the behaviour of SpiderMonkey and KJS.
+    if (ch == '*' && c0_ == '/') {
+      c0_ = ' ';
+      return Token::WHITESPACE;
+    }
+  }
+
+  // Unterminated multi-line comment.
+  return Token::ILLEGAL;
+}
+
+
+Token::Value Scanner::ScanHtmlComment() {
+  // Check for <!-- comments.
+  ASSERT(c0_ == '!');
+  Advance();
+  if (c0_ == '-') {
+    Advance();
+    if (c0_ == '-') return SkipSingleLineComment();
+    PushBack('-');  // undo Advance()
+  }
+  PushBack('!');  // undo Advance()
+  ASSERT(c0_ == '!');
+  return Token::LT;
+}
+
+
+void Scanner::Scan() {
+  Token::Value token;
+  has_line_terminator_before_next_ = false;
+  do {
+    // Remember the position of the next token
+    next_.location.beg_pos = source_pos();
+
+    switch (c0_) {
+      case ' ':
+      case '\t':
+        Advance();
+        token = Token::WHITESPACE;
+        break;
+
+      case '\n':
+        Advance();
+        has_line_terminator_before_next_ = true;
+        token = Token::WHITESPACE;
+        break;
+
+      case '"': case '\'':
+        token = ScanString();
+        break;
+
+      case '<':
+        // < <= << <<= <!--
+        Advance();
+        if (c0_ == '=') {
+          token = Select(Token::LTE);
+        } else if (c0_ == '<') {
+          token = Select('=', Token::ASSIGN_SHL, Token::SHL);
+        } else if (c0_ == '!') {
+          token = ScanHtmlComment();
+        } else {
+          token = Token::LT;
+        }
+        break;
+
+      case '>':
+        // > >= >> >>= >>> >>>=
+        Advance();
+        if (c0_ == '=') {
+          token = Select(Token::GTE);
+        } else if (c0_ == '>') {
+          // >> >>= >>> >>>=
+          Advance();
+          if (c0_ == '=') {
+            token = Select(Token::ASSIGN_SAR);
+          } else if (c0_ == '>') {
+            token = Select('=', Token::ASSIGN_SHR, Token::SHR);
+          } else {
+            token = Token::SAR;
+          }
+        } else {
+          token = Token::GT;
+        }
+        break;
+
+      case '=':
+        // = == ===
+        Advance();
+        if (c0_ == '=') {
+          token = Select('=', Token::EQ_STRICT, Token::EQ);
+        } else {
+          token = Token::ASSIGN;
+        }
+        break;
+
+      case '!':
+        // ! != !==
+        Advance();
+        if (c0_ == '=') {
+          token = Select('=', Token::NE_STRICT, Token::NE);
+        } else {
+          token = Token::NOT;
+        }
+        break;
+
+      case '+':
+        // + ++ +=
+        Advance();
+        if (c0_ == '+') {
+          token = Select(Token::INC);
+        } else if (c0_ == '=') {
+          token = Select(Token::ASSIGN_ADD);
+        } else {
+          token = Token::ADD;
+        }
+        break;
+
+      case '-':
+        // - -- --> -=
+        Advance();
+        if (c0_ == '-') {
+          Advance();
+          if (c0_ == '>' && has_line_terminator_before_next_) {
+            // For compatibility with SpiderMonkey, we skip lines that
+            // start with an HTML comment end '-->'.
+            token = SkipSingleLineComment();
+          } else {
+            token = Token::DEC;
+          }
+        } else if (c0_ == '=') {
+          token = Select(Token::ASSIGN_SUB);
+        } else {
+          token = Token::SUB;
+        }
+        break;
+
+      case '*':
+        // * *=
+        token = Select('=', Token::ASSIGN_MUL, Token::MUL);
+        break;
+
+      case '%':
+        // % %=
+        token = Select('=', Token::ASSIGN_MOD, Token::MOD);
+        break;
+
+      case '/':
+        // /  // /* /=
+        Advance();
+        if (c0_ == '/') {
+          token = SkipSingleLineComment();
+        } else if (c0_ == '*') {
+          token = SkipMultiLineComment();
+        } else if (c0_ == '=') {
+          token = Select(Token::ASSIGN_DIV);
+        } else {
+          token = Token::DIV;
+        }
+        break;
+
+      case '&':
+        // & && &=
+        Advance();
+        if (c0_ == '&') {
+          token = Select(Token::AND);
+        } else if (c0_ == '=') {
+          token = Select(Token::ASSIGN_BIT_AND);
+        } else {
+          token = Token::BIT_AND;
+        }
+        break;
+
+      case '|':
+        // | || |=
+        Advance();
+        if (c0_ == '|') {
+          token = Select(Token::OR);
+        } else if (c0_ == '=') {
+          token = Select(Token::ASSIGN_BIT_OR);
+        } else {
+          token = Token::BIT_OR;
+        }
+        break;
+
+      case '^':
+        // ^ ^=
+        token = Select('=', Token::ASSIGN_BIT_XOR, Token::BIT_XOR);
+        break;
+
+      case '.':
+        // . Number
+        Advance();
+        if (IsDecimalDigit(c0_)) {
+          token = ScanNumber(true);
+        } else {
+          token = Token::PERIOD;
+        }
+        break;
+
+      case ':':
+        token = Select(Token::COLON);
+        break;
+
+      case ';':
+        token = Select(Token::SEMICOLON);
+        break;
+
+      case ',':
+        token = Select(Token::COMMA);
+        break;
+
+      case '(':
+        token = Select(Token::LPAREN);
+        break;
+
+      case ')':
+        token = Select(Token::RPAREN);
+        break;
+
+      case '[':
+        token = Select(Token::LBRACK);
+        break;
+
+      case ']':
+        token = Select(Token::RBRACK);
+        break;
+
+      case '{':
+        token = Select(Token::LBRACE);
+        break;
+
+      case '}':
+        token = Select(Token::RBRACE);
+        break;
+
+      case '?':
+        token = Select(Token::CONDITIONAL);
+        break;
+
+      case '~':
+        token = Select(Token::BIT_NOT);
+        break;
+
+      default:
+        if (kIsIdentifierStart.get(c0_)) {
+          token = ScanIdentifier();
+        } else if (IsDecimalDigit(c0_)) {
+          token = ScanNumber(false);
+        } else if (SkipWhiteSpace()) {
+          token = Token::WHITESPACE;
+        } else if (c0_ < 0) {
+          token = Token::EOS;
+        } else {
+          token = Select(Token::ILLEGAL);
+        }
+        break;
+    }
+
+    // Continue scanning for tokens as long as we're just skipping
+    // whitespace.
+  } while (token == Token::WHITESPACE);
+
+  next_.location.end_pos = source_pos();
+  next_.token = token;
+}
+
+
+void Scanner::SeekForward(int pos) {
+  source_->SeekForward(pos - 1);
+  Advance();
+  Scan();
+}
+
+
+uc32 Scanner::ScanHexEscape(uc32 c, int length) {
+  ASSERT(length <= 4);  // prevent overflow
+
+  uc32 digits[4];
+  uc32 x = 0;
+  for (int i = 0; i < length; i++) {
+    digits[i] = c0_;
+    int d = HexValue(c0_);
+    if (d < 0) {
+      // According to ECMA-262, 3rd, 7.8.4, page 18, these hex escapes
+      // should be illegal, but other JS VMs just return the
+      // non-escaped version of the original character.
+
+      // Push back digits read, except the last one (in c0_).
+      for (int j = i-1; j >= 0; j--) {
+        PushBack(digits[j]);
+      }
+      // Notice: No handling of error - treat it as "\u"->"u".
+      return c;
+    }
+    x = x * 16 + d;
+    Advance();
+  }
+
+  return x;
+}
+
+
+// Octal escapes of the forms '\0xx' and '\xxx' are not a part of
+// ECMA-262. Other JS VMs support them.
+uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
+  uc32 x = c - '0';
+  for (int i = 0; i < length; i++) {
+    int d = c0_ - '0';
+    if (d < 0 || d > 7) break;
+    int nx = x * 8 + d;
+    if (nx >= 256) break;
+    x = nx;
+    Advance();
+  }
+  return x;
+}
+
+
+void Scanner::ScanEscape() {
+  uc32 c = c0_;
+  Advance();
+
+  // Skip escaped newlines.
+  if (kIsLineTerminator.get(c)) {
+    // Allow CR+LF newlines in multiline string literals.
+    if (IsCarriageReturn(c) && IsLineFeed(c0_)) Advance();
+    // Allow LF+CR newlines in multiline string literals.
+    if (IsLineFeed(c) && IsCarriageReturn(c0_)) Advance();
+    return;
+  }
+
+  switch (c) {
+    case '\'':  // fall through
+    case '"' :  // fall through
+    case '\\': break;
+    case 'b' : c = '\b'; break;
+    case 'f' : c = '\f'; break;
+    case 'n' : c = '\n'; break;
+    case 'r' : c = '\r'; break;
+    case 't' : c = '\t'; break;
+    case 'u' : c = ScanHexEscape(c, 4); break;
+    case 'v' : c = '\v'; break;
+    case 'x' : c = ScanHexEscape(c, 2); break;
+    case '0' :  // fall through
+    case '1' :  // fall through
+    case '2' :  // fall through
+    case '3' :  // fall through
+    case '4' :  // fall through
+    case '5' :  // fall through
+    case '6' :  // fall through
+    case '7' : c = ScanOctalEscape(c, 2); break;
+  }
+
+  // According to ECMA-262, 3rd, 7.8.4 (p 18ff) these
+  // should be illegal, but they are commonly handled
+  // as non-escaped characters by JS VMs.
+  AddChar(c);
+}
+
+
+Token::Value Scanner::ScanString() {
+  uc32 quote = c0_;
+  Advance();  // consume quote
+
+  StartLiteral();
+  while (c0_ != quote && c0_ >= 0 && !kIsLineTerminator.get(c0_)) {
+    uc32 c = c0_;
+    Advance();
+    if (c == '\\') {
+      if (c0_ < 0) return Token::ILLEGAL;
+      ScanEscape();
+    } else {
+      AddChar(c);
+    }
+  }
+  if (c0_ != quote) {
+    return Token::ILLEGAL;
+  }
+  TerminateLiteral();
+
+  Advance();  // consume quote
+  return Token::STRING;
+}
+
+
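+// Consume the current character and return the given token.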
+Token::Value Scanner::Select(Token::Value tok) {
+  Advance();
+  return tok;
+}
+
+
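+// Consume the current character; if the following character matches 'next',
+// consume it as well and return 'then', otherwise return 'else_'.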
+Token::Value Scanner::Select(uc32 next, Token::Value then, Token::Value else_) {
+  Advance();
+  if (c0_ == next) {
+    Advance();
+    return then;
+  } else {
+    return else_;
+  }
+}
+
+
+// Scans the decimal digits, if any, at the current position.
+void Scanner::ScanDecimalDigits() {
+  while (IsDecimalDigit(c0_))
+    AddCharAdvance();
+}
+
+
+Token::Value Scanner::ScanNumber(bool seen_period) {
+  ASSERT(IsDecimalDigit(c0_));  // the first digit of the number or the fraction
+
+  enum { DECIMAL, HEX, OCTAL } kind = DECIMAL;
+
+  StartLiteral();
+  if (seen_period) {
+    // we have already seen a decimal point of the float
+    AddChar('.');
+    ScanDecimalDigits();  // we know we have at least one digit
+
+  } else {
+    // if the first character is '0' we must check for octals and hex
+    if (c0_ == '0') {
+      AddCharAdvance();
+
+      // either 0, 0exxx, 0Exxx, 0.xxx, an octal number, or a hex number
+      if (c0_ == 'x' || c0_ == 'X') {
+        // hex number
+        kind = HEX;
+        AddCharAdvance();
+        if (!IsHexDigit(c0_))
+          // we must have at least one hex digit after 'x'/'X'
+          return Token::ILLEGAL;
+        while (IsHexDigit(c0_))
+          AddCharAdvance();
+
+      } else if ('0' <= c0_ && c0_ <= '7') {
+        // (possible) octal number
+        kind = OCTAL;
+        while (true) {
+          if (c0_ == '8' || c0_ == '9') {
+            kind = DECIMAL;
+            break;
+          }
+          if (c0_  < '0' || '7'  < c0_) break;
+          AddCharAdvance();
+        }
+      }
+    }
+
+    // Parse decimal digits and allow trailing fractional part.
+    if (kind == DECIMAL) {
+      ScanDecimalDigits();  // optional
+      if (c0_ == '.') {
+        AddCharAdvance();
+        ScanDecimalDigits();  // optional
+      }
+    }
+  }
+
+  // scan exponent, if any
+  if (c0_ == 'e' || c0_ == 'E') {
+    ASSERT(kind != HEX);  // 'e'/'E' must be scanned as part of the hex number
+    if (kind == OCTAL) return Token::ILLEGAL;  // no exponent for octals allowed
+    // scan exponent
+    AddCharAdvance();
+    if (c0_ == '+' || c0_ == '-')
+      AddCharAdvance();
+    if (!IsDecimalDigit(c0_))
+      // we must have at least one decimal digit after 'e'/'E'
+      return Token::ILLEGAL;
+    ScanDecimalDigits();
+  }
+  TerminateLiteral();
+
+  // The source character immediately following a numeric literal must
+  // not be an identifier start or a decimal digit; see ECMA-262
+  // section 7.8.3, page 17 (note that we read only one decimal digit
+  // if the value is 0).
+  if (IsDecimalDigit(c0_) || kIsIdentifierStart.get(c0_))
+    return Token::ILLEGAL;
+
+  return Token::NUMBER;
+}
+
+
+uc32 Scanner::ScanIdentifierUnicodeEscape() {
+  Advance();
+  if (c0_ != 'u') return unibrow::Utf8::kBadChar;
+  Advance();
+  uc32 c = ScanHexEscape('u', 4);
+  // We do not allow a unicode escape sequence to start another
+  // unicode escape sequence.
+  if (c == '\\') return unibrow::Utf8::kBadChar;
+  return c;
+}
+
+
+Token::Value Scanner::ScanIdentifier() {
+  ASSERT(kIsIdentifierStart.get(c0_));
+  bool has_escapes = false;
+
+  StartLiteral();
+  // Scan identifier start character.
+  if (c0_ == '\\') {
+    has_escapes = true;
+    uc32 c = ScanIdentifierUnicodeEscape();
+    // Only allow legal identifier start characters.
+    if (!kIsIdentifierStart.get(c)) return Token::ILLEGAL;
+    AddChar(c);
+  } else {
+    AddChar(c0_);
+    Advance();
+  }
+
+  // Scan the rest of the identifier characters.
+  while (kIsIdentifierPart.get(c0_)) {
+    if (c0_ == '\\') {
+      has_escapes = true;
+      uc32 c = ScanIdentifierUnicodeEscape();
+      // Only allow legal identifier part characters.
+      if (!kIsIdentifierPart.get(c)) return Token::ILLEGAL;
+      AddChar(c);
+    } else {
+      AddChar(c0_);
+      Advance();
+    }
+  }
+  TerminateLiteral();
+
+  // We don't have any 1-letter keywords (this is probably a common case).
+  if ((next_.literal_end - next_.literal_pos) == 1) {
+    return Token::IDENTIFIER;
+  }
+
+  // If the identifier contains unicode escapes, it must not be
+  // resolved to a keyword.
+  if (has_escapes) {
+    return Token::IDENTIFIER;
+  }
+
+  return Token::Lookup(&literals_.data()[next_.literal_pos]);
+}
+
+
+
+bool Scanner::IsIdentifier(unibrow::CharacterStream* buffer) {
+  // Checks whether the buffer contains an identifier (no escape).
+  if (!buffer->has_more()) return false;
+  if (!kIsIdentifierStart.get(buffer->GetNext())) return false;
+  while (buffer->has_more()) {
+    if (!kIsIdentifierPart.get(buffer->GetNext())) return false;
+  }
+  return true;
+}
+
+
+bool Scanner::ScanRegExpPattern(bool seen_equal) {
+  // Scan: ('/' | '/=') RegularExpressionBody '/' RegularExpressionFlags
+  bool in_character_class = false;
+
+  // Previous token is either '/' or '/=', in the second case, the
+  // pattern starts at =.
+  next_.location.beg_pos = source_pos() - (seen_equal ? 2 : 1);
+  next_.location.end_pos = source_pos() - (seen_equal ? 1 : 0);
+
+  // Scan regular expression body: According to ECMA-262, 3rd, 7.8.5,
+  // the scanner should pass uninterpreted bodies to the RegExp
+  // constructor.
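+  // For example, in the pattern a[/]b the '/' inside the character class
+  // does not terminate the literal, which is why in_character_class is
+  // tracked below.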
+  StartLiteral();
+  if (seen_equal)
+    AddChar('=');
+
+  while (c0_ != '/' || in_character_class) {
+    if (kIsLineTerminator.get(c0_) || c0_ < 0)
+      return false;
+    if (c0_ == '\\') {  // escaped character
+      AddCharAdvance();
+      if (kIsLineTerminator.get(c0_) || c0_ < 0)
+        return false;
+      AddCharAdvance();
+    } else {  // unescaped character
+      if (c0_ == '[')
+        in_character_class = true;
+      if (c0_ == ']')
+        in_character_class = false;
+      AddCharAdvance();
+    }
+  }
+  Advance();  // consume '/'
+
+  TerminateLiteral();
+
+  return true;
+}
+
+bool Scanner::ScanRegExpFlags() {
+  // Scan regular expression flags.
+  StartLiteral();
+  while (kIsIdentifierPart.get(c0_)) {
+    if (c0_ == '\\') {
+      uc32 c = ScanIdentifierUnicodeEscape();
+      if (c != static_cast<uc32>(unibrow::Utf8::kBadChar)) {
+        // We allow any escaped character, unlike the restriction on
+        // IdentifierPart when it is used to build an IdentifierName.
+        AddChar(c);
+        continue;
+      }
+    }
+    AddCharAdvance();
+  }
+  TerminateLiteral();
+
+  next_.location.end_pos = source_pos() - 1;
+  return true;
+}
+
+} }  // namespace v8::internal
diff --git a/src/scanner.h b/src/scanner.h
new file mode 100644
index 0000000..a201d0e
--- /dev/null
+++ b/src/scanner.h
@@ -0,0 +1,291 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SCANNER_H_
+#define V8_SCANNER_H_
+
+#include "token.h"
+#include "char-predicates-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+class UTF8Buffer {
+ public:
+  UTF8Buffer();
+  ~UTF8Buffer();
+
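+  // Fast path: one-byte characters are stored directly while there is
+  // guaranteed room before limit_; anything else falls back to
+  // AddCharSlow().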
+  void AddChar(uc32 c) {
+    if (cursor_ <= limit_ &&
+        static_cast<unsigned>(c) <= unibrow::Utf8::kMaxOneByteChar) {
+      *cursor_++ = static_cast<char>(c);
+    } else {
+      AddCharSlow(c);
+    }
+  }
+
+  void Reset() { cursor_ = data_; }
+  int pos() const { return cursor_ - data_; }
+  char* data() const { return data_; }
+
+ private:
+  char* data_;
+  char* cursor_;
+  char* limit_;
+
+  int Capacity() const {
+    return (limit_ - data_) + unibrow::Utf8::kMaxEncodedSize;
+  }
+
+  static char* ComputeLimit(char* data, int capacity) {
+    return (data + capacity) - unibrow::Utf8::kMaxEncodedSize;
+  }
+
+  void AddCharSlow(uc32 c);
+};
+
+
+class UTF16Buffer {
+ public:
+  UTF16Buffer();
+  virtual ~UTF16Buffer() {}
+
+  virtual void PushBack(uc32 ch) = 0;
+  // returns a value < 0 when the buffer end is reached
+  virtual uc32 Advance() = 0;
+  virtual void SeekForward(int pos) = 0;
+
+  int pos() const { return pos_; }
+  int size() const { return size_; }
+  Handle<String> SubString(int start, int end);
+
+ protected:
+  Handle<String> data_;
+  int pos_;
+  int size_;
+};
+
+
+class CharacterStreamUTF16Buffer: public UTF16Buffer {
+ public:
+  CharacterStreamUTF16Buffer();
+  virtual ~CharacterStreamUTF16Buffer() {}
+  void Initialize(Handle<String> data, unibrow::CharacterStream* stream);
+  virtual void PushBack(uc32 ch);
+  virtual uc32 Advance();
+  virtual void SeekForward(int pos);
+
+ private:
+  List<uc32> pushback_buffer_;
+  uc32 last_;
+  unibrow::CharacterStream* stream_;
+
+  List<uc32>* pushback_buffer() { return &pushback_buffer_; }
+};
+
+
+class TwoByteStringUTF16Buffer: public UTF16Buffer {
+ public:
+  TwoByteStringUTF16Buffer();
+  virtual ~TwoByteStringUTF16Buffer() {}
+  void Initialize(Handle<ExternalTwoByteString> data);
+  virtual void PushBack(uc32 ch);
+  virtual uc32 Advance();
+  virtual void SeekForward(int pos);
+
+ private:
+  const uint16_t* raw_data_;
+};
+
+
+class Scanner {
+ public:
+
+  typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
+
+  // Construction
+  explicit Scanner(bool is_pre_parsing);
+
+  // Initialize the Scanner to scan source:
+  void Init(Handle<String> source,
+            unibrow::CharacterStream* stream,
+            int position);
+
+  // Returns the next token.
+  Token::Value Next();
+
+  // One token look-ahead (past the token returned by Next()).
+  Token::Value peek() const  { return next_.token; }
+
+  // Returns true if there was a line terminator before the peek'ed token.
+  bool has_line_terminator_before_next() const {
+    return has_line_terminator_before_next_;
+  }
+
+  struct Location {
+    Location(int b, int e) : beg_pos(b), end_pos(e) { }
+    Location() : beg_pos(0), end_pos(0) { }
+    int beg_pos;
+    int end_pos;
+  };
+
+  // Returns the location information for the current token
+  // (the token returned by Next()).
+  Location location() const  { return current_.location; }
+  Location peek_location() const  { return next_.location; }
+
+  // Returns the literal string, if any, for the current token (the
+  // token returned by Next()). The string is 0-terminated and in
+  // UTF-8 format; it may contain 0-characters. Literal strings are
+  // collected for identifiers, strings, and numbers.
+  const char* literal_string() const {
+    return &literals_.data()[current_.literal_pos];
+  }
+  int literal_length() const {
+    return current_.literal_end - current_.literal_pos;
+  }
+
+  Vector<const char> next_literal() const {
+    return Vector<const char>(next_literal_string(), next_literal_length());
+  }
+
+  // Returns the literal string for the next token (the token that
+  // would be returned if Next() were called).
+  const char* next_literal_string() const {
+    return &literals_.data()[next_.literal_pos];
+  }
+  // Returns the length of the next token (that would be returned if
+  // Next() were called).
+  int next_literal_length() const {
+    return next_.literal_end - next_.literal_pos;
+  }
+
+  // Scans the input as a regular expression pattern, previous
+  // character(s) must be /(=). Returns true if a pattern is scanned.
+  bool ScanRegExpPattern(bool seen_equal);
+  // Returns true if regexp flags are scanned (always since flags can
+  // be empty).
+  bool ScanRegExpFlags();
+
+  // Seek forward to the given position.  This operation does not
+  // work in general, for instance when there are pushed back
+  // characters, but works for seeking forward until simple delimiter
+  // tokens, which is what it is used for.
+  void SeekForward(int pos);
+
+  Handle<String> SubString(int start_pos, int end_pos);
+  bool stack_overflow() { return stack_overflow_; }
+
+  static StaticResource<Utf8Decoder>* utf8_decoder() { return &utf8_decoder_; }
+
+  // Tells whether the buffer contains an identifier (no escapes).
+  // Used for checking if a property name is an identifier.
+  static bool IsIdentifier(unibrow::CharacterStream* buffer);
+
+  static unibrow::Predicate<IdentifierStart, 128> kIsIdentifierStart;
+  static unibrow::Predicate<IdentifierPart, 128> kIsIdentifierPart;
+  static unibrow::Predicate<unibrow::LineTerminator, 128> kIsLineTerminator;
+  static unibrow::Predicate<unibrow::WhiteSpace, 128> kIsWhiteSpace;
+
+  static const int kCharacterLookaheadBufferSize = 1;
+
+ private:
+  CharacterStreamUTF16Buffer char_stream_buffer_;
+  TwoByteStringUTF16Buffer two_byte_string_buffer_;
+
+  // Source.
+  UTF16Buffer* source_;
+  int position_;
+
+  // Buffer to hold literal values (identifiers, strings, numbers)
+  // using 0-terminated UTF-8 encoding.
+  UTF8Buffer literals_;
+
+  bool stack_overflow_;
+  static StaticResource<Utf8Decoder> utf8_decoder_;
+
+  // One Unicode character look-ahead; c0_ < 0 at the end of the input.
+  uc32 c0_;
+
+  // The current and look-ahead token.
+  struct TokenDesc {
+    Token::Value token;
+    Location location;
+    int literal_pos, literal_end;
+  };
+
+  TokenDesc current_;  // desc for current token (as returned by Next())
+  TokenDesc next_;     // desc for next token (one token look-ahead)
+  bool has_line_terminator_before_next_;
+  bool is_pre_parsing_;
+
+  // Literal buffer support
+  void StartLiteral();
+  void AddChar(uc32 ch);
+  void AddCharAdvance();
+  void TerminateLiteral();
+
+  // Low-level scanning support.
+  void Advance() { c0_ = source_->Advance(); }
+  void PushBack(uc32 ch) {
+    source_->PushBack(ch);
+    c0_ = ch;
+  }
+
+  bool SkipWhiteSpace();
+  Token::Value SkipSingleLineComment();
+  Token::Value SkipMultiLineComment();
+
+  inline Token::Value Select(Token::Value tok);
+  inline Token::Value Select(uc32 next, Token::Value then, Token::Value else_);
+
+  void Scan();
+  void ScanDecimalDigits();
+  Token::Value ScanNumber(bool seen_period);
+  Token::Value ScanIdentifier();
+  uc32 ScanHexEscape(uc32 c, int length);
+  uc32 ScanOctalEscape(uc32 c, int length);
+  void ScanEscape();
+  Token::Value ScanString();
+
+  // Scans a possible HTML comment -- begins with '<!'.
+  Token::Value ScanHtmlComment();
+
+  // Return the current source position.
+  int source_pos() {
+    return source_->pos() - kCharacterLookaheadBufferSize + position_;
+  }
+
+  // Decodes a unicode escape-sequence which is part of an identifier.
+  // If the escape sequence cannot be decoded the result is kBadChar.
+  uc32 ScanIdentifierUnicodeEscape();
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_SCANNER_H_
diff --git a/src/scopeinfo.cc b/src/scopeinfo.cc
new file mode 100644
index 0000000..8a237fd
--- /dev/null
+++ b/src/scopeinfo.cc
@@ -0,0 +1,650 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "scopeinfo.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+
+static int CompareLocal(Variable* const* v, Variable* const* w) {
+  Slot* s = (*v)->slot();
+  Slot* t = (*w)->slot();
+  // We may have rewritten parameters (that are in the arguments object)
+  // which may have a NULL slot... - find a better solution...
+  int x = (s != NULL ? s->index() : 0);
+  int y = (t != NULL ? t->index() : 0);
+  // Consider sorting them according to type as well?
+  return x - y;
+}
+
+
+template<class Allocator>
+ScopeInfo<Allocator>::ScopeInfo(Scope* scope)
+    : function_name_(Factory::empty_symbol()),
+      calls_eval_(scope->calls_eval()),
+      parameters_(scope->num_parameters()),
+      stack_slots_(scope->num_stack_slots()),
+      context_slots_(scope->num_heap_slots()),
+      context_modes_(scope->num_heap_slots()) {
+  // Add parameters.
+  for (int i = 0; i < scope->num_parameters(); i++) {
+    ASSERT(parameters_.length() == i);
+    parameters_.Add(scope->parameter(i)->name());
+  }
+
+  // Add stack locals and collect heap locals.
+  // We are assuming that the locals' slots are allocated in
+  // increasing order, so we can simply add them to the
+  // ScopeInfo lists. However, due to usage analysis, this is
+  // not true for context-allocated locals: Some of them
+  // may be parameters which are allocated before the
+  // non-parameter locals. When the non-parameter locals are
+  // sorted according to usage, the allocated slot indices may
+  // not be in increasing order with the variable list anymore.
+  // Thus, we first collect the context-allocated locals, and then
+  // sort them by context slot index before adding them to the
+  // ScopeInfo list.
+  List<Variable*, Allocator> locals(32);  // 32 is a wild guess
+  ASSERT(locals.is_empty());
+  scope->CollectUsedVariables(&locals);
+  locals.Sort(&CompareLocal);
+
+  List<Variable*, Allocator> heap_locals(locals.length());
+  for (int i = 0; i < locals.length(); i++) {
+    Variable* var = locals[i];
+    if (var->var_uses()->is_used()) {
+      Slot* slot = var->slot();
+      if (slot != NULL) {
+        switch (slot->type()) {
+          case Slot::PARAMETER:
+            // explicitly added to parameters_ above - ignore
+            break;
+
+          case Slot::LOCAL:
+            ASSERT(stack_slots_.length() == slot->index());
+            stack_slots_.Add(var->name());
+            break;
+
+          case Slot::CONTEXT:
+            heap_locals.Add(var);
+            break;
+
+          case Slot::LOOKUP:
+          case Slot::GLOBAL:
+            // these are currently not used
+            UNREACHABLE();
+            break;
+        }
+      }
+    }
+  }
+
+  // Add heap locals.
+  if (scope->num_heap_slots() > 0) {
+    // Add user-defined slots.
+    for (int i = 0; i < heap_locals.length(); i++) {
+      ASSERT(heap_locals[i]->slot()->index() - Context::MIN_CONTEXT_SLOTS ==
+             context_slots_.length());
+      ASSERT(heap_locals[i]->slot()->index() - Context::MIN_CONTEXT_SLOTS ==
+             context_modes_.length());
+      context_slots_.Add(heap_locals[i]->name());
+      context_modes_.Add(heap_locals[i]->mode());
+    }
+
+  } else {
+    ASSERT(heap_locals.length() == 0);
+  }
+
+  // Add the function context slot, if present.
+  // For now, this must happen at the very end because of the
+  // ordering of the scope info slots and the respective slot indices.
+  if (scope->is_function_scope()) {
+    Variable* var = scope->function();
+    if (var != NULL &&
+        var->var_uses()->is_used() &&
+        var->slot()->type() == Slot::CONTEXT) {
+      function_name_ = var->name();
+      // Note that we must not find the function name in the context slot
+      // list - instead it must be handled separately in the
+      // Contexts::Lookup() function. Thus record an empty symbol here so we
+      // get the correct number of context slots.
+      ASSERT(var->slot()->index() - Context::MIN_CONTEXT_SLOTS ==
+             context_slots_.length());
+      ASSERT(var->slot()->index() - Context::MIN_CONTEXT_SLOTS ==
+             context_modes_.length());
+      context_slots_.Add(Factory::empty_symbol());
+      context_modes_.Add(Variable::INTERNAL);
+    }
+  }
+}
+
+
+// Encoding format in the Code object:
+//
+// - function name
+// - calls eval (boolean, encoded as a smi)
+//
+// - number of variables in the context object (smi) (= function context
+//   slot index + 1)
+// - list of pairs (name, Var mode) of context-allocated variables (starting
+//   with context slot 0)
+// - NULL (sentinel)
+//
+// - number of parameters (smi)
+// - list of parameter names (starting with parameter 0 first)
+// - NULL (sentinel)
+//
+// - number of variables on the stack (smi)
+// - list of names of stack-allocated variables (starting with stack slot 0)
+// - NULL (sentinel)
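+//
+// For illustration only (hypothetical function and slot assignment), a
+// function such as
+//
+//   function f(a, b) { var x; var y; }   // assume 'y' is context-allocated
+//
+// would be serialized roughly as:
+//
+//   "f", 0,               // function name, calls eval flag
+//   1, "y", VAR, NULL,    // one (name, mode) pair for context slots
+//   2, "a", "b", NULL,    // two parameter names
+//   1, "x", NULL          // one stack slot name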
+
+// The ScopeInfo representation could be simplified and the ScopeInfo
+// re-implemented (with almost the same interface). Here is a
+// suggestion for the new format:
+//
+// - have a single list with all variable names (parameters, stack locals,
+//   context locals), followed by a list of non-Object* values containing
+//   the variables information (what kind, index, attributes)
+// - searching the linear list of names is fast and yields an index into the
+//   list if the variable name is found
+// - that list index is then used to find the variable information in the
+//   subsequent list
+// - the list entries don't have to be in any particular order, so all the
+//   current sorting business can go away
+// - the ScopeInfo lookup routines can be reduced to perhaps a single lookup
+//   which returns all information at once
+// - when gathering the information from a Scope, we only need to iterate
+//   through the local variables (parameters and context info is already
+//   present)
+
+
+static inline Object** ReadInt(Object** p, int* x) {
+  *x = (reinterpret_cast<Smi*>(*p++))->value();
+  return p;
+}
+
+
+static inline Object** ReadBool(Object** p, bool* x) {
+  *x = (reinterpret_cast<Smi*>(*p++))->value() != 0;
+  return p;
+}
+
+
+static inline Object** ReadSymbol(Object** p, Handle<String>* s) {
+  *s = Handle<String>(reinterpret_cast<String*>(*p++));
+  return p;
+}
+
+
+static inline Object** ReadSentinel(Object** p) {
+  ASSERT(*p == NULL);
+  return p + 1;
+}
+
+
+template <class Allocator>
+static Object** ReadList(Object** p, List<Handle<String>, Allocator >* list) {
+  ASSERT(list->is_empty());
+  int n;
+  p = ReadInt(p, &n);
+  while (n-- > 0) {
+    Handle<String> s;
+    p = ReadSymbol(p, &s);
+    list->Add(s);
+  }
+  return ReadSentinel(p);
+}
+
+
+template <class Allocator>
+static Object** ReadList(Object** p,
+                         List<Handle<String>, Allocator>* list,
+                         List<Variable::Mode, Allocator>* modes) {
+  ASSERT(list->is_empty());
+  int n;
+  p = ReadInt(p, &n);
+  while (n-- > 0) {
+    Handle<String> s;
+    int m;
+    p = ReadSymbol(p, &s);
+    p = ReadInt(p, &m);
+    list->Add(s);
+    modes->Add(static_cast<Variable::Mode>(m));
+  }
+  return ReadSentinel(p);
+}
+
+
+template<class Allocator>
+ScopeInfo<Allocator>::ScopeInfo(Code* code)
+  : function_name_(Factory::empty_symbol()),
+    parameters_(4),
+    stack_slots_(8),
+    context_slots_(8),
+    context_modes_(8) {
+  if (code == NULL || code->sinfo_size() == 0) return;
+
+  Object** p0 = &Memory::Object_at(code->sinfo_start());
+  Object** p = p0;
+  p = ReadSymbol(p, &function_name_);
+  p = ReadBool(p, &calls_eval_);
+  p = ReadList<Allocator>(p, &context_slots_, &context_modes_);
+  p = ReadList<Allocator>(p, &parameters_);
+  p = ReadList<Allocator>(p, &stack_slots_);
+  ASSERT((p - p0) * kPointerSize == code->sinfo_size());
+}
+
+
+static inline Object** WriteInt(Object** p, int x) {
+  *p++ = Smi::FromInt(x);
+  return p;
+}
+
+
+static inline Object** WriteBool(Object** p, bool b) {
+  *p++ = Smi::FromInt(b ? 1 : 0);
+  return p;
+}
+
+
+static inline Object** WriteSymbol(Object** p, Handle<String> s) {
+  *p++ = *s;
+  return p;
+}
+
+
+static inline Object** WriteSentinel(Object** p) {
+  *p++ = NULL;
+  return p;
+}
+
+
+template <class Allocator>
+static Object** WriteList(Object** p, List<Handle<String>, Allocator >* list) {
+  const int n = list->length();
+  p = WriteInt(p, n);
+  for (int i = 0; i < n; i++) {
+    p = WriteSymbol(p, list->at(i));
+  }
+  return WriteSentinel(p);
+}
+
+
+template <class Allocator>
+static Object** WriteList(Object** p,
+                          List<Handle<String>, Allocator>* list,
+                          List<Variable::Mode, Allocator>* modes) {
+  const int n = list->length();
+  p = WriteInt(p, n);
+  for (int i = 0; i < n; i++) {
+    p = WriteSymbol(p, list->at(i));
+    p = WriteInt(p, modes->at(i));
+  }
+  return WriteSentinel(p);
+}
+
+
+template<class Allocator>
+int ScopeInfo<Allocator>::Serialize(Code* code) {
+  // function name, calls eval, length & sentinel for 3 tables:
+  const int extra_slots = 1 + 1 + 2 * 3;
+  int size = (extra_slots +
+              context_slots_.length() * 2 +
+              parameters_.length() +
+              stack_slots_.length()) * kPointerSize;
+
+  if (code != NULL) {
+    CHECK(code->sinfo_size() == size);
+    Object** p0 = &Memory::Object_at(code->sinfo_start());
+    Object** p = p0;
+    p = WriteSymbol(p, function_name_);
+    p = WriteBool(p, calls_eval_);
+    p = WriteList(p, &context_slots_, &context_modes_);
+    p = WriteList(p, &parameters_);
+    p = WriteList(p, &stack_slots_);
+    ASSERT((p - p0) * kPointerSize == size);
+  }
+
+  return size;
+}
+
+
+template<class Allocator>
+void ScopeInfo<Allocator>::IterateScopeInfo(Code* code, ObjectVisitor* v) {
+  Object** start = &Memory::Object_at(code->sinfo_start());
+  Object** end = &Memory::Object_at(code->sinfo_start() + code->sinfo_size());
+  v->VisitPointers(start, end);
+}
+
+
+static Object** ContextEntriesAddr(Code* code) {
+  ASSERT(code->sinfo_size() > 0);
+  // +2 for function name and calls eval:
+  return &Memory::Object_at(code->sinfo_start()) + 2;
+}
+
+
+static Object** ParameterEntriesAddr(Code* code) {
+  ASSERT(code->sinfo_size() > 0);
+  Object** p = ContextEntriesAddr(code);
+  int n;  // number of context slots;
+  p = ReadInt(p, &n);
+  return p + n*2 + 1;  // *2 for pairs, +1 for sentinel
+}
+
+
+static Object** StackSlotEntriesAddr(Code* code) {
+  ASSERT(code->sinfo_size() > 0);
+  Object** p = ParameterEntriesAddr(code);
+  int n;  // number of parameter slots;
+  p = ReadInt(p, &n);
+  return p + n + 1;  // +1 for sentinel
+}
+
+
+template<class Allocator>
+bool ScopeInfo<Allocator>::CallsEval(Code* code) {
+  if (code->sinfo_size() > 0) {
+    // +1 for function name:
+    Object** p = &Memory::Object_at(code->sinfo_start()) + 1;
+    bool calls_eval;
+    p = ReadBool(p, &calls_eval);
+    return calls_eval;
+  }
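+  // Without scope info, conservatively assume the code may call eval.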
+  return true;
+}
+
+
+template<class Allocator>
+int ScopeInfo<Allocator>::NumberOfStackSlots(Code* code) {
+  if (code->sinfo_size() > 0) {
+    Object** p = StackSlotEntriesAddr(code);
+    int n;  // number of stack slots;
+    ReadInt(p, &n);
+    return n;
+  }
+  return 0;
+}
+
+
+template<class Allocator>
+int ScopeInfo<Allocator>::NumberOfContextSlots(Code* code) {
+  if (code->sinfo_size() > 0) {
+    Object** p = ContextEntriesAddr(code);
+    int n;  // number of context slots;
+    ReadInt(p, &n);
+    return n + Context::MIN_CONTEXT_SLOTS;
+  }
+  return 0;
+}
+
+
+template<class Allocator>
+int ScopeInfo<Allocator>::StackSlotIndex(Code* code, String* name) {
+  ASSERT(name->IsSymbol());
+  if (code->sinfo_size() > 0) {
+    // Loop below depends on the NULL sentinel after the stack slot names.
+    ASSERT(NumberOfStackSlots(code) > 0 ||
+           *(StackSlotEntriesAddr(code) + 1) == NULL);
+    // slots start after length entry
+    Object** p0 = StackSlotEntriesAddr(code) + 1;
+    Object** p = p0;
+    while (*p != NULL) {
+      if (*p == name) return p - p0;
+      p++;
+    }
+  }
+  return -1;
+}
+
+
+template<class Allocator>
+int ScopeInfo<Allocator>::ContextSlotIndex(Code* code,
+                                           String* name,
+                                           Variable::Mode* mode) {
+  ASSERT(name->IsSymbol());
+  int result = ContextSlotCache::Lookup(code, name, mode);
+  if (result != ContextSlotCache::kNotFound) return result;
+  if (code->sinfo_size() > 0) {
+    // Loop below depends on the NULL sentinel after the context slot names.
+    ASSERT(NumberOfContextSlots(code) >= Context::MIN_CONTEXT_SLOTS ||
+           *(ContextEntriesAddr(code) + 1) == NULL);
+
+    // slots start after length entry
+    Object** p0 = ContextEntriesAddr(code) + 1;
+    Object** p = p0;
+    // contexts may have no variable slots (in the presence of eval()).
+    while (*p != NULL) {
+      if (*p == name) {
+        ASSERT(((p - p0) & 1) == 0);
+        int v;
+        ReadInt(p + 1, &v);
+        Variable::Mode mode_value = static_cast<Variable::Mode>(v);
+        if (mode != NULL) *mode = mode_value;
+        result = ((p - p0) >> 1) + Context::MIN_CONTEXT_SLOTS;
+        ContextSlotCache::Update(code, name, mode_value, result);
+        return result;
+      }
+      p += 2;
+    }
+  }
+  ContextSlotCache::Update(code, name, Variable::INTERNAL, -1);
+  return -1;
+}
+
+
+template<class Allocator>
+int ScopeInfo<Allocator>::ParameterIndex(Code* code, String* name) {
+  ASSERT(name->IsSymbol());
+  if (code->sinfo_size() > 0) {
+    // We must read parameters from the end since for
+    // multiply declared parameters the value of the
+    // last declaration of that parameter is used
+    // inside a function (and thus we need to look
+    // at the last index). Was bug# 1110337.
+    //
+    // Eventually, we should only register such parameters
+    // once, with corresponding index. This requires a new
+    // implementation of the ScopeInfo code. See also other
+    // comments in this file regarding this.
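+    //
+    // For example, in function f(x, x) { return x; } the second 'x' wins,
+    // so the lookup must yield the index of the last parameter named 'x'.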
+    Object** p = ParameterEntriesAddr(code);
+    int n;  // number of parameters
+    Object** p0 = ReadInt(p, &n);
+    p = p0 + n;
+    while (p > p0) {
+      p--;
+      if (*p == name) return p - p0;
+    }
+  }
+  return -1;
+}
+
+
+template<class Allocator>
+int ScopeInfo<Allocator>::FunctionContextSlotIndex(Code* code, String* name) {
+  ASSERT(name->IsSymbol());
+  if (code->sinfo_size() > 0) {
+    Object** p = &Memory::Object_at(code->sinfo_start());
+    if (*p == name) {
+      p = ContextEntriesAddr(code);
+      int n;  // number of context slots
+      ReadInt(p, &n);
+      ASSERT(n != 0);
+      // The function context slot is the last entry.
+      return n + Context::MIN_CONTEXT_SLOTS - 1;
+    }
+  }
+  return -1;
+}
+
+
+template<class Allocator>
+Handle<String> ScopeInfo<Allocator>::LocalName(int i) const {
+  // A local variable can be allocated either on the stack or in the context.
+  // Variables allocated in the context are always preceded by
+  // Context::MIN_CONTEXT_SLOTS fixed slots in the context.
+  if (i < number_of_stack_slots()) {
+    return stack_slot_name(i);
+  } else {
+    return context_slot_name(i - number_of_stack_slots() +
+                             Context::MIN_CONTEXT_SLOTS);
+  }
+}
+
+
+template<class Allocator>
+int ScopeInfo<Allocator>::NumberOfLocals() const {
+  int number_of_locals = number_of_stack_slots();
+  if (number_of_context_slots() > 0) {
+    ASSERT(number_of_context_slots() >= Context::MIN_CONTEXT_SLOTS);
+    number_of_locals += number_of_context_slots() - Context::MIN_CONTEXT_SLOTS;
+  }
+  return number_of_locals;
+}
+
+
+int ContextSlotCache::Hash(Code* code, String* name) {
+  // Uses only lower 32 bits if pointers are larger.
+  uintptr_t addr_hash =
+      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(code)) >> 2;
+  return (addr_hash ^ name->Hash()) % kLength;
+}
+
+
+int ContextSlotCache::Lookup(Code* code,
+                             String* name,
+                             Variable::Mode* mode) {
+  int index = Hash(code, name);
+  Key& key = keys_[index];
+  if ((key.code == code) && key.name->Equals(name)) {
+    Value result(values_[index]);
+    if (mode != NULL) *mode = result.mode();
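+    // Indices are stored biased by -kNotFound (see Update); a stored value
+    // of 1 corresponds to slot index -1 (absent).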
+    return result.index() + kNotFound;
+  }
+  return kNotFound;
+}
+
+
+void ContextSlotCache::Update(Code* code,
+                              String* name,
+                              Variable::Mode mode,
+                              int slot_index) {
+  String* symbol;
+  ASSERT(slot_index > kNotFound);
+  if (Heap::LookupSymbolIfExists(name, &symbol)) {
+    int index = Hash(code, symbol);
+    Key& key = keys_[index];
+    key.code = code;
+    key.name = symbol;
+    // Note that Value only stores an unsigned index.
+    values_[index] = Value(mode, slot_index - kNotFound).raw();
+#ifdef DEBUG
+    ValidateEntry(code, name, mode, slot_index);
+#endif
+  }
+}
+
+
+void ContextSlotCache::Clear() {
+  for (int index = 0; index < kLength; index++) keys_[index].code = NULL;
+}
+
+
+ContextSlotCache::Key ContextSlotCache::keys_[ContextSlotCache::kLength];
+
+
+uint32_t ContextSlotCache::values_[ContextSlotCache::kLength];
+
+
+#ifdef DEBUG
+
+void ContextSlotCache::ValidateEntry(Code* code,
+                                     String* name,
+                                     Variable::Mode mode,
+                                     int slot_index) {
+  String* symbol;
+  if (Heap::LookupSymbolIfExists(name, &symbol)) {
+    int index = Hash(code, name);
+    Key& key = keys_[index];
+    ASSERT(key.code == code);
+    ASSERT(key.name->Equals(name));
+    Value result(values_[index]);
+    ASSERT(result.mode() == mode);
+    ASSERT(result.index() + kNotFound == slot_index);
+  }
+}
+
+
+template <class Allocator>
+static void PrintList(const char* list_name,
+                      int nof_internal_slots,
+                      List<Handle<String>, Allocator>& list) {
+  if (list.length() > 0) {
+    PrintF("\n  // %s\n", list_name);
+    if (nof_internal_slots > 0) {
+      PrintF("  %2d - %2d [internal slots]\n", 0 , nof_internal_slots - 1);
+    }
+    for (int i = 0; i < list.length(); i++) {
+      PrintF("  %2d ", i + nof_internal_slots);
+      list[i]->ShortPrint();
+      PrintF("\n");
+    }
+  }
+}
+
+
+template<class Allocator>
+void ScopeInfo<Allocator>::Print() {
+  PrintF("ScopeInfo ");
+  if (function_name_->length() > 0)
+    function_name_->ShortPrint();
+  else
+    PrintF("/* no function name */");
+  PrintF("{");
+
+  PrintList<Allocator>("parameters", 0, parameters_);
+  PrintList<Allocator>("stack slots", 0, stack_slots_);
+  PrintList<Allocator>("context slots", Context::MIN_CONTEXT_SLOTS,
+                       context_slots_);
+
+  PrintF("}\n");
+}
+#endif  // DEBUG
+
+
+// Make sure the classes get instantiated by the template system.
+template class ScopeInfo<FreeStoreAllocationPolicy>;
+template class ScopeInfo<PreallocatedStorage>;
+template class ScopeInfo<ZoneListAllocationPolicy>;
+
+} }  // namespace v8::internal
diff --git a/src/scopeinfo.h b/src/scopeinfo.h
new file mode 100644
index 0000000..28d169a
--- /dev/null
+++ b/src/scopeinfo.h
@@ -0,0 +1,236 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SCOPEINFO_H_
+#define V8_SCOPEINFO_H_
+
+#include "variables.h"
+
+namespace v8 {
+namespace internal {
+
+// Scope information represents information about a function's
+// scopes (currently only one, because we don't do any inlining)
+// and the allocation of the scope's variables. Scope information
+// is stored in a compressed form with Code objects and is used
+// at runtime (stack dumps, deoptimization, etc.).
+//
+// Historical note: In other VMs built by this team, ScopeInfo was
+// usually called DebugInfo since the information was used (among
+// other things) for on-demand debugging (Self, Smalltalk). However,
+// DebugInfo seems misleading, since this information is primarily used
+// in debugging-unrelated contexts.
+
+// Forward declared as
+// template <class Allocator = FreeStoreAllocationPolicy> class ScopeInfo;
+template<class Allocator>
+class ScopeInfo BASE_EMBEDDED {
+ public:
+  // Create a ScopeInfo instance from a scope.
+  explicit ScopeInfo(Scope* scope);
+
+  // Create a ScopeInfo instance from a Code object.
+  explicit ScopeInfo(Code* code);
+
+  // Write the ScopeInfo data into a Code object, and returns the
+  // amount of space that was needed. If no Code object is provided
+  // (NULL handle), Serialize() only returns the amount of space needed.
+  //
+  // This operation requires that the Code object has the correct amount
+  // of space for the ScopeInfo data; otherwise the operation fails (fatal
+  // error). Any existing scope info in the Code object is simply overwritten.
+  int Serialize(Code* code);
+
+  // Garbage collection support for scope info embedded in Code objects.
+  // This code is in ScopeInfo because only here we should have to know
+  // about the encoding.
+  static void IterateScopeInfo(Code* code, ObjectVisitor* v);
+
+
+  // --------------------------------------------------------------------------
+  // Lookup
+
+  Handle<String> function_name() const  { return function_name_; }
+
+  Handle<String> parameter_name(int i) const  { return parameters_[i]; }
+  int number_of_parameters() const  { return parameters_.length(); }
+
+  Handle<String> stack_slot_name(int i) const  { return stack_slots_[i]; }
+  int number_of_stack_slots() const  { return stack_slots_.length(); }
+
+  Handle<String> context_slot_name(int i) const {
+    return context_slots_[i - Context::MIN_CONTEXT_SLOTS];
+  }
+  int number_of_context_slots() const {
+    int l = context_slots_.length();
+    return l == 0 ? 0 : l + Context::MIN_CONTEXT_SLOTS;
+  }
+
+  Handle<String> LocalName(int i) const;
+  int NumberOfLocals() const;
+
+  // --------------------------------------------------------------------------
+  // The following functions provide quick access to scope info details
+  // for runtime routines w/o the need to explicitly create a ScopeInfo
+  // object.
+  //
+  // ScopeInfo is the only class which should have to know about the
+  // encoding of its information in a Code object, which is why these
+  // functions are in this class.
+
+  // Return true if the scope for code calls eval.
+  static bool CallsEval(Code* code);
+
+  // Return the number of stack slots for code.
+  static int NumberOfStackSlots(Code* code);
+
+  // Return the number of context slots for code.
+  static int NumberOfContextSlots(Code* code);
+
+  // Lookup support for scope info embedded in Code objects. Returns
+  // the stack slot index for a given slot name if the slot is
+  // present; otherwise returns a value < 0. The name must be a symbol
+  // (canonicalized).
+  static int StackSlotIndex(Code* code, String* name);
+
+  // Lookup support for scope info embedded in Code objects. Returns the
+  // context slot index for a given slot name if the slot is present; otherwise
+  // returns a value < 0. The name must be a symbol (canonicalized).
+  // If the slot is present and mode != NULL, sets *mode to the corresponding
+  // mode for that variable.
+  static int ContextSlotIndex(Code* code, String* name, Variable::Mode* mode);
+
+  // Lookup support for scope info embedded in Code objects. Returns the
+  // parameter index for a given parameter name if the parameter is present;
+  // otherwise returns a value < 0. The name must be a symbol (canonicalized).
+  static int ParameterIndex(Code* code, String* name);
+
+  // Lookup support for scope info embedded in Code objects. Returns the
+  // function context slot index if the function name is present (named
+  // function expressions, only), otherwise returns a value < 0. The name
+  // must be a symbol (canonicalized).
+  static int FunctionContextSlotIndex(Code* code, String* name);
+
+  // --------------------------------------------------------------------------
+  // Debugging support
+
+#ifdef DEBUG
+  void Print();
+#endif
+
+ private:
+  Handle<String> function_name_;
+  bool calls_eval_;
+  List<Handle<String>, Allocator > parameters_;
+  List<Handle<String>, Allocator > stack_slots_;
+  List<Handle<String>, Allocator > context_slots_;
+  List<Variable::Mode, Allocator > context_modes_;
+};
+
+class ZoneScopeInfo: public ScopeInfo<ZoneListAllocationPolicy> {
+ public:
+  // Create a ZoneScopeInfo instance from a scope.
+  explicit ZoneScopeInfo(Scope* scope)
+      : ScopeInfo<ZoneListAllocationPolicy>(scope) {}
+
+  // Create a ZoneScopeInfo instance from a Code object.
+  explicit ZoneScopeInfo(Code* code)
+      :  ScopeInfo<ZoneListAllocationPolicy>(code) {}
+};
+
+
+// Cache for mapping (code, property name) into context slot index.
+// The cache contains both positive and negative results.
+// A slot index of -1 means the property is absent.
+// Cleared at startup and prior to mark sweep collection.
+class ContextSlotCache {
+ public:
+  // Lookup context slot index for (code, name).
+  // If absent, kNotFound is returned.
+  static int Lookup(Code* code,
+                    String* name,
+                    Variable::Mode* mode);
+
+  // Update an element in the cache.
+  static void Update(Code* code,
+                     String* name,
+                     Variable::Mode mode,
+                     int slot_index);
+
+  // Clear the cache.
+  static void Clear();
+
+  static const int kNotFound = -2;
+ private:
+  inline static int Hash(Code* code, String* name);
+
+#ifdef DEBUG
+  static void ValidateEntry(Code* code,
+                            String* name,
+                            Variable::Mode mode,
+                            int slot_index);
+#endif
+
+  static const int kLength = 256;
+  struct Key {
+    Code* code;
+    String* name;
+  };
+
+  struct Value {
+    Value(Variable::Mode mode, int index) {
+      ASSERT(ModeField::is_valid(mode));
+      ASSERT(IndexField::is_valid(index));
+      value_ = ModeField::encode(mode) | IndexField::encode(index);
+      ASSERT(mode == this->mode());
+      ASSERT(index == this->index());
+    }
+
+    inline Value(uint32_t value) : value_(value) {}
+
+    uint32_t raw() { return value_; }
+
+    Variable::Mode mode() { return ModeField::decode(value_); }
+
+    int index() { return IndexField::decode(value_); }
+
+    // Bit fields in value_ (type, shift, size). Must be public so the
+    // constants can be embedded in generated code.
+    class ModeField:  public BitField<Variable::Mode, 0, 3> {};
+    class IndexField: public BitField<int,            3, 32-3> {};
+   private:
+    uint32_t value_;
+  };
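+  // Example: Value(Variable::VAR, 5).raw() packs the mode into the low 3
+  // bits and the index into the remaining bits, i.e. it equals
+  // ModeField::encode(Variable::VAR) | IndexField::encode(5).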
+
+  static Key keys_[kLength];
+  static uint32_t values_[kLength];
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_SCOPEINFO_H_
diff --git a/src/scopes.cc b/src/scopes.cc
new file mode 100644
index 0000000..25873fa
--- /dev/null
+++ b/src/scopes.cc
@@ -0,0 +1,962 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "prettyprinter.h"
+#include "scopeinfo.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// A Zone allocator for use with LocalsMap.
+
+class ZoneAllocator: public Allocator {
+ public:
+  /* nothing to do */
+  virtual ~ZoneAllocator()  {}
+
+  virtual void* New(size_t size)  { return Zone::New(size); }
+
+  /* ignored - Zone is freed in one fell swoop */
+  virtual void Delete(void* p)  {}
+};
+
+
+static ZoneAllocator LocalsMapAllocator;
+
+
+// ----------------------------------------------------------------------------
+// Implementation of LocalsMap
+//
+// Note: We are storing the handle locations as key values in the hash map.
+//       When inserting a new variable via Declare(), we rely on the fact that
+//       the handle location remains alive for the duration of that variable
+//       use. This is ensured because a Variable holding a handle with the
+//       same location exists.
+
+static bool Match(void* key1, void* key2) {
+  String* name1 = *reinterpret_cast<String**>(key1);
+  String* name2 = *reinterpret_cast<String**>(key2);
+  ASSERT(name1->IsSymbol());
+  ASSERT(name2->IsSymbol());
+  return name1 == name2;
+}
+
+
+// Dummy constructor
+VariableMap::VariableMap(bool gotta_love_static_overloading) : HashMap() {}
+
+VariableMap::VariableMap() : HashMap(Match, &LocalsMapAllocator, 8) {}
+VariableMap::~VariableMap() {}
+
+
+Variable* VariableMap::Declare(Scope* scope,
+                               Handle<String> name,
+                               Variable::Mode mode,
+                               bool is_valid_lhs,
+                               Variable::Kind kind) {
+  HashMap::Entry* p = HashMap::Lookup(name.location(), name->Hash(), true);
+  if (p->value == NULL) {
+    // The variable has not been declared yet -> insert it.
+    ASSERT(p->key == name.location());
+    p->value = new Variable(scope, name, mode, is_valid_lhs, kind);
+  }
+  return reinterpret_cast<Variable*>(p->value);
+}
+
+
+Variable* VariableMap::Lookup(Handle<String> name) {
+  HashMap::Entry* p = HashMap::Lookup(name.location(), name->Hash(), false);
+  if (p != NULL) {
+    ASSERT(*reinterpret_cast<String**>(p->key) == *name);
+    ASSERT(p->value != NULL);
+    return reinterpret_cast<Variable*>(p->value);
+  }
+  return NULL;
+}
+
+
+// ----------------------------------------------------------------------------
+// Implementation of Scope
+
+
+// Dummy constructor
+Scope::Scope(Type type)
+  : outer_scope_(NULL),
+    inner_scopes_(0),
+    type_(type),
+    scope_name_(Factory::empty_symbol()),
+    variables_(false),
+    temps_(0),
+    params_(0),
+    dynamics_(NULL),
+    unresolved_(0),
+    decls_(0),
+    receiver_(NULL),
+    function_(NULL),
+    arguments_(NULL),
+    arguments_shadow_(NULL),
+    illegal_redecl_(NULL),
+    scope_inside_with_(false),
+    scope_contains_with_(false),
+    scope_calls_eval_(false),
+    outer_scope_calls_eval_(false),
+    inner_scope_calls_eval_(false),
+    outer_scope_is_eval_scope_(false),
+    force_eager_compilation_(false),
+    num_stack_slots_(0),
+    num_heap_slots_(0) {
+}
+
+
+Scope::Scope(Scope* outer_scope, Type type)
+  : outer_scope_(outer_scope),
+    inner_scopes_(4),
+    type_(type),
+    scope_name_(Factory::empty_symbol()),
+    temps_(4),
+    params_(4),
+    dynamics_(NULL),
+    unresolved_(16),
+    decls_(4),
+    receiver_(NULL),
+    function_(NULL),
+    arguments_(NULL),
+    arguments_shadow_(NULL),
+    illegal_redecl_(NULL),
+    scope_inside_with_(false),
+    scope_contains_with_(false),
+    scope_calls_eval_(false),
+    outer_scope_calls_eval_(false),
+    inner_scope_calls_eval_(false),
+    outer_scope_is_eval_scope_(false),
+    force_eager_compilation_(false),
+    num_stack_slots_(0),
+    num_heap_slots_(0) {
+  // At some point we might want to provide outer scopes to
+  // eval scopes (by walking the stack and reading the scope info).
+  // In that case, the ASSERT below needs to be adjusted.
+  ASSERT((type == GLOBAL_SCOPE || type == EVAL_SCOPE) == (outer_scope == NULL));
+  ASSERT(!HasIllegalRedeclaration());
+}
+
+
+void Scope::Initialize(bool inside_with) {
+  // Add this scope as a new inner scope of the outer scope.
+  if (outer_scope_ != NULL) {
+    outer_scope_->inner_scopes_.Add(this);
+    scope_inside_with_ = outer_scope_->scope_inside_with_ || inside_with;
+  } else {
+    scope_inside_with_ = inside_with;
+  }
+
+  // Declare convenience variables.
+  // Declare and allocate receiver (even for the global scope, and even
+  // if naccesses_ == 0).
+  // NOTE: When loading parameters in the global scope, we must take
+  // care not to access them as properties of the global object, but
+  // instead load them directly from the stack. Currently, the only
+  // such parameter is 'this' which is passed on the stack when
+  // invoking scripts.
+  Variable* var =
+      variables_.Declare(this, Factory::this_symbol(), Variable::VAR,
+                         false, Variable::THIS);
+  var->rewrite_ = new Slot(var, Slot::PARAMETER, -1);
+  receiver_ = new VariableProxy(Factory::this_symbol(), true, false);
+  receiver_->BindTo(var);
+
+  if (is_function_scope()) {
+    // Declare 'arguments' variable which exists in all functions.
+    // Note that it might never be accessed, in which case it won't be
+    // allocated during variable allocation.
+    variables_.Declare(this, Factory::arguments_symbol(), Variable::VAR,
+                       true, Variable::ARGUMENTS);
+  }
+}
+
+
+
+Variable* Scope::LocalLookup(Handle<String> name) {
+  return variables_.Lookup(name);
+}
+
+
+Variable* Scope::Lookup(Handle<String> name) {
+  for (Scope* scope = this;
+       scope != NULL;
+       scope = scope->outer_scope()) {
+    Variable* var = scope->LocalLookup(name);
+    if (var != NULL) return var;
+  }
+  return NULL;
+}
+
+
+Variable* Scope::DeclareFunctionVar(Handle<String> name) {
+  ASSERT(is_function_scope() && function_ == NULL);
+  function_ = new Variable(this, name, Variable::CONST, true, Variable::NORMAL);
+  return function_;
+}
+
+
+Variable* Scope::DeclareLocal(Handle<String> name, Variable::Mode mode) {
+  // DYNAMIC variables are introduced during variable allocation,
+  // INTERNAL variables are allocated explicitly, and TEMPORARY
+  // variables are allocated via NewTemporary().
+  ASSERT(mode == Variable::VAR || mode == Variable::CONST);
+  return variables_.Declare(this, name, mode, true, Variable::NORMAL);
+}
+
+
+Variable* Scope::DeclareGlobal(Handle<String> name) {
+  ASSERT(is_global_scope());
+  return variables_.Declare(this, name, Variable::DYNAMIC, true,
+                            Variable::NORMAL);
+}
+
+
+void Scope::AddParameter(Variable* var) {
+  ASSERT(is_function_scope());
+  ASSERT(LocalLookup(var->name()) == var);
+  params_.Add(var);
+}
+
+
+VariableProxy* Scope::NewUnresolved(Handle<String> name, bool inside_with) {
+  // Note that we must not share the unresolved variables with
+  // the same name because they may be removed selectively via
+  // RemoveUnresolved().
+  VariableProxy* proxy = new VariableProxy(name, false, inside_with);
+  unresolved_.Add(proxy);
+  return proxy;
+}
+
+
+void Scope::RemoveUnresolved(VariableProxy* var) {
+  // Most likely (always?) any variable we want to remove
+  // was just added before, so we search backwards.
+  for (int i = unresolved_.length(); i-- > 0;) {
+    if (unresolved_[i] == var) {
+      unresolved_.Remove(i);
+      return;
+    }
+  }
+}
+
+
+VariableProxy* Scope::NewTemporary(Handle<String> name) {
+  Variable* var = new Variable(this, name, Variable::TEMPORARY, true,
+                               Variable::NORMAL);
+  VariableProxy* tmp = new VariableProxy(name, false, false);
+  tmp->BindTo(var);
+  temps_.Add(var);
+  return tmp;
+}
+
+
+void Scope::AddDeclaration(Declaration* declaration) {
+  decls_.Add(declaration);
+}
+
+
+void Scope::SetIllegalRedeclaration(Expression* expression) {
+  // Only set the illegal redeclaration expression the
+  // first time the function is called.
+  if (!HasIllegalRedeclaration()) {
+    illegal_redecl_ = expression;
+  }
+  ASSERT(HasIllegalRedeclaration());
+}
+
+
+void Scope::VisitIllegalRedeclaration(AstVisitor* visitor) {
+  ASSERT(HasIllegalRedeclaration());
+  illegal_redecl_->Accept(visitor);
+}
+
+
+template<class Allocator>
+void Scope::CollectUsedVariables(List<Variable*, Allocator>* locals) {
+  // Collect variables in this scope.
+  // Note that the function_ variable - if present - is not
+  // collected here but handled separately in ScopeInfo
+  // (which is the current user of this function).
+  for (int i = 0; i < temps_.length(); i++) {
+    Variable* var = temps_[i];
+    if (var->var_uses()->is_used()) {
+      locals->Add(var);
+    }
+  }
+  for (VariableMap::Entry* p = variables_.Start();
+       p != NULL;
+       p = variables_.Next(p)) {
+    Variable* var = reinterpret_cast<Variable*>(p->value);
+    if (var->var_uses()->is_used()) {
+      locals->Add(var);
+    }
+  }
+}
+
+
+// Make sure the method gets instantiated by the template system.
+template void Scope::CollectUsedVariables(
+    List<Variable*, FreeStoreAllocationPolicy>* locals);
+template void Scope::CollectUsedVariables(
+    List<Variable*, PreallocatedStorage>* locals);
+template void Scope::CollectUsedVariables(
+    List<Variable*, ZoneListAllocationPolicy>* locals);
+
+
+void Scope::AllocateVariables(Handle<Context> context) {
+  ASSERT(outer_scope_ == NULL);  // eval or global scopes only
+
+  // 1) Propagate scope information.
+  // If we are in an eval scope, we may have other outer scopes about
+  // which we don't know anything at this point. Thus we must be conservative
+  // and assume they may invoke eval themselves. Eventually we could capture
+  // this information in the ScopeInfo and then use it here (by traversing
+  // the call chain stack, at compile time).
+  bool eval_scope = is_eval_scope();
+  PropagateScopeInfo(eval_scope, eval_scope);
+
+  // 2) Resolve variables.
+  Scope* global_scope = NULL;
+  if (is_global_scope()) global_scope = this;
+  ResolveVariablesRecursively(global_scope, context);
+
+  // 3) Allocate variables.
+  AllocateVariablesRecursively();
+}
+
+
+bool Scope::AllowsLazyCompilation() const {
+  return !force_eager_compilation_ && HasTrivialOuterContext();
+}
+
+
+bool Scope::HasTrivialContext() const {
+  // A function scope has a trivial context if its context is always the
+  // global context. We iteratively scan out the context chain to see if
+  // there is anything that makes this scope non-trivial; otherwise we
+  // return true.
+  for (const Scope* scope = this; scope != NULL; scope = scope->outer_scope_) {
+    if (scope->is_eval_scope()) return false;
+    if (scope->scope_inside_with_) return false;
+    if (scope->num_heap_slots_ > 0) return false;
+  }
+  return true;
+}
+
+
+bool Scope::HasTrivialOuterContext() const {
+  Scope* outer = outer_scope_;
+  if (outer == NULL) return true;
+  // Note that the outer context may be trivial in general, but the current
+  // scope may be inside a 'with' statement in which case the outer context
+  // for this scope is not trivial.
+  return !scope_inside_with_ && outer->HasTrivialContext();
+}
+
+
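+// Returns the number of scopes with a heap-allocated context between this
+// scope (inclusive) and the given scope (exclusive).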
+int Scope::ContextChainLength(Scope* scope) {
+  int n = 0;
+  for (Scope* s = this; s != scope; s = s->outer_scope_) {
+    ASSERT(s != NULL);  // scope must be in the scope chain
+    if (s->num_heap_slots() > 0) n++;
+  }
+  return n;
+}
+
+
+#ifdef DEBUG
+static const char* Header(Scope::Type type) {
+  switch (type) {
+    case Scope::EVAL_SCOPE: return "eval";
+    case Scope::FUNCTION_SCOPE: return "function";
+    case Scope::GLOBAL_SCOPE: return "global";
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+static void Indent(int n, const char* str) {
+  PrintF("%*s%s", n, "", str);
+}
+
+
+static void PrintName(Handle<String> name) {
+  SmartPointer<char> s = name->ToCString(DISALLOW_NULLS);
+  PrintF("%s", *s);
+}
+
+
+static void PrintVar(PrettyPrinter* printer, int indent, Variable* var) {
+  if (var->var_uses()->is_used() || var->rewrite() != NULL) {
+    Indent(indent, Variable::Mode2String(var->mode()));
+    PrintF(" ");
+    PrintName(var->name());
+    PrintF(";  // ");
+    if (var->rewrite() != NULL) PrintF("%s, ", printer->Print(var->rewrite()));
+    if (var->is_accessed_from_inner_scope()) PrintF("inner scope access, ");
+    PrintF("var ");
+    var->var_uses()->Print();
+    PrintF(", obj ");
+    var->obj_uses()->Print();
+    PrintF("\n");
+  }
+}
+
+
+static void PrintMap(PrettyPrinter* printer, int indent, VariableMap* map) {
+  for (VariableMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) {
+    Variable* var = reinterpret_cast<Variable*>(p->value);
+    PrintVar(printer, indent, var);
+  }
+}
+
+
+void Scope::Print(int n) {
+  int n0 = (n > 0 ? n : 0);
+  int n1 = n0 + 2;  // indentation
+
+  // Print header.
+  Indent(n0, Header(type_));
+  if (scope_name_->length() > 0) {
+    PrintF(" ");
+    PrintName(scope_name_);
+  }
+
+  // Print parameters, if any.
+  if (is_function_scope()) {
+    PrintF(" (");
+    for (int i = 0; i < params_.length(); i++) {
+      if (i > 0) PrintF(", ");
+      PrintName(params_[i]->name());
+    }
+    PrintF(")");
+  }
+
+  PrintF(" {\n");
+
+  // Function name, if any (named function literals, only).
+  if (function_ != NULL) {
+    Indent(n1, "// (local) function name: ");
+    PrintName(function_->name());
+    PrintF("\n");
+  }
+
+  // Scope info.
+  if (HasTrivialOuterContext()) {
+    Indent(n1, "// scope has trivial outer context\n");
+  }
+  if (scope_inside_with_) Indent(n1, "// scope inside 'with'\n");
+  if (scope_contains_with_) Indent(n1, "// scope contains 'with'\n");
+  if (scope_calls_eval_) Indent(n1, "// scope calls 'eval'\n");
+  if (outer_scope_calls_eval_) Indent(n1, "// outer scope calls 'eval'\n");
+  if (inner_scope_calls_eval_) Indent(n1, "// inner scope calls 'eval'\n");
+  if (outer_scope_is_eval_scope_) {
+    Indent(n1, "// outer scope is 'eval' scope\n");
+  }
+  if (num_stack_slots_ > 0) {
+    Indent(n1, "// ");
+    PrintF("%d stack slots\n", num_stack_slots_);
+  }
+  if (num_heap_slots_ > 0) {
+    Indent(n1, "// ");
+    PrintF("%d heap slots\n", num_heap_slots_);
+  }
+
+  // Print locals.
+  PrettyPrinter printer;
+  Indent(n1, "// function var\n");
+  if (function_ != NULL) {
+    PrintVar(&printer, n1, function_);
+  }
+
+  Indent(n1, "// temporary vars\n");
+  for (int i = 0; i < temps_.length(); i++) {
+    PrintVar(&printer, n1, temps_[i]);
+  }
+
+  Indent(n1, "// local vars\n");
+  PrintMap(&printer, n1, &variables_);
+
+  Indent(n1, "// dynamic vars\n");
+  if (dynamics_ != NULL) {
+    PrintMap(&printer, n1, dynamics_->GetMap(Variable::DYNAMIC));
+    PrintMap(&printer, n1, dynamics_->GetMap(Variable::DYNAMIC_LOCAL));
+    PrintMap(&printer, n1, dynamics_->GetMap(Variable::DYNAMIC_GLOBAL));
+  }
+
+  // Print inner scopes (disable by providing negative n).
+  if (n >= 0) {
+    for (int i = 0; i < inner_scopes_.length(); i++) {
+      PrintF("\n");
+      inner_scopes_[i]->Print(n1);
+    }
+  }
+
+  Indent(n0, "}\n");
+}
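+// As an illustration (a sketch, not output from the original source), for a
+// scope built from `function add(a, b) { return a + b; }` Print() emits
+// roughly:
+//
+//   function add (a, b) {
+//     // scope has trivial outer context
+//     // function var
+//     // temporary vars
+//     // local vars
+//     VAR a;  // ...
+//     VAR b;  // ...
+//     // dynamic vars
+//   }
+//
+// where each variable line ends with its rewrite (if any) and its recorded
+// use counts.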
+#endif  // DEBUG
+
+
+Variable* Scope::NonLocal(Handle<String> name, Variable::Mode mode) {
+  if (dynamics_ == NULL) dynamics_ = new DynamicScopePart();
+  VariableMap* map = dynamics_->GetMap(mode);
+  Variable* var = map->Lookup(name);
+  if (var == NULL) {
+    // Declare a new non-local.
+    var = map->Declare(NULL, name, mode, true, Variable::NORMAL);
+    // Allocate it by giving it a dynamic lookup.
+    var->rewrite_ = new Slot(var, Slot::LOOKUP, -1);
+  }
+  return var;
+}
+
+
+// Lookup a variable starting with this scope. The result is either
+// the statically resolved (local!) variable belonging to an outer scope,
+// or NULL. It may be NULL because (a) we couldn't find a variable, or
+// (b) the variable found is just a guess (and may be shadowed by another
+// variable that is introduced dynamically via an 'eval' call or a 'with'
+// statement).
+Variable* Scope::LookupRecursive(Handle<String> name,
+                                 bool inner_lookup,
+                                 Variable** invalidated_local) {
+  // If we find a variable, but the current scope calls 'eval', the found
+  // variable may not be the correct one (the 'eval' may introduce a
+  // property with the same name). In that case, remember that the variable
+  // found is just a guess.
+  bool guess = scope_calls_eval_;
+
+  // Try to find the variable in this scope.
+  Variable* var = LocalLookup(name);
+
+  if (var != NULL) {
+    // We found a variable. If this is not an inner lookup, we are done.
+    // (Even if there is an 'eval' in this scope which introduces the
+    // same variable again, the resulting variable remains the same.
+    // Note that enclosing 'with' statements are handled at the call site.)
+    if (!inner_lookup)
+      return var;
+
+  } else {
+    // We did not find a variable locally. Check against the function variable,
+    // if any. We can do this for all scopes, since the function variable is
+    // only present - if at all - for function scopes.
+    //
+    // This lookup corresponds to a lookup in the "intermediate" scope sitting
+    // between this scope and the outer scope. (ECMA-262, 3rd ed., requires
+    // that the name of a named function literal is kept in an intermediate
+    // scope between this scope and the next outer scope.)
+    if (function_ != NULL && function_->name().is_identical_to(name)) {
+      var = function_;
+
+    } else if (outer_scope_ != NULL) {
+      var = outer_scope_->LookupRecursive(name, true, invalidated_local);
+      // We may have found a variable in an outer scope. However, if
+      // the current scope is inside a 'with', the actual variable may
+      // be a property introduced via the 'with' statement. Then, the
+      // variable we may have found is just a guess.
+      if (scope_inside_with_)
+        guess = true;
+    }
+
+    // If we did not find a variable, we are done.
+    if (var == NULL)
+      return NULL;
+  }
+
+  ASSERT(var != NULL);
+
+  // If this is a lookup from an inner scope, mark the variable.
+  if (inner_lookup)
+    var->is_accessed_from_inner_scope_ = true;
+
+  // If the variable we have found is just a guess, invalidate the result.
+  if (guess) {
+    *invalidated_local = var;
+    var = NULL;
+  }
+
+  return var;
+}
+
+
+void Scope::ResolveVariable(Scope* global_scope,
+                            Handle<Context> context,
+                            VariableProxy* proxy) {
+  ASSERT(global_scope == NULL || global_scope->is_global_scope());
+
+  // If the proxy is already resolved there's nothing to do
+  // (functions and consts may be resolved by the parser).
+  if (proxy->var() != NULL) return;
+
+  // Otherwise, try to resolve the variable.
+  Variable* invalidated_local = NULL;
+  Variable* var = LookupRecursive(proxy->name(), false, &invalidated_local);
+
+  if (proxy->inside_with()) {
+    // If we are inside a local 'with' statement, all bets are off
+    // and we cannot resolve the proxy to a local variable even if
+    // we found an outer matching variable.
+    // Note that we must do a lookup anyway, because if we find one,
+    // we must mark that variable as potentially accessed from this
+    // inner scope (the property may not be in the 'with' object).
+    var = NonLocal(proxy->name(), Variable::DYNAMIC);
+
+  } else {
+    // We are not inside a local 'with' statement.
+
+    if (var == NULL) {
+      // We did not find the variable. We have a global variable
+      // if we are in the global scope (we know already that we
+      // are outside a 'with' statement) or if there is no way
+      // that the variable might be introduced dynamically (through
+      // a local or outer eval() call, or an outer 'with' statement),
+      // or we don't know about the outer scope (because we are
+      // in an eval scope).
+      if (is_global_scope() ||
+          !(scope_inside_with_ || outer_scope_is_eval_scope_ ||
+            scope_calls_eval_ || outer_scope_calls_eval_)) {
+        // We must have a global variable.
+        ASSERT(global_scope != NULL);
+        var = global_scope->DeclareGlobal(proxy->name());
+
+      } else if (scope_inside_with_) {
+        // If we are inside a with statement we give up and look up
+        // the variable at runtime.
+        var = NonLocal(proxy->name(), Variable::DYNAMIC);
+
+      } else if (invalidated_local != NULL) {
+        // No with statements are involved and we found a local
+        // variable that might be shadowed by eval introduced
+        // variables.
+        var = NonLocal(proxy->name(), Variable::DYNAMIC_LOCAL);
+        var->set_local_if_not_shadowed(invalidated_local);
+
+      } else if (outer_scope_is_eval_scope_) {
+        // No with statements and we did not find a local and the code
+        // is executed with a call to eval.  The context contains
+        // scope information that we can use to determine if the
+        // variable is global if it is not shadowed by eval-introduced
+        // variables.
+        if (context->GlobalIfNotShadowedByEval(proxy->name())) {
+          var = NonLocal(proxy->name(), Variable::DYNAMIC_GLOBAL);
+
+        } else {
+          var = NonLocal(proxy->name(), Variable::DYNAMIC);
+        }
+
+      } else {
+        // No with statements and we did not find a local and the code
+        // is not executed with a call to eval.  We know that this
+        // variable is global unless it is shadowed by eval-introduced
+        // variables.
+        var = NonLocal(proxy->name(), Variable::DYNAMIC_GLOBAL);
+      }
+    }
+  }
+
+  proxy->BindTo(var);
+}
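+// For example (an illustrative sketch, not part of the original source): in
+//
+//   function f() { var x = 1; function g() { eval(s); return x; } }
+//
+// the use of 'x' inside g finds f's 'x' only as a guess (g calls eval), so
+// it is bound to a DYNAMIC_LOCAL non-local whose local_if_not_shadowed() is
+// f's 'x'; at runtime the lookup falls back to f's 'x' unless the eval
+// introduced a shadowing 'x'.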
+
+
+void Scope::ResolveVariablesRecursively(Scope* global_scope,
+                                        Handle<Context> context) {
+  ASSERT(global_scope == NULL || global_scope->is_global_scope());
+
+  // Resolve unresolved variables for this scope.
+  for (int i = 0; i < unresolved_.length(); i++) {
+    ResolveVariable(global_scope, context, unresolved_[i]);
+  }
+
+  // Resolve unresolved variables for inner scopes.
+  for (int i = 0; i < inner_scopes_.length(); i++) {
+    inner_scopes_[i]->ResolveVariablesRecursively(global_scope, context);
+  }
+}
+
+
+bool Scope::PropagateScopeInfo(bool outer_scope_calls_eval,
+                               bool outer_scope_is_eval_scope) {
+  if (outer_scope_calls_eval) {
+    outer_scope_calls_eval_ = true;
+  }
+
+  if (outer_scope_is_eval_scope) {
+    outer_scope_is_eval_scope_ = true;
+  }
+
+  bool calls_eval = scope_calls_eval_ || outer_scope_calls_eval_;
+  bool is_eval = is_eval_scope() || outer_scope_is_eval_scope_;
+  for (int i = 0; i < inner_scopes_.length(); i++) {
+    Scope* inner_scope = inner_scopes_[i];
+    if (inner_scope->PropagateScopeInfo(calls_eval, is_eval)) {
+      inner_scope_calls_eval_ = true;
+    }
+    if (inner_scope->force_eager_compilation_) {
+      force_eager_compilation_ = true;
+    }
+  }
+
+  return scope_calls_eval_ || inner_scope_calls_eval_;
+}
+
+
+bool Scope::MustAllocate(Variable* var) {
+  // Give var a read/write use if there is a chance it might be accessed
+  // via an eval() call.  This is only possible if the variable has a
+  // visible name.
+  if ((var->is_this() || var->name()->length() > 0) &&
+      (var->is_accessed_from_inner_scope_ ||
+       scope_calls_eval_ || inner_scope_calls_eval_ ||
+       scope_contains_with_)) {
+    var->var_uses()->RecordAccess(1);
+  }
+  // Global variables do not need to be allocated.
+  return !var->is_global() && var->var_uses()->is_used();
+}
+
+
+bool Scope::MustAllocateInContext(Variable* var) {
+  // If var is accessed from an inner scope, or if there is a
+  // possibility that it might be accessed from the current or an inner
+  // scope (through an eval() call), it must be allocated in the
+  // context.  Exception: temporary variables are not allocated in the
+  // context.
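+  //
+  // For example (an illustrative sketch, not part of the original source):
+  // in `function outer() { var x = 1; function inner() { return x; } }`,
+  // 'x' is accessed from the inner scope, so it must be allocated in
+  // outer()'s context (it gets a Slot::CONTEXT location); a local used only
+  // by outer() itself, with no 'eval' or 'with' involved, stays on the
+  // stack.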
+  return
+    var->mode() != Variable::TEMPORARY &&
+    (var->is_accessed_from_inner_scope_ ||
+     scope_calls_eval_ || inner_scope_calls_eval_ ||
+     scope_contains_with_ || var->is_global());
+}
+
+
+bool Scope::HasArgumentsParameter() {
+  for (int i = 0; i < params_.length(); i++) {
+    if (params_[i]->name().is_identical_to(Factory::arguments_symbol()))
+      return true;
+  }
+  return false;
+}
+
+
+void Scope::AllocateStackSlot(Variable* var) {
+  var->rewrite_ = new Slot(var, Slot::LOCAL, num_stack_slots_++);
+}
+
+
+void Scope::AllocateHeapSlot(Variable* var) {
+  var->rewrite_ = new Slot(var, Slot::CONTEXT, num_heap_slots_++);
+}
+
+
+void Scope::AllocateParameterLocals() {
+  ASSERT(is_function_scope());
+  Variable* arguments = LocalLookup(Factory::arguments_symbol());
+  ASSERT(arguments != NULL);  // functions have 'arguments' declared implicitly
+  if (MustAllocate(arguments) && !HasArgumentsParameter()) {
+    // 'arguments' is used. Unless there is also a parameter called
+    // 'arguments', we must be conservative and access all parameters via
+    // the arguments object: The i'th parameter is rewritten into
+    // '.arguments[i]' (*). If we have a parameter named 'arguments', a
+    // (new) value is always assigned to it via the function
+    // invocation. Then 'arguments' denotes that specific parameter value
+    // and cannot be used to access the parameters, which is why we don't
+    // need to rewrite in that case.
+    //
+    // (*) Instead of having a parameter called 'arguments', we may have an
+    // assignment to 'arguments' in the function body, at some arbitrary
+    // point in time (possibly through an 'eval()' call!). After that
+    // assignment any re-write of parameters would be invalid (was bug
+    // 881452). Thus, we introduce a shadow '.arguments'
+    // variable which also points to the arguments object. For rewrites we
+    // use '.arguments' which remains valid even if we assign to
+    // 'arguments'. To summarize: If we need to rewrite, we allocate an
+    // 'arguments' object dynamically upon function invocation. The compiler
+    // introduces 2 local variables 'arguments' and '.arguments', both of
+    // which originally point to the arguments object that was
+    // allocated. All parameters are rewritten into property accesses via
+    // the '.arguments' variable. Thus, any changes to properties of
+    // 'arguments' are reflected in the variables and vice versa. If the
+    // 'arguments' variable is changed, '.arguments' still points to the
+    // correct arguments object and the rewrites still work.
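+    //
+    // For example (an illustrative sketch, not part of the original
+    // source): in
+    //
+    //   function f(x, y) { return arguments[0] + x + y; }
+    //
+    // 'arguments' is used and there is no parameter named 'arguments', so
+    // 'x' is rewritten to the property access '.arguments[0]' and 'y' to
+    // '.arguments[1]', with '.arguments' bound to the same arguments object
+    // as 'arguments' itself.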
+
+    // We are using 'arguments'. Tell the code generator that it needs to
+    // allocate the arguments object by setting 'arguments_'.
+    arguments_ = new VariableProxy(Factory::arguments_symbol(), false, false);
+    arguments_->BindTo(arguments);
+
+    // We also need the '.arguments' shadow variable. Declare it and create
+    // and bind the corresponding proxy. It's ok to declare it only now
+    // because it's a local variable that is allocated after the parameters
+    // have been allocated.
+    //
+    // Note: This is "almost" a temporary variable but we cannot use
+    // NewTemporary() because the mode needs to be INTERNAL since this
+    // variable may be allocated in the heap-allocated context (temporaries
+    // are never allocated in the context).
+    Variable* arguments_shadow =
+        new Variable(this, Factory::arguments_shadow_symbol(),
+                     Variable::INTERNAL, true, Variable::ARGUMENTS);
+    arguments_shadow_ =
+        new VariableProxy(Factory::arguments_shadow_symbol(), false, false);
+    arguments_shadow_->BindTo(arguments_shadow);
+    temps_.Add(arguments_shadow);
+
+    // Allocate the parameters by rewriting them into '.arguments[i]' accesses.
+    for (int i = 0; i < params_.length(); i++) {
+      Variable* var = params_[i];
+      ASSERT(var->scope() == this);
+      if (MustAllocate(var)) {
+        if (MustAllocateInContext(var)) {
+          // It is ok to set this only now, because arguments is a local
+          // variable that is allocated after the parameters have been
+          // allocated.
+          arguments_shadow->is_accessed_from_inner_scope_ = true;
+        }
+        var->rewrite_ =
+          new Property(arguments_shadow_,
+                       new Literal(Handle<Object>(Smi::FromInt(i))),
+                       RelocInfo::kNoPosition,
+                       Property::SYNTHETIC);
+        arguments_shadow->var_uses()->RecordUses(var->var_uses());
+      }
+    }
+
+  } else {
+    // The arguments object is not used, so we can access parameters directly.
+    // The same parameter may occur multiple times in the params_ list.
+    // If it does, and if it is not copied into the context object, it must
+    // receive the highest parameter index for that parameter; thus iteration
+    // order is relevant!
+    for (int i = 0; i < params_.length(); i++) {
+      Variable* var = params_[i];
+      ASSERT(var->scope() == this);
+      if (MustAllocate(var)) {
+        if (MustAllocateInContext(var)) {
+          ASSERT(var->rewrite_ == NULL ||
+                 (var->slot() != NULL && var->slot()->type() == Slot::CONTEXT));
+          if (var->rewrite_ == NULL) {
+            // Only set the heap allocation if the parameter has not
+            // been allocated yet.
+            AllocateHeapSlot(var);
+          }
+        } else {
+          ASSERT(var->rewrite_ == NULL ||
+                 (var->slot() != NULL &&
+                  var->slot()->type() == Slot::PARAMETER));
+          // Set the parameter index always, even if the parameter
+          // was seen before! (We need to access the actual parameter
+          // supplied for the last occurrence of a multiply declared
+          // parameter.)
+          var->rewrite_ = new Slot(var, Slot::PARAMETER, i);
+        }
+      }
+    }
+  }
+}
+
+
+void Scope::AllocateNonParameterLocal(Variable* var) {
+  ASSERT(var->scope() == this);
+  ASSERT(var->rewrite_ == NULL ||
+         (!var->IsVariable(Factory::result_symbol())) ||
+         (var->slot() == NULL || var->slot()->type() != Slot::LOCAL));
+  if (var->rewrite_ == NULL && MustAllocate(var)) {
+    if (MustAllocateInContext(var)) {
+      AllocateHeapSlot(var);
+    } else {
+      AllocateStackSlot(var);
+    }
+  }
+}
+
+
+void Scope::AllocateNonParameterLocals() {
+  // All variables that have no rewrite yet are non-parameter locals.
+  for (int i = 0; i < temps_.length(); i++) {
+    AllocateNonParameterLocal(temps_[i]);
+  }
+
+  for (VariableMap::Entry* p = variables_.Start();
+       p != NULL;
+       p = variables_.Next(p)) {
+    Variable* var = reinterpret_cast<Variable*>(p->value);
+    AllocateNonParameterLocal(var);
+  }
+
+  // For now, function_ must be allocated at the very end.  If it gets
+  // allocated in the context, it must be the last slot in the context,
+  // because of the current ScopeInfo implementation (see
+  // ScopeInfo::ScopeInfo(FunctionScope* scope) constructor).
+  if (function_ != NULL) {
+    AllocateNonParameterLocal(function_);
+  }
+}
+
+
+void Scope::AllocateVariablesRecursively() {
+  // The number of slots required for variables.
+  num_stack_slots_ = 0;
+  num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
+
+  // Allocate variables for inner scopes.
+  for (int i = 0; i < inner_scopes_.length(); i++) {
+    inner_scopes_[i]->AllocateVariablesRecursively();
+  }
+
+  // Allocate variables for this scope.
+  // Parameters must be allocated first, if any.
+  if (is_function_scope()) AllocateParameterLocals();
+  AllocateNonParameterLocals();
+
+  // Allocate context if necessary.
+  bool must_have_local_context = false;
+  if (scope_calls_eval_ || scope_contains_with_) {
+    // The context for the eval() call or 'with' statement in this scope.
+    // Unless we are in the global or an eval scope, we need a local
+    // context even if we didn't statically allocate any locals in it,
+    // and the compiler will access the context variable. If we are
+    // not in an inner scope, the context is provided from the outside.
+    must_have_local_context = is_function_scope();
+  }
+
+  // If we didn't allocate any locals in the local context, then we only
+  // need the minimal number of slots if we must have a local context.
+  if (num_heap_slots_ == Context::MIN_CONTEXT_SLOTS &&
+      !must_have_local_context) {
+    num_heap_slots_ = 0;
+  }
+
+  // Allocation done.
+  ASSERT(num_heap_slots_ == 0 || num_heap_slots_ >= Context::MIN_CONTEXT_SLOTS);
+}
+
+} }  // namespace v8::internal
diff --git a/src/scopes.h b/src/scopes.h
new file mode 100644
index 0000000..fc627df
--- /dev/null
+++ b/src/scopes.h
@@ -0,0 +1,396 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SCOPES_H_
+#define V8_SCOPES_H_
+
+#include "ast.h"
+#include "hashmap.h"
+
+namespace v8 {
+namespace internal {
+
+
+// A hash map to support fast variable declaration and lookup.
+class VariableMap: public HashMap {
+ public:
+  VariableMap();
+
+  // Dummy constructor.  This constructor doesn't set up the map
+  // properly so don't use it unless you have a good reason.
+  explicit VariableMap(bool gotta_love_static_overloading);
+
+  virtual ~VariableMap();
+
+  Variable* Declare(Scope* scope,
+                    Handle<String> name,
+                    Variable::Mode mode,
+                    bool is_valid_lhs,
+                    Variable::Kind kind);
+
+  Variable* Lookup(Handle<String> name);
+};
+
+
+// The dynamic scope part holds hash maps for the variables that will
+// be looked up dynamically from within eval and with scopes. The objects
+// are allocated on-demand from Scope::NonLocal to avoid wasting memory
+// and setup time for scopes that don't need them.
+class DynamicScopePart : public ZoneObject {
+ public:
+  VariableMap* GetMap(Variable::Mode mode) {
+    int index = mode - Variable::DYNAMIC;
+    ASSERT(index >= 0 && index < 3);
+    return &maps_[index];
+  }
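+  // Note: GetMap() assumes that the three dynamic variable modes occupy
+  // consecutive values in the Variable::Mode enum, starting at
+  // Variable::DYNAMIC, so that 'mode - Variable::DYNAMIC' indexes maps_.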
+
+ private:
+  VariableMap maps_[3];
+};
+
+
+// Global invariants after AST construction: Each reference (i.e. identifier)
+// to a JavaScript variable (including global properties) is represented by a
+// VariableProxy node. Immediately after AST construction and before variable
+// allocation, most VariableProxy nodes are "unresolved", i.e. not bound to a
+// corresponding variable (though some are bound during parse time). Variable
+// allocation binds each unresolved VariableProxy to one Variable and assigns
+// a location. Note that many VariableProxy nodes may refer to the same
+// JavaScript variable.
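+//
+// For example (an illustrative sketch, not part of the original source): in
+//
+//   var x = 1;
+//   function f() { return x + x; }
+//
+// the two uses of 'x' inside f are two distinct VariableProxy nodes, and
+// variable allocation binds both of them to the single Variable declared
+// for 'x' in the global scope.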
+
+class Scope: public ZoneObject {
+ public:
+  // ---------------------------------------------------------------------------
+  // Construction
+
+  enum Type {
+    EVAL_SCOPE,     // the top-level scope for an 'eval' source
+    FUNCTION_SCOPE,  // the top-level scope for a function
+    GLOBAL_SCOPE    // the top-level scope for a program or a top-level eval
+  };
+
+  Scope(Scope* outer_scope, Type type);
+
+  virtual ~Scope() { }
+
+  // The scope name is only used for printing/debugging.
+  void SetScopeName(Handle<String> scope_name)  { scope_name_ = scope_name; }
+
+  void Initialize(bool inside_with);
+
+
+  // ---------------------------------------------------------------------------
+  // Declarations
+
+  // Lookup a variable in this scope. Returns the variable or NULL if not found.
+  virtual Variable* LocalLookup(Handle<String> name);
+
+  // Lookup a variable in this scope or outer scopes.
+  // Returns the variable or NULL if not found.
+  virtual Variable* Lookup(Handle<String> name);
+
+  // Declare the function variable for a function literal. This variable
+  // is in an intermediate scope between this function scope and the
+  // outer scope. Only possible for function scopes; at most one variable.
+  Variable* DeclareFunctionVar(Handle<String> name);
+
+  // Declare a local variable in this scope. If the variable has been
+  // declared before, the previously declared variable is returned.
+  virtual Variable* DeclareLocal(Handle<String> name, Variable::Mode mode);
+
+  // Declare an implicit global variable in this scope which must be a
+  // global scope.  The variable was introduced (possibly from an inner
+  // scope) by a reference to an unresolved variable with no intervening
+  // with statements or eval calls.
+  Variable* DeclareGlobal(Handle<String> name);
+
+  // Add a parameter to the parameter list. The parameter must have been
+  // declared via Declare. The same parameter may occur more than once in
+  // the parameter list; they must be added in source order, from left to
+  // right.
+  void AddParameter(Variable* var);
+
+  // Create a new unresolved variable.
+  virtual VariableProxy* NewUnresolved(Handle<String> name, bool inside_with);
+
+  // Remove an unresolved variable. During parsing, an unresolved variable
+  // may have been added optimistically, but then only the variable name
+  // was used (typically for labels). If the variable was not declared, the
+  // addition introduced a new unresolved variable which may end up being
+  // allocated globally as a "ghost" variable. RemoveUnresolved removes
+  // such a variable again if it was added; otherwise this is a no-op.
+  void RemoveUnresolved(VariableProxy* var);
+
+  // Creates a new temporary variable in this scope and binds a proxy to it.
+  // The name is only used for printing and cannot be used to find the variable.
+  // In particular, the only way to get hold of the temporary is by keeping the
+  // VariableProxy* around.
+  virtual VariableProxy* NewTemporary(Handle<String> name);
+
+  // Adds the specific declaration node to the list of declarations in
+  // this scope. The declarations are processed as part of entering
+  // the scope; see codegen.cc:ProcessDeclarations.
+  void AddDeclaration(Declaration* declaration);
+
+  // ---------------------------------------------------------------------------
+  // Illegal redeclaration support.
+
+  // Set an expression node that will be executed when the scope is
+  // entered. We only keep track of one illegal redeclaration node per
+  // scope - the first one - so if you try to set it multiple times
+  // the additional requests will be silently ignored.
+  void SetIllegalRedeclaration(Expression* expression);
+
+  // Visit the illegal redeclaration expression. Do not call if the
+  // scope doesn't have an illegal redeclaration node.
+  void VisitIllegalRedeclaration(AstVisitor* visitor);
+
+  // Check if the scope has (at least) one illegal redeclaration.
+  bool HasIllegalRedeclaration() const { return illegal_redecl_ != NULL; }
+
+
+  // ---------------------------------------------------------------------------
+  // Scope-specific info.
+
+  // Inform the scope that the corresponding code contains a with statement.
+  void RecordWithStatement()  { scope_contains_with_ = true; }
+
+  // Inform the scope that the corresponding code contains an eval call.
+  void RecordEvalCall()  { scope_calls_eval_ = true; }
+
+
+  // ---------------------------------------------------------------------------
+  // Predicates.
+
+  // Specific scope types.
+  bool is_eval_scope() const  { return type_ == EVAL_SCOPE; }
+  bool is_function_scope() const  { return type_ == FUNCTION_SCOPE; }
+  bool is_global_scope() const  { return type_ == GLOBAL_SCOPE; }
+
+  // Information about which scopes call eval.
+  bool calls_eval() const  { return scope_calls_eval_; }
+  bool outer_scope_calls_eval() const  { return outer_scope_calls_eval_; }
+
+  // Is this scope inside a with statement.
+  bool inside_with() const  { return scope_inside_with_; }
+  // Does this scope contain a with statement.
+  bool contains_with() const  { return scope_contains_with_; }
+
+  // The scope immediately surrounding this scope, or NULL.
+  Scope* outer_scope() const  { return outer_scope_; }
+
+  // ---------------------------------------------------------------------------
+  // Accessors.
+
+  // The variable corresponding to the (function) receiver.
+  VariableProxy* receiver() const  { return receiver_; }
+
+  // The variable holding the function literal for named function
+  // literals, or NULL.
+  // Only valid for function scopes.
+  Variable* function() const  {
+    ASSERT(is_function_scope());
+    return function_;
+  }
+
+  // Parameters. The left-most parameter has index 0.
+  // Only valid for function scopes.
+  Variable* parameter(int index) const  {
+    ASSERT(is_function_scope());
+    return params_[index];
+  }
+
+  int num_parameters() const  { return params_.length(); }
+
+  // The local variable 'arguments' if we need to allocate it; NULL otherwise.
+  // If arguments() exists, arguments_shadow() exists, too.
+  VariableProxy* arguments()  const  { return arguments_; }
+
+  // The '.arguments' shadow variable if we need to allocate it; NULL otherwise.
+  // If arguments_shadow() exists, arguments() exists, too.
+  VariableProxy* arguments_shadow()  const  { return arguments_shadow_; }
+
+  // Declarations list.
+  ZoneList<Declaration*>* declarations() { return &decls_; }
+
+
+
+  // ---------------------------------------------------------------------------
+  // Variable allocation.
+
+  // Collect all used locals in this scope.
+  template<class Allocator>
+  void CollectUsedVariables(List<Variable*, Allocator>* locals);
+
+  // Resolve and fill in the allocation information for all variables
+  // in this scope. Must be called *after* all scopes have been
+  // processed (parsed) to ensure that unresolved variables can be
+  // resolved properly.
+  //
+  // In the case of code compiled and run using 'eval', the context
+  // parameter is the context in which eval was called.  In all other
+  // cases the context parameter is an empty handle.
+  void AllocateVariables(Handle<Context> context);
+
+  // Result of variable allocation.
+  int num_stack_slots() const  { return num_stack_slots_; }
+  int num_heap_slots() const  { return num_heap_slots_; }
+
+  // Make sure this scope and all outer scopes are eagerly compiled.
+  void ForceEagerCompilation()  { force_eager_compilation_ = true; }
+
+  // Determine if we can use lazy compilation for this scope.
+  bool AllowsLazyCompilation() const;
+
+  // True if the outer context of this scope is always the global context.
+  bool HasTrivialOuterContext() const;
+
+  // The number of contexts between this and scope; zero if this == scope.
+  int ContextChainLength(Scope* scope);
+
+
+  // ---------------------------------------------------------------------------
+  // Debugging.
+
+#ifdef DEBUG
+  void Print(int n = 0);  // n = indentation; n < 0 => don't print recursively
+#endif
+
+  // ---------------------------------------------------------------------------
+  // Implementation.
+ protected:
+  friend class ParserFactory;
+
+  explicit Scope(Type type);
+
+  // Scope tree.
+  Scope* outer_scope_;  // the immediately enclosing outer scope, or NULL
+  ZoneList<Scope*> inner_scopes_;  // the immediately enclosed inner scopes
+
+  // The scope type.
+  Type type_;
+
+  // Debugging support.
+  Handle<String> scope_name_;
+
+  // The variables declared in this scope:
+  //
+  // All user-declared variables (incl. parameters).  For global scopes
+  // variables may be implicitly 'declared' by being used (possibly in
+  // an inner scope) with no intervening with statements or eval calls.
+  VariableMap variables_;
+  // Compiler-allocated (user-invisible) temporaries.
+  ZoneList<Variable*> temps_;
+  // Parameter list in source order.
+  ZoneList<Variable*> params_;
+  // Variables that must be looked up dynamically.
+  DynamicScopePart* dynamics_;
+  // Unresolved variables referred to from this scope.
+  ZoneList<VariableProxy*> unresolved_;
+  // Declarations.
+  ZoneList<Declaration*> decls_;
+  // Convenience variable.
+  VariableProxy* receiver_;
+  // Function variable, if any; function scopes only.
+  Variable* function_;
+  // Convenience variable; function scopes only.
+  VariableProxy* arguments_;
+  // Convenience variable; function scopes only.
+  VariableProxy* arguments_shadow_;
+
+  // Illegal redeclaration.
+  Expression* illegal_redecl_;
+
+  // Scope-specific information.
+  bool scope_inside_with_;  // this scope is inside a 'with' of some outer scope
+  bool scope_contains_with_;  // this scope contains a 'with' statement
+  bool scope_calls_eval_;  // this scope contains an 'eval' call
+
+  // Computed via PropagateScopeInfo.
+  bool outer_scope_calls_eval_;
+  bool inner_scope_calls_eval_;
+  bool outer_scope_is_eval_scope_;
+  bool force_eager_compilation_;
+
+  // Computed via AllocateVariables; function scopes only.
+  int num_stack_slots_;
+  int num_heap_slots_;
+
+  // Create a non-local variable with a given name.
+  // These variables are looked up dynamically at runtime.
+  Variable* NonLocal(Handle<String> name, Variable::Mode mode);
+
+  // Variable resolution.
+  Variable* LookupRecursive(Handle<String> name,
+                            bool inner_lookup,
+                            Variable** invalidated_local);
+  void ResolveVariable(Scope* global_scope,
+                       Handle<Context> context,
+                       VariableProxy* proxy);
+  void ResolveVariablesRecursively(Scope* global_scope,
+                                   Handle<Context> context);
+
+  // Scope analysis.
+  bool PropagateScopeInfo(bool outer_scope_calls_eval,
+                          bool outer_scope_is_eval_scope);
+  bool HasTrivialContext() const;
+
+  // Predicates.
+  bool MustAllocate(Variable* var);
+  bool MustAllocateInContext(Variable* var);
+  bool HasArgumentsParameter();
+
+  // Variable allocation.
+  void AllocateStackSlot(Variable* var);
+  void AllocateHeapSlot(Variable* var);
+  void AllocateParameterLocals();
+  void AllocateNonParameterLocal(Variable* var);
+  void AllocateNonParameterLocals();
+  void AllocateVariablesRecursively();
+};
+
+
+class DummyScope : public Scope {
+ public:
+  DummyScope() : Scope(GLOBAL_SCOPE) {
+    outer_scope_ = this;
+  }
+
+  virtual Variable* Lookup(Handle<String> name)  { return NULL; }
+  virtual Variable* Declare(Handle<String> name, Variable::Mode mode) {
+    return NULL;
+  }
+  virtual VariableProxy* NewUnresolved(Handle<String> name, bool inside_with) {
+    return NULL;
+  }
+  virtual VariableProxy* NewTemporary(Handle<String> name)  { return NULL; }
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_SCOPES_H_
diff --git a/src/serialize.cc b/src/serialize.cc
new file mode 100644
index 0000000..e0ee4bd
--- /dev/null
+++ b/src/serialize.cc
@@ -0,0 +1,1716 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "accessors.h"
+#include "api.h"
+#include "execution.h"
+#include "global-handles.h"
+#include "ic-inl.h"
+#include "natives.h"
+#include "platform.h"
+#include "runtime.h"
+#include "serialize.h"
+#include "stub-cache.h"
+#include "v8threads.h"
+
+namespace v8 {
+namespace internal {
+
+// 32-bit encoding: a RelativeAddress must be able to fit in a
+// pointer: it is encoded as an Address with (from LS to MS bits):
+// - 2 bits identifying this as a HeapObject.
+// - 4 bits to encode the AllocationSpace (including special values for
+//   code and fixed arrays in LO space)
+// - 26 bits identifying a word in the space, in one of three formats:
+// - paged spaces: 15 bits of page number, 11 bits of word offset in page
+// - NEW space:    26 bits of word offset
+// - LO space:     26 bits of page number
+
+const int kSpaceShift = kHeapObjectTagSize;
+const int kSpaceBits = 4;
+const int kSpaceMask = (1 << kSpaceBits) - 1;
+
+const int kOffsetShift = kSpaceShift + kSpaceBits;
+const int kOffsetBits = 11;
+const int kOffsetMask = (1 << kOffsetBits) - 1;
+
+const int kPageShift = kOffsetShift + kOffsetBits;
+const int kPageBits = 32 - (kOffsetBits + kSpaceBits + kHeapObjectTagSize);
+const int kPageMask = (1 << kPageBits) - 1;
+
+const int kPageAndOffsetShift = kOffsetShift;
+const int kPageAndOffsetBits = kPageBits + kOffsetBits;
+const int kPageAndOffsetMask = (1 << kPageAndOffsetBits) - 1;
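+
+// As a worked example (an illustrative sketch, not part of the original
+// source): a word at byte offset 64 of page 3 in OLD_DATA_SPACE is encoded
+// as
+//
+//   (3 << kPageShift) | ((64 >> kObjectAlignmentBits) << kOffsetShift) |
+//       (OLD_DATA_SPACE << kSpaceShift) | kHeapObjectTag
+//
+// i.e. the page index in the top bits, the word (not byte) offset in the
+// middle, and the space id plus heap-object tag in the low six bits; see
+// RelativeAddress::Encode() below.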
+
+// These values are special allocation space tags used for
+// serialization.
+// Mark the pages executable on platforms that support it.
+const int kLargeCode = LAST_SPACE + 1;
+// Allocate extra remembered-set bits.
+const int kLargeFixedArray = LAST_SPACE + 2;
+
+
+static inline AllocationSpace GetSpace(Address addr) {
+  const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
+  int space_number = (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
+  if (space_number > LAST_SPACE) space_number = LO_SPACE;
+  return static_cast<AllocationSpace>(space_number);
+}
+
+
+static inline bool IsLargeExecutableObject(Address addr) {
+  const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
+  const int space_number =
+      (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
+  return (space_number == kLargeCode);
+}
+
+
+static inline bool IsLargeFixedArray(Address addr) {
+  const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
+  const int space_number =
+      (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
+  return (space_number == kLargeFixedArray);
+}
+
+
+static inline int PageIndex(Address addr) {
+  const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
+  return static_cast<int>(encoded >> kPageShift) & kPageMask;
+}
+
+
+static inline int PageOffset(Address addr) {
+  const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
+  const int offset = static_cast<int>(encoded >> kOffsetShift) & kOffsetMask;
+  return offset << kObjectAlignmentBits;
+}
+
+
+static inline int NewSpaceOffset(Address addr) {
+  const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
+  const int page_offset =
+      static_cast<int>(encoded >> kPageAndOffsetShift) & kPageAndOffsetMask;
+  return page_offset << kObjectAlignmentBits;
+}
+
+
+static inline int LargeObjectIndex(Address addr) {
+  const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
+  return static_cast<int>(encoded >> kPageAndOffsetShift) & kPageAndOffsetMask;
+}
+
+
+// A RelativeAddress encodes a heap address that is independent of
+// the actual memory addresses in real heap. The general case (for the
+// OLD, CODE and MAP spaces) is as a (space id, page number, page offset)
+// triple. The NEW space has page number == 0, because there are no
+// pages. The LARGE_OBJECT space has page offset = 0, since there is
+// exactly one object per page.  RelativeAddresses are encodable as
+// Addresses, so that they can replace the map() pointers of
+// HeapObjects. The encoded Addresses are themselves tagged as HeapObjects
+// and allow for marking (see is_marked(), mark(), clear_mark()) as
+// used by the Mark-Compact collector.
+
+class RelativeAddress {
+ public:
+  RelativeAddress(AllocationSpace space,
+                  int page_index,
+                  int page_offset)
+  : space_(space), page_index_(page_index), page_offset_(page_offset)  {
+    // Assert that the space encoding (plus the two pseudo-spaces for
+    // special large objects) fits in the available bits.
+    ASSERT(((LAST_SPACE + 2) & ~kSpaceMask) == 0);
+    ASSERT(space <= LAST_SPACE && space >= 0);
+  }
+
+  // Return the encoding of 'this' as an Address. Decode with constructor.
+  Address Encode() const;
+
+  AllocationSpace space() const {
+    if (space_ > LAST_SPACE) return LO_SPACE;
+    return static_cast<AllocationSpace>(space_);
+  }
+  int page_index() const { return page_index_; }
+  int page_offset() const { return page_offset_; }
+
+  bool in_paged_space() const {
+    return space_ == CODE_SPACE ||
+           space_ == OLD_POINTER_SPACE ||
+           space_ == OLD_DATA_SPACE ||
+           space_ == MAP_SPACE ||
+           space_ == CELL_SPACE;
+  }
+
+  void next_address(int offset) { page_offset_ += offset; }
+  void next_page(int init_offset = 0) {
+    page_index_++;
+    page_offset_ = init_offset;
+  }
+
+#ifdef DEBUG
+  void Verify();
+#endif
+
+  void set_to_large_code_object() {
+    ASSERT(space_ == LO_SPACE);
+    space_ = kLargeCode;
+  }
+  void set_to_large_fixed_array() {
+    ASSERT(space_ == LO_SPACE);
+    space_ = kLargeFixedArray;
+  }
+
+
+ private:
+  int space_;
+  int page_index_;
+  int page_offset_;
+};
+
+
+Address RelativeAddress::Encode() const {
+  ASSERT(page_index_ >= 0);
+  int word_offset = 0;
+  int result = 0;
+  switch (space_) {
+    case MAP_SPACE:
+    case CELL_SPACE:
+    case OLD_POINTER_SPACE:
+    case OLD_DATA_SPACE:
+    case CODE_SPACE:
+      ASSERT_EQ(0, page_index_ & ~kPageMask);
+      word_offset = page_offset_ >> kObjectAlignmentBits;
+      ASSERT_EQ(0, word_offset & ~kOffsetMask);
+      result = (page_index_ << kPageShift) | (word_offset << kOffsetShift);
+      break;
+    case NEW_SPACE:
+      ASSERT_EQ(0, page_index_);
+      word_offset = page_offset_ >> kObjectAlignmentBits;
+      ASSERT_EQ(0, word_offset & ~kPageAndOffsetMask);
+      result = word_offset << kPageAndOffsetShift;
+      break;
+    case LO_SPACE:
+    case kLargeCode:
+    case kLargeFixedArray:
+      ASSERT_EQ(0, page_offset_);
+      ASSERT_EQ(0, page_index_ & ~kPageAndOffsetMask);
+      result = page_index_ << kPageAndOffsetShift;
+      break;
+  }
+  // OR in AllocationSpace and kHeapObjectTag
+  ASSERT_EQ(0, space_ & ~kSpaceMask);
+  result |= (space_ << kSpaceShift) | kHeapObjectTag;
+  return reinterpret_cast<Address>(result);
+}
+
+
+#ifdef DEBUG
+void RelativeAddress::Verify() {
+  ASSERT(page_offset_ >= 0 && page_index_ >= 0);
+  switch (space_) {
+    case MAP_SPACE:
+    case CELL_SPACE:
+    case OLD_POINTER_SPACE:
+    case OLD_DATA_SPACE:
+    case CODE_SPACE:
+      ASSERT(Page::kObjectStartOffset <= page_offset_ &&
+             page_offset_ <= Page::kPageSize);
+      break;
+    case NEW_SPACE:
+      ASSERT(page_index_ == 0);
+      break;
+    case LO_SPACE:
+    case kLargeCode:
+    case kLargeFixedArray:
+      ASSERT(page_offset_ == 0);
+      break;
+  }
+}
+#endif
+
+enum GCTreatment {
+  DataObject,     // Object that cannot contain a reference to new space.
+  PointerObject,  // Object that can contain a reference to new space.
+  CodeObject      // Object that contains executable code.
+};
+
+// A SimulatedHeapSpace simulates the allocation of objects in a page in
+// the heap. It uses linear allocation - that is, it doesn't simulate the
+// use of a free list. This simulated
+// allocation must exactly match that done by Heap.
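+//
+// For example (an illustrative sketch, not part of the original source):
+// starting from InitEmptyHeap(OLD_DATA_SPACE), two successive Allocate()
+// calls of 24 and 40 bytes return relative addresses on page 0 at offsets
+// Page::kObjectStartOffset and Page::kObjectStartOffset + 24 (assuming 24
+// is already object-size aligned); a fresh page is started only when an
+// allocation would cross Page::kPageSize.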
+
+class SimulatedHeapSpace {
+ public:
+  // The default constructor initializes to an invalid state.
+  SimulatedHeapSpace(): current_(LAST_SPACE, -1, -1) {}
+
+  // Sets 'this' to the first address in 'space' that would be
+  // returned by allocation in an empty heap.
+  void InitEmptyHeap(AllocationSpace space);
+
+  // Sets 'this' to the next address in 'space' that would be returned
+  // by allocation in the current heap. Intended only for testing
+  // serialization and deserialization in the current address space.
+  void InitCurrentHeap(AllocationSpace space);
+
+  // Returns the RelativeAddress where the next
+  // object of 'size' bytes will be allocated, and updates 'this' to
+  // point to the next free address beyond that object.
+  RelativeAddress Allocate(int size, GCTreatment special_gc_treatment);
+
+ private:
+  RelativeAddress current_;
+};
+
+
+void SimulatedHeapSpace::InitEmptyHeap(AllocationSpace space) {
+  switch (space) {
+    case MAP_SPACE:
+    case CELL_SPACE:
+    case OLD_POINTER_SPACE:
+    case OLD_DATA_SPACE:
+    case CODE_SPACE:
+      current_ = RelativeAddress(space, 0, Page::kObjectStartOffset);
+      break;
+    case NEW_SPACE:
+    case LO_SPACE:
+      current_ = RelativeAddress(space, 0, 0);
+      break;
+  }
+}
+
+
+void SimulatedHeapSpace::InitCurrentHeap(AllocationSpace space) {
+  switch (space) {
+    case MAP_SPACE:
+    case CELL_SPACE:
+    case OLD_POINTER_SPACE:
+    case OLD_DATA_SPACE:
+    case CODE_SPACE: {
+      PagedSpace* ps;
+      if (space == MAP_SPACE) {
+        ps = Heap::map_space();
+      } else if (space == CELL_SPACE) {
+        ps = Heap::cell_space();
+      } else if (space == OLD_POINTER_SPACE) {
+        ps = Heap::old_pointer_space();
+      } else if (space == OLD_DATA_SPACE) {
+        ps = Heap::old_data_space();
+      } else {
+        ASSERT(space == CODE_SPACE);
+        ps = Heap::code_space();
+      }
+      Address top = ps->top();
+      Page* top_page = Page::FromAllocationTop(top);
+      int page_index = 0;
+      PageIterator it(ps, PageIterator::PAGES_IN_USE);
+      while (it.has_next()) {
+        if (it.next() == top_page) break;
+        page_index++;
+      }
+      current_ = RelativeAddress(space,
+                                 page_index,
+                                 top_page->Offset(top));
+      break;
+    }
+    case NEW_SPACE:
+      current_ = RelativeAddress(space,
+                                 0,
+                                 Heap::NewSpaceTop() - Heap::NewSpaceStart());
+      break;
+    case LO_SPACE:
+      int page_index = 0;
+      for (LargeObjectIterator it(Heap::lo_space()); it.has_next(); it.next()) {
+        page_index++;
+      }
+      current_ = RelativeAddress(space, page_index, 0);
+      break;
+  }
+}
+
+
+RelativeAddress SimulatedHeapSpace::Allocate(int size,
+                                             GCTreatment special_gc_treatment) {
+#ifdef DEBUG
+  current_.Verify();
+#endif
+  int alloc_size = OBJECT_SIZE_ALIGN(size);
+  if (current_.in_paged_space() &&
+      current_.page_offset() + alloc_size > Page::kPageSize) {
+    ASSERT(alloc_size <= Page::kMaxHeapObjectSize);
+    current_.next_page(Page::kObjectStartOffset);
+  }
+  RelativeAddress result = current_;
+  if (current_.space() == LO_SPACE) {
+    current_.next_page();
+    if (special_gc_treatment == CodeObject) {
+      result.set_to_large_code_object();
+    } else if (special_gc_treatment == PointerObject) {
+      result.set_to_large_fixed_array();
+    }
+  } else {
+    current_.next_address(alloc_size);
+  }
+#ifdef DEBUG
+  current_.Verify();
+  result.Verify();
+#endif
+  return result;
+}
+
+// -----------------------------------------------------------------------------
+// Coding of external references.
+
+// The encoding of an external reference. The type is in the high word.
+// The id is in the low word.
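+//
+// For example (an illustrative sketch, not part of the original source): a
+// reference of type RUNTIME_FUNCTION with id 7 is encoded as
+// (RUNTIME_FUNCTION << 16) | 7, so a decoder can recover the type from the
+// high 16 bits and the id from the low 16 bits.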
+static uint32_t EncodeExternal(TypeCode type, uint16_t id) {
+  return static_cast<uint32_t>(type) << 16 | id;
+}
+
+
+static int* GetInternalPointer(StatsCounter* counter) {
+  // All counters refer to dummy_counter, if deserializing happens without
+  // setting up counters.
+  static int dummy_counter = 0;
+  return counter->Enabled() ? counter->GetInternalPointer() : &dummy_counter;
+}
+
+
+// ExternalReferenceTable is a helper class that defines the relationship
+// between external references and their encodings. It is used to build
+// hashmaps in ExternalReferenceEncoder and ExternalReferenceDecoder.
+class ExternalReferenceTable {
+ public:
+  static ExternalReferenceTable* instance() {
+    if (!instance_) instance_ = new ExternalReferenceTable();
+    return instance_;
+  }
+
+  int size() const { return refs_.length(); }
+
+  Address address(int i) { return refs_[i].address; }
+
+  uint32_t code(int i) { return refs_[i].code; }
+
+  const char* name(int i) { return refs_[i].name; }
+
+  int max_id(int code) { return max_id_[code]; }
+
+ private:
+  static ExternalReferenceTable* instance_;
+
+  ExternalReferenceTable() : refs_(64) { PopulateTable(); }
+  ~ExternalReferenceTable() { }
+
+  struct ExternalReferenceEntry {
+    Address address;
+    uint32_t code;
+    const char* name;
+  };
+
+  void PopulateTable();
+
+  // For a few types of references, we can get their address from their id.
+  void AddFromId(TypeCode type, uint16_t id, const char* name);
+
+  // For other types of references, the caller will figure out the address.
+  void Add(Address address, TypeCode type, uint16_t id, const char* name);
+
+  List<ExternalReferenceEntry> refs_;
+  int max_id_[kTypeCodeCount];
+};
+
+
+ExternalReferenceTable* ExternalReferenceTable::instance_ = NULL;
+
+
+void ExternalReferenceTable::AddFromId(TypeCode type,
+                                       uint16_t id,
+                                       const char* name) {
+  Address address;
+  switch (type) {
+    case C_BUILTIN: {
+      ExternalReference ref(static_cast<Builtins::CFunctionId>(id));
+      address = ref.address();
+      break;
+    }
+    case BUILTIN: {
+      ExternalReference ref(static_cast<Builtins::Name>(id));
+      address = ref.address();
+      break;
+    }
+    case RUNTIME_FUNCTION: {
+      ExternalReference ref(static_cast<Runtime::FunctionId>(id));
+      address = ref.address();
+      break;
+    }
+    case IC_UTILITY: {
+      ExternalReference ref(IC_Utility(static_cast<IC::UtilityId>(id)));
+      address = ref.address();
+      break;
+    }
+    default:
+      UNREACHABLE();
+      return;
+  }
+  Add(address, type, id, name);
+}
+
+
+void ExternalReferenceTable::Add(Address address,
+                                 TypeCode type,
+                                 uint16_t id,
+                                 const char* name) {
+  CHECK_NE(NULL, address);
+  ExternalReferenceEntry entry;
+  entry.address = address;
+  entry.code = EncodeExternal(type, id);
+  entry.name = name;
+  CHECK_NE(0, entry.code);
+  refs_.Add(entry);
+  if (id > max_id_[type]) max_id_[type] = id;
+}
+
+
+void ExternalReferenceTable::PopulateTable() {
+  for (int type_code = 0; type_code < kTypeCodeCount; type_code++) {
+    max_id_[type_code] = 0;
+  }
+
+  // The following populates all of the different type of external references
+  // into the ExternalReferenceTable.
+  //
+  // NOTE: This function was originally 100k of code.  It has since been
+  // rewritten to be mostly table driven, as the callback macro style tends
+  // to cause code bloat very easily.  Please be careful in the future when
+  // adding new references.
+
+  struct RefTableEntry {
+    TypeCode type;
+    uint16_t id;
+    const char* name;
+  };
+
+  static const RefTableEntry ref_table[] = {
+  // Builtins
+#define DEF_ENTRY_C(name) \
+  { C_BUILTIN, \
+    Builtins::c_##name, \
+    "Builtins::" #name },
+
+  BUILTIN_LIST_C(DEF_ENTRY_C)
+#undef DEF_ENTRY_C
+
+#define DEF_ENTRY_C(name) \
+  { BUILTIN, \
+    Builtins::name, \
+    "Builtins::" #name },
+#define DEF_ENTRY_A(name, kind, state) DEF_ENTRY_C(name)
+
+  BUILTIN_LIST_C(DEF_ENTRY_C)
+  BUILTIN_LIST_A(DEF_ENTRY_A)
+  BUILTIN_LIST_DEBUG_A(DEF_ENTRY_A)
+#undef DEF_ENTRY_C
+#undef DEF_ENTRY_A
+
+  // Runtime functions
+#define RUNTIME_ENTRY(name, nargs, ressize) \
+  { RUNTIME_FUNCTION, \
+    Runtime::k##name, \
+    "Runtime::" #name },
+
+  RUNTIME_FUNCTION_LIST(RUNTIME_ENTRY)
+#undef RUNTIME_ENTRY
+
+  // IC utilities
+#define IC_ENTRY(name) \
+  { IC_UTILITY, \
+    IC::k##name, \
+    "IC::" #name },
+
+  IC_UTIL_LIST(IC_ENTRY)
+#undef IC_ENTRY
+  };  // end of ref_table[].
+
+  for (size_t i = 0; i < ARRAY_SIZE(ref_table); ++i) {
+    AddFromId(ref_table[i].type, ref_table[i].id, ref_table[i].name);
+  }
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Debug addresses
+  Add(Debug_Address(Debug::k_after_break_target_address).address(),
+      DEBUG_ADDRESS,
+      Debug::k_after_break_target_address << kDebugIdShift,
+      "Debug::after_break_target_address()");
+  Add(Debug_Address(Debug::k_debug_break_return_address).address(),
+      DEBUG_ADDRESS,
+      Debug::k_debug_break_return_address << kDebugIdShift,
+      "Debug::debug_break_return_address()");
+  const char* debug_register_format = "Debug::register_address(%i)";
+  size_t dr_format_length = strlen(debug_register_format);
+  for (int i = 0; i < kNumJSCallerSaved; ++i) {
+    Vector<char> name = Vector<char>::New(dr_format_length + 1);
+    OS::SNPrintF(name, debug_register_format, i);
+    Add(Debug_Address(Debug::k_register_address, i).address(),
+        DEBUG_ADDRESS,
+        Debug::k_register_address << kDebugIdShift | i,
+        name.start());
+  }
+#endif
+
+  // Stat counters
+  struct StatsRefTableEntry {
+    StatsCounter* counter;
+    uint16_t id;
+    const char* name;
+  };
+
+  static const StatsRefTableEntry stats_ref_table[] = {
+#define COUNTER_ENTRY(name, caption) \
+  { &Counters::name, \
+    Counters::k_##name, \
+    "Counters::" #name },
+
+  STATS_COUNTER_LIST_1(COUNTER_ENTRY)
+  STATS_COUNTER_LIST_2(COUNTER_ENTRY)
+#undef COUNTER_ENTRY
+  };  // end of stats_ref_table[].
+
+  for (size_t i = 0; i < ARRAY_SIZE(stats_ref_table); ++i) {
+    Add(reinterpret_cast<Address>(
+            GetInternalPointer(stats_ref_table[i].counter)),
+        STATS_COUNTER,
+        stats_ref_table[i].id,
+        stats_ref_table[i].name);
+  }
+
+  // Top addresses
+  const char* top_address_format = "Top::get_address_from_id(%i)";
+  size_t top_format_length = strlen(top_address_format);
+  for (uint16_t i = 0; i < Top::k_top_address_count; ++i) {
+    Vector<char> name = Vector<char>::New(top_format_length + 1);
+    const char* chars = name.start();
+    OS::SNPrintF(name, top_address_format, i);
+    Add(Top::get_address_from_id((Top::AddressId)i), TOP_ADDRESS, i, chars);
+  }
+
+  // Extensions
+  Add(FUNCTION_ADDR(GCExtension::GC), EXTENSION, 1,
+      "GCExtension::GC");
+
+  // Accessors
+#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \
+  Add((Address)&Accessors::name, \
+      ACCESSOR, \
+      Accessors::k##name, \
+      "Accessors::" #name);
+
+  ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION)
+#undef ACCESSOR_DESCRIPTOR_DECLARATION
+
+  // Stub cache tables
+  Add(SCTableReference::keyReference(StubCache::kPrimary).address(),
+      STUB_CACHE_TABLE,
+      1,
+      "StubCache::primary_->key");
+  Add(SCTableReference::valueReference(StubCache::kPrimary).address(),
+      STUB_CACHE_TABLE,
+      2,
+      "StubCache::primary_->value");
+  Add(SCTableReference::keyReference(StubCache::kSecondary).address(),
+      STUB_CACHE_TABLE,
+      3,
+      "StubCache::secondary_->key");
+  Add(SCTableReference::valueReference(StubCache::kSecondary).address(),
+      STUB_CACHE_TABLE,
+      4,
+      "StubCache::secondary_->value");
+
+  // Runtime entries
+  Add(ExternalReference::perform_gc_function().address(),
+      RUNTIME_ENTRY,
+      1,
+      "Runtime::PerformGC");
+  Add(ExternalReference::random_positive_smi_function().address(),
+      RUNTIME_ENTRY,
+      2,
+      "V8::RandomPositiveSmi");
+
+  // Miscellaneous
+  Add(ExternalReference::builtin_passed_function().address(),
+      UNCLASSIFIED,
+      1,
+      "Builtins::builtin_passed_function");
+  Add(ExternalReference::the_hole_value_location().address(),
+      UNCLASSIFIED,
+      2,
+      "Factory::the_hole_value().location()");
+  Add(ExternalReference::roots_address().address(),
+      UNCLASSIFIED,
+      3,
+      "Heap::roots_address()");
+  Add(ExternalReference::address_of_stack_guard_limit().address(),
+      UNCLASSIFIED,
+      4,
+      "StackGuard::address_of_jslimit()");
+  Add(ExternalReference::address_of_regexp_stack_limit().address(),
+      UNCLASSIFIED,
+      5,
+      "RegExpStack::limit_address()");
+  Add(ExternalReference::new_space_start().address(),
+      UNCLASSIFIED,
+      6,
+      "Heap::NewSpaceStart()");
+  Add(ExternalReference::heap_always_allocate_scope_depth().address(),
+      UNCLASSIFIED,
+      7,
+      "Heap::always_allocate_scope_depth()");
+  Add(ExternalReference::new_space_allocation_limit_address().address(),
+      UNCLASSIFIED,
+      8,
+      "Heap::NewSpaceAllocationLimitAddress()");
+  Add(ExternalReference::new_space_allocation_top_address().address(),
+      UNCLASSIFIED,
+      9,
+      "Heap::NewSpaceAllocationTopAddress()");
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  Add(ExternalReference::debug_break().address(),
+      UNCLASSIFIED,
+      10,
+      "Debug::Break()");
+  Add(ExternalReference::debug_step_in_fp_address().address(),
+      UNCLASSIFIED,
+      11,
+      "Debug::step_in_fp_addr()");
+#endif
+  Add(ExternalReference::double_fp_operation(Token::ADD).address(),
+      UNCLASSIFIED,
+      12,
+      "add_two_doubles");
+  Add(ExternalReference::double_fp_operation(Token::SUB).address(),
+      UNCLASSIFIED,
+      13,
+      "sub_two_doubles");
+  Add(ExternalReference::double_fp_operation(Token::MUL).address(),
+      UNCLASSIFIED,
+      14,
+      "mul_two_doubles");
+  Add(ExternalReference::double_fp_operation(Token::DIV).address(),
+      UNCLASSIFIED,
+      15,
+      "div_two_doubles");
+  Add(ExternalReference::double_fp_operation(Token::MOD).address(),
+      UNCLASSIFIED,
+      16,
+      "mod_two_doubles");
+  Add(ExternalReference::compare_doubles().address(),
+      UNCLASSIFIED,
+      17,
+      "compare_doubles");
+#ifdef V8_NATIVE_REGEXP
+  Add(ExternalReference::re_case_insensitive_compare_uc16().address(),
+      UNCLASSIFIED,
+      18,
+      "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
+  Add(ExternalReference::re_check_stack_guard_state().address(),
+      UNCLASSIFIED,
+      19,
+      "RegExpMacroAssembler*::CheckStackGuardState()");
+  Add(ExternalReference::re_grow_stack().address(),
+      UNCLASSIFIED,
+      20,
+      "NativeRegExpMacroAssembler::GrowStack()");
+#endif
+}
+
+
+ExternalReferenceEncoder::ExternalReferenceEncoder()
+    : encodings_(Match) {
+  ExternalReferenceTable* external_references =
+      ExternalReferenceTable::instance();
+  for (int i = 0; i < external_references->size(); ++i) {
+    Put(external_references->address(i), i);
+  }
+}
+
+
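+// Encode() returns the table's code for a known external address; per
+// ExternalReferenceDecoder::Lookup (serialize.h), the code packs the TypeCode
+// above kReferenceTypeShift and the per-type id in the low kReferenceIdBits.
+// NULL and unknown addresses encode to 0.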
+uint32_t ExternalReferenceEncoder::Encode(Address key) const {
+  int index = IndexOf(key);
+  return index >= 0 ? ExternalReferenceTable::instance()->code(index) : 0;
+}
+
+
+const char* ExternalReferenceEncoder::NameOfAddress(Address key) const {
+  int index = IndexOf(key);
+  return index >= 0 ? ExternalReferenceTable::instance()->name(index) : NULL;
+}
+
+
+int ExternalReferenceEncoder::IndexOf(Address key) const {
+  if (key == NULL) return -1;
+  HashMap::Entry* entry =
+      const_cast<HashMap &>(encodings_).Lookup(key, Hash(key), false);
+  return entry == NULL
+      ? -1
+      : static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
+}
+
+
+void ExternalReferenceEncoder::Put(Address key, int index) {
+  HashMap::Entry* entry = encodings_.Lookup(key, Hash(key), true);
+  entry->value = reinterpret_cast<void *>(index);
+}
+
+
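+// The decoder keeps one array of addresses per TypeCode, indexed by the
+// reference id, so Decode() is a simple two-level table lookup.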
+ExternalReferenceDecoder::ExternalReferenceDecoder()
+  : encodings_(NewArray<Address*>(kTypeCodeCount)) {
+  ExternalReferenceTable* external_references =
+      ExternalReferenceTable::instance();
+  for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
+    int max = external_references->max_id(type) + 1;
+    encodings_[type] = NewArray<Address>(max + 1);
+  }
+  for (int i = 0; i < external_references->size(); ++i) {
+    Put(external_references->code(i), external_references->address(i));
+  }
+}
+
+
+ExternalReferenceDecoder::~ExternalReferenceDecoder() {
+  for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
+    DeleteArray(encodings_[type]);
+  }
+  DeleteArray(encodings_);
+}
+
+
+//------------------------------------------------------------------------------
+// Implementation of Serializer
+
+
+// Helper class to write the bytes of the serialized heap.
+
+class SnapshotWriter {
+ public:
+  SnapshotWriter() {
+    len_ = 0;
+    max_ = 8 << 10;  // 8K initial size
+    str_ = NewArray<byte>(max_);
+  }
+
+  ~SnapshotWriter() {
+    DeleteArray(str_);
+  }
+
+  void GetBytes(byte** str, int* len) {
+    *str = NewArray<byte>(len_);
+    memcpy(*str, str_, len_);
+    *len = len_;
+  }
+
+  void Reserve(int bytes, int pos);
+
+  void PutC(char c) {
+    InsertC(c, len_);
+  }
+
+  void PutInt(int i) {
+    InsertInt(i, len_);
+  }
+
+  void PutAddress(Address p) {
+    PutBytes(reinterpret_cast<byte*>(&p), sizeof(p));
+  }
+
+  void PutBytes(const byte* a, int size) {
+    InsertBytes(a, len_, size);
+  }
+
+  void PutString(const char* s) {
+    InsertString(s, len_);
+  }
+
+  int InsertC(char c, int pos) {
+    Reserve(1, pos);
+    str_[pos] = c;
+    len_++;
+    return pos + 1;
+  }
+
+  int InsertInt(int i, int pos) {
+    return InsertBytes(reinterpret_cast<byte*>(&i), pos, sizeof(i));
+  }
+
+  int InsertBytes(const byte* a, int pos, int size) {
+    Reserve(size, pos);
+    memcpy(&str_[pos], a, size);
+    len_ += size;
+    return pos + size;
+  }
+
+  int InsertString(const char* s, int pos);
+
+  int length() { return len_; }
+
+  Address position() { return reinterpret_cast<Address>(&str_[len_]); }
+
+ private:
+  byte* str_;  // the snapshot
+  int len_;   // the current length of str_
+  int max_;   // the allocated size of str_
+};
+
+
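+// Make room for 'bytes' more bytes.  If pos is before the current end, the
+// tail [pos, len_) is shifted up to open a gap at pos; the Insert* callers
+// fill the gap and bump len_ themselves.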
+void SnapshotWriter::Reserve(int bytes, int pos) {
+  CHECK(0 <= pos && pos <= len_);
+  while (len_ + bytes >= max_) {
+    max_ *= 2;
+    byte* old = str_;
+    str_ = NewArray<byte>(max_);
+    memcpy(str_, old, len_);
+    DeleteArray(old);
+  }
+  if (pos < len_) {
+    byte* old = str_;
+    str_ = NewArray<byte>(max_);
+    memcpy(str_, old, pos);
+    memcpy(str_ + pos + bytes, old + pos, len_ - pos);
+    DeleteArray(old);
+  }
+}
+
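+// Strings are written as '[' <raw int length> ']' followed by the characters
+// with no terminating NUL; SnapshotReader::GetString is the matching reader.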
+int SnapshotWriter::InsertString(const char* s, int pos) {
+  int size = strlen(s);
+  pos = InsertC('[', pos);
+  pos = InsertInt(size, pos);
+  pos = InsertC(']', pos);
+  return InsertBytes(reinterpret_cast<const byte*>(s), pos, size);
+}
+
+
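+// Visits an object that has just been written to the snapshot and records,
+// for each pointer, code target and external reference in it, the offset
+// within the object together with the encoded value that slot should hold.
+// Update() then patches the copy the writer has already emitted.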
+class ReferenceUpdater: public ObjectVisitor {
+ public:
+  ReferenceUpdater(HeapObject* obj, Serializer* serializer)
+    : obj_address_(obj->address()),
+      serializer_(serializer),
+      reference_encoder_(serializer->reference_encoder_),
+      offsets_(8),
+      addresses_(8) {
+  }
+
+  virtual void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; ++p) {
+      if ((*p)->IsHeapObject()) {
+        offsets_.Add(reinterpret_cast<Address>(p) - obj_address_);
+        Address a = serializer_->GetSavedAddress(HeapObject::cast(*p));
+        addresses_.Add(a);
+      }
+    }
+  }
+
+  virtual void VisitCodeTarget(RelocInfo* rinfo) {
+    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+    Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+    Address encoded_target = serializer_->GetSavedAddress(target);
+    offsets_.Add(rinfo->target_address_address() - obj_address_);
+    addresses_.Add(encoded_target);
+  }
+
+
+  virtual void VisitExternalReferences(Address* start, Address* end) {
+    for (Address* p = start; p < end; ++p) {
+      uint32_t code = reference_encoder_->Encode(*p);
+      CHECK(*p == NULL ? code == 0 : code != 0);
+      offsets_.Add(reinterpret_cast<Address>(p) - obj_address_);
+      addresses_.Add(reinterpret_cast<Address>(code));
+    }
+  }
+
+  virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
+    Address target = rinfo->target_address();
+    uint32_t encoding = reference_encoder_->Encode(target);
+    CHECK(target == NULL ? encoding == 0 : encoding != 0);
+    offsets_.Add(rinfo->target_address_address() - obj_address_);
+    addresses_.Add(reinterpret_cast<Address>(encoding));
+  }
+
+  void Update(Address start_address) {
+    for (int i = 0; i < offsets_.length(); i++) {
+      memcpy(start_address + offsets_[i], &addresses_[i], sizeof(Address));
+    }
+  }
+
+ private:
+  Address obj_address_;
+  Serializer* serializer_;
+  ExternalReferenceEncoder* reference_encoder_;
+  List<int> offsets_;
+  List<Address> addresses_;
+};
+
+
+// Helper functions for a map of encoded heap object addresses.
+static uint32_t HeapObjectHash(HeapObject* key) {
+  uint32_t low32bits = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key));
+  return low32bits >> 2;
+}
+
+
+static bool MatchHeapObject(void* key1, void* key2) {
+  return key1 == key2;
+}
+
+
+Serializer::Serializer()
+  : global_handles_(4),
+    saved_addresses_(MatchHeapObject) {
+  root_ = true;
+  roots_ = 0;
+  objects_ = 0;
+  reference_encoder_ = NULL;
+  writer_ = new SnapshotWriter();
+  for (int i = 0; i <= LAST_SPACE; i++) {
+    allocator_[i] = new SimulatedHeapSpace();
+  }
+}
+
+
+Serializer::~Serializer() {
+  for (int i = 0; i <= LAST_SPACE; i++) {
+    delete allocator_[i];
+  }
+  if (reference_encoder_) delete reference_encoder_;
+  delete writer_;
+}
+
+
+bool Serializer::serialization_enabled_ = false;
+
+
+#ifdef DEBUG
+static const int kMaxTagLength = 32;
+
+void Serializer::Synchronize(const char* tag) {
+  if (FLAG_debug_serialization) {
+    int length = strlen(tag);
+    ASSERT(length <= kMaxTagLength);
+    writer_->PutC('S');
+    writer_->PutInt(length);
+    writer_->PutBytes(reinterpret_cast<const byte*>(tag), length);
+  }
+}
+#endif
+
+
+void Serializer::InitializeAllocators() {
+  for (int i = 0; i <= LAST_SPACE; i++) {
+    allocator_[i]->InitEmptyHeap(static_cast<AllocationSpace>(i));
+  }
+}
+
+
+bool Serializer::IsVisited(HeapObject* obj) {
+  HashMap::Entry* entry =
+    saved_addresses_.Lookup(obj, HeapObjectHash(obj), false);
+  return entry != NULL;
+}
+
+
+Address Serializer::GetSavedAddress(HeapObject* obj) {
+  HashMap::Entry* entry =
+    saved_addresses_.Lookup(obj, HeapObjectHash(obj), false);
+  ASSERT(entry != NULL);
+  return reinterpret_cast<Address>(entry->value);
+}
+
+
+void Serializer::SaveAddress(HeapObject* obj, Address addr) {
+  HashMap::Entry* entry =
+    saved_addresses_.Lookup(obj, HeapObjectHash(obj), true);
+  entry->value = addr;
+}
+
+
+void Serializer::Serialize() {
+  // No active threads.
+  CHECK_EQ(NULL, ThreadState::FirstInUse());
+  // No active or weak handles.
+  CHECK(HandleScopeImplementer::instance()->blocks()->is_empty());
+  CHECK_EQ(0, GlobalHandles::NumberOfWeakHandles());
+  // We need a counter function during serialization to resolve the
+  // references to counters in the code on the heap.
+  CHECK(StatsTable::HasCounterFunction());
+  CHECK(enabled());
+  InitializeAllocators();
+  reference_encoder_ = new ExternalReferenceEncoder();
+  PutHeader();
+  Heap::IterateRoots(this);
+  PutLog();
+  PutContextStack();
+  Disable();
+}
+
+
+void Serializer::Finalize(byte** str, int* len) {
+  writer_->GetBytes(str, len);
+}
+
+
+// Serialize objects by writing them into the stream.
+
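+// For roots, each visited slot is written to the stream: an object not seen
+// before is embedded inline via Encode()/PutObject(), otherwise a
+// 'P'<encoded address> record is written.  Non-root slots are only encoded
+// here; ReferenceUpdater later patches them into the enclosing object body.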
+void Serializer::VisitPointers(Object** start, Object** end) {
+  bool root = root_;
+  root_ = false;
+  for (Object** p = start; p < end; ++p) {
+    bool serialized;
+    Address a = Encode(*p, &serialized);
+    if (root) {
+      roots_++;
+      // If the object was not just serialized,
+      // write its encoded address instead.
+      if (!serialized) PutEncodedAddress(a);
+    }
+  }
+  root_ = root;
+}
+
+
+void Serializer::VisitCodeTarget(RelocInfo* rinfo) {
+  ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+  Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+  bool serialized;
+  Encode(target, &serialized);
+}
+
+
+class GlobalHandlesRetriever: public ObjectVisitor {
+ public:
+  explicit GlobalHandlesRetriever(List<Object**>* handles)
+  : global_handles_(handles) {}
+
+  virtual void VisitPointers(Object** start, Object** end) {
+    for (; start != end; ++start) {
+      global_handles_->Add(start);
+    }
+  }
+
+ private:
+  List<Object**>* global_handles_;
+};
+
+
+void Serializer::PutFlags() {
+  writer_->PutC('F');
+  List<const char*>* argv = FlagList::argv();
+  writer_->PutInt(argv->length());
+  writer_->PutC('[');
+  for (int i = 0; i < argv->length(); i++) {
+    if (i > 0) writer_->PutC('|');
+    writer_->PutString((*argv)[i]);
+    DeleteArray((*argv)[i]);
+  }
+  writer_->PutC(']');
+  flags_end_ = writer_->length();
+  delete argv;
+}
+
+
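+// Header layout: the flags ('F' ...), 'D' plus a '0'/'1' debug-serialization
+// marker, 'N' or 'I' for native vs. interpreted regexp, the paged space
+// sizes as 'S[...|...]', and one 'N' per global handle inside 'G[...]'.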
+void Serializer::PutHeader() {
+  PutFlags();
+  writer_->PutC('D');
+#ifdef DEBUG
+  writer_->PutC(FLAG_debug_serialization ? '1' : '0');
+#else
+  writer_->PutC('0');
+#endif
+#ifdef V8_NATIVE_REGEXP
+  writer_->PutC('N');
+#else  // Interpreted regexp
+  writer_->PutC('I');
+#endif
+  // Write sizes of paged memory spaces. Allocate extra space for the old
+  // and code spaces, because objects in new space will be promoted to them.
+  writer_->PutC('S');
+  writer_->PutC('[');
+  writer_->PutInt(Heap::old_pointer_space()->Size() +
+                  Heap::new_space()->Size());
+  writer_->PutC('|');
+  writer_->PutInt(Heap::old_data_space()->Size() + Heap::new_space()->Size());
+  writer_->PutC('|');
+  writer_->PutInt(Heap::code_space()->Size() + Heap::new_space()->Size());
+  writer_->PutC('|');
+  writer_->PutInt(Heap::map_space()->Size());
+  writer_->PutC('|');
+  writer_->PutInt(Heap::cell_space()->Size());
+  writer_->PutC(']');
+  // Write global handles.
+  writer_->PutC('G');
+  writer_->PutC('[');
+  GlobalHandlesRetriever ghr(&global_handles_);
+  GlobalHandles::IterateRoots(&ghr);
+  for (int i = 0; i < global_handles_.length(); i++) {
+    writer_->PutC('N');
+  }
+  writer_->PutC(']');
+}
+
+
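+// If code logging is on, shut down the logger and splice the log file into
+// the snapshot as 'L'<string>, right after the flags (at flags_end_).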
+void Serializer::PutLog() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (FLAG_log_code) {
+    Logger::TearDown();
+    int pos = writer_->InsertC('L', flags_end_);
+    bool exists;
+    Vector<const char> log = ReadFile(FLAG_logfile, &exists);
+    writer_->InsertString(log.start(), pos);
+    log.Dispose();
+  }
+#endif
+}
+
+
+static int IndexOf(const List<Object**>& list, Object** element) {
+  for (int i = 0; i < list.length(); i++) {
+    if (list[i] == element) return i;
+  }
+  return -1;
+}
+
+
+void Serializer::PutGlobalHandleStack(const List<Handle<Object> >& stack) {
+  writer_->PutC('[');
+  writer_->PutInt(stack.length());
+  for (int i = stack.length() - 1; i >= 0; i--) {
+    writer_->PutC('|');
+    int gh_index = IndexOf(global_handles_, stack[i].location());
+    CHECK_GE(gh_index, 0);
+    writer_->PutInt(gh_index);
+  }
+  writer_->PutC(']');
+}
+
+
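+// Drain the saved-context stack to enumerate it, push the contexts back in
+// their original order, and serialize them as a 'C[...]' section.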
+void Serializer::PutContextStack() {
+  List<Context*> contexts(2);
+  while (HandleScopeImplementer::instance()->HasSavedContexts()) {
+    Context* context =
+      HandleScopeImplementer::instance()->RestoreContext();
+    contexts.Add(context);
+  }
+  for (int i = contexts.length() - 1; i >= 0; i--) {
+    HandleScopeImplementer::instance()->SaveContext(contexts[i]);
+  }
+  writer_->PutC('C');
+  writer_->PutC('[');
+  writer_->PutInt(contexts.length());
+  if (!contexts.is_empty()) {
+    Object** start = reinterpret_cast<Object**>(&contexts.first());
+    VisitPointers(start, start + contexts.length());
+  }
+  writer_->PutC(']');
+}
+
+void Serializer::PutEncodedAddress(Address addr) {
+  writer_->PutC('P');
+  writer_->PutAddress(addr);
+}
+
+
+Address Serializer::Encode(Object* o, bool* serialized) {
+  *serialized = false;
+  if (o->IsSmi()) {
+    return reinterpret_cast<Address>(o);
+  } else {
+    HeapObject* obj = HeapObject::cast(o);
+    if (IsVisited(obj)) {
+      return GetSavedAddress(obj);
+    } else {
+      // First visit: serialize the object.
+      *serialized = true;
+      return PutObject(obj);
+    }
+  }
+}
+
+
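+// Per-object stream format: '[' <instance type> <size in alignment units>
+// 'P'<encoded address>, then any recursively embedded objects, then '|' and
+// the raw object body.  With FLAG_debug_serialization an epilogue of
+// 'P'<encoded address> ']' follows.  Deserializer::GetObject reads this back.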
+Address Serializer::PutObject(HeapObject* obj) {
+  Map* map = obj->map();
+  InstanceType type = map->instance_type();
+  int size = obj->SizeFromMap(map);
+
+  // Simulate the allocation of obj to predict where it will be
+  // allocated during deserialization.
+  Address addr = Allocate(obj).Encode();
+
+  SaveAddress(obj, addr);
+
+  if (type == CODE_TYPE) {
+    LOG(CodeMoveEvent(obj->address(), addr));
+  }
+
+  // Write out the object prologue: type, size, and simulated address of obj.
+  writer_->PutC('[');
+  CHECK_EQ(0, static_cast<int>(size & kObjectAlignmentMask));
+  writer_->PutInt(type);
+  writer_->PutInt(size >> kObjectAlignmentBits);
+  PutEncodedAddress(addr);  // encodes AllocationSpace
+
+  // Visit all the pointers in the object other than the map. This
+  // will recursively serialize any as-yet-unvisited objects.
+  obj->Iterate(this);
+
+  // Mark end of recursively embedded objects, start of object body.
+  writer_->PutC('|');
+  // Write out the raw contents of the object. No compression, but
+  // fast to deserialize.
+  writer_->PutBytes(obj->address(), size);
+  // Update pointers and external references in the written object.
+  ReferenceUpdater updater(obj, this);
+  obj->Iterate(&updater);
+  updater.Update(writer_->position() - size);
+
+#ifdef DEBUG
+  if (FLAG_debug_serialization) {
+    // Write out the object epilogue to catch synchronization errors.
+    PutEncodedAddress(addr);
+    writer_->PutC(']');
+  }
+#endif
+
+  objects_++;
+  return addr;
+}
+
+
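+// New-space objects are simulated as if already promoted: large ones go to
+// LO_SPACE, the rest to the old pointer or old data space chosen by
+// Heap::TargetSpace(), matching the extra room reserved in PutHeader().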
+RelativeAddress Serializer::Allocate(HeapObject* obj) {
+  // Find out which AllocationSpace 'obj' is in.
+  AllocationSpace s;
+  bool found = false;
+  for (int i = FIRST_SPACE; !found && i <= LAST_SPACE; i++) {
+    s = static_cast<AllocationSpace>(i);
+    found = Heap::InSpace(obj, s);
+  }
+  CHECK(found);
+  int size = obj->Size();
+  if (s == NEW_SPACE) {
+    if (size > Heap::MaxObjectSizeInPagedSpace()) {
+      s = LO_SPACE;
+    } else {
+      OldSpace* space = Heap::TargetSpace(obj);
+      ASSERT(space == Heap::old_pointer_space() ||
+             space == Heap::old_data_space());
+      s = (space == Heap::old_pointer_space()) ?
+          OLD_POINTER_SPACE :
+          OLD_DATA_SPACE;
+    }
+  }
+  GCTreatment gc_treatment = DataObject;
+  if (obj->IsFixedArray()) gc_treatment = PointerObject;
+  else if (obj->IsCode()) gc_treatment = CodeObject;
+  return allocator_[s]->Allocate(size, gc_treatment);
+}
+
+
+//------------------------------------------------------------------------------
+// Implementation of Deserializer
+
+
+static const int kInitArraySize = 32;
+
+
+Deserializer::Deserializer(const byte* str, int len)
+  : reader_(str, len),
+    map_pages_(kInitArraySize),
+    cell_pages_(kInitArraySize),
+    old_pointer_pages_(kInitArraySize),
+    old_data_pages_(kInitArraySize),
+    code_pages_(kInitArraySize),
+    large_objects_(kInitArraySize),
+    global_handles_(4) {
+  root_ = true;
+  roots_ = 0;
+  objects_ = 0;
+  reference_decoder_ = NULL;
+#ifdef DEBUG
+  expect_debug_information_ = false;
+#endif
+}
+
+
+Deserializer::~Deserializer() {
+  if (reference_decoder_) delete reference_decoder_;
+}
+
+
+void Deserializer::ExpectEncodedAddress(Address expected) {
+  Address a = GetEncodedAddress();
+  USE(a);
+  ASSERT(a == expected);
+}
+
+
+#ifdef DEBUG
+void Deserializer::Synchronize(const char* tag) {
+  if (expect_debug_information_) {
+    char buf[kMaxTagLength];
+    reader_.ExpectC('S');
+    int length = reader_.GetInt();
+    ASSERT(length <= kMaxTagLength);
+    reader_.GetBytes(reinterpret_cast<Address>(buf), length);
+    ASSERT_EQ(strlen(tag), length);
+    ASSERT(strncmp(tag, buf, length) == 0);
+  }
+}
+#endif
+
+
+void Deserializer::Deserialize() {
+  // No active threads.
+  ASSERT_EQ(NULL, ThreadState::FirstInUse());
+  // No active handles.
+  ASSERT(HandleScopeImplementer::instance()->blocks()->is_empty());
+  reference_decoder_ = new ExternalReferenceDecoder();
+  // By setting linear allocation only, we forbid the use of free list
+  // allocation which is not predicted by SimulatedAddress.
+  GetHeader();
+  Heap::IterateRoots(this);
+  GetContextStack();
+}
+
+
+void Deserializer::VisitPointers(Object** start, Object** end) {
+  bool root = root_;
+  root_ = false;
+  for (Object** p = start; p < end; ++p) {
+    if (root) {
+      roots_++;
+      // Read the next item from the stream: either an embedded object
+      // or a pointer to a previously serialized object.
+      int c = reader_.GetC();
+      if (c == '[') {
+        *p = GetObject();  // embedded object
+      } else {
+        ASSERT(c == 'P');  // pointer to previously serialized object
+        *p = Resolve(reader_.GetAddress());
+      }
+    } else {
+      // A pointer internal to a HeapObject that we've already
+      // read: resolve it to a true address (or Smi)
+      *p = Resolve(reinterpret_cast<Address>(*p));
+    }
+  }
+  root_ = root;
+}
+
+
+void Deserializer::VisitCodeTarget(RelocInfo* rinfo) {
+  ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+  Address encoded_address = reinterpret_cast<Address>(rinfo->target_object());
+  Code* target_object = reinterpret_cast<Code*>(Resolve(encoded_address));
+  rinfo->set_target_address(target_object->instruction_start());
+}
+
+
+void Deserializer::VisitExternalReferences(Address* start, Address* end) {
+  for (Address* p = start; p < end; ++p) {
+    uint32_t code = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*p));
+    *p = reference_decoder_->Decode(code);
+  }
+}
+
+
+void Deserializer::VisitRuntimeEntry(RelocInfo* rinfo) {
+  uint32_t* pc = reinterpret_cast<uint32_t*>(rinfo->target_address_address());
+  uint32_t encoding = *pc;
+  Address target = reference_decoder_->Decode(encoding);
+  rinfo->set_target_address(target);
+}
+
+
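+// Read the flag strings back from the header.  Only a handful are acted on
+// here (--log_code, --nouse_ic, --debug_code, --nolazy); the rest are read
+// and discarded.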
+void Deserializer::GetFlags() {
+  reader_.ExpectC('F');
+  int argc = reader_.GetInt() + 1;
+  char** argv = NewArray<char*>(argc);
+  reader_.ExpectC('[');
+  for (int i = 1; i < argc; i++) {
+    if (i > 1) reader_.ExpectC('|');
+    argv[i] = reader_.GetString();
+  }
+  reader_.ExpectC(']');
+  has_log_ = false;
+  for (int i = 1; i < argc; i++) {
+    if (strcmp("--log_code", argv[i]) == 0) {
+      has_log_ = true;
+    } else if (strcmp("--nouse_ic", argv[i]) == 0) {
+      FLAG_use_ic = false;
+    } else if (strcmp("--debug_code", argv[i]) == 0) {
+      FLAG_debug_code = true;
+    } else if (strcmp("--nolazy", argv[i]) == 0) {
+      FLAG_lazy = false;
+    }
+    DeleteArray(argv[i]);
+  }
+
+  DeleteArray(argv);
+}
+
+
+void Deserializer::GetLog() {
+  if (has_log_) {
+    reader_.ExpectC('L');
+    char* snapshot_log = reader_.GetString();
+#ifdef ENABLE_LOGGING_AND_PROFILING
+    if (FLAG_log_code) {
+      LOG(Preamble(snapshot_log));
+    }
+#endif
+    DeleteArray(snapshot_log);
+  }
+}
+
+
+static void InitPagedSpace(PagedSpace* space,
+                           int capacity,
+                           List<Page*>* page_list) {
+  if (!space->EnsureCapacity(capacity)) {
+    V8::FatalProcessOutOfMemory("InitPagedSpace");
+  }
+  PageIterator it(space, PageIterator::ALL_PAGES);
+  while (it.has_next()) page_list->Add(it.next());
+}
+
+
+void Deserializer::GetHeader() {
+  reader_.ExpectC('D');
+#ifdef DEBUG
+  expect_debug_information_ = reader_.GetC() == '1';
+#else
+  // In release mode, don't attempt to read a snapshot containing
+  // synchronization tags.
+  if (reader_.GetC() != '0') FATAL("Snapshot contains synchronization tags.");
+#endif
+#ifdef V8_NATIVE_REGEXP
+  reader_.ExpectC('N');
+#else  // Interpreted regexp.
+  reader_.ExpectC('I');
+#endif
+  // Ensure sufficient capacity in paged memory spaces to avoid growth
+  // during deserialization.
+  reader_.ExpectC('S');
+  reader_.ExpectC('[');
+  InitPagedSpace(Heap::old_pointer_space(),
+                 reader_.GetInt(),
+                 &old_pointer_pages_);
+  reader_.ExpectC('|');
+  InitPagedSpace(Heap::old_data_space(), reader_.GetInt(), &old_data_pages_);
+  reader_.ExpectC('|');
+  InitPagedSpace(Heap::code_space(), reader_.GetInt(), &code_pages_);
+  reader_.ExpectC('|');
+  InitPagedSpace(Heap::map_space(), reader_.GetInt(), &map_pages_);
+  reader_.ExpectC('|');
+  InitPagedSpace(Heap::cell_space(), reader_.GetInt(), &cell_pages_);
+  reader_.ExpectC(']');
+  // Create placeholders for the global handles; they are filled in later,
+  // during IterateRoots.
+  reader_.ExpectC('G');
+  reader_.ExpectC('[');
+  int c = reader_.GetC();
+  while (c != ']') {
+    ASSERT(c == 'N');
+    global_handles_.Add(GlobalHandles::Create(NULL).location());
+    c = reader_.GetC();
+  }
+}
+
+
+void Deserializer::GetGlobalHandleStack(List<Handle<Object> >* stack) {
+  reader_.ExpectC('[');
+  int length = reader_.GetInt();
+  for (int i = 0; i < length; i++) {
+    reader_.ExpectC('|');
+    int gh_index = reader_.GetInt();
+    stack->Add(global_handles_[gh_index]);
+  }
+  reader_.ExpectC(']');
+}
+
+
+void Deserializer::GetContextStack() {
+  reader_.ExpectC('C');
+  CHECK_EQ(reader_.GetC(), '[');
+  int count = reader_.GetInt();
+  List<Context*> entered_contexts(count);
+  if (count > 0) {
+    Object** start = reinterpret_cast<Object**>(&entered_contexts.first());
+    VisitPointers(start, start + count);
+  }
+  reader_.ExpectC(']');
+  for (int i = 0; i < count; i++) {
+    HandleScopeImplementer::instance()->SaveContext(entered_contexts[i]);
+  }
+}
+
+
+Address Deserializer::GetEncodedAddress() {
+  reader_.ExpectC('P');
+  return reader_.GetAddress();
+}
+
+
+Object* Deserializer::GetObject() {
+  // Read the prologue: type, size and encoded address.
+  InstanceType type = static_cast<InstanceType>(reader_.GetInt());
+  int size = reader_.GetInt() << kObjectAlignmentBits;
+  Address a = GetEncodedAddress();
+
+  // Get a raw object of the right size in the right space.
+  AllocationSpace space = GetSpace(a);
+  Object* o;
+  if (IsLargeExecutableObject(a)) {
+    o = Heap::lo_space()->AllocateRawCode(size);
+  } else if (IsLargeFixedArray(a)) {
+    o = Heap::lo_space()->AllocateRawFixedArray(size);
+  } else {
+    AllocationSpace retry_space = (space == NEW_SPACE)
+        ? Heap::TargetSpaceId(type)
+        : space;
+    o = Heap::AllocateRaw(size, space, retry_space);
+  }
+  ASSERT(!o->IsFailure());
+  // Check that the simulation of heap allocation was correct.
+  ASSERT(o == Resolve(a));
+
+  // Read any recursively embedded objects.
+  int c = reader_.GetC();
+  while (c == '[') {
+    GetObject();
+    c = reader_.GetC();
+  }
+  ASSERT(c == '|');
+
+  HeapObject* obj = reinterpret_cast<HeapObject*>(o);
+  // Read the uninterpreted contents of the object after the map
+  reader_.GetBytes(obj->address(), size);
+#ifdef DEBUG
+  if (expect_debug_information_) {
+    // Read in the epilogue to check that we're still synchronized
+    ExpectEncodedAddress(a);
+    reader_.ExpectC(']');
+  }
+#endif
+
+  // Resolve the encoded pointers we just read in.
+  // Same as obj->Iterate(this), but doesn't rely on the map pointer being set.
+  VisitPointer(reinterpret_cast<Object**>(obj->address()));
+  obj->IterateBody(type, size, this);
+
+  if (type == CODE_TYPE) {
+    LOG(CodeMoveEvent(a, obj->address()));
+  }
+  objects_++;
+  return o;
+}
+
+
+static inline Object* ResolvePaged(int page_index,
+                                   int page_offset,
+                                   PagedSpace* space,
+                                   List<Page*>* page_list) {
+  ASSERT(page_index < page_list->length());
+  Address address = (*page_list)[page_index]->OffsetToAddress(page_offset);
+  return HeapObject::FromAddress(address);
+}
+
+
+template<typename T>
+void ConcatReversed(List<T>* target, const List<T>& source) {
+  for (int i = source.length() - 1; i >= 0; i--) {
+    target->Add(source[i]);
+  }
+}
+
+
+Object* Deserializer::Resolve(Address encoded) {
+  Object* o = reinterpret_cast<Object*>(encoded);
+  if (o->IsSmi()) return o;
+
+  // Encoded addresses of HeapObjects always have 'HeapObject' tags.
+  ASSERT(o->IsHeapObject());
+
+  switch (GetSpace(encoded)) {
+    // For Map space and Old space, we cache the known Pages in map_pages,
+    // old_pointer_pages and old_data_pages. Even though MapSpace keeps a list
+    // of page addresses, we don't rely on it since GetObject uses AllocateRaw,
+    // and that appears not to update the page list.
+    case MAP_SPACE:
+      return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
+                          Heap::map_space(), &map_pages_);
+    case CELL_SPACE:
+      return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
+                          Heap::cell_space(), &cell_pages_);
+    case OLD_POINTER_SPACE:
+      return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
+                          Heap::old_pointer_space(), &old_pointer_pages_);
+    case OLD_DATA_SPACE:
+      return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
+                          Heap::old_data_space(), &old_data_pages_);
+    case CODE_SPACE:
+      return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
+                          Heap::code_space(), &code_pages_);
+    case NEW_SPACE:
+      return HeapObject::FromAddress(Heap::NewSpaceStart() +
+                                     NewSpaceOffset(encoded));
+    case LO_SPACE:
+      // Cache the known large_objects, allocated one per 'page'
+      int index = LargeObjectIndex(encoded);
+      if (index >= large_objects_.length()) {
+        int new_object_count =
+          Heap::lo_space()->PageCount() - large_objects_.length();
+        List<Object*> new_objects(new_object_count);
+        LargeObjectIterator it(Heap::lo_space());
+        for (int i = 0; i < new_object_count; i++) {
+          new_objects.Add(it.next());
+        }
+#ifdef DEBUG
+        for (int i = large_objects_.length() - 1; i >= 0; i--) {
+          ASSERT(it.next() == large_objects_[i]);
+        }
+#endif
+        ConcatReversed(&large_objects_, new_objects);
+        ASSERT(index < large_objects_.length());
+      }
+      return large_objects_[index];  // s.page_offset() is ignored.
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/serialize.h b/src/serialize.h
new file mode 100644
index 0000000..c901480
--- /dev/null
+++ b/src/serialize.h
@@ -0,0 +1,344 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SERIALIZE_H_
+#define V8_SERIALIZE_H_
+
+#include "hashmap.h"
+
+namespace v8 {
+namespace internal {
+
+// A TypeCode is used to distinguish different kinds of external reference.
+// Each kind gets a small integer code, stored in the upper bits of an
+// encoded reference, so testing for a type is cheap.
+enum TypeCode {
+  UNCLASSIFIED,        // One-of-a-kind references.
+  BUILTIN,
+  RUNTIME_FUNCTION,
+  IC_UTILITY,
+  DEBUG_ADDRESS,
+  STATS_COUNTER,
+  TOP_ADDRESS,
+  C_BUILTIN,
+  EXTENSION,
+  ACCESSOR,
+  RUNTIME_ENTRY,
+  STUB_CACHE_TABLE
+};
+
+const int kTypeCodeCount = STUB_CACHE_TABLE + 1;
+const int kFirstTypeCode = UNCLASSIFIED;
+
+const int kReferenceIdBits = 16;
+const int kReferenceIdMask = (1 << kReferenceIdBits) - 1;
+const int kReferenceTypeShift = kReferenceIdBits;
+const int kDebugRegisterBits = 4;
+const int kDebugIdShift = kDebugRegisterBits;
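+// An encoded external reference packs the TypeCode above kReferenceTypeShift
+// and a per-type id in the low kReferenceIdBits; DEBUG_ADDRESS ids reserve
+// their low kDebugRegisterBits for a register number.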
+
+
+class ExternalReferenceEncoder {
+ public:
+  ExternalReferenceEncoder();
+
+  uint32_t Encode(Address key) const;
+
+  const char* NameOfAddress(Address key) const;
+
+ private:
+  HashMap encodings_;
+  static uint32_t Hash(Address key) {
+    return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key) >> 2);
+  }
+
+  int IndexOf(Address key) const;
+
+  static bool Match(void* key1, void* key2) { return key1 == key2; }
+
+  void Put(Address key, int index);
+};
+
+
+class ExternalReferenceDecoder {
+ public:
+  ExternalReferenceDecoder();
+  ~ExternalReferenceDecoder();
+
+  Address Decode(uint32_t key) const {
+    if (key == 0) return NULL;
+    return *Lookup(key);
+  }
+
+ private:
+  Address** encodings_;
+
+  Address* Lookup(uint32_t key) const {
+    int type = key >> kReferenceTypeShift;
+    ASSERT(kFirstTypeCode <= type && type < kTypeCodeCount);
+    int id = key & kReferenceIdMask;
+    return &encodings_[type][id];
+  }
+
+  void Put(uint32_t key, Address value) {
+    *Lookup(key) = value;
+  }
+};
+
+
+// A Serializer recursively visits objects to construct a serialized
+// representation of the Heap stored in a string. Serialization is
+// destructive. We use a similar mechanism to the GC to ensure that
+// each object is visited once, namely, we modify the map pointer of
+// each visited object to contain the relative address in the
+// appropriate space where that object will be allocated when the heap
+// is deserialized.
+
+
+// Helper classes defined in serialize.cc.
+class RelativeAddress;
+class SimulatedHeapSpace;
+class SnapshotWriter;
+class ReferenceUpdater;
+
+
+class Serializer: public ObjectVisitor {
+ public:
+  Serializer();
+
+  virtual ~Serializer();
+
+  // Serialize the current state of the heap.  This operation is destructive:
+  // it clobbers the heap contents and the roots that point into the heap.
+  void Serialize();
+
+  // Returns the serialized buffer. Ownership is transferred to the
+  // caller. Only the destructor and getters may be called after this call.
+  void Finalize(byte** str, int* len);
+
+  int roots() { return roots_; }
+  int objects() { return objects_; }
+
+#ifdef DEBUG
+  // insert "tag" into the serialized stream
+  virtual void Synchronize(const char* tag);
+#endif
+
+  static bool enabled() { return serialization_enabled_; }
+
+  static void Enable() { serialization_enabled_ = true; }
+  static void Disable() { serialization_enabled_ = false; }
+
+ private:
+  friend class ReferenceUpdater;
+
+  virtual void VisitPointers(Object** start, Object** end);
+  virtual void VisitCodeTarget(RelocInfo* rinfo);
+  bool IsVisited(HeapObject* obj);
+
+  Address GetSavedAddress(HeapObject* obj);
+
+  void SaveAddress(HeapObject* obj, Address addr);
+
+  void PutEncodedAddress(Address addr);
+  // Write the global flags into the file.
+  void PutFlags();
+  // Write global information into the header of the file.
+  void PutHeader();
+  // Write the contents of the log into the file.
+  void PutLog();
+  // Serialize 'obj', and return its encoded RelativeAddress.
+  Address PutObject(HeapObject* obj);
+  // Write a stack of handles to the file bottom first.
+  void PutGlobalHandleStack(const List<Handle<Object> >& stack);
+  // Write the context stack into the file.
+  void PutContextStack();
+
+  // Return the encoded RelativeAddress where this object will be
+  // allocated on deserialization. On the first visit of 'o',
+  // serialize its contents. On return, *serialized will be true iff
+  // 'o' has just been serialized.
+  Address Encode(Object* o, bool* serialized);
+
+  // Simulate the allocation of 'obj', returning the address where it will
+  // be allocated on deserialization
+  RelativeAddress Allocate(HeapObject* obj);
+
+  void InitializeAllocators();
+
+  SnapshotWriter* writer_;
+  bool root_;  // serializing a root?
+  int roots_;  // number of roots visited
+  int objects_;  // number of objects serialized
+
+  static bool serialization_enabled_;
+
+  int flags_end_;  // The position right after the flags.
+
+  // An array of per-space SimulatedHeapSpaces used as memory allocators.
+  SimulatedHeapSpace* allocator_[LAST_SPACE+1];
+  // A list of global handles at serialization time.
+  List<Object**> global_handles_;
+
+  ExternalReferenceEncoder* reference_encoder_;
+
+  HashMap saved_addresses_;
+
+  DISALLOW_COPY_AND_ASSIGN(Serializer);
+};
+
+// Helper class to read the bytes of the serialized heap.
+
+class SnapshotReader {
+ public:
+  SnapshotReader(const byte* str, int len): str_(str), end_(str + len) {}
+
+  void ExpectC(char expected) {
+    int c = GetC();
+    USE(c);
+    ASSERT(c == expected);
+  }
+
+  int GetC() {
+    if (str_ >= end_) return EOF;
+    return *str_++;
+  }
+
+  int GetInt() {
+    int result;
+    GetBytes(reinterpret_cast<Address>(&result), sizeof(result));
+    return result;
+  }
+
+  Address GetAddress() {
+    Address result;
+    GetBytes(reinterpret_cast<Address>(&result), sizeof(result));
+    return result;
+  }
+
+  void GetBytes(Address a, int size) {
+    ASSERT(str_ + size <= end_);
+    memcpy(a, str_, size);
+    str_ += size;
+  }
+
+  char* GetString() {
+    ExpectC('[');
+    int size = GetInt();
+    ExpectC(']');
+    char* s = NewArray<char>(size + 1);
+    GetBytes(reinterpret_cast<Address>(s), size);
+    s[size] = 0;
+    return s;
+  }
+
+ private:
+  const byte* str_;
+  const byte* end_;
+};
+
+
+// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
+
+class Deserializer: public ObjectVisitor {
+ public:
+  // Create a deserializer. The snapshot is held in str and has size len.
+  Deserializer(const byte* str, int len);
+
+  virtual ~Deserializer();
+
+  // Read the flags from the header of the file, and set those that
+  // should be inherited from the snapshot.
+  void GetFlags();
+
+  // Read saved profiling information from the file and log it if required.
+  void GetLog();
+
+  // Deserialize the snapshot into an empty heap.
+  void Deserialize();
+
+  int roots() { return roots_; }
+  int objects() { return objects_; }
+
+#ifdef DEBUG
+  // Check for the presence of "tag" in the serialized stream
+  virtual void Synchronize(const char* tag);
+#endif
+
+ private:
+  virtual void VisitPointers(Object** start, Object** end);
+  virtual void VisitCodeTarget(RelocInfo* rinfo);
+  virtual void VisitExternalReferences(Address* start, Address* end);
+  virtual void VisitRuntimeEntry(RelocInfo* rinfo);
+
+  Address GetEncodedAddress();
+
+  // Read other global information (except flags) from the header of the file.
+  void GetHeader();
+  // Read a stack of handles from the file bottom first.
+  void GetGlobalHandleStack(List<Handle<Object> >* stack);
+  // Read the context stack from the file.
+  void GetContextStack();
+
+  Object* GetObject();
+
+  // Get the encoded address. In debug mode we make sure
+  // it matches the given expectations.
+  void ExpectEncodedAddress(Address expected);
+
+  // Given an encoded address (the result of
+  // RelativeAddress::Encode), return the object to which it points,
+  // which will be either an Smi or a HeapObject in the current heap.
+  Object* Resolve(Address encoded_address);
+
+  SnapshotReader reader_;
+  bool root_;  // Deserializing a root?
+  int roots_;  // number of roots visited
+  int objects_;  // number of objects deserialized
+
+  bool has_log_;  // The file has log information.
+
+  // Resolve caches the following:
+  List<Page*> map_pages_;  // All pages in the map space.
+  List<Page*> cell_pages_;  // All pages in the cell space.
+  List<Page*> old_pointer_pages_;  // All pages in the old pointer space.
+  List<Page*> old_data_pages_;  // All pages in the old data space.
+  List<Page*> code_pages_;  // All pages in the code space.
+  List<Object*> large_objects_;    // All known large objects.
+  // A list of global handles at deserialization time.
+  List<Object**> global_handles_;
+
+  ExternalReferenceDecoder* reference_decoder_;
+
+#ifdef DEBUG
+  bool expect_debug_information_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(Deserializer);
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_SERIALIZE_H_
diff --git a/src/shell.h b/src/shell.h
new file mode 100644
index 0000000..ca51040
--- /dev/null
+++ b/src/shell.h
@@ -0,0 +1,55 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// A simple interactive shell.  Enable with --shell.
+
+#ifndef V8_SHELL_H_
+#define V8_SHELL_H_
+
+#include "../public/debug.h"
+
+namespace v8 {
+namespace internal {
+
+// Debug event handler for interactive debugging.
+void handle_debug_event(v8::DebugEvent event,
+                        v8::Handle<v8::Object> exec_state,
+                        v8::Handle<v8::Object> event_data,
+                        v8::Handle<Value> data);
+
+
+class Shell {
+ public:
+  static void PrintObject(v8::Handle<v8::Value> obj);
+  // Run the read-eval loop, executing code in the specified
+  // environment.
+  static void Run(v8::Handle<v8::Context> context);
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_SHELL_H_
diff --git a/src/smart-pointer.h b/src/smart-pointer.h
new file mode 100644
index 0000000..0fa8224
--- /dev/null
+++ b/src/smart-pointer.h
@@ -0,0 +1,109 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SMART_POINTER_H_
+#define V8_SMART_POINTER_H_
+
+namespace v8 {
+namespace internal {
+
+
+// A 'scoped array pointer' that calls DeleteArray on its pointer when the
+// destructor is called.
+template<typename T>
+class SmartPointer {
+ public:
+
+  // Default constructor. Construct an empty scoped pointer.
+  inline SmartPointer() : p(NULL) {}
+
+
+  // Construct a scoped pointer from a plain one.
+  explicit inline SmartPointer(T* pointer) : p(pointer) {}
+
+
+  // Copy constructor removes the pointer from the original to avoid double
+  // freeing.
+  inline SmartPointer(const SmartPointer<T>& rhs) : p(rhs.p) {
+    const_cast<SmartPointer<T>&>(rhs).p = NULL;
+  }
+
+
+  // When the destructor of the scoped pointer is executed the plain pointer
+  // is deleted using DeleteArray.  This implies that you must allocate with
+  // NewArray.
+  inline ~SmartPointer() { if (p) DeleteArray(p); }
+
+
+  // You can get the underlying pointer out with the * operator.
+  inline T* operator*() { return p; }
+
+
+  // You can use [n] to index as if it was a plain pointer
+  inline T& operator[](size_t i) {
+    return p[i];
+  }
+
+  // We don't have an implicit conversion to T* since that hinders migration:
+  // you would not be able to change a method from returning a T* to
+  // returning a SmartPointer<T> and then get errors wherever it is used.
+
+
+  // If you want to take out the plain pointer and don't want it automatically
+  // deleted then call Detach().  Afterwards, the smart pointer is empty
+  // (NULL).
+  inline T* Detach() {
+    T* temp = p;
+    p = NULL;
+    return temp;
+  }
+
+
+  // Assignment requires an empty (NULL) SmartPointer as the receiver.  Like
+  // the copy constructor it removes the pointer in the original to avoid
+  // double freeing.
+  inline SmartPointer& operator=(const SmartPointer<T>& rhs) {
+    ASSERT(is_empty());
+    T* tmp = rhs.p;  // swap to handle self-assignment
+    const_cast<SmartPointer<T>&>(rhs).p = NULL;
+    p = tmp;
+    return *this;
+  }
+
+
+  inline bool is_empty() {
+    return p == NULL;
+  }
+
+
+ private:
+  T* p;
+};
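+
+// Typical use, as a sketch: SmartPointer<char> buf(NewArray<char>(length));
+// the array is released with DeleteArray when buf goes out of scope, unless
+// Detach() has been called to take back ownership.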
+
+} }  // namespace v8::internal
+
+#endif  // V8_SMART_POINTER_H_
diff --git a/src/snapshot-common.cc b/src/snapshot-common.cc
new file mode 100644
index 0000000..9c66a50
--- /dev/null
+++ b/src/snapshot-common.cc
@@ -0,0 +1,75 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The common functionality when building with or without snapshots.
+
+#include "v8.h"
+
+#include "api.h"
+#include "serialize.h"
+#include "snapshot.h"
+
+namespace v8 {
+namespace internal {
+
+bool Snapshot::Deserialize(const byte* content, int len) {
+  Deserializer des(content, len);
+  des.GetFlags();
+  return V8::Initialize(&des);
+}
+
+
+bool Snapshot::Initialize(const char* snapshot_file) {
+  if (snapshot_file) {
+    int len;
+    byte* str = ReadBytes(snapshot_file, &len);
+    if (!str) return false;
+    bool result = Deserialize(str, len);
+    DeleteArray(str);
+    return result;
+  } else if (size_ > 0) {
+    return Deserialize(data_, size_);
+  }
+  return false;
+}
+
+
+bool Snapshot::WriteToFile(const char* snapshot_file) {
+  Serializer ser;
+  ser.Serialize();
+  byte* str;
+  int len;
+  ser.Finalize(&str, &len);
+
+  int written = WriteBytes(snapshot_file, str, len);
+
+  DeleteArray(str);
+  return written == len;
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/snapshot-empty.cc b/src/snapshot-empty.cc
new file mode 100644
index 0000000..60ab1e5
--- /dev/null
+++ b/src/snapshot-empty.cc
@@ -0,0 +1,40 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Used for building without snapshots.
+
+#include "v8.h"
+
+#include "snapshot.h"
+
+namespace v8 {
+namespace internal {
+
+const byte Snapshot::data_[] = { 0 };
+int Snapshot::size_ = 0;
+
+} }  // namespace v8::internal
diff --git a/src/snapshot.h b/src/snapshot.h
new file mode 100644
index 0000000..88ba8db
--- /dev/null
+++ b/src/snapshot.h
@@ -0,0 +1,59 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SNAPSHOT_H_
+#define V8_SNAPSHOT_H_
+
+namespace v8 {
+namespace internal {
+
+class Snapshot {
+ public:
+  // Initialize the VM from the given snapshot file. If snapshot_file is
+  // NULL, use the internal snapshot instead. Returns false if no snapshot
+  // could be found.
+  static bool Initialize(const char* snapshot_file = NULL);
+
+  // Returns whether or not the snapshot is enabled.
+  static bool IsEnabled() { return size_ != 0; }
+
+  // Write snapshot to the given file. Returns true if snapshot was written
+  // successfully.
+  static bool WriteToFile(const char* snapshot_file);
+
+ private:
+  static const byte data_[];
+  static int size_;
+
+  static bool Deserialize(const byte* content, int len);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot);
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_SNAPSHOT_H_
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
new file mode 100644
index 0000000..da72497
--- /dev/null
+++ b/src/spaces-inl.h
@@ -0,0 +1,365 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SPACES_INL_H_
+#define V8_SPACES_INL_H_
+
+#include "memory.h"
+#include "spaces.h"
+
+namespace v8 {
+namespace internal {
+
+
+// -----------------------------------------------------------------------------
+// HeapObjectIterator
+
+bool HeapObjectIterator::has_next() {
+  if (cur_addr_ < cur_limit_) {
+    return true;  // common case
+  }
+  ASSERT(cur_addr_ == cur_limit_);
+  return HasNextInNextPage();  // slow path
+}
+
+
+HeapObject* HeapObjectIterator::next() {
+  ASSERT(has_next());
+
+  HeapObject* obj = HeapObject::FromAddress(cur_addr_);
+  int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
+  ASSERT_OBJECT_SIZE(obj_size);
+
+  cur_addr_ += obj_size;
+  ASSERT(cur_addr_ <= cur_limit_);
+
+  return obj;
+}
+
+
+// -----------------------------------------------------------------------------
+// PageIterator
+
+bool PageIterator::has_next() {
+  return prev_page_ != stop_page_;
+}
+
+
+Page* PageIterator::next() {
+  ASSERT(has_next());
+  prev_page_ = (prev_page_ == NULL)
+               ? space_->first_page_
+               : prev_page_->next_page();
+  return prev_page_;
+}
+
+
+// -----------------------------------------------------------------------------
+// Page
+
+Page* Page::next_page() {
+  return MemoryAllocator::GetNextPage(this);
+}
+
+
+Address Page::AllocationTop() {
+  PagedSpace* owner = MemoryAllocator::PageOwner(this);
+  return owner->PageAllocationTop(this);
+}
+
+
+void Page::ClearRSet() {
+  // This method can be called in all rset states.
+  memset(RSetStart(), 0, kRSetEndOffset - kRSetStartOffset);
+}
+
+
+// Given a 32-bit address, separate its bits into:
+// | page address | words (6) | bit offset (5) | pointer alignment (2) |
+// The address of the rset word containing the bit for this word is computed as:
+//    page_address + words * 4
+// For a 64-bit address, if it is:
+// | page address | words(5) | bit offset(5) | pointer alignment (3) |
+// The address of the rset word containing the bit for this word is computed as:
+//    page_address + words * 4 + kRSetOffset.
+// The rset is accessed as 32-bit words, and bit offsets in a 32-bit word,
+// even on the X64 architecture.
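+//
+// Illustrative example (hypothetical numbers, assuming a 32-bit build where
+// kPointerSizeLog2 == 2, kBitsPerInt == 32 and kIntSize == 4): for a word at
+// byte offset 0x1040 within its page, bit_offset is 0x1040 >> 2 == 1040, so
+// the bit lives in rset word 1040 / 32 == 32, i.e. at address
+// page->address() + kRSetOffset + 32 * 4, under bitmask 1 << (1040 % 32),
+// which is 1 << 16.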
+
+Address Page::ComputeRSetBitPosition(Address address, int offset,
+                                     uint32_t* bitmask) {
+  ASSERT(Page::is_rset_in_use());
+
+  Page* page = Page::FromAddress(address);
+  uint32_t bit_offset = ArithmeticShiftRight(page->Offset(address) + offset,
+                                             kPointerSizeLog2);
+  *bitmask = 1 << (bit_offset % kBitsPerInt);
+
+  Address rset_address =
+      page->address() + kRSetOffset + (bit_offset / kBitsPerInt) * kIntSize;
+  // The remembered set address is either in the normal remembered set range
+  // of a page or else we have a large object page.
+  ASSERT((page->RSetStart() <= rset_address && rset_address < page->RSetEnd())
+         || page->IsLargeObjectPage());
+
+  if (rset_address >= page->RSetEnd()) {
+    // We have a large object page, and the remembered set address is actually
+    // past the end of the object.
+
+    // The first part of the remembered set is still located at the start of
+    // the page, but anything after kRSetEndOffset must be relocated to after
+    // the large object, i.e. after
+    //   (page->ObjectAreaStart() + object size)
+    // We do that by adding the difference between the normal RSet's end and
+    // the object's end.
+    ASSERT(HeapObject::FromAddress(address)->IsFixedArray());
+    int fixedarray_length =
+        FixedArray::SizeFor(Memory::int_at(page->ObjectAreaStart()
+                                           + Array::kLengthOffset));
+    rset_address += kObjectStartOffset - kRSetEndOffset + fixedarray_length;
+  }
+  return rset_address;
+}
+
+
+void Page::SetRSet(Address address, int offset) {
+  uint32_t bitmask = 0;
+  Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
+  Memory::uint32_at(rset_address) |= bitmask;
+
+  ASSERT(IsRSetSet(address, offset));
+}
+
+
+// Clears the corresponding remembered set bit for a given address.
+void Page::UnsetRSet(Address address, int offset) {
+  uint32_t bitmask = 0;
+  Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
+  Memory::uint32_at(rset_address) &= ~bitmask;
+
+  ASSERT(!IsRSetSet(address, offset));
+}
+
+
+bool Page::IsRSetSet(Address address, int offset) {
+  uint32_t bitmask = 0;
+  Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
+  return (Memory::uint32_at(rset_address) & bitmask) != 0;
+}
+
+
+// -----------------------------------------------------------------------------
+// MemoryAllocator
+
+bool MemoryAllocator::IsValidChunk(int chunk_id) {
+  if (!IsValidChunkId(chunk_id)) return false;
+
+  ChunkInfo& c = chunks_[chunk_id];
+  return (c.address() != NULL) && (c.size() != 0) && (c.owner() != NULL);
+}
+
+
+bool MemoryAllocator::IsValidChunkId(int chunk_id) {
+  return (0 <= chunk_id) && (chunk_id < max_nof_chunks_);
+}
+
+
+bool MemoryAllocator::IsPageInSpace(Page* p, PagedSpace* space) {
+  ASSERT(p->is_valid());
+
+  int chunk_id = GetChunkId(p);
+  if (!IsValidChunkId(chunk_id)) return false;
+
+  ChunkInfo& c = chunks_[chunk_id];
+  return (c.address() <= p->address()) &&
+         (p->address() < c.address() + c.size()) &&
+         (space == c.owner());
+}
+
+
+Page* MemoryAllocator::GetNextPage(Page* p) {
+  ASSERT(p->is_valid());
+  intptr_t raw_addr = p->opaque_header & ~Page::kPageAlignmentMask;
+  return Page::FromAddress(AddressFrom<Address>(raw_addr));
+}
+
+
+int MemoryAllocator::GetChunkId(Page* p) {
+  ASSERT(p->is_valid());
+  return p->opaque_header & Page::kPageAlignmentMask;
+}
+
+
+void MemoryAllocator::SetNextPage(Page* prev, Page* next) {
+  ASSERT(prev->is_valid());
+  int chunk_id = GetChunkId(prev);
+  ASSERT_PAGE_ALIGNED(next->address());
+  prev->opaque_header = OffsetFrom(next->address()) | chunk_id;
+}
+
+
+PagedSpace* MemoryAllocator::PageOwner(Page* page) {
+  int chunk_id = GetChunkId(page);
+  ASSERT(IsValidChunk(chunk_id));
+  return chunks_[chunk_id].owner();
+}
+
+
+bool MemoryAllocator::InInitialChunk(Address address) {
+  if (initial_chunk_ == NULL) return false;
+
+  Address start = static_cast<Address>(initial_chunk_->address());
+  return (start <= address) && (address < start + initial_chunk_->size());
+}
+
+
+#ifdef ENABLE_HEAP_PROTECTION
+
+void MemoryAllocator::Protect(Address start, size_t size) {
+  OS::Protect(start, size);
+}
+
+
+void MemoryAllocator::Unprotect(Address start,
+                                size_t size,
+                                Executability executable) {
+  OS::Unprotect(start, size, executable);
+}
+
+
+void MemoryAllocator::ProtectChunkFromPage(Page* page) {
+  int id = GetChunkId(page);
+  OS::Protect(chunks_[id].address(), chunks_[id].size());
+}
+
+
+void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
+  int id = GetChunkId(page);
+  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
+                chunks_[id].owner()->executable() == EXECUTABLE);
+}
+
+#endif
+
+
+// --------------------------------------------------------------------------
+// PagedSpace
+
+bool PagedSpace::Contains(Address addr) {
+  Page* p = Page::FromAddress(addr);
+  ASSERT(p->is_valid());
+
+  return MemoryAllocator::IsPageInSpace(p, this);
+}
+
+
+// Try linear allocation in the page of alloc_info's allocation top.  Does
+// not contain slow case logic (e.g., move to the next page or try free list
+// allocation) so it can be used by all the allocation functions and for all
+// the paged spaces.
+HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info,
+                                         int size_in_bytes) {
+  Address current_top = alloc_info->top;
+  Address new_top = current_top + size_in_bytes;
+  if (new_top > alloc_info->limit) return NULL;
+
+  alloc_info->top = new_top;
+  ASSERT(alloc_info->VerifyPagedAllocation());
+  accounting_stats_.AllocateBytes(size_in_bytes);
+  return HeapObject::FromAddress(current_top);
+}
+
+
+// Raw allocation.
+Object* PagedSpace::AllocateRaw(int size_in_bytes) {
+  ASSERT(HasBeenSetup());
+  ASSERT_OBJECT_SIZE(size_in_bytes);
+  HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
+  if (object != NULL) return object;
+
+  object = SlowAllocateRaw(size_in_bytes);
+  if (object != NULL) return object;
+
+  return Failure::RetryAfterGC(size_in_bytes, identity());
+}
+
+
+// Reallocating (and promoting) objects during a compacting collection.
+Object* PagedSpace::MCAllocateRaw(int size_in_bytes) {
+  ASSERT(HasBeenSetup());
+  ASSERT_OBJECT_SIZE(size_in_bytes);
+  HeapObject* object = AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
+  if (object != NULL) return object;
+
+  object = SlowMCAllocateRaw(size_in_bytes);
+  if (object != NULL) return object;
+
+  return Failure::RetryAfterGC(size_in_bytes, identity());
+}
+
+
+// -----------------------------------------------------------------------------
+// LargeObjectChunk
+
+HeapObject* LargeObjectChunk::GetObject() {
+  // Round the chunk address up to the nearest page-aligned address
+  // and return the heap object in that page.
+  Page* page = Page::FromAddress(RoundUp(address(), Page::kPageSize));
+  return HeapObject::FromAddress(page->ObjectAreaStart());
+}
+
+
+// -----------------------------------------------------------------------------
+// LargeObjectSpace
+
+int LargeObjectSpace::ExtraRSetBytesFor(int object_size) {
+  int extra_rset_bits =
+      RoundUp((object_size - Page::kObjectAreaSize) / kPointerSize,
+              kBitsPerInt);
+  return extra_rset_bits / kBitsPerByte;
+}
+
+
+Object* NewSpace::AllocateRawInternal(int size_in_bytes,
+                                      AllocationInfo* alloc_info) {
+  Address new_top = alloc_info->top + size_in_bytes;
+  if (new_top > alloc_info->limit) return Failure::RetryAfterGC(size_in_bytes);
+
+  Object* obj = HeapObject::FromAddress(alloc_info->top);
+  alloc_info->top = new_top;
+#ifdef DEBUG
+  SemiSpace* space =
+      (alloc_info == &allocation_info_) ? &to_space_ : &from_space_;
+  ASSERT(space->low() <= alloc_info->top
+         && alloc_info->top <= space->high()
+         && alloc_info->limit == space->high());
+#endif
+  return obj;
+}
+
+} }  // namespace v8::internal
+
+#endif  // V8_SPACES_INL_H_
diff --git a/src/spaces.cc b/src/spaces.cc
new file mode 100644
index 0000000..43abaa4
--- /dev/null
+++ b/src/spaces.cc
@@ -0,0 +1,2789 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "mark-compact.h"
+#include "platform.h"
+
+namespace v8 {
+namespace internal {
+
+// For contiguous spaces, top should be in the space (or at the end) and limit
+// should be the end of the space.
+#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
+  ASSERT((space).low() <= (info).top                  \
+         && (info).top <= (space).high()              \
+         && (info).limit == (space).high())
+
+
+// ----------------------------------------------------------------------------
+// HeapObjectIterator
+
+HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
+  Initialize(space->bottom(), space->top(), NULL);
+}
+
+
+HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
+                                       HeapObjectCallback size_func) {
+  Initialize(space->bottom(), space->top(), size_func);
+}
+
+
+HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
+  Initialize(start, space->top(), NULL);
+}
+
+
+HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
+                                       HeapObjectCallback size_func) {
+  Initialize(start, space->top(), size_func);
+}
+
+
+void HeapObjectIterator::Initialize(Address cur, Address end,
+                                    HeapObjectCallback size_f) {
+  cur_addr_ = cur;
+  end_addr_ = end;
+  end_page_ = Page::FromAllocationTop(end);
+  size_func_ = size_f;
+  Page* p = Page::FromAllocationTop(cur_addr_);
+  cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();
+
+#ifdef DEBUG
+  Verify();
+#endif
+}
+
+
+bool HeapObjectIterator::HasNextInNextPage() {
+  if (cur_addr_ == end_addr_) return false;
+
+  Page* cur_page = Page::FromAllocationTop(cur_addr_);
+  cur_page = cur_page->next_page();
+  ASSERT(cur_page->is_valid());
+
+  cur_addr_ = cur_page->ObjectAreaStart();
+  cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();
+
+  ASSERT(cur_addr_ < cur_limit_);
+#ifdef DEBUG
+  Verify();
+#endif
+  return true;
+}
+
+
+#ifdef DEBUG
+void HeapObjectIterator::Verify() {
+  Page* p = Page::FromAllocationTop(cur_addr_);
+  ASSERT(p == Page::FromAllocationTop(cur_limit_));
+  ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
+}
+#endif
+
+
+// -----------------------------------------------------------------------------
+// PageIterator
+
+PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
+  prev_page_ = NULL;
+  switch (mode) {
+    case PAGES_IN_USE:
+      stop_page_ = space->AllocationTopPage();
+      break;
+    case PAGES_USED_BY_MC:
+      stop_page_ = space->MCRelocationTopPage();
+      break;
+    case ALL_PAGES:
+#ifdef DEBUG
+      // Verify that the cached last page in the space is actually the
+      // last page.
+      for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
+        if (!p->next_page()->is_valid()) {
+          ASSERT(space->last_page_ == p);
+        }
+      }
+#endif
+      stop_page_ = space->last_page_;
+      break;
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Page
+
+#ifdef DEBUG
+Page::RSetState Page::rset_state_ = Page::IN_USE;
+#endif
+
+// -----------------------------------------------------------------------------
+// CodeRange
+
+List<CodeRange::FreeBlock> CodeRange::free_list_(0);
+List<CodeRange::FreeBlock> CodeRange::allocation_list_(0);
+int CodeRange::current_allocation_block_index_ = 0;
+VirtualMemory* CodeRange::code_range_ = NULL;
+
+
+bool CodeRange::Setup(const size_t requested) {
+  ASSERT(code_range_ == NULL);
+
+  code_range_ = new VirtualMemory(requested);
+  CHECK(code_range_ != NULL);
+  if (!code_range_->IsReserved()) {
+    delete code_range_;
+    code_range_ = NULL;
+    return false;
+  }
+
+  // We are sure that we have mapped a block of requested addresses.
+  ASSERT(code_range_->size() == requested);
+  LOG(NewEvent("CodeRange", code_range_->address(), requested));
+  allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
+  current_allocation_block_index_ = 0;
+  return true;
+}
+
+
+int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
+                                       const FreeBlock* right) {
+  // The entire point of CodeRange is that the difference between two
+  // addresses in the range can be represented as a signed 32-bit int,
+  // so the cast is semantically correct.
+  return static_cast<int>(left->start - right->start);
+}
+
+
+void CodeRange::GetNextAllocationBlock(size_t requested) {
+  for (current_allocation_block_index_++;
+       current_allocation_block_index_ < allocation_list_.length();
+       current_allocation_block_index_++) {
+    if (requested <= allocation_list_[current_allocation_block_index_].size) {
+      return;  // Found a large enough allocation block.
+    }
+  }
+
+  // Sort and merge the free blocks on the free list and the allocation list.
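+  // Illustrative example (hypothetical addresses, not from the original
+  // source): adjacent blocks [0x1000, +0x800) and [0x1800, +0x800) satisfy
+  // free_list_[i].start == merged.start + merged.size below, so they are
+  // coalesced into a single block [0x1000, +0x1000).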
+  free_list_.AddAll(allocation_list_);
+  allocation_list_.Clear();
+  free_list_.Sort(&CompareFreeBlockAddress);
+  for (int i = 0; i < free_list_.length();) {
+    FreeBlock merged = free_list_[i];
+    i++;
+    // Add adjacent free blocks to the current merged block.
+    while (i < free_list_.length() &&
+           free_list_[i].start == merged.start + merged.size) {
+      merged.size += free_list_[i].size;
+      i++;
+    }
+    if (merged.size > 0) {
+      allocation_list_.Add(merged);
+    }
+  }
+  free_list_.Clear();
+
+  for (current_allocation_block_index_ = 0;
+       current_allocation_block_index_ < allocation_list_.length();
+       current_allocation_block_index_++) {
+    if (requested <= allocation_list_[current_allocation_block_index_].size) {
+      return;  // Found a large enough allocation block.
+    }
+  }
+
+  // Code range is full or too fragmented.
+  V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
+}
+
+
+void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
+  ASSERT(current_allocation_block_index_ < allocation_list_.length());
+  if (requested > allocation_list_[current_allocation_block_index_].size) {
+    // Find an allocation block large enough.  This function call may
+    // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
+    GetNextAllocationBlock(requested);
+  }
+  // Commit the requested memory at the start of the current allocation block.
+  *allocated = RoundUp(requested, Page::kPageSize);
+  FreeBlock current = allocation_list_[current_allocation_block_index_];
+  if (*allocated >= current.size - Page::kPageSize) {
+    // Don't leave a small free block, useless for a large object or chunk.
+    *allocated = current.size;
+  }
+  ASSERT(*allocated <= current.size);
+  if (!code_range_->Commit(current.start, *allocated, true)) {
+    *allocated = 0;
+    return NULL;
+  }
+  allocation_list_[current_allocation_block_index_].start += *allocated;
+  allocation_list_[current_allocation_block_index_].size -= *allocated;
+  if (*allocated == current.size) {
+    GetNextAllocationBlock(0);  // This block is used up, get the next one.
+  }
+  return current.start;
+}
+
+
+void CodeRange::FreeRawMemory(void* address, size_t length) {
+  free_list_.Add(FreeBlock(address, length));
+  code_range_->Uncommit(address, length);
+}
+
+
+void CodeRange::TearDown() {
+  delete code_range_;  // Frees all memory in the virtual memory range.
+  code_range_ = NULL;
+  free_list_.Free();
+  allocation_list_.Free();
+}
+
+
+// -----------------------------------------------------------------------------
+// MemoryAllocator
+//
+int MemoryAllocator::capacity_   = 0;
+int MemoryAllocator::size_       = 0;
+
+VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;
+
+// 270 is an estimate based on the static default heap size of a pair of 256K
+// semispaces and a 64M old generation.
+const int kEstimatedNumberOfChunks = 270;
+List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_(
+    kEstimatedNumberOfChunks);
+List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks);
+int MemoryAllocator::max_nof_chunks_ = 0;
+int MemoryAllocator::top_ = 0;
+
+
+void MemoryAllocator::Push(int free_chunk_id) {
+  ASSERT(max_nof_chunks_ > 0);
+  ASSERT(top_ < max_nof_chunks_);
+  free_chunk_ids_[top_++] = free_chunk_id;
+}
+
+
+int MemoryAllocator::Pop() {
+  ASSERT(top_ > 0);
+  return free_chunk_ids_[--top_];
+}
+
+
+bool MemoryAllocator::Setup(int capacity) {
+  capacity_ = RoundUp(capacity, Page::kPageSize);
+
+  // Over-estimate the size of the chunks_ array.  This assumes that old space
+  // always expands in whole-chunk (kChunkSize) units, except for the last
+  // expansion.
+  //
+  // Due to alignment, the allocated space might be one page less than the
+  // required number of pages (kPagesPerChunk) for old spaces.
+  //
+  // Reserve two chunk ids for semispaces, one for map space, one for old
+  // space, and one for code space.
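+  //
+  // Illustrative example (constants hypothetical, not taken from this file):
+  // with a 64 MB capacity, 512 KB chunks and 8 KB pages, the division below
+  // gives 65536 KB / 504 KB == 130, so max_nof_chunks_ would be 130 + 5 = 135.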
+  max_nof_chunks_ = (capacity_ / (kChunkSize - Page::kPageSize)) + 5;
+  if (max_nof_chunks_ > kMaxNofChunks) return false;
+
+  size_ = 0;
+  ChunkInfo info;  // uninitialized element.
+  for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
+    chunks_.Add(info);
+    free_chunk_ids_.Add(i);
+  }
+  top_ = max_nof_chunks_;
+  return true;
+}
+
+
+void MemoryAllocator::TearDown() {
+  for (int i = 0; i < max_nof_chunks_; i++) {
+    if (chunks_[i].address() != NULL) DeleteChunk(i);
+  }
+  chunks_.Clear();
+  free_chunk_ids_.Clear();
+
+  if (initial_chunk_ != NULL) {
+    LOG(DeleteEvent("InitialChunk", initial_chunk_->address()));
+    delete initial_chunk_;
+    initial_chunk_ = NULL;
+  }
+
+  ASSERT(top_ == max_nof_chunks_);  // all chunks are free
+  top_ = 0;
+  capacity_ = 0;
+  size_ = 0;
+  max_nof_chunks_ = 0;
+}
+
+
+void* MemoryAllocator::AllocateRawMemory(const size_t requested,
+                                         size_t* allocated,
+                                         Executability executable) {
+  if (size_ + static_cast<int>(requested) > capacity_) return NULL;
+  void* mem;
+  if (executable == EXECUTABLE && CodeRange::exists()) {
+    mem = CodeRange::AllocateRawMemory(requested, allocated);
+  } else {
+    mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE));
+  }
+  int alloced = *allocated;
+  size_ += alloced;
+  Counters::memory_allocated.Increment(alloced);
+  return mem;
+}
+
+
+void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
+  if (CodeRange::contains(static_cast<Address>(mem))) {
+    CodeRange::FreeRawMemory(mem, length);
+  } else {
+    OS::Free(mem, length);
+  }
+  Counters::memory_allocated.Decrement(length);
+  size_ -= length;
+  ASSERT(size_ >= 0);
+}
+
+
+void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
+  ASSERT(initial_chunk_ == NULL);
+
+  initial_chunk_ = new VirtualMemory(requested);
+  CHECK(initial_chunk_ != NULL);
+  if (!initial_chunk_->IsReserved()) {
+    delete initial_chunk_;
+    initial_chunk_ = NULL;
+    return NULL;
+  }
+
+  // We are sure that we have mapped a block of requested addresses.
+  ASSERT(initial_chunk_->size() == requested);
+  LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested));
+  size_ += requested;
+  return initial_chunk_->address();
+}
+
+
+static int PagesInChunk(Address start, size_t size) {
+  // The first page starts on the first page-aligned address from start onward
+  // and the last page ends on the last page-aligned address before
+  // start+size.  Page::kPageSize is a power of two so we can divide by
+  // shifting.
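+  //
+  // Illustrative example (assuming hypothetical 8 KB pages, i.e.
+  // kPageSizeBits == 13): for start == 0x2100 and size == 0x6000,
+  // RoundDown(0x2100 + 0x6000) == 0x8000 and RoundUp(0x2100) == 0x4000,
+  // giving (0x8000 - 0x4000) >> 13 == 2 whole pages.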
+  return (RoundDown(start + size, Page::kPageSize)
+          - RoundUp(start, Page::kPageSize)) >> Page::kPageSizeBits;
+}
+
+
+Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
+                                     PagedSpace* owner) {
+  if (requested_pages <= 0) return Page::FromAddress(NULL);
+  size_t chunk_size = requested_pages * Page::kPageSize;
+
+  // There is not enough space to guarantee that the desired number of pages
+  // can be allocated.
+  if (size_ + static_cast<int>(chunk_size) > capacity_) {
+    // Request as many pages as we can.
+    chunk_size = capacity_ - size_;
+    requested_pages = chunk_size >> Page::kPageSizeBits;
+
+    if (requested_pages <= 0) return Page::FromAddress(NULL);
+  }
+  void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
+  if (chunk == NULL) return Page::FromAddress(NULL);
+  LOG(NewEvent("PagedChunk", chunk, chunk_size));
+
+  *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
+  if (*allocated_pages == 0) {
+    FreeRawMemory(chunk, chunk_size);
+    LOG(DeleteEvent("PagedChunk", chunk));
+    return Page::FromAddress(NULL);
+  }
+
+  int chunk_id = Pop();
+  chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
+
+  return InitializePagesInChunk(chunk_id, *allocated_pages, owner);
+}
+
+
+Page* MemoryAllocator::CommitPages(Address start, size_t size,
+                                   PagedSpace* owner, int* num_pages) {
+  ASSERT(start != NULL);
+  *num_pages = PagesInChunk(start, size);
+  ASSERT(*num_pages > 0);
+  ASSERT(initial_chunk_ != NULL);
+  ASSERT(InInitialChunk(start));
+  ASSERT(InInitialChunk(start + size - 1));
+  if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
+    return Page::FromAddress(NULL);
+  }
+  Counters::memory_allocated.Increment(size);
+
+  // So long as we correctly overestimated the number of chunks we should not
+  // run out of chunk ids.
+  CHECK(!OutOfChunkIds());
+  int chunk_id = Pop();
+  chunks_[chunk_id].init(start, size, owner);
+  return InitializePagesInChunk(chunk_id, *num_pages, owner);
+}
+
+
+bool MemoryAllocator::CommitBlock(Address start,
+                                  size_t size,
+                                  Executability executable) {
+  ASSERT(start != NULL);
+  ASSERT(size > 0);
+  ASSERT(initial_chunk_ != NULL);
+  ASSERT(InInitialChunk(start));
+  ASSERT(InInitialChunk(start + size - 1));
+
+  if (!initial_chunk_->Commit(start, size, executable)) return false;
+  Counters::memory_allocated.Increment(size);
+  return true;
+}
+
+bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
+  ASSERT(start != NULL);
+  ASSERT(size > 0);
+  ASSERT(initial_chunk_ != NULL);
+  ASSERT(InInitialChunk(start));
+  ASSERT(InInitialChunk(start + size - 1));
+
+  if (!initial_chunk_->Uncommit(start, size)) return false;
+  Counters::memory_allocated.Decrement(size);
+  return true;
+}
+
+Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
+                                              PagedSpace* owner) {
+  ASSERT(IsValidChunk(chunk_id));
+  ASSERT(pages_in_chunk > 0);
+
+  Address chunk_start = chunks_[chunk_id].address();
+
+  Address low = RoundUp(chunk_start, Page::kPageSize);
+
+#ifdef DEBUG
+  size_t chunk_size = chunks_[chunk_id].size();
+  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
+  ASSERT(pages_in_chunk <=
+        ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
+#endif
+
+  Address page_addr = low;
+  for (int i = 0; i < pages_in_chunk; i++) {
+    Page* p = Page::FromAddress(page_addr);
+    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
+    p->is_normal_page = 1;
+    page_addr += Page::kPageSize;
+  }
+
+  // Set the next page of the last page to 0.
+  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
+  last_page->opaque_header = OffsetFrom(0) | chunk_id;
+
+  return Page::FromAddress(low);
+}
+
+
+Page* MemoryAllocator::FreePages(Page* p) {
+  if (!p->is_valid()) return p;
+
+  // Find the first page in the same chunk as 'p'.
+  Page* first_page = FindFirstPageInSameChunk(p);
+  Page* page_to_return = Page::FromAddress(NULL);
+
+  if (p != first_page) {
+    // Find the last page in the same chunk as 'p'.
+    Page* last_page = FindLastPageInSameChunk(p);
+    first_page = GetNextPage(last_page);  // first page in next chunk
+
+    // Set the next_page of last_page to NULL.
+    SetNextPage(last_page, Page::FromAddress(NULL));
+    page_to_return = p;  // return 'p' when exiting
+  }
+
+  while (first_page->is_valid()) {
+    int chunk_id = GetChunkId(first_page);
+    ASSERT(IsValidChunk(chunk_id));
+
+    // Find the first page of the next chunk before deleting this chunk.
+    first_page = GetNextPage(FindLastPageInSameChunk(first_page));
+
+    // Free the current chunk.
+    DeleteChunk(chunk_id);
+  }
+
+  return page_to_return;
+}
+
+
+void MemoryAllocator::DeleteChunk(int chunk_id) {
+  ASSERT(IsValidChunk(chunk_id));
+
+  ChunkInfo& c = chunks_[chunk_id];
+
+  // We cannot free a chunk contained in the initial chunk because it was not
+  // allocated with AllocateRawMemory.  Instead we uncommit the virtual
+  // memory.
+  if (InInitialChunk(c.address())) {
+    // TODO(1240712): VirtualMemory::Uncommit has a return value which
+    // is ignored here.
+    initial_chunk_->Uncommit(c.address(), c.size());
+    Counters::memory_allocated.Decrement(c.size());
+  } else {
+    LOG(DeleteEvent("PagedChunk", c.address()));
+    FreeRawMemory(c.address(), c.size());
+  }
+  c.init(NULL, 0, NULL);
+  Push(chunk_id);
+}
+
+
+Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
+  int chunk_id = GetChunkId(p);
+  ASSERT(IsValidChunk(chunk_id));
+
+  Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
+  return Page::FromAddress(low);
+}
+
+
+Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
+  int chunk_id = GetChunkId(p);
+  ASSERT(IsValidChunk(chunk_id));
+
+  Address chunk_start = chunks_[chunk_id].address();
+  size_t chunk_size = chunks_[chunk_id].size();
+
+  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
+  ASSERT(chunk_start <= p->address() && p->address() < high);
+
+  return Page::FromAddress(high - Page::kPageSize);
+}
+
+
+#ifdef DEBUG
+void MemoryAllocator::ReportStatistics() {
+  float pct = static_cast<float>(capacity_ - size_) / capacity_;
+  PrintF("  capacity: %d, used: %d, available: %%%d\n\n",
+         capacity_, size_, static_cast<int>(pct*100));
+}
+#endif
+
+
+// -----------------------------------------------------------------------------
+// PagedSpace implementation
+
+PagedSpace::PagedSpace(int max_capacity,
+                       AllocationSpace id,
+                       Executability executable)
+    : Space(id, executable) {
+  max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
+                  * Page::kObjectAreaSize;
+  accounting_stats_.Clear();
+
+  allocation_info_.top = NULL;
+  allocation_info_.limit = NULL;
+
+  mc_forwarding_info_.top = NULL;
+  mc_forwarding_info_.limit = NULL;
+}
+
+
+bool PagedSpace::Setup(Address start, size_t size) {
+  if (HasBeenSetup()) return false;
+
+  int num_pages = 0;
+  // Try to use the virtual memory range passed to us.  If it is too small to
+  // contain at least one page, ignore it and allocate instead.
+  int pages_in_chunk = PagesInChunk(start, size);
+  if (pages_in_chunk > 0) {
+    first_page_ = MemoryAllocator::CommitPages(RoundUp(start, Page::kPageSize),
+                                               Page::kPageSize * pages_in_chunk,
+                                               this, &num_pages);
+  } else {
+    int requested_pages = Min(MemoryAllocator::kPagesPerChunk,
+                              max_capacity_ / Page::kObjectAreaSize);
+    first_page_ =
+        MemoryAllocator::AllocatePages(requested_pages, &num_pages, this);
+    if (!first_page_->is_valid()) return false;
+  }
+
+  // We are sure that the first page is valid and that we have at least one
+  // page.
+  ASSERT(first_page_->is_valid());
+  ASSERT(num_pages > 0);
+  accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
+  ASSERT(Capacity() <= max_capacity_);
+
+  // Sequentially initialize remembered sets in the newly allocated
+  // pages and cache the current last page in the space.
+  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
+    p->ClearRSet();
+    last_page_ = p;
+  }
+
+  // Use first_page_ for allocation.
+  SetAllocationInfo(&allocation_info_, first_page_);
+
+  return true;
+}
+
+
+bool PagedSpace::HasBeenSetup() {
+  return (Capacity() > 0);
+}
+
+
+void PagedSpace::TearDown() {
+  first_page_ = MemoryAllocator::FreePages(first_page_);
+  ASSERT(!first_page_->is_valid());
+
+  accounting_stats_.Clear();
+}
+
+
+#ifdef ENABLE_HEAP_PROTECTION
+
+void PagedSpace::Protect() {
+  Page* page = first_page_;
+  while (page->is_valid()) {
+    MemoryAllocator::ProtectChunkFromPage(page);
+    page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
+  }
+}
+
+
+void PagedSpace::Unprotect() {
+  Page* page = first_page_;
+  while (page->is_valid()) {
+    MemoryAllocator::UnprotectChunkFromPage(page);
+    page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
+  }
+}
+
+#endif
+
+
+void PagedSpace::ClearRSet() {
+  PageIterator it(this, PageIterator::ALL_PAGES);
+  while (it.has_next()) {
+    it.next()->ClearRSet();
+  }
+}
+
+
+Object* PagedSpace::FindObject(Address addr) {
+  // Note: this function can only be called before or after mark-compact GC
+  // because it accesses map pointers.
+  ASSERT(!MarkCompactCollector::in_use());
+
+  if (!Contains(addr)) return Failure::Exception();
+
+  Page* p = Page::FromAddress(addr);
+  ASSERT(IsUsed(p));
+  Address cur = p->ObjectAreaStart();
+  Address end = p->AllocationTop();
+  while (cur < end) {
+    HeapObject* obj = HeapObject::FromAddress(cur);
+    Address next = cur + obj->Size();
+    if ((cur <= addr) && (addr < next)) return obj;
+    cur = next;
+  }
+
+  UNREACHABLE();
+  return Failure::Exception();
+}
+
+
+bool PagedSpace::IsUsed(Page* page) {
+  PageIterator it(this, PageIterator::PAGES_IN_USE);
+  while (it.has_next()) {
+    if (page == it.next()) return true;
+  }
+  return false;
+}
+
+
+void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
+  alloc_info->top = p->ObjectAreaStart();
+  alloc_info->limit = p->ObjectAreaEnd();
+  ASSERT(alloc_info->VerifyPagedAllocation());
+}
+
+
+void PagedSpace::MCResetRelocationInfo() {
+  // Set page indexes.
+  int i = 0;
+  PageIterator it(this, PageIterator::ALL_PAGES);
+  while (it.has_next()) {
+    Page* p = it.next();
+    p->mc_page_index = i++;
+  }
+
+  // Set mc_forwarding_info_ to the first page in the space.
+  SetAllocationInfo(&mc_forwarding_info_, first_page_);
+  // All the bytes in the space are 'available'.  We will rediscover
+  // allocated and wasted bytes during GC.
+  accounting_stats_.Reset();
+}
+
+
+int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
+#ifdef DEBUG
+  // The Contains function considers an address at the beginning of a page
+  // to be in that page; MCSpaceOffsetForAddress considers it to be in the
+  // previous page.
+  if (Page::IsAlignedToPageSize(addr)) {
+    ASSERT(Contains(addr - kPointerSize));
+  } else {
+    ASSERT(Contains(addr));
+  }
+#endif
+
+  // If addr is at the end of a page, it belongs to the previous page.
+  Page* p = Page::IsAlignedToPageSize(addr)
+            ? Page::FromAllocationTop(addr)
+            : Page::FromAddress(addr);
+  int index = p->mc_page_index;
+  return (index * Page::kPageSize) + p->Offset(addr);
+}
+
+
+// Slow case for reallocating and promoting objects during a compacting
+// collection.  This function is not space-specific.
+HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
+  Page* current_page = TopPageOf(mc_forwarding_info_);
+  if (!current_page->next_page()->is_valid()) {
+    if (!Expand(current_page)) {
+      return NULL;
+    }
+  }
+
+  // There are surely more pages in the space now.
+  ASSERT(current_page->next_page()->is_valid());
+  // We do not add the top of page block for current page to the space's
+  // free list---the block may contain live objects so we cannot write
+  // bookkeeping information to it.  Instead, we will recover top of page
+  // blocks when we move objects to their new locations.
+  //
+  // We do however write the allocation pointer to the page.  The encoding
+  // of forwarding addresses is as an offset in terms of live bytes, so we
+  // need quick access to the allocation top of each page to decode
+  // forwarding addresses.
+  current_page->mc_relocation_top = mc_forwarding_info_.top;
+  SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
+  return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
+}
+
+
+bool PagedSpace::Expand(Page* last_page) {
+  ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
+  ASSERT(Capacity() % Page::kObjectAreaSize == 0);
+
+  if (Capacity() == max_capacity_) return false;
+
+  ASSERT(Capacity() < max_capacity_);
+  // Last page must be valid and its next page is invalid.
+  ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());
+
+  int available_pages = (max_capacity_ - Capacity()) / Page::kObjectAreaSize;
+  if (available_pages <= 0) return false;
+
+  int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
+  Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this);
+  if (!p->is_valid()) return false;
+
+  accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
+  ASSERT(Capacity() <= max_capacity_);
+
+  MemoryAllocator::SetNextPage(last_page, p);
+
+  // Sequentially clear the remembered set of new pages and cache the
+  // new last page in the space.
+  while (p->is_valid()) {
+    p->ClearRSet();
+    last_page_ = p;
+    p = p->next_page();
+  }
+
+  return true;
+}
+
+
+#ifdef DEBUG
+int PagedSpace::CountTotalPages() {
+  int count = 0;
+  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
+    count++;
+  }
+  return count;
+}
+#endif
+
+
+void PagedSpace::Shrink() {
+  // Release half of free pages.
+  Page* top_page = AllocationTopPage();
+  ASSERT(top_page->is_valid());
+
+  // Count the number of pages we would like to free.
+  int pages_to_free = 0;
+  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
+    pages_to_free++;
+  }
+
+  // Free pages after top_page.
+  Page* p = MemoryAllocator::FreePages(top_page->next_page());
+  MemoryAllocator::SetNextPage(top_page, p);
+
+  // Find out how many pages we failed to free and update last_page_.
+  // Please note pages can only be freed in whole chunks.
+  last_page_ = top_page;
+  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
+    pages_to_free--;
+    last_page_ = p;
+  }
+
+  accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
+  ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
+}
+
+
+bool PagedSpace::EnsureCapacity(int capacity) {
+  if (Capacity() >= capacity) return true;
+
+  // Start from the allocation top and loop to the last page in the space.
+  Page* last_page = AllocationTopPage();
+  Page* next_page = last_page->next_page();
+  while (next_page->is_valid()) {
+    last_page = MemoryAllocator::FindLastPageInSameChunk(next_page);
+    next_page = last_page->next_page();
+  }
+
+  // Expand the space until it has the required capacity or expansion fails.
+  do {
+    if (!Expand(last_page)) return false;
+    ASSERT(last_page->next_page()->is_valid());
+    last_page =
+        MemoryAllocator::FindLastPageInSameChunk(last_page->next_page());
+  } while (Capacity() < capacity);
+
+  return true;
+}
+
+
+#ifdef DEBUG
+void PagedSpace::Print() { }
+#endif
+
+
+#ifdef DEBUG
+// We do not assume that the PageIterator works, because it depends on the
+// invariants we are checking during verification.
+void PagedSpace::Verify(ObjectVisitor* visitor) {
+  // The allocation pointer should be valid, and it should be in a page in the
+  // space.
+  ASSERT(allocation_info_.VerifyPagedAllocation());
+  Page* top_page = Page::FromAllocationTop(allocation_info_.top);
+  ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));
+
+  // Loop over all the pages.
+  bool above_allocation_top = false;
+  Page* current_page = first_page_;
+  while (current_page->is_valid()) {
+    if (above_allocation_top) {
+      // We don't care what's above the allocation top.
+    } else {
+      // Unless this is the last page in the space containing allocated
+      // objects, the allocation top should be at a constant offset from the
+      // object area end.
+      Address top = current_page->AllocationTop();
+      if (current_page == top_page) {
+        ASSERT(top == allocation_info_.top);
+        // The next page will be above the allocation top.
+        above_allocation_top = true;
+      } else {
+        ASSERT(top == current_page->ObjectAreaEnd() - page_extra_);
+      }
+
+      // It should be packed with objects from the bottom to the top.
+      Address current = current_page->ObjectAreaStart();
+      while (current < top) {
+        HeapObject* object = HeapObject::FromAddress(current);
+
+        // The first word should be a map, and we expect all map pointers to
+        // be in map space.
+        Map* map = object->map();
+        ASSERT(map->IsMap());
+        ASSERT(Heap::map_space()->Contains(map));
+
+        // Perform space-specific object verification.
+        VerifyObject(object);
+
+        // The object itself should look OK.
+        object->Verify();
+
+        // All the interior pointers should be contained in the heap and
+        // have their remembered set bits set if required as determined
+        // by the visitor.
+        int size = object->Size();
+        object->IterateBody(map->instance_type(), size, visitor);
+
+        current += size;
+      }
+
+      // The allocation pointer should not be in the middle of an object.
+      ASSERT(current == top);
+    }
+
+    current_page = current_page->next_page();
+  }
+}
+#endif
+
+
+// -----------------------------------------------------------------------------
+// NewSpace implementation
+
+
+bool NewSpace::Setup(Address start, int size) {
+  // Set up the new space based on the preallocated memory block defined by
+  // start and size. The provided space is divided into two semi-spaces.
+  // To support fast containment testing in the new space, the size of
+  // this chunk must be a power of two and it must be aligned to its size.
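+  //
+  // Illustrative example (size chosen arbitrarily): for a 2 MB chunk aligned
+  // to 2 MB, address_mask_ below is ~(2 MB - 1), so an address is in new
+  // space iff (addr & address_mask_) == start_, and a tagged pointer is in
+  // new space iff (ptr & object_mask_) == object_expected_, i.e. a single
+  // mask-and-compare instead of two bounds checks.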
+  int initial_semispace_capacity = Heap::InitialSemiSpaceSize();
+  int maximum_semispace_capacity = Heap::SemiSpaceSize();
+
+  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
+  ASSERT(IsPowerOf2(maximum_semispace_capacity));
+
+  // Allocate and setup the histogram arrays if necessary.
+#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
+  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
+
+#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
+                       promoted_histogram_[name].set_name(#name);
+  INSTANCE_TYPE_LIST(SET_NAME)
+#undef SET_NAME
+#endif
+
+  ASSERT(size == 2 * maximum_semispace_capacity);
+  ASSERT(IsAddressAligned(start, size, 0));
+
+  if (!to_space_.Setup(start,
+                       initial_semispace_capacity,
+                       maximum_semispace_capacity)) {
+    return false;
+  }
+  if (!from_space_.Setup(start + maximum_semispace_capacity,
+                         initial_semispace_capacity,
+                         maximum_semispace_capacity)) {
+    return false;
+  }
+
+  start_ = start;
+  address_mask_ = ~(size - 1);
+  object_mask_ = address_mask_ | kHeapObjectTag;
+  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
+
+  allocation_info_.top = to_space_.low();
+  allocation_info_.limit = to_space_.high();
+  mc_forwarding_info_.top = NULL;
+  mc_forwarding_info_.limit = NULL;
+
+  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+  return true;
+}
+
+
+void NewSpace::TearDown() {
+#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+  if (allocated_histogram_) {
+    DeleteArray(allocated_histogram_);
+    allocated_histogram_ = NULL;
+  }
+  if (promoted_histogram_) {
+    DeleteArray(promoted_histogram_);
+    promoted_histogram_ = NULL;
+  }
+#endif
+
+  start_ = NULL;
+  allocation_info_.top = NULL;
+  allocation_info_.limit = NULL;
+  mc_forwarding_info_.top = NULL;
+  mc_forwarding_info_.limit = NULL;
+
+  to_space_.TearDown();
+  from_space_.TearDown();
+}
+
+
+#ifdef ENABLE_HEAP_PROTECTION
+
+void NewSpace::Protect() {
+  MemoryAllocator::Protect(ToSpaceLow(), Capacity());
+  MemoryAllocator::Protect(FromSpaceLow(), Capacity());
+}
+
+
+void NewSpace::Unprotect() {
+  MemoryAllocator::Unprotect(ToSpaceLow(), Capacity(),
+                             to_space_.executable());
+  MemoryAllocator::Unprotect(FromSpaceLow(), Capacity(),
+                             from_space_.executable());
+}
+
+#endif
+
+
+void NewSpace::Flip() {
+  SemiSpace tmp = from_space_;
+  from_space_ = to_space_;
+  to_space_ = tmp;
+}
+
+
+void NewSpace::Grow() {
+  ASSERT(Capacity() < MaximumCapacity());
+  if (to_space_.Grow()) {
+    // Only grow from space if we managed to grow to space.
+    if (!from_space_.Grow()) {
+      // If we managed to grow to space but couldn't grow from space,
+      // attempt to shrink to space.
+      if (!to_space_.ShrinkTo(from_space_.Capacity())) {
+        // We are in an inconsistent state because we could not
+        // commit/uncommit memory from new space.
+        V8::FatalProcessOutOfMemory("Failed to grow new space.");
+      }
+    }
+  }
+  allocation_info_.limit = to_space_.high();
+  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+
+void NewSpace::Shrink() {
+  int new_capacity = Max(InitialCapacity(), 2 * Size());
+  int rounded_new_capacity = RoundUp(new_capacity, OS::AllocateAlignment());
+  if (rounded_new_capacity < Capacity() &&
+      to_space_.ShrinkTo(rounded_new_capacity))  {
+    // Only shrink from space if we managed to shrink to space.
+    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
+      // If we managed to shrink to space but couldn't shrink from
+      // space, attempt to grow to space again.
+      if (!to_space_.GrowTo(from_space_.Capacity())) {
+        // We are in an inconsistent state because we could not
+        // commit/uncommit memory from new space.
+        V8::FatalProcessOutOfMemory("Failed to shrink new space.");
+      }
+    }
+  }
+  allocation_info_.limit = to_space_.high();
+  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+
+void NewSpace::ResetAllocationInfo() {
+  allocation_info_.top = to_space_.low();
+  allocation_info_.limit = to_space_.high();
+  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+
+void NewSpace::MCResetRelocationInfo() {
+  mc_forwarding_info_.top = from_space_.low();
+  mc_forwarding_info_.limit = from_space_.high();
+  ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
+}
+
+
+void NewSpace::MCCommitRelocationInfo() {
+  // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
+  // valid allocation info for the to space.
+  allocation_info_.top = mc_forwarding_info_.top;
+  allocation_info_.limit = to_space_.high();
+  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+
+#ifdef DEBUG
+// We do not use the SemispaceIterator because verification doesn't assume
+// that it works (it depends on the invariants we are checking).
+void NewSpace::Verify() {
+  // The allocation pointer should be in the space or at the very end.
+  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+
+  // There should be objects packed in from the low address up to the
+  // allocation pointer.
+  Address current = to_space_.low();
+  while (current < top()) {
+    HeapObject* object = HeapObject::FromAddress(current);
+
+    // The first word should be a map, and we expect all map pointers to
+    // be in map space.
+    Map* map = object->map();
+    ASSERT(map->IsMap());
+    ASSERT(Heap::map_space()->Contains(map));
+
+    // The object should not be code or a map.
+    ASSERT(!object->IsMap());
+    ASSERT(!object->IsCode());
+
+    // The object itself should look OK.
+    object->Verify();
+
+    // All the interior pointers should be contained in the heap.
+    VerifyPointersVisitor visitor;
+    int size = object->Size();
+    object->IterateBody(map->instance_type(), size, &visitor);
+
+    current += size;
+  }
+
+  // The allocation pointer should not be in the middle of an object.
+  ASSERT(current == top());
+}
+#endif
+
+
+bool SemiSpace::Commit() {
+  ASSERT(!is_committed());
+  if (!MemoryAllocator::CommitBlock(start_, capacity_, executable())) {
+    return false;
+  }
+  committed_ = true;
+  return true;
+}
+
+
+bool SemiSpace::Uncommit() {
+  ASSERT(is_committed());
+  if (!MemoryAllocator::UncommitBlock(start_, capacity_)) {
+    return false;
+  }
+  committed_ = false;
+  return true;
+}
+
+
+// -----------------------------------------------------------------------------
+// SemiSpace implementation
+
+bool SemiSpace::Setup(Address start,
+                      int initial_capacity,
+                      int maximum_capacity) {
+  // Creates a space in the young generation. The constructor does not
+  // allocate memory from the OS.  A SemiSpace is given a contiguous chunk of
+  // memory of size 'capacity' when set up, and does not grow or shrink
+  // otherwise.  In the mark-compact collector, the memory region of the from
+  // space is used as the marking stack. It requires contiguous memory
+  // addresses.
+  initial_capacity_ = initial_capacity;
+  capacity_ = initial_capacity;
+  maximum_capacity_ = maximum_capacity;
+  committed_ = false;
+
+  start_ = start;
+  address_mask_ = ~(maximum_capacity - 1);
+  object_mask_ = address_mask_ | kHeapObjectTag;
+  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
+  age_mark_ = start_;
+
+  return Commit();
+}
+
+
+void SemiSpace::TearDown() {
+  start_ = NULL;
+  capacity_ = 0;
+}
+
+
+bool SemiSpace::Grow() {
+  // Double the semispace size but only up to maximum capacity.
+  int maximum_extra = maximum_capacity_ - capacity_;
+  int extra = Min(RoundUp(capacity_, OS::AllocateAlignment()),
+                  maximum_extra);
+  if (!MemoryAllocator::CommitBlock(high(), extra, executable())) {
+    return false;
+  }
+  capacity_ += extra;
+  return true;
+}
+
+
+bool SemiSpace::GrowTo(int new_capacity) {
+  ASSERT(new_capacity <= maximum_capacity_);
+  ASSERT(new_capacity > capacity_);
+  size_t delta = new_capacity - capacity_;
+  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
+  if (!MemoryAllocator::CommitBlock(high(), delta, executable())) {
+    return false;
+  }
+  capacity_ = new_capacity;
+  return true;
+}
+
+
+bool SemiSpace::ShrinkTo(int new_capacity) {
+  ASSERT(new_capacity >= initial_capacity_);
+  ASSERT(new_capacity < capacity_);
+  size_t delta = capacity_ - new_capacity;
+  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
+  if (!MemoryAllocator::UncommitBlock(high() - delta, delta)) {
+    return false;
+  }
+  capacity_ = new_capacity;
+  return true;
+}
+
+
+#ifdef DEBUG
+void SemiSpace::Print() { }
+
+
+void SemiSpace::Verify() { }
+#endif
+
+
+// -----------------------------------------------------------------------------
+// SemiSpaceIterator implementation.
+SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
+  Initialize(space, space->bottom(), space->top(), NULL);
+}
+
+
+SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
+                                     HeapObjectCallback size_func) {
+  Initialize(space, space->bottom(), space->top(), size_func);
+}
+
+
+SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
+  Initialize(space, start, space->top(), NULL);
+}
+
+
+void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
+                                   Address end,
+                                   HeapObjectCallback size_func) {
+  ASSERT(space->ToSpaceContains(start));
+  ASSERT(space->ToSpaceLow() <= end
+         && end <= space->ToSpaceHigh());
+  space_ = &space->to_space_;
+  current_ = start;
+  limit_ = end;
+  size_func_ = size_func;
+}
+
+
+#ifdef DEBUG
+// A static array of histogram info for each type.
+static HistogramInfo heap_histograms[LAST_TYPE+1];
+static JSObject::SpillInformation js_spill_information;
+
+// heap_histograms is shared; always clear it before using it.
+static void ClearHistograms() {
+  // We reset the name each time, though it hasn't changed.
+#define DEF_TYPE_NAME(name) heap_histograms[name].set_name(#name);
+  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
+#undef DEF_TYPE_NAME
+
+#define CLEAR_HISTOGRAM(name) heap_histograms[name].clear();
+  INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
+#undef CLEAR_HISTOGRAM
+
+  js_spill_information.Clear();
+}
+
+
+static int code_kind_statistics[Code::NUMBER_OF_KINDS];
+
+
+static void ClearCodeKindStatistics() {
+  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
+    code_kind_statistics[i] = 0;
+  }
+}
+
+
+static void ReportCodeKindStatistics() {
+  const char* table[Code::NUMBER_OF_KINDS];
+
+#define CASE(name)                            \
+  case Code::name: table[Code::name] = #name; \
+  break
+
+  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
+    switch (static_cast<Code::Kind>(i)) {
+      CASE(FUNCTION);
+      CASE(STUB);
+      CASE(BUILTIN);
+      CASE(LOAD_IC);
+      CASE(KEYED_LOAD_IC);
+      CASE(STORE_IC);
+      CASE(KEYED_STORE_IC);
+      CASE(CALL_IC);
+    }
+  }
+
+#undef CASE
+
+  PrintF("\n   Code kind histograms: \n");
+  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
+    if (code_kind_statistics[i] > 0) {
+      PrintF("     %-20s: %10d bytes\n", table[i], code_kind_statistics[i]);
+    }
+  }
+  PrintF("\n");
+}
+
+
+static int CollectHistogramInfo(HeapObject* obj) {
+  InstanceType type = obj->map()->instance_type();
+  ASSERT(0 <= type && type <= LAST_TYPE);
+  ASSERT(heap_histograms[type].name() != NULL);
+  heap_histograms[type].increment_number(1);
+  heap_histograms[type].increment_bytes(obj->Size());
+
+  if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
+    JSObject::cast(obj)->IncrementSpillStatistics(&js_spill_information);
+  }
+
+  return obj->Size();
+}
+
+
+static void ReportHistogram(bool print_spill) {
+  PrintF("\n  Object Histogram:\n");
+  for (int i = 0; i <= LAST_TYPE; i++) {
+    if (heap_histograms[i].number() > 0) {
+      PrintF("    %-33s%10d (%10d bytes)\n",
+             heap_histograms[i].name(),
+             heap_histograms[i].number(),
+             heap_histograms[i].bytes());
+    }
+  }
+  PrintF("\n");
+
+  // Summarize string types.
+  int string_number = 0;
+  int string_bytes = 0;
+#define INCREMENT(type, size, name, camel_name)      \
+    string_number += heap_histograms[type].number(); \
+    string_bytes += heap_histograms[type].bytes();
+  STRING_TYPE_LIST(INCREMENT)
+#undef INCREMENT
+  if (string_number > 0) {
+    PrintF("    %-33s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
+           string_bytes);
+  }
+
+  if (FLAG_collect_heap_spill_statistics && print_spill) {
+    js_spill_information.Print();
+  }
+}
+#endif  // DEBUG
+
+
+// Support for statistics gathering for --heap-stats and --log-gc.
+#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+void NewSpace::ClearHistograms() {
+  for (int i = 0; i <= LAST_TYPE; i++) {
+    allocated_histogram_[i].clear();
+    promoted_histogram_[i].clear();
+  }
+}
+
+// Because the copying collector does not touch garbage objects, we iterate
+// the new space before a collection to get a histogram of allocated objects.
+// This only happens (1) when compiled with DEBUG and the --heap-stats flag is
+// set, or (2) when compiled with ENABLE_LOGGING_AND_PROFILING and the --log-gc
+// flag is set.
+void NewSpace::CollectStatistics() {
+  ClearHistograms();
+  SemiSpaceIterator it(this);
+  while (it.has_next()) RecordAllocation(it.next());
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+static void DoReportStatistics(HistogramInfo* info, const char* description) {
+  LOG(HeapSampleBeginEvent("NewSpace", description));
+  // Lump all the string types together.
+  int string_number = 0;
+  int string_bytes = 0;
+#define INCREMENT(type, size, name, camel_name)       \
+    string_number += info[type].number();             \
+    string_bytes += info[type].bytes();
+  STRING_TYPE_LIST(INCREMENT)
+#undef INCREMENT
+  if (string_number > 0) {
+    LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
+  }
+
+  // Then do the other types.
+  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
+    if (info[i].number() > 0) {
+      LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
+                              info[i].bytes()));
+    }
+  }
+  LOG(HeapSampleEndEvent("NewSpace", description));
+}
+#endif  // ENABLE_LOGGING_AND_PROFILING
+
+
+void NewSpace::ReportStatistics() {
+#ifdef DEBUG
+  if (FLAG_heap_stats) {
+    float pct = static_cast<float>(Available()) / Capacity();
+    PrintF("  capacity: %d, available: %d, %%%d\n",
+           Capacity(), Available(), static_cast<int>(pct*100));
+    PrintF("\n  Object Histogram:\n");
+    for (int i = 0; i <= LAST_TYPE; i++) {
+      if (allocated_histogram_[i].number() > 0) {
+        PrintF("    %-33s%10d (%10d bytes)\n",
+               allocated_histogram_[i].name(),
+               allocated_histogram_[i].number(),
+               allocated_histogram_[i].bytes());
+      }
+    }
+    PrintF("\n");
+  }
+#endif  // DEBUG
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (FLAG_log_gc) {
+    DoReportStatistics(allocated_histogram_, "allocated");
+    DoReportStatistics(promoted_histogram_, "promoted");
+  }
+#endif  // ENABLE_LOGGING_AND_PROFILING
+}
+
+
+void NewSpace::RecordAllocation(HeapObject* obj) {
+  InstanceType type = obj->map()->instance_type();
+  ASSERT(0 <= type && type <= LAST_TYPE);
+  allocated_histogram_[type].increment_number(1);
+  allocated_histogram_[type].increment_bytes(obj->Size());
+}
+
+
+void NewSpace::RecordPromotion(HeapObject* obj) {
+  InstanceType type = obj->map()->instance_type();
+  ASSERT(0 <= type && type <= LAST_TYPE);
+  promoted_histogram_[type].increment_number(1);
+  promoted_histogram_[type].increment_bytes(obj->Size());
+}
+#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+
+
+// -----------------------------------------------------------------------------
+// Free lists for old object spaces implementation
+
+void FreeListNode::set_size(int size_in_bytes) {
+  ASSERT(size_in_bytes > 0);
+  ASSERT(IsAligned(size_in_bytes, kPointerSize));
+
+  // We write a map and possibly size information to the block.  If the block
+  // is big enough to be a ByteArray with at least one extra word (the next
+  // pointer), we set its map to be the byte array map and its size to an
+  // appropriate array length for the desired size from HeapObject::Size().
+  // If the block is too small (e.g., one or two words) to hold both a size
+  // field and a next pointer, we give it a filler map that gives it the
+  // correct size.
+  if (size_in_bytes > ByteArray::kAlignedSize) {
+    set_map(Heap::raw_unchecked_byte_array_map());
+    ByteArray::cast(this)->set_length(ByteArray::LengthFor(size_in_bytes));
+  } else if (size_in_bytes == kPointerSize) {
+    set_map(Heap::raw_unchecked_one_pointer_filler_map());
+  } else if (size_in_bytes == 2 * kPointerSize) {
+    set_map(Heap::raw_unchecked_two_pointer_filler_map());
+  } else {
+    UNREACHABLE();
+  }
+  ASSERT(Size() == size_in_bytes);
+}
+
+
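+// For blocks shaped as byte arrays the next-block pointer is stored at
+// kNextOffset; for two-pointer filler blocks it immediately follows the map
+// word.  One-pointer filler blocks are too small to be linked into a list.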
+Address FreeListNode::next() {
+  ASSERT(map() == Heap::raw_unchecked_byte_array_map() ||
+         map() == Heap::raw_unchecked_two_pointer_filler_map());
+  if (map() == Heap::raw_unchecked_byte_array_map()) {
+    ASSERT(Size() >= kNextOffset + kPointerSize);
+    return Memory::Address_at(address() + kNextOffset);
+  } else {
+    return Memory::Address_at(address() + kPointerSize);
+  }
+}
+
+
+void FreeListNode::set_next(Address next) {
+  ASSERT(map() == Heap::raw_unchecked_byte_array_map() ||
+         map() == Heap::raw_unchecked_two_pointer_filler_map());
+  if (map() == Heap::raw_unchecked_byte_array_map()) {
+    ASSERT(Size() >= kNextOffset + kPointerSize);
+    Memory::Address_at(address() + kNextOffset) = next;
+  } else {
+    Memory::Address_at(address() + kPointerSize) = next;
+  }
+}
+
+
+OldSpaceFreeList::OldSpaceFreeList(AllocationSpace owner) : owner_(owner) {
+  Reset();
+}
+
+
+void OldSpaceFreeList::Reset() {
+  available_ = 0;
+  for (int i = 0; i < kFreeListsLength; i++) {
+    free_[i].head_node_ = NULL;
+  }
+  needs_rebuild_ = false;
+  finger_ = kHead;
+  free_[kHead].next_size_ = kEnd;
+}
+
+
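+// Rebuild the singly linked list of non-empty size classes, threaded through
+// the next_size_ fields, after blocks have been added to the exact lists.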
+void OldSpaceFreeList::RebuildSizeList() {
+  ASSERT(needs_rebuild_);
+  int cur = kHead;
+  for (int i = cur + 1; i < kFreeListsLength; i++) {
+    if (free_[i].head_node_ != NULL) {
+      free_[cur].next_size_ = i;
+      cur = i;
+    }
+  }
+  free_[cur].next_size_ = kEnd;
+  needs_rebuild_ = false;
+}
+
+
+int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
+#ifdef DEBUG
+  for (int i = 0; i < size_in_bytes; i += kPointerSize) {
+    Memory::Address_at(start + i) = kZapValue;
+  }
+#endif
+  FreeListNode* node = FreeListNode::FromAddress(start);
+  node->set_size(size_in_bytes);
+
+  // We don't use the freelists in compacting mode.  This makes it more like a
+  // GC that only has mark-sweep-compact and doesn't have a mark-sweep
+  // collector.
+  if (FLAG_always_compact) {
+    return size_in_bytes;
+  }
+
+  // Early return to drop too-small blocks on the floor (one or two word
+  // blocks cannot hold a map pointer, a size field, and a pointer to the
+  // next block in the free list).
+  if (size_in_bytes < kMinBlockSize) {
+    return size_in_bytes;
+  }
+
+  // Insert other blocks at the head of an exact free list.
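+  // The exact free lists are indexed by block size in words.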
+  int index = size_in_bytes >> kPointerSizeLog2;
+  node->set_next(free_[index].head_node_);
+  free_[index].head_node_ = node->address();
+  available_ += size_in_bytes;
+  needs_rebuild_ = true;
+  return 0;
+}
+
+
+Object* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
+  ASSERT(0 < size_in_bytes);
+  ASSERT(size_in_bytes <= kMaxBlockSize);
+  ASSERT(IsAligned(size_in_bytes, kPointerSize));
+
+  if (needs_rebuild_) RebuildSizeList();
+  int index = size_in_bytes >> kPointerSizeLog2;
+  // Check for a perfect fit.
+  if (free_[index].head_node_ != NULL) {
+    FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
+    // If this was the last block of its size, remove the size.
+    if ((free_[index].head_node_ = node->next()) == NULL) RemoveSize(index);
+    available_ -= size_in_bytes;
+    *wasted_bytes = 0;
+    ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
+    return node;
+  }
+  // Search the size list for the best fit.
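+  // finger_ caches a position in the size list so the search need not always
+  // start from kHead.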
+  int prev = finger_ < index ? finger_ : kHead;
+  int cur = FindSize(index, &prev);
+  ASSERT(index < cur);
+  if (cur == kEnd) {
+    // No large enough size in list.
+    *wasted_bytes = 0;
+    return Failure::RetryAfterGC(size_in_bytes, owner_);
+  }
+  ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
+  int rem = cur - index;
+  int rem_bytes = rem << kPointerSizeLog2;
+  FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
+  ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
+  FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
+                                                     size_in_bytes);
+  // Distinguish the cases prev < rem < cur and rem <= prev < cur
+  // to avoid many redundant tests and calls to Insert/RemoveSize.
+  if (prev < rem) {
+    // Simple case: insert rem between prev and cur.
+    finger_ = prev;
+    free_[prev].next_size_ = rem;
+    // If this was the last block of size cur, remove the size.
+    if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
+      free_[rem].next_size_ = free_[cur].next_size_;
+    } else {
+      free_[rem].next_size_ = cur;
+    }
+    // Add the remainder block.
+    rem_node->set_size(rem_bytes);
+    rem_node->set_next(free_[rem].head_node_);
+    free_[rem].head_node_ = rem_node->address();
+  } else {
+    // If this was the last block of size cur, remove the size.
+    if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
+      finger_ = prev;
+      free_[prev].next_size_ = free_[cur].next_size_;
+    }
+    if (rem_bytes < kMinBlockSize) {
+      // Too-small remainder is wasted.
+      rem_node->set_size(rem_bytes);
+      available_ -= size_in_bytes + rem_bytes;
+      *wasted_bytes = rem_bytes;
+      return cur_node;
+    }
+    // Add the remainder block and, if needed, insert its size.
+    rem_node->set_size(rem_bytes);
+    rem_node->set_next(free_[rem].head_node_);
+    free_[rem].head_node_ = rem_node->address();
+    if (rem_node->next() == NULL) InsertSize(rem);
+  }
+  available_ -= size_in_bytes;
+  *wasted_bytes = 0;
+  return cur_node;
+}
+
+
+#ifdef DEBUG
+bool OldSpaceFreeList::Contains(FreeListNode* node) {
+  for (int i = 0; i < kFreeListsLength; i++) {
+    Address cur_addr = free_[i].head_node_;
+    while (cur_addr != NULL) {
+      FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
+      if (cur_node == node) return true;
+      cur_addr = cur_node->next();
+    }
+  }
+  return false;
+}
+#endif
+
+
+FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size)
+    : owner_(owner), object_size_(object_size) {
+  Reset();
+}
+
+
+void FixedSizeFreeList::Reset() {
+  available_ = 0;
+  head_ = NULL;
+}
+
+
+void FixedSizeFreeList::Free(Address start) {
+#ifdef DEBUG
+  for (int i = 0; i < object_size_; i += kPointerSize) {
+    Memory::Address_at(start + i) = kZapValue;
+  }
+#endif
+  ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
+  FreeListNode* node = FreeListNode::FromAddress(start);
+  node->set_size(object_size_);
+  node->set_next(head_);
+  head_ = node->address();
+  available_ += object_size_;
+}
+
+
+Object* FixedSizeFreeList::Allocate() {
+  if (head_ == NULL) {
+    return Failure::RetryAfterGC(object_size_, owner_);
+  }
+
+  ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
+  FreeListNode* node = FreeListNode::FromAddress(head_);
+  head_ = node->next();
+  available_ -= object_size_;
+  return node;
+}
+
+
+// -----------------------------------------------------------------------------
+// OldSpace implementation
+
+void OldSpace::PrepareForMarkCompact(bool will_compact) {
+  if (will_compact) {
+    // Reset relocation info.  During a compacting collection, everything in
+    // the space is considered 'available' and we will rediscover live data
+    // and waste during the collection.
+    MCResetRelocationInfo();
+    ASSERT(Available() == Capacity());
+  } else {
+    // During a non-compacting collection, everything below the linear
+    // allocation pointer is considered allocated (everything above is
+    // available) and we will rediscover available and wasted bytes during
+    // the collection.
+    accounting_stats_.AllocateBytes(free_list_.available());
+    accounting_stats_.FillWastedBytes(Waste());
+  }
+
+  // Clear the free list before a full GC---it will be rebuilt afterward.
+  free_list_.Reset();
+}
+
+
+void OldSpace::MCCommitRelocationInfo() {
+  // Update fast allocation info.
+  allocation_info_.top = mc_forwarding_info_.top;
+  allocation_info_.limit = mc_forwarding_info_.limit;
+  ASSERT(allocation_info_.VerifyPagedAllocation());
+
+  // The space is compacted and we haven't yet built free lists or
+  // wasted any space.
+  ASSERT(Waste() == 0);
+  ASSERT(AvailableFree() == 0);
+
+  // Build the free list for the space.
+  int computed_size = 0;
+  PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
+  while (it.has_next()) {
+    Page* p = it.next();
+    // Space below the relocation pointer is allocated.
+    computed_size += p->mc_relocation_top - p->ObjectAreaStart();
+    if (it.has_next()) {
+      // Free the space at the top of the page.  We cannot use
+      // p->mc_relocation_top after the call to Free (because Free will clear
+      // remembered set bits).
+      int extra_size = p->ObjectAreaEnd() - p->mc_relocation_top;
+      if (extra_size > 0) {
+        int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size);
+        // The bytes we have just "freed" to add to the free list were
+        // already accounted as available.
+        accounting_stats_.WasteBytes(wasted_bytes);
+      }
+    }
+  }
+
+  // Make sure the computed size - based on the used portion of the pages in
+  // use - matches the size obtained while computing forwarding addresses.
+  ASSERT(computed_size == Size());
+}
+
+
+// Slow case for normal allocation.  Try in order: (1) allocate in the next
+// page in the space, (2) allocate off the space's free list, (3) expand the
+// space, (4) fail.
+HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
+  // Linear allocation in this space has failed.  If there is another page
+  // in the space, move to that page and allocate there.  This allocation
+  // should succeed (size_in_bytes should not be greater than a page's
+  // object area size).
+  Page* current_page = TopPageOf(allocation_info_);
+  if (current_page->next_page()->is_valid()) {
+    return AllocateInNextPage(current_page, size_in_bytes);
+  }
+
+  // There is no next page in this space.  Try free list allocation.
+  int wasted_bytes;
+  Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
+  accounting_stats_.WasteBytes(wasted_bytes);
+  if (!result->IsFailure()) {
+    accounting_stats_.AllocateBytes(size_in_bytes);
+    return HeapObject::cast(result);
+  }
+
+  // Free list allocation failed and there is no next page.  Fail if we have
+  // hit the old generation size limit that should cause a garbage
+  // collection.
+  if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
+    return NULL;
+  }
+
+  // Try to expand the space and allocate in the new next page.
+  ASSERT(!current_page->next_page()->is_valid());
+  if (Expand(current_page)) {
+    return AllocateInNextPage(current_page, size_in_bytes);
+  }
+
+  // Finally, fail.
+  return NULL;
+}
+
+
+// Add the block at the top of the page to the space's free list, set the
+// allocation info to the next page (assumed to be one), and allocate
+// linearly there.
+HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
+                                         int size_in_bytes) {
+  ASSERT(current_page->next_page()->is_valid());
+  // Add the block at the top of this page to the free list.
+  int free_size = current_page->ObjectAreaEnd() - allocation_info_.top;
+  if (free_size > 0) {
+    int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
+    accounting_stats_.WasteBytes(wasted_bytes);
+  }
+  SetAllocationInfo(&allocation_info_, current_page->next_page());
+  return AllocateLinearly(&allocation_info_, size_in_bytes);
+}
+
+
+#ifdef DEBUG
+struct CommentStatistic {
+  const char* comment;
+  int size;
+  int count;
+  void Clear() {
+    comment = NULL;
+    size = 0;
+    count = 0;
+  }
+};
+
+
+// Must be small, since a linear iteration over the table is used for lookup.
+const int kMaxComments = 64;
+static CommentStatistic comments_statistics[kMaxComments+1];
+
+
+void PagedSpace::ReportCodeStatistics() {
+  ReportCodeKindStatistics();
+  PrintF("Code comment statistics (\"   [ comment-txt   :    size/   "
+         "count  (average)\"):\n");
+  for (int i = 0; i <= kMaxComments; i++) {
+    const CommentStatistic& cs = comments_statistics[i];
+    if (cs.size > 0) {
+      PrintF("   %-30s: %10d/%6d     (%d)\n", cs.comment, cs.size, cs.count,
+             cs.size/cs.count);
+    }
+  }
+  PrintF("\n");
+}
+
+
+void PagedSpace::ResetCodeStatistics() {
+  ClearCodeKindStatistics();
+  for (int i = 0; i < kMaxComments; i++) comments_statistics[i].Clear();
+  comments_statistics[kMaxComments].comment = "Unknown";
+  comments_statistics[kMaxComments].size = 0;
+  comments_statistics[kMaxComments].count = 0;
+}
+
+
+// Adds a comment to the 'comments_statistics' table.  Performance is OK as
+// long as 'kMaxComments' is small.
+static void EnterComment(const char* comment, int delta) {
+  // Do not count empty comments
+  if (delta <= 0) return;
+  CommentStatistic* cs = &comments_statistics[kMaxComments];
+  // Search for a free or matching entry in 'comments_statistics': 'cs'
+  // points to result.
+  for (int i = 0; i < kMaxComments; i++) {
+    if (comments_statistics[i].comment == NULL) {
+      cs = &comments_statistics[i];
+      cs->comment = comment;
+      break;
+    } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
+      cs = &comments_statistics[i];
+      break;
+    }
+  }
+  // Update entry for 'comment'
+  cs->size += delta;
+  cs->count += 1;
+}
+
+
+// Call for each nested comment start (start marked with '[ xxx', end marked
+// with ']').  RelocIterator 'it' must point to a comment reloc info.
+static void CollectCommentStatistics(RelocIterator* it) {
+  ASSERT(!it->done());
+  ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
+  const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
+  if (tmp[0] != '[') {
+    // Not a nested comment; skip
+    return;
+  }
+
+  // Search for end of nested comment or a new nested comment
+  const char* const comment_txt =
+      reinterpret_cast<const char*>(it->rinfo()->data());
+  const byte* prev_pc = it->rinfo()->pc();
+  int flat_delta = 0;
+  it->next();
+  while (true) {
+    // All nested comments must be terminated properly, and therefore exit
+    // from loop.
+    ASSERT(!it->done());
+    if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
+      const char* const txt =
+          reinterpret_cast<const char*>(it->rinfo()->data());
+      flat_delta += it->rinfo()->pc() - prev_pc;
+      if (txt[0] == ']') break;  // End of nested comment
+      // A new comment
+      CollectCommentStatistics(it);
+      // Skip code that was covered with previous comment
+      prev_pc = it->rinfo()->pc();
+    }
+    it->next();
+  }
+  EnterComment(comment_txt, flat_delta);
+}
+
+
+// Collects code size statistics:
+// - by code kind
+// - by code comment
+void PagedSpace::CollectCodeStatistics() {
+  HeapObjectIterator obj_it(this);
+  while (obj_it.has_next()) {
+    HeapObject* obj = obj_it.next();
+    if (obj->IsCode()) {
+      Code* code = Code::cast(obj);
+      code_kind_statistics[code->kind()] += code->Size();
+      RelocIterator it(code);
+      int delta = 0;
+      const byte* prev_pc = code->instruction_start();
+      while (!it.done()) {
+        if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
+          delta += it.rinfo()->pc() - prev_pc;
+          CollectCommentStatistics(&it);
+          prev_pc = it.rinfo()->pc();
+        }
+        it.next();
+      }
+
+      ASSERT(code->instruction_start() <= prev_pc &&
+             prev_pc <= code->relocation_start());
+      delta += code->relocation_start() - prev_pc;
+      EnterComment("NoComment", delta);
+    }
+  }
+}
+
+
+void OldSpace::ReportStatistics() {
+  int pct = Available() * 100 / Capacity();
+  PrintF("  capacity: %d, waste: %d, available: %d, %%%d\n",
+         Capacity(), Waste(), Available(), pct);
+
+  // Report remembered set statistics.
+  int rset_marked_pointers = 0;
+  int rset_marked_arrays = 0;
+  int rset_marked_array_elements = 0;
+  int cross_gen_pointers = 0;
+  int cross_gen_array_elements = 0;
+
+  PageIterator page_it(this, PageIterator::PAGES_IN_USE);
+  while (page_it.has_next()) {
+    Page* p = page_it.next();
+
+    for (Address rset_addr = p->RSetStart();
+         rset_addr < p->RSetEnd();
+         rset_addr += kIntSize) {
+      int rset = Memory::int_at(rset_addr);
+      if (rset != 0) {
+        // Bits were set
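+        // Recover the pointer slot covered by each set bit: the bit index is
+        // intoff * kBitsPerByte + bitoff, scaled by the object alignment.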
+        int intoff = rset_addr - p->address() - Page::kRSetOffset;
+        int bitoff = 0;
+        for (; bitoff < kBitsPerInt; ++bitoff) {
+          if ((rset & (1 << bitoff)) != 0) {
+            int bitpos = intoff*kBitsPerByte + bitoff;
+            Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
+            Object** obj = reinterpret_cast<Object**>(slot);
+            if (*obj == Heap::raw_unchecked_fixed_array_map()) {
+              rset_marked_arrays++;
+              FixedArray* fa = FixedArray::cast(HeapObject::FromAddress(slot));
+
+              rset_marked_array_elements += fa->length();
+              // Manually inline FixedArray::IterateBody
+              Address elm_start = slot + FixedArray::kHeaderSize;
+              Address elm_stop = elm_start + fa->length() * kPointerSize;
+              for (Address elm_addr = elm_start;
+                   elm_addr < elm_stop; elm_addr += kPointerSize) {
+                // Filter non-heap-object pointers
+                Object** elm_p = reinterpret_cast<Object**>(elm_addr);
+                if (Heap::InNewSpace(*elm_p))
+                  cross_gen_array_elements++;
+              }
+            } else {
+              rset_marked_pointers++;
+              if (Heap::InNewSpace(*obj))
+                cross_gen_pointers++;
+            }
+          }
+        }
+      }
+    }
+  }
+
+  pct = rset_marked_pointers == 0 ?
+        0 : cross_gen_pointers * 100 / rset_marked_pointers;
+  PrintF("  rset-marked pointers %d, to-new-space %d (%%%d)\n",
+            rset_marked_pointers, cross_gen_pointers, pct);
+  PrintF("  rset_marked arrays %d, ", rset_marked_arrays);
+  PrintF("  elements %d, ", rset_marked_array_elements);
+  pct = rset_marked_array_elements == 0 ? 0
+           : cross_gen_array_elements * 100 / rset_marked_array_elements;
+  PrintF("  pointers to new space %d (%%%d)\n", cross_gen_array_elements, pct);
+  PrintF("  total rset-marked bits %d\n",
+            (rset_marked_pointers + rset_marked_arrays));
+  pct = (rset_marked_pointers + rset_marked_array_elements) == 0 ? 0
+        : (cross_gen_pointers + cross_gen_array_elements) * 100 /
+          (rset_marked_pointers + rset_marked_array_elements);
+  PrintF("  total rset pointers %d, true cross generation ones %d (%%%d)\n",
+         (rset_marked_pointers + rset_marked_array_elements),
+         (cross_gen_pointers + cross_gen_array_elements),
+         pct);
+
+  ClearHistograms();
+  HeapObjectIterator obj_it(this);
+  while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
+  ReportHistogram(true);
+}
+
+
+// Dump the range of remembered set words between [start, end) corresponding
+// to the pointers starting at object_p.  The allocation_top is an object
+// pointer which should not be read past.  This is important for large object
+// pages, where some bits in the remembered set range do not correspond to
+// allocated addresses.
+static void PrintRSetRange(Address start, Address end, Object** object_p,
+                           Address allocation_top) {
+  Address rset_address = start;
+
+  // If the range starts on an odd-numbered word (e.g., for large object extra
+  // remembered set ranges), print some spaces.
+  if ((reinterpret_cast<uintptr_t>(start) / kIntSize) % 2 == 1) {
+    PrintF("                                    ");
+  }
+
+  // Loop over all the words in the range.
+  while (rset_address < end) {
+    uint32_t rset_word = Memory::uint32_at(rset_address);
+    int bit_position = 0;
+
+    // Loop over all the bits in the word.
+    while (bit_position < kBitsPerInt) {
+      if (object_p == reinterpret_cast<Object**>(allocation_top)) {
+        // Print a bar at the allocation pointer.
+        PrintF("|");
+      } else if (object_p > reinterpret_cast<Object**>(allocation_top)) {
+        // Do not dereference object_p past the allocation pointer.
+        PrintF("#");
+      } else if ((rset_word & (1 << bit_position)) == 0) {
+        // Print a dot for zero bits.
+        PrintF(".");
+      } else if (Heap::InNewSpace(*object_p)) {
+        // Print an X for set bits whose slot points to new space.
+        PrintF("X");
+      } else {
+        // Print an o (circle) for set bits whose slot points to old space.
+        PrintF("o");
+      }
+
+      // Print a space after every 8th bit except the last.
+      if (bit_position % 8 == 7 && bit_position != (kBitsPerInt - 1)) {
+        PrintF(" ");
+      }
+
+      // Advance to next bit.
+      bit_position++;
+      object_p++;
+    }
+
+    // Print a newline after every odd numbered word, otherwise a space.
+    if ((reinterpret_cast<uintptr_t>(rset_address) / kIntSize) % 2 == 1) {
+      PrintF("\n");
+    } else {
+      PrintF(" ");
+    }
+
+    // Advance to next remembered set word.
+    rset_address += kIntSize;
+  }
+}
+
+
+void PagedSpace::DoPrintRSet(const char* space_name) {
+  PageIterator it(this, PageIterator::PAGES_IN_USE);
+  while (it.has_next()) {
+    Page* p = it.next();
+    PrintF("%s page 0x%x:\n", space_name, p);
+    PrintRSetRange(p->RSetStart(), p->RSetEnd(),
+                   reinterpret_cast<Object**>(p->ObjectAreaStart()),
+                   p->AllocationTop());
+    PrintF("\n");
+  }
+}
+
+
+void OldSpace::PrintRSet() { DoPrintRSet("old"); }
+#endif
+
+// -----------------------------------------------------------------------------
+// FixedSpace implementation
+
+void FixedSpace::PrepareForMarkCompact(bool will_compact) {
+  if (will_compact) {
+    // Reset relocation info.
+    MCResetRelocationInfo();
+
+    // During a compacting collection, everything in the space is considered
+    // 'available' (set by the call to MCResetRelocationInfo) and we will
+    // rediscover live and wasted bytes during the collection.
+    ASSERT(Available() == Capacity());
+  } else {
+    // During a non-compacting collection, everything below the linear
+    // allocation pointer except wasted top-of-page blocks is considered
+    // allocated and we will rediscover available bytes during the
+    // collection.
+    accounting_stats_.AllocateBytes(free_list_.available());
+  }
+
+  // Clear the free list before a full GC---it will be rebuilt afterward.
+  free_list_.Reset();
+}
+
+
+void FixedSpace::MCCommitRelocationInfo() {
+  // Update fast allocation info.
+  allocation_info_.top = mc_forwarding_info_.top;
+  allocation_info_.limit = mc_forwarding_info_.limit;
+  ASSERT(allocation_info_.VerifyPagedAllocation());
+
+  // The space is compacted and we haven't yet wasted any space.
+  ASSERT(Waste() == 0);
+
+  // Update allocation_top of each page in use and compute waste.
+  int computed_size = 0;
+  PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
+  while (it.has_next()) {
+    Page* page = it.next();
+    Address page_top = page->AllocationTop();
+    computed_size += page_top - page->ObjectAreaStart();
+    if (it.has_next()) {
+      accounting_stats_.WasteBytes(page->ObjectAreaEnd() - page_top);
+    }
+  }
+
+  // Make sure the computed size - based on the used portion of the
+  // pages in use - matches the size we adjust during allocation.
+  ASSERT(computed_size == Size());
+}
+
+
+// Slow case for normal allocation. Try in order: (1) allocate in the next
+// page in the space, (2) allocate off the space's free list, (3) expand the
+// space, (4) fail.
+HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
+  ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
+  // Linear allocation in this space has failed.  If there is another page
+  // in the space, move to that page and allocate there.  This allocation
+  // should succeed.
+  Page* current_page = TopPageOf(allocation_info_);
+  if (current_page->next_page()->is_valid()) {
+    return AllocateInNextPage(current_page, size_in_bytes);
+  }
+
+  // There is no next page in this space.  Try free list allocation.
+  // The fixed space free list implicitly assumes that all free blocks
+  // are of the fixed size.
+  if (size_in_bytes == object_size_in_bytes_) {
+    Object* result = free_list_.Allocate();
+    if (!result->IsFailure()) {
+      accounting_stats_.AllocateBytes(size_in_bytes);
+      return HeapObject::cast(result);
+    }
+  }
+
+  // Free list allocation failed and there is no next page.  Fail if we have
+  // hit the old generation size limit that should cause a garbage
+  // collection.
+  if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
+    return NULL;
+  }
+
+  // Try to expand the space and allocate in the new next page.
+  ASSERT(!current_page->next_page()->is_valid());
+  if (Expand(current_page)) {
+    return AllocateInNextPage(current_page, size_in_bytes);
+  }
+
+  // Finally, fail.
+  return NULL;
+}
+
+
+// Move to the next page (there is assumed to be one) and allocate there.
+// The top of page block is always wasted, because it is too small to hold a
+// map.
+HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
+                                           int size_in_bytes) {
+  ASSERT(current_page->next_page()->is_valid());
+  ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == page_extra_);
+  ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
+  accounting_stats_.WasteBytes(page_extra_);
+  SetAllocationInfo(&allocation_info_, current_page->next_page());
+  return AllocateLinearly(&allocation_info_, size_in_bytes);
+}
+
+
+#ifdef DEBUG
+void FixedSpace::ReportStatistics() {
+  int pct = Available() * 100 / Capacity();
+  PrintF("  capacity: %d, waste: %d, available: %d, %%%d\n",
+         Capacity(), Waste(), Available(), pct);
+
+  // Report remembered set statistics.
+  int rset_marked_pointers = 0;
+  int cross_gen_pointers = 0;
+
+  PageIterator page_it(this, PageIterator::PAGES_IN_USE);
+  while (page_it.has_next()) {
+    Page* p = page_it.next();
+
+    for (Address rset_addr = p->RSetStart();
+         rset_addr < p->RSetEnd();
+         rset_addr += kIntSize) {
+      int rset = Memory::int_at(rset_addr);
+      if (rset != 0) {
+        // Bits were set
+        int intoff = rset_addr - p->address() - Page::kRSetOffset;
+        int bitoff = 0;
+        for (; bitoff < kBitsPerInt; ++bitoff) {
+          if ((rset & (1 << bitoff)) != 0) {
+            int bitpos = intoff*kBitsPerByte + bitoff;
+            Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
+            Object** obj = reinterpret_cast<Object**>(slot);
+            rset_marked_pointers++;
+            if (Heap::InNewSpace(*obj))
+              cross_gen_pointers++;
+          }
+        }
+      }
+    }
+  }
+
+  pct = rset_marked_pointers == 0 ?
+          0 : cross_gen_pointers * 100 / rset_marked_pointers;
+  PrintF("  rset-marked pointers %d, to-new-space %d (%%%d)\n",
+            rset_marked_pointers, cross_gen_pointers, pct);
+
+  ClearHistograms();
+  HeapObjectIterator obj_it(this);
+  while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
+  ReportHistogram(false);
+}
+
+
+void FixedSpace::PrintRSet() { DoPrintRSet(name_); }
+#endif
+
+
+// -----------------------------------------------------------------------------
+// MapSpace implementation
+
+void MapSpace::PrepareForMarkCompact(bool will_compact) {
+  // Call prepare of the super class.
+  FixedSpace::PrepareForMarkCompact(will_compact);
+
+  if (will_compact) {
+    // Initialize map index entry.
+    int page_count = 0;
+    PageIterator it(this, PageIterator::ALL_PAGES);
+    while (it.has_next()) {
+      ASSERT_MAP_PAGE_INDEX(page_count);
+
+      Page* p = it.next();
+      ASSERT(p->mc_page_index == page_count);
+
+      page_addresses_[page_count++] = p->address();
+    }
+  }
+}
+
+
+#ifdef DEBUG
+void MapSpace::VerifyObject(HeapObject* object) {
+  // The object should be a map or a free-list node.
+  ASSERT(object->IsMap() || object->IsByteArray());
+}
+#endif
+
+
+// -----------------------------------------------------------------------------
+// GlobalPropertyCellSpace implementation
+
+#ifdef DEBUG
+void CellSpace::VerifyObject(HeapObject* object) {
+  // The object should be a global object property cell or a free-list node.
+  ASSERT(object->IsJSGlobalPropertyCell() ||
+         object->map() == Heap::two_pointer_filler_map());
+}
+#endif
+
+
+// -----------------------------------------------------------------------------
+// LargeObjectIterator
+
+LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
+  current_ = space->first_chunk_;
+  size_func_ = NULL;
+}
+
+
+LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
+                                         HeapObjectCallback size_func) {
+  current_ = space->first_chunk_;
+  size_func_ = size_func;
+}
+
+
+HeapObject* LargeObjectIterator::next() {
+  ASSERT(has_next());
+  HeapObject* object = current_->GetObject();
+  current_ = current_->next();
+  return object;
+}
+
+
+// -----------------------------------------------------------------------------
+// LargeObjectChunk
+
+LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
+                                        size_t* chunk_size,
+                                        Executability executable) {
+  size_t requested = ChunkSizeFor(size_in_bytes);
+  void* mem = MemoryAllocator::AllocateRawMemory(requested,
+                                                 chunk_size,
+                                                 executable);
+  if (mem == NULL) return NULL;
+  LOG(NewEvent("LargeObjectChunk", mem, *chunk_size));
+  if (*chunk_size < requested) {
+    MemoryAllocator::FreeRawMemory(mem, *chunk_size);
+    LOG(DeleteEvent("LargeObjectChunk", mem));
+    return NULL;
+  }
+  return reinterpret_cast<LargeObjectChunk*>(mem);
+}
+
+
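+// Raw memory to request for a large object chunk: the object size plus the
+// page header, padded so the page inside the chunk can be aligned to
+// kPageSize even when the OS allocation alignment is smaller.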
+int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
+  int os_alignment = OS::AllocateAlignment();
+  if (os_alignment < Page::kPageSize)
+    size_in_bytes += (Page::kPageSize - os_alignment);
+  return size_in_bytes + Page::kObjectStartOffset;
+}
+
+// -----------------------------------------------------------------------------
+// LargeObjectSpace
+
+LargeObjectSpace::LargeObjectSpace(AllocationSpace id)
+    : Space(id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
+      first_chunk_(NULL),
+      size_(0),
+      page_count_(0) {}
+
+
+bool LargeObjectSpace::Setup() {
+  first_chunk_ = NULL;
+  size_ = 0;
+  page_count_ = 0;
+  return true;
+}
+
+
+void LargeObjectSpace::TearDown() {
+  while (first_chunk_ != NULL) {
+    LargeObjectChunk* chunk = first_chunk_;
+    first_chunk_ = first_chunk_->next();
+    LOG(DeleteEvent("LargeObjectChunk", chunk->address()));
+    MemoryAllocator::FreeRawMemory(chunk->address(), chunk->size());
+  }
+
+  size_ = 0;
+  page_count_ = 0;
+}
+
+
+#ifdef ENABLE_HEAP_PROTECTION
+
+void LargeObjectSpace::Protect() {
+  LargeObjectChunk* chunk = first_chunk_;
+  while (chunk != NULL) {
+    MemoryAllocator::Protect(chunk->address(), chunk->size());
+    chunk = chunk->next();
+  }
+}
+
+
+void LargeObjectSpace::Unprotect() {
+  LargeObjectChunk* chunk = first_chunk_;
+  while (chunk != NULL) {
+    bool is_code = chunk->GetObject()->IsCode();
+    MemoryAllocator::Unprotect(chunk->address(), chunk->size(),
+                               is_code ? EXECUTABLE : NOT_EXECUTABLE);
+    chunk = chunk->next();
+  }
+}
+
+#endif
+
+
+Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
+                                              int object_size,
+                                              Executability executable) {
+  ASSERT(0 < object_size && object_size <= requested_size);
+
+  // Check if we want to force a GC before growing the old space further.
+  // If so, fail the allocation.
+  if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
+    return Failure::RetryAfterGC(requested_size, identity());
+  }
+
+  size_t chunk_size;
+  LargeObjectChunk* chunk =
+      LargeObjectChunk::New(requested_size, &chunk_size, executable);
+  if (chunk == NULL) {
+    return Failure::RetryAfterGC(requested_size, identity());
+  }
+
+  size_ += chunk_size;
+  page_count_++;
+  chunk->set_next(first_chunk_);
+  chunk->set_size(chunk_size);
+  first_chunk_ = chunk;
+
+  // Set the object address and size in the page header and clear its
+  // remembered set.
+  Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
+  Address object_address = page->ObjectAreaStart();
+  // Clear the low order bit of the second word in the page to flag it as a
+  // large object page.  If the chunk_size happened to be written there, its
+  // low order bit should already be clear.
+  ASSERT((chunk_size & 0x1) == 0);
+  page->is_normal_page &= ~0x1;
+  page->ClearRSet();
+  int extra_bytes = requested_size - object_size;
+  if (extra_bytes > 0) {
+    // The extra memory for the remembered set should be cleared.
+    memset(object_address + object_size, 0, extra_bytes);
+  }
+
+  return HeapObject::FromAddress(object_address);
+}
+
+
+Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
+  ASSERT(0 < size_in_bytes);
+  return AllocateRawInternal(size_in_bytes,
+                             size_in_bytes,
+                             EXECUTABLE);
+}
+
+
+Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
+  ASSERT(0 < size_in_bytes);
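+  // Fixed arrays can contain pointers to new space, so reserve extra room
+  // after the object for the remembered set covering the oversized array.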
+  int extra_rset_bytes = ExtraRSetBytesFor(size_in_bytes);
+  return AllocateRawInternal(size_in_bytes + extra_rset_bytes,
+                             size_in_bytes,
+                             NOT_EXECUTABLE);
+}
+
+
+Object* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
+  ASSERT(0 < size_in_bytes);
+  return AllocateRawInternal(size_in_bytes,
+                             size_in_bytes,
+                             NOT_EXECUTABLE);
+}
+
+
+// GC support
+Object* LargeObjectSpace::FindObject(Address a) {
+  for (LargeObjectChunk* chunk = first_chunk_;
+       chunk != NULL;
+       chunk = chunk->next()) {
+    Address chunk_address = chunk->address();
+    if (chunk_address <= a && a < chunk_address + chunk->size()) {
+      return chunk->GetObject();
+    }
+  }
+  return Failure::Exception();
+}
+
+
+void LargeObjectSpace::ClearRSet() {
+  ASSERT(Page::is_rset_in_use());
+
+  LargeObjectIterator it(this);
+  while (it.has_next()) {
+    HeapObject* object = it.next();
+    // We only have code, sequential strings, or fixed arrays in large
+    // object space, and only fixed arrays need remembered set support.
+    if (object->IsFixedArray()) {
+      // Clear the normal remembered set region of the page.
+      Page* page = Page::FromAddress(object->address());
+      page->ClearRSet();
+
+      // Clear the extra remembered set.
+      int size = object->Size();
+      int extra_rset_bytes = ExtraRSetBytesFor(size);
+      memset(object->address() + size, 0, extra_rset_bytes);
+    }
+  }
+}
+
+
+void LargeObjectSpace::IterateRSet(ObjectSlotCallback copy_object_func) {
+  ASSERT(Page::is_rset_in_use());
+
+  static void* lo_rset_histogram = StatsTable::CreateHistogram(
+      "V8.RSetLO",
+      0,
+      // Keeping this histogram's buckets the same as the paged space histogram.
+      Page::kObjectAreaSize / kPointerSize,
+      30);
+
+  LargeObjectIterator it(this);
+  while (it.has_next()) {
+    // We only have code, sequential strings, or fixed arrays in large
+    // object space, and only fixed arrays can possibly contain pointers to
+    // the young generation.
+    HeapObject* object = it.next();
+    if (object->IsFixedArray()) {
+      // Iterate the normal page remembered set range.
+      Page* page = Page::FromAddress(object->address());
+      Address object_end = object->address() + object->Size();
+      int count = Heap::IterateRSetRange(page->ObjectAreaStart(),
+                                         Min(page->ObjectAreaEnd(), object_end),
+                                         page->RSetStart(),
+                                         copy_object_func);
+
+      // Iterate the extra array elements.
+      if (object_end > page->ObjectAreaEnd()) {
+        count += Heap::IterateRSetRange(page->ObjectAreaEnd(), object_end,
+                                        object_end, copy_object_func);
+      }
+      if (lo_rset_histogram != NULL) {
+        StatsTable::AddHistogramSample(lo_rset_histogram, count);
+      }
+    }
+  }
+}
+
+
+void LargeObjectSpace::FreeUnmarkedObjects() {
+  LargeObjectChunk* previous = NULL;
+  LargeObjectChunk* current = first_chunk_;
+  while (current != NULL) {
+    HeapObject* object = current->GetObject();
+    if (object->IsMarked()) {
+      object->ClearMark();
+      MarkCompactCollector::tracer()->decrement_marked_count();
+      previous = current;
+      current = current->next();
+    } else {
+      Address chunk_address = current->address();
+      size_t chunk_size = current->size();
+
+      // Cut the chunk out from the chunk list.
+      current = current->next();
+      if (previous == NULL) {
+        first_chunk_ = current;
+      } else {
+        previous->set_next(current);
+      }
+
+      // Free the chunk.
+      if (object->IsCode()) {
+        LOG(CodeDeleteEvent(object->address()));
+      }
+      size_ -= chunk_size;
+      page_count_--;
+      MemoryAllocator::FreeRawMemory(chunk_address, chunk_size);
+      LOG(DeleteEvent("LargeObjectChunk", chunk_address));
+    }
+  }
+}
+
+
+bool LargeObjectSpace::Contains(HeapObject* object) {
+  Address address = object->address();
+  Page* page = Page::FromAddress(address);
+
+  SLOW_ASSERT(!page->IsLargeObjectPage()
+              || !FindObject(address)->IsFailure());
+
+  return page->IsLargeObjectPage();
+}
+
+
+#ifdef DEBUG
+// We do not assume that the large object iterator works, because it depends
+// on the invariants we are checking during verification.
+void LargeObjectSpace::Verify() {
+  for (LargeObjectChunk* chunk = first_chunk_;
+       chunk != NULL;
+       chunk = chunk->next()) {
+    // Each chunk contains an object that starts at the large object page's
+    // object area start.
+    HeapObject* object = chunk->GetObject();
+    Page* page = Page::FromAddress(object->address());
+    ASSERT(object->address() == page->ObjectAreaStart());
+
+    // The first word should be a map, and we expect all map pointers to be
+    // in map space.
+    Map* map = object->map();
+    ASSERT(map->IsMap());
+    ASSERT(Heap::map_space()->Contains(map));
+
+    // We have only code, sequential strings, external strings
+    // (sequential strings that have been morphed into external
+    // strings), fixed arrays, and byte arrays in large object space.
+    ASSERT(object->IsCode() || object->IsSeqString() ||
+           object->IsExternalString() || object->IsFixedArray() ||
+           object->IsByteArray());
+
+    // The object itself should look OK.
+    object->Verify();
+
+    // Byte arrays and strings don't have interior pointers.
+    if (object->IsCode()) {
+      VerifyPointersVisitor code_visitor;
+      object->IterateBody(map->instance_type(),
+                          object->Size(),
+                          &code_visitor);
+    } else if (object->IsFixedArray()) {
+      // We loop over fixed arrays ourselves, rather than using the visitor,
+      // because the visitor doesn't support the start/offset iteration
+      // needed for IsRSetSet.
+      FixedArray* array = FixedArray::cast(object);
+      for (int j = 0; j < array->length(); j++) {
+        Object* element = array->get(j);
+        if (element->IsHeapObject()) {
+          HeapObject* element_object = HeapObject::cast(element);
+          ASSERT(Heap::Contains(element_object));
+          ASSERT(element_object->map()->IsMap());
+          if (Heap::InNewSpace(element_object)) {
+            ASSERT(Page::IsRSetSet(object->address(),
+                                   FixedArray::kHeaderSize + j * kPointerSize));
+          }
+        }
+      }
+    }
+  }
+}
+
+
+void LargeObjectSpace::Print() {
+  LargeObjectIterator it(this);
+  while (it.has_next()) {
+    it.next()->Print();
+  }
+}
+
+
+void LargeObjectSpace::ReportStatistics() {
+  PrintF("  size: %d\n", size_);
+  int num_objects = 0;
+  ClearHistograms();
+  LargeObjectIterator it(this);
+  while (it.has_next()) {
+    num_objects++;
+    CollectHistogramInfo(it.next());
+  }
+
+  PrintF("  number of objects %d\n", num_objects);
+  if (num_objects > 0) ReportHistogram(false);
+}
+
+
+void LargeObjectSpace::CollectCodeStatistics() {
+  LargeObjectIterator obj_it(this);
+  while (obj_it.has_next()) {
+    HeapObject* obj = obj_it.next();
+    if (obj->IsCode()) {
+      Code* code = Code::cast(obj);
+      code_kind_statistics[code->kind()] += code->Size();
+    }
+  }
+}
+
+
+void LargeObjectSpace::PrintRSet() {
+  LargeObjectIterator it(this);
+  while (it.has_next()) {
+    HeapObject* object = it.next();
+    if (object->IsFixedArray()) {
+      Page* page = Page::FromAddress(object->address());
+
+      Address allocation_top = object->address() + object->Size();
+      PrintF("large page 0x%x:\n", page);
+      PrintRSetRange(page->RSetStart(), page->RSetEnd(),
+                     reinterpret_cast<Object**>(object->address()),
+                     allocation_top);
+      int extra_array_bytes = object->Size() - Page::kObjectAreaSize;
+      int extra_rset_bits = RoundUp(extra_array_bytes / kPointerSize,
+                                    kBitsPerInt);
+      PrintF("------------------------------------------------------------"
+             "-----------\n");
+      PrintRSetRange(allocation_top,
+                     allocation_top + extra_rset_bits / kBitsPerByte,
+                     reinterpret_cast<Object**>(object->address()
+                                                + Page::kObjectAreaSize),
+                     allocation_top);
+      PrintF("\n");
+    }
+  }
+}
+#endif  // DEBUG
+
+} }  // namespace v8::internal
diff --git a/src/spaces.h b/src/spaces.h
new file mode 100644
index 0000000..76b88ef
--- /dev/null
+++ b/src/spaces.h
@@ -0,0 +1,1942 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SPACES_H_
+#define V8_SPACES_H_
+
+#include "list-inl.h"
+#include "log.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Heap structures:
+//
+// A JS heap consists of a young generation, an old generation, and a large
+// object space. The young generation is divided into two semispaces. A
+// scavenger implements Cheney's copying algorithm. The old generation is
+// separated into a map space and an old object space. The map space contains
+// all (and only) map objects, the rest of old objects go into the old space.
+// The old generation is collected by a mark-sweep-compact collector.
+//
+// The semispaces of the young generation are contiguous.  The old and map
+// spaces consists of a list of pages. A page has a page header, a remembered
+// set area, and an object area. A page size is deliberately chosen as 8K
+// bytes. The first word of a page is an opaque page header that has the
+// address of the next page and its ownership information. The second word may
+// have the allocation top address of this page. The next 248 bytes are
+// remembered sets. Heap objects are aligned to the pointer size (4 bytes). A
+// remembered set bit corresponds to a pointer in the object area.
+//
+// There is a separate large object space for objects larger than
+// Page::kMaxHeapObjectSize, so that they do not have to move during
+// collection.  The large object space is paged and uses the same remembered
+// set implementation.  Pages in large object space may be larger than 8K.
+//
+// NOTE: The mark-compact collector rebuilds the remembered set after a
+// collection. It reuses first a few words of the remembered set for
+// bookkeeping relocation information.
+
+
+// Some assertion macros used in the debugging mode.
+
+#define ASSERT_PAGE_ALIGNED(address)                  \
+  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
+
+#define ASSERT_OBJECT_ALIGNED(address)                \
+  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
+
+#define ASSERT_OBJECT_SIZE(size)                      \
+  ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))
+
+#define ASSERT_PAGE_OFFSET(offset)                    \
+  ASSERT((Page::kObjectStartOffset <= offset)         \
+      && (offset <= Page::kPageSize))
+
+#define ASSERT_MAP_PAGE_INDEX(index)                            \
+  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
+
+
+class PagedSpace;
+class MemoryAllocator;
+class AllocationInfo;
+
+// -----------------------------------------------------------------------------
+// A page normally has 8K bytes. Large object pages may be larger.  A page
+// address is always aligned to the 8K page size.  A page is divided into
+// three areas: the first two words are used for bookkeeping, the next 248
+// bytes are used as remembered set, and the rest of the page is the object
+// area.
+//
+// Pointers are aligned to the pointer size (4 bytes), so only 1 bit is needed
+// per pointer in the remembered set. Given an address, its remembered set
+// bit position (offset from the start of the page) is calculated by dividing
+// its page offset by 32. Therefore, the object area in a page starts at the
+// 256th byte (8K/32). Bytes 0 to 255 do not need the remembered set, so that
+// the first two words (64 bits) in a page can be used for other purposes.
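+//
+// For example, with 4-byte pointers a slot at page offset 4096 is covered by
+// remembered set bit 4096/4 = 1024, i.e. bit 0 of the byte at page offset
+// 4096/32 = 128 (ignoring the 64-bit kRSetOffset adjustment described below).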
+//
+// On the 64-bit platform, we add an offset to the start of the remembered set,
+// and pointers are aligned to 8-byte pointer size. This means that we need
+// only 128 bytes for the RSet, and only get two bytes free in the RSet's RSet.
+// For this reason we add an offset to get room for the Page data at the start.
+//
+// The mark-compact collector transforms a map pointer into a page index and a
+// page offset. The map space can have up to 1024 pages, and 8M bytes (1024 *
+// 8K) in total.  Because a map pointer is aligned to the pointer size (4
+// bytes), 11 bits are enough to encode the page offset. 21 bits (10 for the
+// page index + 11 for the offset in the page) are required to encode a map
+// pointer.
+//
+// The only way to get a page pointer is by calling factory methods:
+//   Page* p = Page::FromAddress(addr); or
+//   Page* p = Page::FromAllocationTop(top);
+class Page {
+ public:
+  // Returns the page containing a given address. The address ranges
+  // from [page_addr .. page_addr + kPageSize[
+  //
+  // Note that this function only works for addresses in normal paged
+  // spaces and addresses in the first 8K of large object pages (i.e.,
+  // the start of large objects but not necessarily derived pointers
+  // within them).
+  INLINE(static Page* FromAddress(Address a)) {
+    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
+  }
+
+  // Returns the page containing an allocation top. Because an allocation
+  // top address can be the upper bound of the page, we need to subtract
+  // it with kPointerSize first. The address ranges from
+  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
+  INLINE(static Page* FromAllocationTop(Address top)) {
+    Page* p = FromAddress(top - kPointerSize);
+    ASSERT_PAGE_OFFSET(p->Offset(top));
+    return p;
+  }
+
+  // Returns the start address of this page.
+  Address address() { return reinterpret_cast<Address>(this); }
+
+  // Checks whether this is a valid page address.
+  bool is_valid() { return address() != NULL; }
+
+  // Returns the next page of this page.
+  inline Page* next_page();
+
+  // Return the end of allocation in this page. Undefined for unused pages.
+  inline Address AllocationTop();
+
+  // Returns the start address of the object area in this page.
+  Address ObjectAreaStart() { return address() + kObjectStartOffset; }
+
+  // Returns the end address (exclusive) of the object area in this page.
+  Address ObjectAreaEnd() { return address() + Page::kPageSize; }
+
+  // Returns the start address of the remembered set area.
+  Address RSetStart() { return address() + kRSetStartOffset; }
+
+  // Returns the end address of the remembered set area (exclusive).
+  Address RSetEnd() { return address() + kRSetEndOffset; }
+
+  // Checks whether an address is page aligned.
+  static bool IsAlignedToPageSize(Address a) {
+    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
+  }
+
+  // True if this page is a large object page.
+  bool IsLargeObjectPage() { return (is_normal_page & 0x1) == 0; }
+
+  // Returns the offset of a given address to this page.
+  INLINE(int Offset(Address a)) {
+    int offset = a - address();
+    ASSERT_PAGE_OFFSET(offset);
+    return offset;
+  }
+
+  // Returns the address for a given offset in this page.
+  Address OffsetToAddress(int offset) {
+    ASSERT_PAGE_OFFSET(offset);
+    return address() + offset;
+  }
+
+  // ---------------------------------------------------------------------
+  // Remembered set support
+
+  // Clears remembered set in this page.
+  inline void ClearRSet();
+
+  // Return the address of the remembered set word corresponding to an
+  // object address/offset pair, and the bit encoded as a single-bit
+  // mask in the output parameter 'bitmask'.
+  INLINE(static Address ComputeRSetBitPosition(Address address, int offset,
+                                               uint32_t* bitmask));
+
+  // Sets the corresponding remembered set bit for a given address.
+  INLINE(static void SetRSet(Address address, int offset));
+
+  // Clears the corresponding remembered set bit for a given address.
+  static inline void UnsetRSet(Address address, int offset);
+
+  // Checks whether the remembered set bit for a given address is set.
+  static inline bool IsRSetSet(Address address, int offset);
+
+#ifdef DEBUG
+  // Use a state to mark whether remembered set space can be used for other
+  // purposes.
+  enum RSetState { IN_USE,  NOT_IN_USE };
+  static bool is_rset_in_use() { return rset_state_ == IN_USE; }
+  static void set_rset_state(RSetState state) { rset_state_ = state; }
+#endif
+
+  // 8K bytes per page.
+  static const int kPageSizeBits = 13;
+
+  // Page size in bytes.  This must be a multiple of the OS page size.
+  static const int kPageSize = 1 << kPageSizeBits;
+
+  // Page size mask.
+  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
+
+  // The offset of the remembered set in a page, in addition to the empty bytes
+  // formed as the remembered bits of the remembered set itself.
+#ifdef V8_TARGET_ARCH_X64
+  static const int kRSetOffset = 4 * kPointerSize;  // Room for four pointers.
+#else
+  static const int kRSetOffset = 0;
+#endif
+  // The end offset of the remembered set in a page
+  // (heaps are aligned to pointer size).
+  static const int kRSetEndOffset = kRSetOffset + kPageSize / kBitsPerPointer;
+
+  // The start offset of the object area in a page.
+  // This needs to be at least (bits per uint32_t) * kBitsPerPointer,
+  // to align start of rset to a uint32_t address.
+  static const int kObjectStartOffset = 256;
+
+  // The start offset of the used part of the remembered set in a page.
+  static const int kRSetStartOffset = kRSetOffset +
+      kObjectStartOffset / kBitsPerPointer;
+
+  // Object area size in bytes.
+  static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
+
+  // Maximum object size that fits in a page.
+  static const int kMaxHeapObjectSize = kObjectAreaSize;
+
+  //---------------------------------------------------------------------------
+  // Page header description.
+  //
+  // If a page is not in the large object space, the first word,
+  // opaque_header, encodes the next page address (aligned to kPageSize 8K)
+  // and the chunk number (0 ~ 8K-1).  Only MemoryAllocator should use
+  // opaque_header. The value range of the opaque_header is [0..kPageSize[,
+  // or [next_page_start, next_page_end[. It cannot point to a valid address
+  // in the current page.  If a page is in the large object space, the first
+  // word *may* (if the page start and large object chunk start are the
+  // same) contain the address of the next large object chunk.
+  intptr_t opaque_header;
+
+  // If the page is not in the large object space, the low-order bit of the
+  // second word is set. If the page is in the large object space, the
+  // second word *may* (if the page start and large object chunk start are
+  // the same) contain the large object chunk size.  In either case, the
+  // low-order bit for large object pages will be cleared.
+  int is_normal_page;
+
+  // The following fields may overlap with remembered set, they can only
+  // be used in the mark-compact collector when remembered set is not
+  // used.
+
+  // The index of the page in its owner space.
+  int mc_page_index;
+
+  // The allocation pointer after relocating objects to this page.
+  Address mc_relocation_top;
+
+  // The forwarding address of the first live object in this page.
+  Address mc_first_forwarded;
+
+#ifdef DEBUG
+ private:
+  static RSetState rset_state_;  // state of the remembered set
+#endif
+};
+
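+// Worked example (illustration only; the addresses are hypothetical): how the
+// Page constants above fit together on a 32-bit build, where kPointerSize == 4
+// and kBitsPerPointer == 32.
+//
+//   kPageSize          == 1 << 13    == 8192 bytes
+//   kPageAlignmentMask == 0x1FFF
+//   kRSetOffset        == 0
+//   kRSetEndOffset     == 8192 / 32  == 256
+//   kObjectStartOffset == 256
+//   kObjectAreaSize    == 8192 - 256 == 7936 bytes
+//
+//   // Address arithmetic for an address inside a page starting at 0x08010000:
+//   Address a          = reinterpret_cast<Address>(0x08011234);
+//   Address page_start = reinterpret_cast<Address>(
+//       reinterpret_cast<uintptr_t>(a) & ~Page::kPageAlignmentMask);
+//                                          // == 0x08010000
+//   int offset         = a - page_start;   // == 0x1234 == 4660
+//   // Offset(a) on that page returns the same value, and OffsetToAddress(4660)
+//   // maps back to 'a'.  One remembered-set bit covers each pointer-size word,
+//   // so 'a' corresponds to bit 4660 / 4 == 1165 of the page's remembered set.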
+
+// ----------------------------------------------------------------------------
+// Space is the abstract superclass for all allocation spaces.
+class Space : public Malloced {
+ public:
+  Space(AllocationSpace id, Executability executable)
+      : id_(id), executable_(executable) {}
+
+  virtual ~Space() {}
+
+  // Does the space need executable memory?
+  Executability executable() { return executable_; }
+
+  // Identity used in error reporting.
+  AllocationSpace identity() { return id_; }
+
+  virtual int Size() = 0;
+
+#ifdef DEBUG
+  virtual void Print() = 0;
+#endif
+
+ private:
+  AllocationSpace id_;
+  Executability executable_;
+};
+
+
+// ----------------------------------------------------------------------------
+// All heap objects containing executable code (code objects) must be allocated
+// from a 2 GB range of memory, so that they can call each other using 32-bit
+// displacements.  This happens automatically on 32-bit platforms, where 32-bit
+// displacements cover the entire 4GB virtual address space.  On 64-bit
+// platforms, we support this using the CodeRange object, which reserves and
+// manages a range of virtual memory.
+class CodeRange : public AllStatic {
+ public:
+  // Reserves a range of virtual memory, but does not commit any of it.
+  // Can only be called once, at heap initialization time.
+  // Returns false on failure.
+  static bool Setup(const size_t requested_size);
+
+  // Frees the range of virtual memory, and frees the data structures used to
+  // manage it.
+  static void TearDown();
+
+  static bool exists() { return code_range_ != NULL; }
+  static bool contains(Address address) {
+    if (code_range_ == NULL) return false;
+    Address start = static_cast<Address>(code_range_->address());
+    return start <= address && address < start + code_range_->size();
+  }
+
+  // Allocates a chunk of memory from the large-object portion of
+  // the code range.  On platforms with no separate code range, should
+  // not be called.
+  static void* AllocateRawMemory(const size_t requested, size_t* allocated);
+  static void FreeRawMemory(void* buf, size_t length);
+
+ private:
+  // The reserved range of virtual memory that all code objects are put in.
+  static VirtualMemory* code_range_;
+  // Plain old data class, just a struct plus a constructor.
+  class FreeBlock {
+   public:
+    FreeBlock(Address start_arg, size_t size_arg)
+        : start(start_arg), size(size_arg) {}
+    FreeBlock(void* start_arg, size_t size_arg)
+        : start(static_cast<Address>(start_arg)), size(size_arg) {}
+
+    Address start;
+    size_t size;
+  };
+
+  // Freed blocks of memory are added to the free list.  When the allocation
+  // list is exhausted, the free list is sorted and merged to make the new
+  // allocation list.
+  static List<FreeBlock> free_list_;
+  // Memory is allocated from the free blocks on the allocation list.
+  // The block at current_allocation_block_index_ is the current block.
+  static List<FreeBlock> allocation_list_;
+  static int current_allocation_block_index_;
+
+  // Finds a block on the allocation list that contains at least the
+  // requested amount of memory.  If none is found, sorts and merges
+  // the existing free memory blocks, and searches again.
+  // If none can be found, terminates V8 with FatalProcessOutOfMemory.
+  static void GetNextAllocationBlock(size_t requested);
+  // Compares the start addresses of two free blocks.
+  static int CompareFreeBlockAddress(const FreeBlock* left,
+                                     const FreeBlock* right);
+};
+
+
+// ----------------------------------------------------------------------------
+// A space acquires chunks of memory from the operating system. The memory
+// allocator manages chunks for the paged heap spaces (old space and map
+// space).  A paged chunk consists of pages. Pages in a chunk have contiguous
+// addresses and are linked as a list.
+//
+// The allocator keeps an initial chunk which is used for the new space.  The
+// leftover regions of the initial chunk are used for the initial chunks of
+// old space and map space if they are big enough to hold at least one page.
+// The allocator assumes that there is one old space and one map space; each
+// is expanded by allocating kPagesPerChunk pages per expansion, except
+// possibly the last expansion before running out of space.  The first chunk
+// may also contain fewer than kPagesPerChunk pages.
+//
+// The memory allocator also allocates chunks for the large object space, but
+// they are managed by the space itself.  The new space does not expand.
+
+class MemoryAllocator : public AllStatic {
+ public:
+  // Initializes its internal bookkeeping structures.  max_capacity is the
+  // maximum capacity of the total space, in bytes.
+  static bool Setup(int max_capacity);
+
+  // Deletes valid chunks.
+  static void TearDown();
+
+  // Reserves an initial address range of virtual memory to be split between
+  // the two new space semispaces, the old space, and the map space.  The
+  // memory is not yet committed or assigned to spaces and split into pages.
+  // The initial chunk is unmapped when the memory allocator is torn down.
+  // This function should only be called when there is not already a reserved
+  // initial chunk (initial_chunk_ should be NULL).  On success it returns the
+  // start address of the initial chunk, with the side effect of setting the
+  // initial chunk; on failure it returns NULL and leaves the initial chunk
+  // NULL.
+  static void* ReserveInitialChunk(const size_t requested);
+
+  // Commits pages from an as-yet-unmanaged block of virtual memory into a
+  // paged space.  The block should be part of the initial chunk reserved via
+  // a call to ReserveInitialChunk.  The number of pages is always returned in
+  // the output parameter num_pages.  This function assumes that the start
+  // address is non-null and that it is big enough to hold at least one
+  // page-aligned page.  The call always succeeds, and num_pages is always
+  // greater than zero.
+  static Page* CommitPages(Address start, size_t size, PagedSpace* owner,
+                           int* num_pages);
+
+  // Commit a contiguous block of memory from the initial chunk.  Assumes that
+  // the address is not NULL, the size is greater than zero, and that the
+  // block is contained in the initial chunk.  Returns true if it succeeded
+  // and false otherwise.
+  static bool CommitBlock(Address start, size_t size, Executability executable);
+
+
+  // Uncommit a contiguous block of memory [start..(start+size)[.  Assumes
+  // that start is not NULL, the size is greater than zero, and that the
+  // block is contained in the initial chunk.  Returns true if it succeeded
+  // and false otherwise.
+  static bool UncommitBlock(Address start, size_t size);
+
+  // Attempts to allocate the requested (non-zero) number of pages from the
+  // OS.  Fewer pages might be allocated than requested. If it fails to
+  // allocate memory from the OS or cannot allocate a single page, this
+  // function returns an invalid page pointer (NULL). The caller must check
+  // whether the returned page is valid (by calling Page::is_valid()).  It is
+  // guaranteed that allocated pages have contiguous addresses.  The actual
+  // number of allocated pages is returned in the output parameter
+  // allocated_pages.  If the PagedSpace owner is executable and there is
+  // a code range, the pages are allocated from the code range.
+  static Page* AllocatePages(int requested_pages, int* allocated_pages,
+                             PagedSpace* owner);
+
+  // Frees pages from a given page and after. If 'p' is the first page
+  // of a chunk, pages from 'p' are freed and this function returns an
+  // invalid page pointer. Otherwise, the function searches a page
+  // after 'p' that is the first page of a chunk. Pages after the
+  // found page are freed and the function returns 'p'.
+  static Page* FreePages(Page* p);
+
+  // Allocates and frees raw memory of certain size.
+  // These are just thin wrappers around OS::Allocate and OS::Free,
+  // but keep track of allocated bytes as part of heap.
+  // If the flag is EXECUTABLE and a code range exists, the requested
+  // memory is allocated from the code range.  If a code range exists
+  // and the freed memory is in it, the code range manages the freed memory.
+  static void* AllocateRawMemory(const size_t requested,
+                                 size_t* allocated,
+                                 Executability executable);
+  static void FreeRawMemory(void* buf, size_t length);
+
+  // Returns the maximum available bytes of heaps.
+  static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
+
+  // Returns allocated spaces in bytes.
+  static int Size() { return size_; }
+
+  // Returns maximum available bytes that the old space can have.
+  static int MaxAvailable() {
+    return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
+  }
+
+  // Links two pages.
+  static inline void SetNextPage(Page* prev, Page* next);
+
+  // Returns the next page of a given page.
+  static inline Page* GetNextPage(Page* p);
+
+  // Checks whether a page belongs to a space.
+  static inline bool IsPageInSpace(Page* p, PagedSpace* space);
+
+  // Returns the space that owns the given page.
+  static inline PagedSpace* PageOwner(Page* page);
+
+  // Finds the first/last page in the same chunk as a given page.
+  static Page* FindFirstPageInSameChunk(Page* p);
+  static Page* FindLastPageInSameChunk(Page* p);
+
+#ifdef ENABLE_HEAP_PROTECTION
+  // Protect/unprotect a block of memory by marking it read-only/writable.
+  static inline void Protect(Address start, size_t size);
+  static inline void Unprotect(Address start, size_t size,
+                               Executability executable);
+
+  // Protect/unprotect a chunk given a page in the chunk.
+  static inline void ProtectChunkFromPage(Page* page);
+  static inline void UnprotectChunkFromPage(Page* page);
+#endif
+
+#ifdef DEBUG
+  // Reports statistic info of the space.
+  static void ReportStatistics();
+#endif
+
+  // Due to an encoding limitation, we can only have 8K chunks.
+  static const int kMaxNofChunks = 1 << Page::kPageSizeBits;
+  // If a chunk has at least 16 pages, the maximum heap size is about
+  // 8K * 8K * 16 = 1G bytes.
+#ifdef V8_TARGET_ARCH_X64
+  static const int kPagesPerChunk = 32;
+#else
+  static const int kPagesPerChunk = 16;
+#endif
+  static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
+
+ private:
+  // Maximum space size in bytes.
+  static int capacity_;
+
+  // Allocated space size in bytes.
+  static int size_;
+
+  // The initial chunk of virtual memory.
+  static VirtualMemory* initial_chunk_;
+
+  // Allocated chunk info: chunk start address, chunk size, and owning space.
+  class ChunkInfo BASE_EMBEDDED {
+   public:
+    ChunkInfo() : address_(NULL), size_(0), owner_(NULL) {}
+    void init(Address a, size_t s, PagedSpace* o) {
+      address_ = a;
+      size_ = s;
+      owner_ = o;
+    }
+    Address address() { return address_; }
+    size_t size() { return size_; }
+    PagedSpace* owner() { return owner_; }
+
+   private:
+    Address address_;
+    size_t size_;
+    PagedSpace* owner_;
+  };
+
+  // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
+  static List<ChunkInfo> chunks_;
+  static List<int> free_chunk_ids_;
+  static int max_nof_chunks_;
+  static int top_;
+
+  // Push/pop a free chunk id onto/from the stack.
+  static void Push(int free_chunk_id);
+  static int Pop();
+  static bool OutOfChunkIds() { return top_ == 0; }
+
+  // Frees a chunk.
+  static void DeleteChunk(int chunk_id);
+
+  // Basic check whether a chunk id is in the valid range.
+  static inline bool IsValidChunkId(int chunk_id);
+
+  // Checks whether a chunk id identifies an allocated chunk.
+  static inline bool IsValidChunk(int chunk_id);
+
+  // Returns the chunk id that a page belongs to.
+  static inline int GetChunkId(Page* p);
+
+  // True if the address lies in the initial chunk.
+  static inline bool InInitialChunk(Address address);
+
+  // Initializes pages in a chunk. Returns the first page address.
+  // This function and GetChunkId() are provided for the mark-compact
+  // collector to rebuild page headers in the from space, which is
+  // used as a marking stack and its page headers are destroyed.
+  static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
+                                      PagedSpace* owner);
+};
+
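+// Worked example (illustration only), using the MemoryAllocator constants
+// above on a 32-bit build:
+//
+//   kChunkSize    == kPagesPerChunk * Page::kPageSize == 16 * 8K == 128 KB
+//   kMaxNofChunks == 1 << Page::kPageSizeBits == 8192, so the encodable heap
+//                    is at most 8192 * 128 KB == 1 GB, as noted above.
+//
+//   // With a hypothetical 512 MB capacity and nothing allocated yet:
+//   //   Available()    == 536870912 bytes (512 MB)
+//   //   MaxAvailable() == (536870912 / 8192) * 7936 == 520093696 bytes,
+//   //                     i.e. about 496 MB of object area; the remaining
+//   //                     16 MB is page headers and remembered sets.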
+
+// -----------------------------------------------------------------------------
+// Interface for heap object iterator to be implemented by all object space
+// object iterators.
+//
+// NOTE: The space-specific object iterators also implement their own
+//       has_next() and next() methods, which are used to avoid virtual
+//       function calls when iterating a specific space.
+
+class ObjectIterator : public Malloced {
+ public:
+  virtual ~ObjectIterator() { }
+
+  virtual bool has_next_object() = 0;
+  virtual HeapObject* next_object() = 0;
+};
+
+
+// -----------------------------------------------------------------------------
+// Heap object iterator in new/old/map spaces.
+//
+// A HeapObjectIterator iterates objects from a given address to the
+// top of a space. The given address must be below the current
+// allocation pointer (space top). There are some caveats.
+//
+// (1) If the space top changes upward during iteration (because of
+//     allocating new objects), the iterator does not iterate objects
+//     above the original space top. The caller must create a new
+//     iterator starting from the old top in order to visit these new
+//     objects.
+//
+// (2) If new objects are allocated below the original allocation top
+//     (e.g., free-list allocation in paged spaces), the new objects
+//     may or may not be iterated depending on their position with
+//     respect to the current point of iteration.
+//
+// (3) The space top should not change downward during iteration,
+//     otherwise the iterator will return not-necessarily-valid
+//     objects.
+
+class HeapObjectIterator: public ObjectIterator {
+ public:
+  // Creates a new object iterator in a given space. If a start
+  // address is not given, the iterator starts from the space bottom.
+  // If the size function is not given, the iterator calls the default
+  // Object::Size().
+  explicit HeapObjectIterator(PagedSpace* space);
+  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
+  HeapObjectIterator(PagedSpace* space, Address start);
+  HeapObjectIterator(PagedSpace* space,
+                     Address start,
+                     HeapObjectCallback size_func);
+
+  inline bool has_next();
+  inline HeapObject* next();
+
+  // implementation of ObjectIterator.
+  virtual bool has_next_object() { return has_next(); }
+  virtual HeapObject* next_object() { return next(); }
+
+ private:
+  Address cur_addr_;  // current iteration point
+  Address end_addr_;  // end iteration point
+  Address cur_limit_;  // current page limit
+  HeapObjectCallback size_func_;  // size function
+  Page* end_page_;  // caches the page of the end address
+
+  // Slow path of has_next, checks whether there are more objects in
+  // the next page.
+  bool HasNextInNextPage();
+
+  // Initializes fields.
+  void Initialize(Address start, Address end, HeapObjectCallback size_func);
+
+#ifdef DEBUG
+  // Verifies whether fields have valid values.
+  void Verify();
+#endif
+};
+
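+// Typical usage (illustration only; how the space pointer is obtained is left
+// open here):
+//
+//   PagedSpace* space = ...;  // some paged space owned by the heap
+//   HeapObjectIterator it(space);
+//   while (it.has_next()) {
+//     HeapObject* obj = it.next();
+//     // Process obj.  See the caveats above about allocation and space-top
+//     // changes during iteration.
+//   }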
+
+// -----------------------------------------------------------------------------
+// A PageIterator iterates the pages in a paged space.
+//
+// The PageIterator class provides three modes for iterating pages in a space:
+//   PAGES_IN_USE iterates pages containing allocated objects.
+//   PAGES_USED_BY_MC iterates pages that hold relocated objects during a
+//                    mark-compact collection.
+//   ALL_PAGES iterates all pages in the space.
+//
+// There are some caveats.
+//
+// (1) If the space expands during iteration, new pages will not be
+//     returned by the iterator in any mode.
+//
+// (2) If new objects are allocated during iteration, they will appear
+//     in pages returned by the iterator.  Allocation may cause the
+//     allocation pointer or MC allocation pointer in the last page to
+//     change between constructing the iterator and iterating the last
+//     page.
+//
+// (3) The space should not shrink during iteration, otherwise the
+//     iterator will return deallocated pages.
+
+class PageIterator BASE_EMBEDDED {
+ public:
+  enum Mode {
+    PAGES_IN_USE,
+    PAGES_USED_BY_MC,
+    ALL_PAGES
+  };
+
+  PageIterator(PagedSpace* space, Mode mode);
+
+  inline bool has_next();
+  inline Page* next();
+
+ private:
+  PagedSpace* space_;
+  Page* prev_page_;  // Previous page returned.
+  Page* stop_page_;  // Page to stop at (last page returned by the iterator).
+};
+
+
+// -----------------------------------------------------------------------------
+// A space has a list of pages. The next page can be accessed via
+// Page::next_page() call. The next page of the last page is an
+// invalid page pointer. A space can expand and shrink dynamically.
+
+// An abstraction of allocation and relocation pointers in a page-structured
+// space.
+class AllocationInfo {
+ public:
+  Address top;  // current allocation top
+  Address limit;  // current allocation limit
+
+#ifdef DEBUG
+  bool VerifyPagedAllocation() {
+    return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
+        && (top <= limit);
+  }
+#endif
+};
+
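+// Sketch (illustration only; TryBumpAllocate is a hypothetical name): the
+// linear "bump-pointer" fast path that an AllocationInfo supports, roughly
+// what the AllocateLinearly/AllocateRawInternal fast paths below do.
+//
+//   HeapObject* TryBumpAllocate(AllocationInfo* info, int size_in_bytes) {
+//     Address old_top = info->top;
+//     Address new_top = old_top + size_in_bytes;
+//     if (new_top > info->limit) return NULL;  // Full: caller takes slow path.
+//     info->top = new_top;
+//     return HeapObject::FromAddress(old_top);
+//   }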
+
+// An abstraction of the accounting statistics of a page-structured space.
+// The 'capacity' of a space is the number of object-area bytes (ie, not
+// including page bookkeeping structures) currently in the space. The 'size'
+// of a space is the number of allocated bytes, the 'waste' in the space is
+// the number of bytes that are not allocated and not available to
+// allocation without reorganizing the space via a GC (eg, small blocks due
+// to internal fragmentation, top of page areas in map space), and the bytes
+// 'available' is the number of unallocated bytes that are not waste.  The
+// capacity is the sum of size, waste, and available.
+//
+// The stats are only set by functions that ensure they stay balanced. These
+// functions increase or decrease one of the non-capacity stats in
+// conjunction with capacity, or else they always balance increases and
+// decreases to the non-capacity stats.
+class AllocationStats BASE_EMBEDDED {
+ public:
+  AllocationStats() { Clear(); }
+
+  // Zero out all the allocation statistics (ie, no capacity).
+  void Clear() {
+    capacity_ = 0;
+    available_ = 0;
+    size_ = 0;
+    waste_ = 0;
+  }
+
+  // Reset the allocation statistics (ie, available = capacity with no
+  // wasted or allocated bytes).
+  void Reset() {
+    available_ = capacity_;
+    size_ = 0;
+    waste_ = 0;
+  }
+
+  // Accessors for the allocation statistics.
+  int Capacity() { return capacity_; }
+  int Available() { return available_; }
+  int Size() { return size_; }
+  int Waste() { return waste_; }
+
+  // Grow the space by adding available bytes.
+  void ExpandSpace(int size_in_bytes) {
+    capacity_ += size_in_bytes;
+    available_ += size_in_bytes;
+  }
+
+  // Shrink the space by removing available bytes.
+  void ShrinkSpace(int size_in_bytes) {
+    capacity_ -= size_in_bytes;
+    available_ -= size_in_bytes;
+  }
+
+  // Allocate from available bytes (available -> size).
+  void AllocateBytes(int size_in_bytes) {
+    available_ -= size_in_bytes;
+    size_ += size_in_bytes;
+  }
+
+  // Free allocated bytes, making them available (size -> available).
+  void DeallocateBytes(int size_in_bytes) {
+    size_ -= size_in_bytes;
+    available_ += size_in_bytes;
+  }
+
+  // Waste free bytes (available -> waste).
+  void WasteBytes(int size_in_bytes) {
+    available_ -= size_in_bytes;
+    waste_ += size_in_bytes;
+  }
+
+  // Consider the wasted bytes to be allocated, as they contain filler
+  // objects (waste -> size).
+  void FillWastedBytes(int size_in_bytes) {
+    waste_ -= size_in_bytes;
+    size_ += size_in_bytes;
+  }
+
+ private:
+  int capacity_;
+  int available_;
+  int size_;
+  int waste_;
+};
+
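+// Worked example (illustration only): every mutator above preserves the
+// invariant capacity == size + waste + available.
+//
+//   AllocationStats stats;
+//   stats.ExpandSpace(Page::kObjectAreaSize);  // capacity = available = 7936
+//   stats.AllocateBytes(4096);                 // size = 4096, available = 3840
+//   stats.WasteBytes(64);                      // waste = 64, available = 3776
+//   // 4096 + 64 + 3776 == 7936 == stats.Capacity()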
+
+class PagedSpace : public Space {
+ public:
+  // Creates a space with a maximum capacity, and an id.
+  PagedSpace(int max_capacity, AllocationSpace id, Executability executable);
+
+  virtual ~PagedSpace() {}
+
+  // Set up the space using the given address range of virtual memory (from
+  // the memory allocator's initial chunk) if possible.  If the block of
+  // addresses is not big enough to contain a single page-aligned page, a
+  // fresh chunk will be allocated.
+  bool Setup(Address start, size_t size);
+
+  // Returns true if the space has been successfully set up and not
+  // subsequently torn down.
+  bool HasBeenSetup();
+
+  // Cleans up the space, frees all pages in this space except those belonging
+  // to the initial chunk, uncommits addresses in the initial chunk.
+  void TearDown();
+
+  // Checks whether an object/address is in this space.
+  inline bool Contains(Address a);
+  bool Contains(HeapObject* o) { return Contains(o->address()); }
+
+  // Given an address occupied by a live object, return that object if it is
+  // in this space, or Failure::Exception() if it is not. The implementation
+  // iterates over objects in the page containing the address, so the cost is
+  // linear in the number of objects in the page.  It may be slow.
+  Object* FindObject(Address addr);
+
+  // Checks whether page is currently in use by this space.
+  bool IsUsed(Page* page);
+
+  // Clears remembered sets of pages in this space.
+  void ClearRSet();
+
+  // Prepares for a mark-compact GC.
+  virtual void PrepareForMarkCompact(bool will_compact) = 0;
+
+  virtual Address PageAllocationTop(Page* page) = 0;
+
+  // Current capacity without growing (Size() + Available() + Waste()).
+  int Capacity() { return accounting_stats_.Capacity(); }
+
+  // Available bytes without growing.
+  int Available() { return accounting_stats_.Available(); }
+
+  // Allocated bytes in this space.
+  virtual int Size() { return accounting_stats_.Size(); }
+
+  // Wasted bytes due to fragmentation and not recoverable until the
+  // next GC of this space.
+  int Waste() { return accounting_stats_.Waste(); }
+
+  // Returns the address of the first object in this space.
+  Address bottom() { return first_page_->ObjectAreaStart(); }
+
+  // Returns the allocation pointer in this space.
+  Address top() { return allocation_info_.top; }
+
+  // Allocate the requested number of bytes in the space if possible, return a
+  // failure object if not.
+  inline Object* AllocateRaw(int size_in_bytes);
+
+  // Allocate the requested number of bytes for relocation during mark-compact
+  // collection.
+  inline Object* MCAllocateRaw(int size_in_bytes);
+
+
+  // ---------------------------------------------------------------------------
+  // Mark-compact collection support functions
+
+  // Set the relocation point to the beginning of the space.
+  void MCResetRelocationInfo();
+
+  // Writes relocation info to the top page.
+  void MCWriteRelocationInfoToPage() {
+    TopPageOf(mc_forwarding_info_)->mc_relocation_top = mc_forwarding_info_.top;
+  }
+
+  // Computes the offset of a given address in this space to the beginning
+  // of the space.
+  int MCSpaceOffsetForAddress(Address addr);
+
+  // Updates the allocation pointer to the relocation top after a mark-compact
+  // collection.
+  virtual void MCCommitRelocationInfo() = 0;
+
+  // Releases half of unused pages.
+  void Shrink();
+
+  // Ensures that the capacity is at least 'capacity'. Returns false on failure.
+  bool EnsureCapacity(int capacity);
+
+#ifdef ENABLE_HEAP_PROTECTION
+  // Protect/unprotect the space by marking it read-only/writable.
+  void Protect();
+  void Unprotect();
+#endif
+
+#ifdef DEBUG
+  // Print meta info and objects in this space.
+  virtual void Print();
+
+  // Verify integrity of this space.
+  virtual void Verify(ObjectVisitor* visitor);
+
+  // Overridden by subclasses to verify space-specific object
+  // properties (e.g., only maps or free-list nodes are in map space).
+  virtual void VerifyObject(HeapObject* obj) {}
+
+  // Report code object related statistics
+  void CollectCodeStatistics();
+  static void ReportCodeStatistics();
+  static void ResetCodeStatistics();
+#endif
+
+ protected:
+  // Maximum capacity of this space.
+  int max_capacity_;
+
+  // Accounting information for this space.
+  AllocationStats accounting_stats_;
+
+  // The first page in this space.
+  Page* first_page_;
+
+  // The last page in this space.  Initially set in Setup, updated in
+  // Expand and Shrink.
+  Page* last_page_;
+
+  // Normal allocation information.
+  AllocationInfo allocation_info_;
+
+  // Relocation information during mark-compact collections.
+  AllocationInfo mc_forwarding_info_;
+
+  // Bytes of each page that cannot be allocated.  Possibly non-zero
+  // for pages in spaces with only fixed-size objects.  Always zero
+  // for pages in spaces with variable sized objects (those pages are
+  // padded with free-list nodes).
+  int page_extra_;
+
+  // Sets allocation pointer to a page bottom.
+  static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);
+
+  // Returns the top page specified by an allocation info structure.
+  static Page* TopPageOf(AllocationInfo alloc_info) {
+    return Page::FromAllocationTop(alloc_info.limit);
+  }
+
+  // Expands the space by allocating a fixed number of pages. Returns false if
+  // it cannot allocate the requested number of pages from the OS.  Newly
+  // allocated pages are appended to last_page.
+  bool Expand(Page* last_page);
+
+  // Generic fast case allocation function that tries linear allocation in
+  // the top page of 'alloc_info'.  Returns NULL on failure.
+  inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
+                                      int size_in_bytes);
+
+  // During normal allocation or deserialization, roll to the next page in
+  // the space (there is assumed to be one) and allocate there.  This
+  // function is space-dependent.
+  virtual HeapObject* AllocateInNextPage(Page* current_page,
+                                         int size_in_bytes) = 0;
+
+  // Slow path of AllocateRaw.  This function is space-dependent.
+  virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;
+
+  // Slow path of MCAllocateRaw.
+  HeapObject* SlowMCAllocateRaw(int size_in_bytes);
+
+#ifdef DEBUG
+  void DoPrintRSet(const char* space_name);
+#endif
+ private:
+  // Returns the page of the allocation pointer.
+  Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
+
+  // Returns a pointer to the page of the relocation pointer.
+  Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
+
+#ifdef DEBUG
+  // Returns the number of total pages in this space.
+  int CountTotalPages();
+#endif
+
+  friend class PageIterator;
+};
+
+
+#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+class NumberAndSizeInfo BASE_EMBEDDED {
+ public:
+  NumberAndSizeInfo() : number_(0), bytes_(0) {}
+
+  int number() const { return number_; }
+  void increment_number(int num) { number_ += num; }
+
+  int bytes() const { return bytes_; }
+  void increment_bytes(int size) { bytes_ += size; }
+
+  void clear() {
+    number_ = 0;
+    bytes_ = 0;
+  }
+
+ private:
+  int number_;
+  int bytes_;
+};
+
+
+// HistogramInfo class for recording a single "bar" of a histogram.  This
+// class is used for collecting statistics to print to stdout (when compiled
+// with DEBUG) or to the log file (when compiled with
+// ENABLE_LOGGING_AND_PROFILING).
+class HistogramInfo: public NumberAndSizeInfo {
+ public:
+  HistogramInfo() : NumberAndSizeInfo() {}
+
+  const char* name() { return name_; }
+  void set_name(const char* name) { name_ = name; }
+
+ private:
+  const char* name_;
+};
+#endif
+
+
+// -----------------------------------------------------------------------------
+// SemiSpace in young generation
+//
+// A semispace is a contiguous chunk of memory. The mark-compact collector
+// uses the memory in the from space as a marking stack when tracing live
+// objects.
+
+class SemiSpace : public Space {
+ public:
+  // Constructor.
+  SemiSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {
+    start_ = NULL;
+    age_mark_ = NULL;
+  }
+
+  // Sets up the semispace using the given chunk.
+  bool Setup(Address start, int initial_capacity, int maximum_capacity);
+
+  // Tear down the space.  Heap memory was not allocated by the space, so it
+  // is not deallocated here.
+  void TearDown();
+
+  // True if the space has been set up but not torn down.
+  bool HasBeenSetup() { return start_ != NULL; }
+
+  // Grow the size of the semispace by committing extra virtual memory.
+  // Assumes that the caller has checked that the semispace has not reached
+  // its maximum capacity (and thus there is space available in the reserved
+  // address range to grow).
+  bool Grow();
+
+  // Grow the semispace to the new capacity.  The new capacity
+  // requested must be larger than the current capacity.
+  bool GrowTo(int new_capacity);
+
+  // Shrinks the semispace to the new capacity.  The new capacity
+  // requested must be more than the amount of used memory in the
+  // semispace and less than the current capacity.
+  bool ShrinkTo(int new_capacity);
+
+  // Returns the start address of the space.
+  Address low() { return start_; }
+  // Returns one past the end address of the space.
+  Address high() { return low() + capacity_; }
+
+  // Age mark accessors.
+  Address age_mark() { return age_mark_; }
+  void set_age_mark(Address mark) { age_mark_ = mark; }
+
+  // True if the address is in the address range of this semispace (not
+  // necessarily below the allocation pointer).
+  bool Contains(Address a) {
+    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
+           == reinterpret_cast<uintptr_t>(start_);
+  }
+
+  // True if the object is a heap object in the address range of this
+  // semispace (not necessarily below the allocation pointer).
+  bool Contains(Object* o) {
+    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
+  }
+
+  // The offset of an address from the beginning of the space.
+  int SpaceOffsetForAddress(Address addr) { return addr - low(); }
+
+  // If we don't have this here then SemiSpace will be abstract.  However
+  // it should never be called.
+  virtual int Size() {
+    UNREACHABLE();
+    return 0;
+  }
+
+  bool is_committed() { return committed_; }
+  bool Commit();
+  bool Uncommit();
+
+#ifdef DEBUG
+  virtual void Print();
+  virtual void Verify();
+#endif
+
+  // Returns the current capacity of the semi space.
+  int Capacity() { return capacity_; }
+
+  // Returns the maximum capacity of the semi space.
+  int MaximumCapacity() { return maximum_capacity_; }
+
+  // Returns the initial capacity of the semi space.
+  int InitialCapacity() { return initial_capacity_; }
+
+ private:
+  // The current and maximum capacity of the space.
+  int capacity_;
+  int maximum_capacity_;
+  int initial_capacity_;
+
+  // The start address of the space.
+  Address start_;
+  // Used to govern object promotion during mark-compact collection.
+  Address age_mark_;
+
+  // Masks and comparison values to test for containment in this semispace.
+  uintptr_t address_mask_;
+  uintptr_t object_mask_;
+  uintptr_t object_expected_;
+
+  bool committed_;
+
+ public:
+  TRACK_MEMORY("SemiSpace")
+};
+
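+// Worked example (illustration only; the concrete numbers are hypothetical):
+// the mask-based Contains() test above relies on the semispace being a
+// power-of-two-sized region aligned to its own size.  For a 2 MB semispace
+// starting at 0x20000000:
+//
+//   start_        == 0x20000000
+//   address_mask_ == ~(2 MB - 1) == 0xFFE00000
+//   // Any address in [0x20000000, 0x20200000) ANDed with address_mask_
+//   // yields start_, so containment is a single AND plus compare.  The
+//   // object_mask_/object_expected_ pair presumably folds in the heap-object
+//   // tag bits so that tagged Object* values can be tested the same way.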
+
+// A SemiSpaceIterator is an ObjectIterator that iterates over the active
+// semispace of the heap's new space.  It iterates over the objects in the
+// semispace from a given start address (defaulting to the bottom of the
+// semispace) to the top of the semispace.  New objects allocated after the
+// iterator is created are not iterated.
+class SemiSpaceIterator : public ObjectIterator {
+ public:
+  // Create an iterator over the objects in the given space.  If no start
+  // address is given, the iterator starts from the bottom of the space.  If
+  // no size function is given, the iterator calls Object::Size().
+  explicit SemiSpaceIterator(NewSpace* space);
+  SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
+  SemiSpaceIterator(NewSpace* space, Address start);
+
+  bool has_next() { return current_ < limit_; }
+
+  HeapObject* next() {
+    ASSERT(has_next());
+
+    HeapObject* object = HeapObject::FromAddress(current_);
+    int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
+
+    current_ += size;
+    return object;
+  }
+
+  // Implementation of the ObjectIterator functions.
+  virtual bool has_next_object() { return has_next(); }
+  virtual HeapObject* next_object() { return next(); }
+
+ private:
+  void Initialize(NewSpace* space, Address start, Address end,
+                  HeapObjectCallback size_func);
+
+  // The semispace.
+  SemiSpace* space_;
+  // The current iteration point.
+  Address current_;
+  // The end of iteration.
+  Address limit_;
+  // The callback function.
+  HeapObjectCallback size_func_;
+};
+
+
+// -----------------------------------------------------------------------------
+// The young generation space.
+//
+// The new space consists of a contiguous pair of semispaces.  It simply
+// forwards most functions to the appropriate semispace.
+
+class NewSpace : public Space {
+ public:
+  // Constructor.
+  NewSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {}
+
+  // Sets up the new space using the given chunk.
+  bool Setup(Address start, int size);
+
+  // Tears down the space.  Heap memory was not allocated by the space, so it
+  // is not deallocated here.
+  void TearDown();
+
+  // True if the space has been set up but not torn down.
+  bool HasBeenSetup() {
+    return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
+  }
+
+  // Flip the pair of spaces.
+  void Flip();
+
+  // Grow the capacity of the semispaces.  Assumes that they are not at
+  // their maximum capacity.
+  void Grow();
+
+  // Shrink the capacity of the semispaces.
+  void Shrink();
+
+  // True if the address or object lies in the address range of either
+  // semispace (not necessarily below the allocation pointer).
+  bool Contains(Address a) {
+    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
+        == reinterpret_cast<uintptr_t>(start_);
+  }
+  bool Contains(Object* o) {
+    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
+  }
+
+  // Return the allocated bytes in the active semispace.
+  virtual int Size() { return top() - bottom(); }
+  // Return the current capacity of a semispace.
+  int Capacity() {
+    ASSERT(to_space_.Capacity() == from_space_.Capacity());
+    return to_space_.Capacity();
+  }
+  // Return the available bytes without growing in the active semispace.
+  int Available() { return Capacity() - Size(); }
+
+  // Return the maximum capacity of a semispace.
+  int MaximumCapacity() {
+    ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
+    return to_space_.MaximumCapacity();
+  }
+
+  // Returns the initial capacity of a semispace.
+  int InitialCapacity() {
+    ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
+    return to_space_.InitialCapacity();
+  }
+
+  // Return the address of the allocation pointer in the active semispace.
+  Address top() { return allocation_info_.top; }
+  // Return the address of the first object in the active semispace.
+  Address bottom() { return to_space_.low(); }
+
+  // Get the age mark of the inactive semispace.
+  Address age_mark() { return from_space_.age_mark(); }
+  // Set the age mark in the active semispace.
+  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
+
+  // The start address of the space and a bit mask. Anding an address in the
+  // new space with the mask will result in the start address.
+  Address start() { return start_; }
+  uintptr_t mask() { return address_mask_; }
+
+  // The allocation top and limit addresses.
+  Address* allocation_top_address() { return &allocation_info_.top; }
+  Address* allocation_limit_address() { return &allocation_info_.limit; }
+
+  Object* AllocateRaw(int size_in_bytes) {
+    return AllocateRawInternal(size_in_bytes, &allocation_info_);
+  }
+
+  // Allocate the requested number of bytes for relocation during mark-compact
+  // collection.
+  Object* MCAllocateRaw(int size_in_bytes) {
+    return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
+  }
+
+  // Reset the allocation pointer to the beginning of the active semispace.
+  void ResetAllocationInfo();
+  // Reset the relocation pointer to the bottom of the inactive semispace in
+  // preparation for mark-compact collection.
+  void MCResetRelocationInfo();
+  // Update the allocation pointer in the active semispace after a
+  // mark-compact collection.
+  void MCCommitRelocationInfo();
+
+  // Get the extent of the inactive semispace (for use as a marking stack).
+  Address FromSpaceLow() { return from_space_.low(); }
+  Address FromSpaceHigh() { return from_space_.high(); }
+
+  // Get the extent of the active semispace (to sweep newly copied objects
+  // during a scavenge collection).
+  Address ToSpaceLow() { return to_space_.low(); }
+  Address ToSpaceHigh() { return to_space_.high(); }
+
+  // Offsets from the beginning of the semispaces.
+  int ToSpaceOffsetForAddress(Address a) {
+    return to_space_.SpaceOffsetForAddress(a);
+  }
+  int FromSpaceOffsetForAddress(Address a) {
+    return from_space_.SpaceOffsetForAddress(a);
+  }
+
+  // True if the object is a heap object in the address range of the
+  // respective semispace (not necessarily below the allocation pointer of the
+  // semispace).
+  bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
+  bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
+
+  bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
+  bool FromSpaceContains(Address a) { return from_space_.Contains(a); }
+
+#ifdef ENABLE_HEAP_PROTECTION
+  // Protect/unprotect the space by marking it read-only/writable.
+  virtual void Protect();
+  virtual void Unprotect();
+#endif
+
+#ifdef DEBUG
+  // Verify the active semispace.
+  virtual void Verify();
+  // Print the active semispace.
+  virtual void Print() { to_space_.Print(); }
+#endif
+
+#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+  // Iterates the active semispace to collect statistics.
+  void CollectStatistics();
+  // Reports previously collected statistics of the active semispace.
+  void ReportStatistics();
+  // Clears previously collected statistics.
+  void ClearHistograms();
+
+  // Record the allocation or promotion of a heap object.  Note that we don't
+  // record every single allocation, but only those that happen in the
+  // to space during a scavenge GC.
+  void RecordAllocation(HeapObject* obj);
+  void RecordPromotion(HeapObject* obj);
+#endif
+
+  // Return whether the operation succeeded.
+  bool CommitFromSpaceIfNeeded() {
+    if (from_space_.is_committed()) return true;
+    return from_space_.Commit();
+  }
+
+  bool UncommitFromSpace() {
+    if (!from_space_.is_committed()) return true;
+    return from_space_.Uncommit();
+  }
+
+ private:
+  // The semispaces.
+  SemiSpace to_space_;
+  SemiSpace from_space_;
+
+  // Start address and bit mask for containment testing.
+  Address start_;
+  uintptr_t address_mask_;
+  uintptr_t object_mask_;
+  uintptr_t object_expected_;
+
+  // Allocation pointer and limit for normal allocation and allocation during
+  // mark-compact collection.
+  AllocationInfo allocation_info_;
+  AllocationInfo mc_forwarding_info_;
+
+#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+  HistogramInfo* allocated_histogram_;
+  HistogramInfo* promoted_histogram_;
+#endif
+
+  // Implementation of AllocateRaw and MCAllocateRaw.
+  inline Object* AllocateRawInternal(int size_in_bytes,
+                                     AllocationInfo* alloc_info);
+
+  friend class SemiSpaceIterator;
+
+ public:
+  TRACK_MEMORY("NewSpace")
+};
+
+
+// -----------------------------------------------------------------------------
+// Free lists for old object spaces
+//
+// Free-list nodes are free blocks in the heap.  They look like heap objects
+// (free-list node pointers have the heap object tag, and they have a map like
+// a heap object).  They have a size and a next pointer.  The next pointer is
+// the raw address of the next free list node (or NULL).
+class FreeListNode: public HeapObject {
+ public:
+  // Obtain a free-list node from a raw address.  This is not a cast because
+  // it does not check nor require that the first word at the address is a map
+  // pointer.
+  static FreeListNode* FromAddress(Address address) {
+    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
+  }
+
+  // Set the size in bytes, which can be read with HeapObject::Size().  This
+  // function also writes a map to the first word of the block so that it
+  // looks like a heap object to the garbage collector and heap iteration
+  // functions.
+  void set_size(int size_in_bytes);
+
+  // Accessors for the next field.
+  inline Address next();
+  inline void set_next(Address next);
+
+ private:
+  static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
+};
+
+
+// The free list for the old space.
+class OldSpaceFreeList BASE_EMBEDDED {
+ public:
+  explicit OldSpaceFreeList(AllocationSpace owner);
+
+  // Clear the free list.
+  void Reset();
+
+  // Return the number of bytes available on the free list.
+  int available() { return available_; }
+
+  // Place a node on the free list.  The block of size 'size_in_bytes'
+  // starting at 'start' is placed on the free list.  The return value is the
+  // number of bytes that have been lost due to internal fragmentation by
+  // freeing the block.  Bookkeeping information will be written to the block,
+  // ie, its contents will be destroyed.  The start address should be word
+  // aligned, and the size should be a non-zero multiple of the word size.
+  int Free(Address start, int size_in_bytes);
+
+  // Allocate a block of size 'size_in_bytes' from the free list.  The block
+  // is uninitialized.  A failure is returned if no block is available.  The
+  // number of bytes lost to fragmentation is returned in the output parameter
+  // 'wasted_bytes'.  The size should be a non-zero multiple of the word size.
+  Object* Allocate(int size_in_bytes, int* wasted_bytes);
+
+ private:
+  // The size range of blocks, in bytes. (Smaller allocations are allowed, but
+  // will always result in waste.)
+  static const int kMinBlockSize = 2 * kPointerSize;
+  static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
+
+  // The identity of the owning space, for building allocation Failure
+  // objects.
+  AllocationSpace owner_;
+
+  // Total available bytes in all blocks on this free list.
+  int available_;
+
+  // Blocks are put on exact free lists in an array, indexed by size in words.
+  // The available sizes are kept in an increasingly ordered list. Entries
+  // corresponding to sizes < kMinBlockSize always have an empty free list
+  // (but index kHead is used for the head of the size list).
+  struct SizeNode {
+    // Address of the head FreeListNode of the implied block size or NULL.
+    Address head_node_;
+    // Size (words) of the next larger available size if head_node_ != NULL.
+    int next_size_;
+  };
+  static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
+  SizeNode free_[kFreeListsLength];
+
+  // Sentinel elements for the size list. Real elements are in ]kHead..kEnd[.
+  static const int kHead = kMinBlockSize / kPointerSize - 1;
+  static const int kEnd = kMaxInt;
+
+  // We keep a "finger" in the size list to speed up a common pattern:
+  // repeated requests for the same or increasing sizes.
+  int finger_;
+
+  // Starting from *prev, find and return the smallest size >= index (words),
+  // or kEnd. Update *prev to be the largest size < index, or kHead.
+  int FindSize(int index, int* prev) {
+    int cur = free_[*prev].next_size_;
+    while (cur < index) {
+      *prev = cur;
+      cur = free_[cur].next_size_;
+    }
+    return cur;
+  }
+
+  // Remove an existing element from the size list.
+  void RemoveSize(int index) {
+    int prev = kHead;
+    int cur = FindSize(index, &prev);
+    ASSERT(cur == index);
+    free_[prev].next_size_ = free_[cur].next_size_;
+    finger_ = prev;
+  }
+
+  // Insert a new element into the size list.
+  void InsertSize(int index) {
+    int prev = kHead;
+    int cur = FindSize(index, &prev);
+    ASSERT(cur != index);
+    free_[prev].next_size_ = index;
+    free_[index].next_size_ = cur;
+  }
+
+  // The size list is not updated during a sequence of calls to Free, but is
+  // rebuilt before the next allocation.
+  void RebuildSizeList();
+  bool needs_rebuild_;
+
+#ifdef DEBUG
+  // Does this free list contain a free block located at the address of 'node'?
+  bool Contains(FreeListNode* node);
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
+};
+
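+// Worked example (illustration only), on a 32-bit build (kPointerSize == 4,
+// so kHead == 1): suppose blocks of 8, 24 and 40 bytes (2, 6 and 10 words)
+// have been freed.  After RebuildSizeList() the size list threaded through
+// free_[] is:
+//
+//   free_[kHead].next_size_ == 2
+//   free_[2].next_size_     == 6
+//   free_[6].next_size_     == 10
+//   free_[10].next_size_    == kEnd
+//
+//   // A request for 5 words starts at the finger (or kHead), walks the list
+//   // to the first size >= 5 (here 6) with FindSize(), unlinks that block,
+//   // and reports any unusable remainder through 'wasted_bytes', per the
+//   // Allocate() contract above.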
+
+// The free list for the map space.
+class FixedSizeFreeList BASE_EMBEDDED {
+ public:
+  FixedSizeFreeList(AllocationSpace owner, int object_size);
+
+  // Clear the free list.
+  void Reset();
+
+  // Return the number of bytes available on the free list.
+  int available() { return available_; }
+
+  // Place a node on the free list.  The block starting at 'start' (assumed to
+  // have size object_size_) is placed on the free list.  Bookkeeping
+  // information will be written to the block, ie, its contents will be
+  // destroyed.  The start address should be word aligned.
+  void Free(Address start);
+
+  // Allocate a fixed sized block from the free list.  The block is
+  // uninitialized.  A failure is returned if no block is available.
+  Object* Allocate();
+
+ private:
+  // Available bytes on the free list.
+  int available_;
+
+  // The head of the free list.
+  Address head_;
+
+  // The identity of the owning space, for building allocation Failure
+  // objects.
+  AllocationSpace owner_;
+
+  // The size of the objects in this space.
+  int object_size_;
+
+  DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
+};
+
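+// Sketch (illustration only): a fixed-size free list can be a simple LIFO
+// list threaded through the freed blocks themselves, roughly:
+//
+//   void Free(Address start) {  // Push a block.
+//     FreeListNode* node = FreeListNode::FromAddress(start);
+//     node->set_size(object_size_);  // Makes the block look like a HeapObject.
+//     node->set_next(head_);
+//     head_ = start;
+//     available_ += object_size_;
+//   }
+//   // Allocate() pops head_ if it is non-NULL, decrements available_, and
+//   // returns the block; otherwise it returns a Failure.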
+
+// -----------------------------------------------------------------------------
+// Old object space (excluding map objects)
+
+class OldSpace : public PagedSpace {
+ public:
+  // Creates an old space object with a given maximum capacity.
+  // The constructor does not allocate pages from OS.
+  explicit OldSpace(int max_capacity,
+                    AllocationSpace id,
+                    Executability executable)
+      : PagedSpace(max_capacity, id, executable), free_list_(id) {
+    page_extra_ = 0;
+  }
+
+  // The bytes available on the free list (ie, not above the linear allocation
+  // pointer).
+  int AvailableFree() { return free_list_.available(); }
+
+  // The top of allocation in a page in this space. Undefined if page is unused.
+  virtual Address PageAllocationTop(Page* page) {
+    return page == TopPageOf(allocation_info_) ? top() : page->ObjectAreaEnd();
+  }
+
+  // Give a block of memory to the space's free list.  It might be added to
+  // the free list or accounted as waste.
+  void Free(Address start, int size_in_bytes) {
+    int wasted_bytes = free_list_.Free(start, size_in_bytes);
+    accounting_stats_.DeallocateBytes(size_in_bytes);
+    accounting_stats_.WasteBytes(wasted_bytes);
+  }
+
+  // Prepare for full garbage collection.  Resets the relocation pointer and
+  // clears the free list.
+  virtual void PrepareForMarkCompact(bool will_compact);
+
+  // Updates the allocation pointer to the relocation top after a mark-compact
+  // collection.
+  virtual void MCCommitRelocationInfo();
+
+#ifdef DEBUG
+  // Reports statistics for the space
+  void ReportStatistics();
+  // Dump the remembered sets in the space to stdout.
+  void PrintRSet();
+#endif
+
+ protected:
+  // Virtual function in the superclass.  Slow path of AllocateRaw.
+  HeapObject* SlowAllocateRaw(int size_in_bytes);
+
+  // Virtual function in the superclass.  Allocate linearly at the start of
+  // the page after current_page (there is assumed to be one).
+  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
+
+ private:
+  // The space's free list.
+  OldSpaceFreeList free_list_;
+
+ public:
+  TRACK_MEMORY("OldSpace")
+};
+
+
+// -----------------------------------------------------------------------------
+// Old space for objects of a fixed size
+
+class FixedSpace : public PagedSpace {
+ public:
+  FixedSpace(int max_capacity,
+             AllocationSpace id,
+             int object_size_in_bytes,
+             const char* name)
+      : PagedSpace(max_capacity, id, NOT_EXECUTABLE),
+        object_size_in_bytes_(object_size_in_bytes),
+        name_(name),
+        free_list_(id, object_size_in_bytes) {
+    page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
+  }
+
+  // The top of allocation in a page in this space. Undefined if page is unused.
+  virtual Address PageAllocationTop(Page* page) {
+    return page == TopPageOf(allocation_info_) ? top()
+        : page->ObjectAreaEnd() - page_extra_;
+  }
+
+  int object_size_in_bytes() { return object_size_in_bytes_; }
+
+  // Give a fixed sized block of memory to the space's free list.
+  void Free(Address start) {
+    free_list_.Free(start);
+    accounting_stats_.DeallocateBytes(object_size_in_bytes_);
+  }
+
+  // Prepares for a mark-compact GC.
+  virtual void PrepareForMarkCompact(bool will_compact);
+
+  // Updates the allocation pointer to the relocation top after a mark-compact
+  // collection.
+  virtual void MCCommitRelocationInfo();
+
+#ifdef DEBUG
+  // Reports statistic info of the space
+  void ReportStatistics();
+
+  // Dump the remembered sets in the space to stdout.
+  void PrintRSet();
+#endif
+
+ protected:
+  // Virtual function in the superclass.  Slow path of AllocateRaw.
+  HeapObject* SlowAllocateRaw(int size_in_bytes);
+
+  // Virtual function in the superclass.  Allocate linearly at the start of
+  // the page after current_page (there is assumed to be one).
+  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
+
+ private:
+  // The size of objects in this space.
+  int object_size_in_bytes_;
+
+  // The name of this space.
+  const char* name_;
+
+  // The space's free list.
+  FixedSizeFreeList free_list_;
+};
+
+
+// -----------------------------------------------------------------------------
+// Old space for all map objects
+
+class MapSpace : public FixedSpace {
+ public:
+  // Creates a map space object with a maximum capacity.
+  MapSpace(int max_capacity, AllocationSpace id)
+      : FixedSpace(max_capacity, id, Map::kSize, "map") {}
+
+  // Prepares for a mark-compact GC.
+  virtual void PrepareForMarkCompact(bool will_compact);
+
+  // Given an index, returns the page address.
+  Address PageAddress(int page_index) { return page_addresses_[page_index]; }
+
+  // Constants.
+  static const int kMaxMapPageIndex = (1 << MapWord::kMapPageIndexBits) - 1;
+
+ protected:
+#ifdef DEBUG
+  virtual void VerifyObject(HeapObject* obj);
+#endif
+
+ private:
+  // An array of page start addresses in the map space.
+  Address page_addresses_[kMaxMapPageIndex + 1];
+
+ public:
+  TRACK_MEMORY("MapSpace")
+};
+
+
+// -----------------------------------------------------------------------------
+// Old space for all global object property cell objects
+
+class CellSpace : public FixedSpace {
+ public:
+  // Creates a property cell space object with a maximum capacity.
+  CellSpace(int max_capacity, AllocationSpace id)
+      : FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {}
+
+ protected:
+#ifdef DEBUG
+  virtual void VerifyObject(HeapObject* obj);
+#endif
+
+ public:
+  TRACK_MEMORY("CellSpace")
+};
+
+
+// -----------------------------------------------------------------------------
+// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
+// the large object space. A large object is allocated from OS heap with
+// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
+// A large object always starts at offset Page::kObjectStartOffset within a page.
+// Large objects do not move during garbage collections.
+
+// A LargeObjectChunk holds exactly one large object page with exactly one
+// large object.
+class LargeObjectChunk {
+ public:
+  // Allocates a new LargeObjectChunk that contains a large object page
+  // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
+  // object and possibly extra remembered set words) bytes after the object
+  // area start of that page. The allocated chunk size is set in the output
+  // parameter chunk_size.
+  static LargeObjectChunk* New(int size_in_bytes,
+                               size_t* chunk_size,
+                               Executability executable);
+
+  // Interpret a raw address as a large object chunk.
+  static LargeObjectChunk* FromAddress(Address address) {
+    return reinterpret_cast<LargeObjectChunk*>(address);
+  }
+
+  // Returns the address of this chunk.
+  Address address() { return reinterpret_cast<Address>(this); }
+
+  // Accessors for the fields of the chunk.
+  LargeObjectChunk* next() { return next_; }
+  void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
+
+  size_t size() { return size_; }
+  void set_size(size_t size_in_bytes) { size_ = size_in_bytes; }
+
+  // Returns the object in this chunk.
+  inline HeapObject* GetObject();
+
+  // Given a requested size (including any extra remembered set words),
+  // returns the physical size of a chunk to be allocated.
+  static int ChunkSizeFor(int size_in_bytes);
+
+  // Given a chunk size, returns the object size it can accommodate (not
+  // including any extra remembered set words).  Used by
+  // LargeObjectSpace::Available.  Note that this can overestimate the size
+  // of the object that will fit in a chunk---if the object requires extra
+  // remembered set words (eg, for large fixed arrays), the actual object
+  // size for the chunk will be smaller than reported by this function.
+  static int ObjectSizeFor(int chunk_size) {
+    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
+    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
+  }
+
+ private:
+  // A pointer to the next large object chunk in the space or NULL.
+  LargeObjectChunk* next_;
+
+  // The size of this chunk.
+  size_t size_;
+
+ public:
+  TRACK_MEMORY("LargeObjectChunk")
+};
+
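+// Worked example (illustration only), with the 8K pages defined above: a
+// 64 KB chunk can hold an object of at most
+//
+//   ObjectSizeFor(65536) == 65536 - 8192 - 256 == 57088 bytes,
+//
+// and any chunk of at most kPageSize + kObjectStartOffset (8448) bytes holds
+// no object at all.  As noted above this is an upper bound; objects needing
+// extra remembered-set words fit slightly less.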
+
+class LargeObjectSpace : public Space {
+ public:
+  explicit LargeObjectSpace(AllocationSpace id);
+  virtual ~LargeObjectSpace() {}
+
+  // Initializes internal data structures.
+  bool Setup();
+
+  // Releases internal resources, frees objects in this space.
+  void TearDown();
+
+  // Allocates a (non-FixedArray, non-Code) large object.
+  Object* AllocateRaw(int size_in_bytes);
+  // Allocates a large Code object.
+  Object* AllocateRawCode(int size_in_bytes);
+  // Allocates a large FixedArray.
+  Object* AllocateRawFixedArray(int size_in_bytes);
+
+  // Available bytes for objects in this space, not including any extra
+  // remembered set words.
+  int Available() {
+    return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
+  }
+
+  virtual int Size() {
+    return size_;
+  }
+
+  int PageCount() {
+    return page_count_;
+  }
+
+  // Finds an object for a given address; returns Failure::Exception()
+  // if it is not found. The function iterates through all objects in this
+  // space, so it may be slow.
+  Object* FindObject(Address a);
+
+  // Clears remembered sets.
+  void ClearRSet();
+
+  // Iterates objects whose remembered set bits are set.
+  void IterateRSet(ObjectSlotCallback func);
+
+  // Frees unmarked objects.
+  void FreeUnmarkedObjects();
+
+  // Checks whether a heap object is in this space; O(1).
+  bool Contains(HeapObject* obj);
+
+  // Checks whether the space is empty.
+  bool IsEmpty() { return first_chunk_ == NULL; }
+
+#ifdef ENABLE_HEAP_PROTECTION
+  // Protect/unprotect the space by marking it read-only/writable.
+  void Protect();
+  void Unprotect();
+#endif
+
+#ifdef DEBUG
+  virtual void Verify();
+  virtual void Print();
+  void ReportStatistics();
+  void CollectCodeStatistics();
+  // Dump the remembered sets in the space to stdout.
+  void PrintRSet();
+#endif
+  // Checks whether an address is in the object area of this space.  It
+  // iterates over all objects in the space, so it may be slow.
+  bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
+
+ private:
+  // The head of the linked list of large object chunks.
+  LargeObjectChunk* first_chunk_;
+  int size_;  // allocated bytes
+  int page_count_;  // number of chunks
+
+
+  // Shared implementation of AllocateRaw, AllocateRawCode and
+  // AllocateRawFixedArray.
+  Object* AllocateRawInternal(int requested_size,
+                              int object_size,
+                              Executability executable);
+
+  // Returns the number of extra remembered set bytes (rounded up to the
+  // nearest full word) required for extra_object_bytes bytes of extra
+  // pointers.
+  static inline int ExtraRSetBytesFor(int extra_object_bytes);
+
+  friend class LargeObjectIterator;
+
+ public:
+  TRACK_MEMORY("LargeObjectSpace")
+};
+
+
+class LargeObjectIterator: public ObjectIterator {
+ public:
+  explicit LargeObjectIterator(LargeObjectSpace* space);
+  LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
+
+  bool has_next() { return current_ != NULL; }
+  HeapObject* next();
+
+  // implementation of ObjectIterator.
+  virtual bool has_next_object() { return has_next(); }
+  virtual HeapObject* next_object() { return next(); }
+
+ private:
+  LargeObjectChunk* current_;
+  HeapObjectCallback size_func_;
+};
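+
+
+// Example (illustrative): walking every object in the large object space
+// with the iterator declared above; assumes the space is reachable through
+// an accessor such as Heap::lo_space().
+//
+//   LargeObjectIterator it(Heap::lo_space());
+//   while (it.has_next()) {
+//     HeapObject* obj = it.next();
+//     // ... inspect obj; objects in this space never move ...
+//   }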
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_SPACES_H_
diff --git a/src/string-stream.cc b/src/string-stream.cc
new file mode 100644
index 0000000..8c62a45
--- /dev/null
+++ b/src/string-stream.cc
@@ -0,0 +1,584 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "factory.h"
+#include "string-stream.h"
+
+namespace v8 {
+namespace internal {
+
+static const int kMentionedObjectCacheMaxSize = 256;
+static List<HeapObject*, PreallocatedStorage>* debug_object_cache = NULL;
+static Object* current_security_token = NULL;
+
+
+char* HeapStringAllocator::allocate(unsigned bytes) {
+  space_ = NewArray<char>(bytes);
+  return space_;
+}
+
+
+NoAllocationStringAllocator::NoAllocationStringAllocator(char* memory,
+                                                         unsigned size) {
+  size_ = size;
+  space_ = memory;
+}
+
+
+bool StringStream::Put(char c) {
+  if (full()) return false;
+  ASSERT(length_ < capacity_);
+  // Since the trailing '\0' is not accounted for in length_, fullness is
+  // indicated by a difference of 1 between length_ and capacity_. Thus,
+  // once the difference is down to 2 we need to grow the buffer.
+  if (length_ == capacity_ - 2) {
+    unsigned new_capacity = capacity_;
+    char* new_buffer = allocator_->grow(&new_capacity);
+    if (new_capacity > capacity_) {
+      capacity_ = new_capacity;
+      buffer_ = new_buffer;
+    } else {
+      // Reached the end of the available buffer.
+      ASSERT(capacity_ >= 5);
+      length_ = capacity_ - 1;  // Indicate fullness of the stream.
+      buffer_[length_ - 4] = '.';
+      buffer_[length_ - 3] = '.';
+      buffer_[length_ - 2] = '.';
+      buffer_[length_ - 1] = '\n';
+      buffer_[length_] = '\0';
+      return false;
+    }
+  }
+  buffer_[length_] = c;
+  buffer_[length_ + 1] = '\0';
+  length_++;
+  return true;
+}
+
+
+// A control character is one that configures a format element.  For
+// instance, in %.5s, .5 are control characters.
+static bool IsControlChar(char c) {
+  switch (c) {
+  case '0': case '1': case '2': case '3': case '4': case '5':
+  case '6': case '7': case '8': case '9': case '.': case '-':
+    return true;
+  default:
+    return false;
+  }
+}
+
+
+void StringStream::Add(Vector<const char> format, Vector<FmtElm> elms) {
+  // If we already ran out of space then return immediately.
+  if (full()) return;
+  int offset = 0;
+  int elm = 0;
+  while (offset < format.length()) {
+    if (format[offset] != '%' || elm == elms.length()) {
+      Put(format[offset]);
+      offset++;
+      continue;
+    }
+    // Read this formatting directive into a temporary buffer
+    EmbeddedVector<char, 24> temp;
+    int format_length = 0;
+    // Skip over the whole control character sequence until the
+    // format element type
+    temp[format_length++] = format[offset++];
+    while (offset < format.length() && IsControlChar(format[offset]))
+      temp[format_length++] = format[offset++];
+    if (offset >= format.length())
+      return;
+    char type = format[offset];
+    temp[format_length++] = type;
+    temp[format_length] = '\0';
+    offset++;
+    FmtElm current = elms[elm++];
+    switch (type) {
+    case 's': {
+      ASSERT_EQ(FmtElm::C_STR, current.type_);
+      const char* value = current.data_.u_c_str_;
+      Add(value);
+      break;
+    }
+    case 'w': {
+      ASSERT_EQ(FmtElm::LC_STR, current.type_);
+      Vector<const uc16> value = *current.data_.u_lc_str_;
+      for (int i = 0; i < value.length(); i++)
+        Put(static_cast<char>(value[i]));
+      break;
+    }
+    case 'o': {
+      ASSERT_EQ(FmtElm::OBJ, current.type_);
+      Object* obj = current.data_.u_obj_;
+      PrintObject(obj);
+      break;
+    }
+    case 'k': {
+      ASSERT_EQ(FmtElm::INT, current.type_);
+      int value = current.data_.u_int_;
+      if (0x20 <= value && value <= 0x7F) {
+        Put(value);
+      } else if (value <= 0xff) {
+        Add("\\x%02x", value);
+      } else {
+        Add("\\u%04x", value);
+      }
+      break;
+    }
+    case 'i': case 'd': case 'u': case 'x': case 'c': case 'X': {
+      int value = current.data_.u_int_;
+      EmbeddedVector<char, 24> formatted;
+      int length = OS::SNPrintF(formatted, temp.start(), value);
+      Add(Vector<const char>(formatted.start(), length));
+      break;
+    }
+    case 'f': case 'g': case 'G': case 'e': case 'E': {
+      double value = current.data_.u_double_;
+      EmbeddedVector<char, 28> formatted;
+      OS::SNPrintF(formatted, temp.start(), value);
+      Add(formatted.start());
+      break;
+    }
+    case 'p': {
+      void* value = current.data_.u_pointer_;
+      EmbeddedVector<char, 20> formatted;
+      OS::SNPrintF(formatted, temp.start(), value);
+      Add(formatted.start());
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+    }
+  }
+
+  // Verify that the buffer is 0-terminated
+  ASSERT(buffer_[length_] == '\0');
+}
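+
+
+// Example (illustrative) of the directives handled above, where 'stream'
+// and 'obj' are hypothetical: %s takes a C string, %o prints a heap Object*
+// via ShortPrint, %d takes an int and %k prints an int as an escaped
+// character.
+//
+//   stream.Add("%s #%d -> %o, last char %k\n", "entry", 3, obj, 0x2603);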
+
+
+void StringStream::PrintObject(Object* o) {
+  o->ShortPrint(this);
+  if (o->IsString()) {
+    if (String::cast(o)->length() <= String::kMaxMediumStringSize) {
+      return;
+    }
+  } else if (o->IsNumber() || o->IsOddball()) {
+    return;
+  }
+  if (o->IsHeapObject()) {
+    for (int i = 0; i < debug_object_cache->length(); i++) {
+      if ((*debug_object_cache)[i] == o) {
+        Add("#%d#", i);
+        return;
+      }
+    }
+    if (debug_object_cache->length() < kMentionedObjectCacheMaxSize) {
+      Add("#%d#", debug_object_cache->length());
+      debug_object_cache->Add(HeapObject::cast(o));
+    } else {
+      Add("@%p", o);
+    }
+  }
+}
+
+
+void StringStream::Add(const char* format) {
+  Add(CStrVector(format));
+}
+
+
+void StringStream::Add(Vector<const char> format) {
+  Add(format, Vector<FmtElm>::empty());
+}
+
+
+void StringStream::Add(const char* format, FmtElm arg0) {
+  const char argc = 1;
+  FmtElm argv[argc] = { arg0 };
+  Add(CStrVector(format), Vector<FmtElm>(argv, argc));
+}
+
+
+void StringStream::Add(const char* format, FmtElm arg0, FmtElm arg1) {
+  const char argc = 2;
+  FmtElm argv[argc] = { arg0, arg1 };
+  Add(CStrVector(format), Vector<FmtElm>(argv, argc));
+}
+
+
+void StringStream::Add(const char* format, FmtElm arg0, FmtElm arg1,
+                       FmtElm arg2) {
+  const char argc = 3;
+  FmtElm argv[argc] = { arg0, arg1, arg2 };
+  Add(CStrVector(format), Vector<FmtElm>(argv, argc));
+}
+
+
+void StringStream::Add(const char* format, FmtElm arg0, FmtElm arg1,
+                       FmtElm arg2, FmtElm arg3) {
+  const char argc = 4;
+  FmtElm argv[argc] = { arg0, arg1, arg2, arg3 };
+  Add(CStrVector(format), Vector<FmtElm>(argv, argc));
+}
+
+
+SmartPointer<const char> StringStream::ToCString() const {
+  char* str = NewArray<char>(length_ + 1);
+  memcpy(str, buffer_, length_);
+  str[length_] = '\0';
+  return SmartPointer<const char>(str);
+}
+
+
+void StringStream::Log() {
+  LOG(StringEvent("StackDump", buffer_));
+}
+
+
+void StringStream::OutputToStdOut() {
+  // Dump the output to stdout, but make sure to break it up into
+  // manageable chunks to avoid losing parts of the output in the OS
+  // printing code. This is a problem on Windows in particular; see
+  // the VPrint() function implementations in platform-win32.cc.
+  unsigned position = 0;
+  for (unsigned next; (next = position + 2048) < length_; position = next) {
+    char save = buffer_[next];
+    buffer_[next] = '\0';
+    internal::PrintF("%s", &buffer_[position]);
+    buffer_[next] = save;
+  }
+  internal::PrintF("%s", &buffer_[position]);
+}
+
+
+Handle<String> StringStream::ToString() {
+  return Factory::NewStringFromUtf8(Vector<const char>(buffer_, length_));
+}
+
+
+void StringStream::ClearMentionedObjectCache() {
+  current_security_token = NULL;
+  if (debug_object_cache == NULL) {
+    debug_object_cache = new List<HeapObject*, PreallocatedStorage>(0);
+  }
+  debug_object_cache->Clear();
+}
+
+
+#ifdef DEBUG
+bool StringStream::IsMentionedObjectCacheClear() {
+  return (debug_object_cache->length() == 0);
+}
+#endif
+
+
+bool StringStream::Put(String* str) {
+  return Put(str, 0, str->length());
+}
+
+
+bool StringStream::Put(String* str, int start, int end) {
+  StringInputBuffer name_buffer(str);
+  name_buffer.Seek(start);
+  for (int i = start; i < end && name_buffer.has_more(); i++) {
+    int c = name_buffer.GetNext();
+    if (c >= 127 || c < 32) {
+      c = '?';
+    }
+    if (!Put(c)) {
+      return false;  // Output was truncated.
+    }
+  }
+  return true;
+}
+
+
+void StringStream::PrintName(Object* name) {
+  if (name->IsString()) {
+    String* str = String::cast(name);
+    if (str->length() > 0) {
+      Put(str);
+    } else {
+      Add("/* anonymous */");
+    }
+  } else {
+    Add("%o", name);
+  }
+}
+
+
+void StringStream::PrintUsingMap(JSObject* js_object) {
+  Map* map = js_object->map();
+  if (!Heap::Contains(map) ||
+      !map->IsHeapObject() ||
+      !map->IsMap()) {
+    Add("<Invalid map>\n");
+    return;
+  }
+  DescriptorArray* descs = map->instance_descriptors();
+  for (int i = 0; i < descs->number_of_descriptors(); i++) {
+    switch (descs->GetType(i)) {
+      case FIELD: {
+        Object* key = descs->GetKey(i);
+        if (key->IsString() || key->IsNumber()) {
+          int len = 3;
+          if (key->IsString()) {
+            len = String::cast(key)->length();
+          }
+          for (; len < 18; len++)
+            Put(' ');
+          if (key->IsString()) {
+            Put(String::cast(key));
+          } else {
+            key->ShortPrint();
+          }
+          Add(": ");
+          Object* value = js_object->FastPropertyAt(descs->GetFieldIndex(i));
+          Add("%o\n", value);
+        }
+      }
+      break;
+      default:
+      break;
+    }
+  }
+}
+
+
+void StringStream::PrintFixedArray(FixedArray* array, unsigned int limit) {
+  for (unsigned int i = 0; i < 10 && i < limit; i++) {
+    Object* element = array->get(i);
+    if (element != Heap::the_hole_value()) {
+      for (int len = 1; len < 18; len++)
+        Put(' ');
+      Add("%d: %o\n", i, array->get(i));
+    }
+  }
+  if (limit >= 10) {
+    Add("                  ...\n");
+  }
+}
+
+
+void StringStream::PrintByteArray(ByteArray* byte_array) {
+  unsigned int limit = byte_array->length();
+  for (unsigned int i = 0; i < 10 && i < limit; i++) {
+    byte b = byte_array->get(i);
+    Add("             %d: %3d 0x%02x", i, b, b);
+    if (b >= ' ' && b <= '~') {
+      Add(" '%c'", b);
+    } else if (b == '\n') {
+      Add(" '\n'");
+    } else if (b == '\r') {
+      Add(" '\r'");
+    } else if (b >= 1 && b <= 26) {
+      Add(" ^%c", b + 'A' - 1);
+    }
+    Add("\n");
+  }
+  if (limit >= 10) {
+    Add("                  ...\n");
+  }
+}
+
+
+void StringStream::PrintMentionedObjectCache() {
+  Add("==== Key         ============================================\n\n");
+  for (int i = 0; i < debug_object_cache->length(); i++) {
+    HeapObject* printee = (*debug_object_cache)[i];
+    Add(" #%d# %p: ", i, printee);
+    printee->ShortPrint(this);
+    Add("\n");
+    if (printee->IsJSObject()) {
+      if (printee->IsJSValue()) {
+        Add("           value(): %o\n", JSValue::cast(printee)->value());
+      }
+      PrintUsingMap(JSObject::cast(printee));
+      if (printee->IsJSArray()) {
+        JSArray* array = JSArray::cast(printee);
+        if (array->HasFastElements()) {
+          unsigned int limit = FixedArray::cast(array->elements())->length();
+          unsigned int length =
+            static_cast<uint32_t>(JSArray::cast(array)->length()->Number());
+          if (length < limit) limit = length;
+          PrintFixedArray(FixedArray::cast(array->elements()), limit);
+        }
+      }
+    } else if (printee->IsByteArray()) {
+      PrintByteArray(ByteArray::cast(printee));
+    } else if (printee->IsFixedArray()) {
+      unsigned int limit = FixedArray::cast(printee)->length();
+      PrintFixedArray(FixedArray::cast(printee), limit);
+    }
+  }
+}
+
+
+void StringStream::PrintSecurityTokenIfChanged(Object* f) {
+  if (!f->IsHeapObject() || !Heap::Contains(HeapObject::cast(f))) {
+    return;
+  }
+  Map* map = HeapObject::cast(f)->map();
+  if (!map->IsHeapObject() ||
+      !Heap::Contains(map) ||
+      !map->IsMap() ||
+      !f->IsJSFunction()) {
+    return;
+  }
+
+  JSFunction* fun = JSFunction::cast(f);
+  Object* perhaps_context = fun->unchecked_context();
+  if (perhaps_context->IsHeapObject() &&
+      Heap::Contains(HeapObject::cast(perhaps_context)) &&
+      perhaps_context->IsContext()) {
+    Context* context = fun->context();
+    if (!Heap::Contains(context)) {
+      Add("(Function context is outside heap)\n");
+      return;
+    }
+    Object* token = context->global_context()->security_token();
+    if (token != current_security_token) {
+      Add("Security context: %o\n", token);
+      current_security_token = token;
+    }
+  } else {
+    Add("(Function context is corrupt)\n");
+  }
+}
+
+
+void StringStream::PrintFunction(Object* f, Object* receiver, Code** code) {
+  if (f->IsHeapObject() &&
+      Heap::Contains(HeapObject::cast(f)) &&
+      Heap::Contains(HeapObject::cast(f)->map()) &&
+      HeapObject::cast(f)->map()->IsMap()) {
+    if (f->IsJSFunction()) {
+      JSFunction* fun = JSFunction::cast(f);
+      // Common case: on-stack function present and resolved.
+      PrintPrototype(fun, receiver);
+      *code = fun->code();
+    } else if (f->IsSymbol()) {
+      // Unresolved and megamorphic calls: Instead of the function
+      // we have the function name on the stack.
+      PrintName(f);
+      Add("/* unresolved */ ");
+    } else {
+      // Unless this is the frame of a built-in function, we should always have
+      // the callee function or name on the stack. If we don't, we have a
+      // problem or a change of the stack frame layout.
+      Add("%o", f);
+      Add("/* warning: no JSFunction object or function name found */ ");
+    }
+    /* } else if (is_trampoline()) {
+       Print("trampoline ");
+    */
+  } else {
+    if (!f->IsHeapObject()) {
+      Add("/* warning: 'function' was not a heap object */ ");
+      return;
+    }
+    if (!Heap::Contains(HeapObject::cast(f))) {
+      Add("/* warning: 'function' was not on the heap */ ");
+      return;
+    }
+    if (!Heap::Contains(HeapObject::cast(f)->map())) {
+      Add("/* warning: function's map was not on the heap */ ");
+      return;
+    }
+    if (!HeapObject::cast(f)->map()->IsMap()) {
+      Add("/* warning: function's map was not a valid map */ ");
+      return;
+    }
+    Add("/* warning: Invalid JSFunction object found */ ");
+  }
+}
+
+
+void StringStream::PrintPrototype(JSFunction* fun, Object* receiver) {
+  Object* name = fun->shared()->name();
+  bool print_name = false;
+  for (Object* p = receiver; p != Heap::null_value(); p = p->GetPrototype()) {
+    if (p->IsJSObject()) {
+      Object* key = JSObject::cast(p)->SlowReverseLookup(fun);
+      if (key != Heap::undefined_value()) {
+        if (!name->IsString() ||
+            !key->IsString() ||
+            !String::cast(name)->Equals(String::cast(key))) {
+          print_name = true;
+        }
+        if (name->IsString() && String::cast(name)->length() == 0) {
+          print_name = false;
+        }
+        name = key;
+      }
+    } else {
+      print_name = true;
+    }
+  }
+  PrintName(name);
+  // Print an "aka" clause if the name stored in the function doesn't match
+  // the name under which it was looked up.
+  if (print_name) {
+    Add("(aka ");
+    PrintName(fun->shared()->name());
+    Put(')');
+  }
+}
+
+
+char* HeapStringAllocator::grow(unsigned* bytes) {
+  unsigned new_bytes = *bytes * 2;
+  // Check for overflow.
+  if (new_bytes <= *bytes) {
+    return space_;
+  }
+  char* new_space = NewArray<char>(new_bytes);
+  if (new_space == NULL) {
+    return space_;
+  }
+  memcpy(new_space, space_, *bytes);
+  *bytes = new_bytes;
+  DeleteArray(space_);
+  space_ = new_space;
+  return new_space;
+}
+
+
+// Only grow once to the maximum allowable size.
+char* NoAllocationStringAllocator::grow(unsigned* bytes) {
+  ASSERT(size_ >= *bytes);
+  *bytes = size_;
+  return space_;
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/string-stream.h b/src/string-stream.h
new file mode 100644
index 0000000..323a6d6
--- /dev/null
+++ b/src/string-stream.h
@@ -0,0 +1,189 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_STRING_STREAM_H_
+#define V8_STRING_STREAM_H_
+
+namespace v8 {
+namespace internal {
+
+
+class StringAllocator {
+ public:
+  virtual ~StringAllocator() {}
+  // Allocate a number of bytes.
+  virtual char* allocate(unsigned bytes) = 0;
+  // Allocate a larger number of bytes and copy the old buffer to the new one.
+  // bytes is an input and output parameter passing the old size of the buffer
+  // and returning the new size.  If allocation fails then we return the old
+  // buffer and do not increase the size.
+  virtual char* grow(unsigned* bytes) = 0;
+};
+
+
+// Normal allocator uses new[] and delete[].
+class HeapStringAllocator: public StringAllocator {
+ public:
+  ~HeapStringAllocator() { DeleteArray(space_); }
+  char* allocate(unsigned bytes);
+  char* grow(unsigned* bytes);
+ private:
+  char* space_;
+};
+
+
+// Allocator for use when no new C++ heap allocation is allowed.
+// It is given a preallocated buffer up front and does no allocation while
+// building the message.
+class NoAllocationStringAllocator: public StringAllocator {
+ public:
+  NoAllocationStringAllocator(char* memory, unsigned size);
+  char* allocate(unsigned bytes) { return space_; }
+  char* grow(unsigned* bytes);
+ private:
+  unsigned size_;
+  char* space_;
+};
+
+
+class FmtElm {
+ public:
+  FmtElm(int value) : type_(INT) {  // NOLINT
+    data_.u_int_ = value;
+  }
+  explicit FmtElm(double value) : type_(DOUBLE) {
+    data_.u_double_ = value;
+  }
+  FmtElm(const char* value) : type_(C_STR) {  // NOLINT
+    data_.u_c_str_ = value;
+  }
+  FmtElm(const Vector<const uc16>& value) : type_(LC_STR) {  // NOLINT
+    data_.u_lc_str_ = &value;
+  }
+  FmtElm(Object* value) : type_(OBJ) {  // NOLINT
+    data_.u_obj_ = value;
+  }
+  FmtElm(Handle<Object> value) : type_(HANDLE) {  // NOLINT
+    data_.u_handle_ = value.location();
+  }
+  FmtElm(void* value) : type_(POINTER) {  // NOLINT
+    data_.u_pointer_ = value;
+  }
+ private:
+  friend class StringStream;
+  enum Type { INT, DOUBLE, C_STR, LC_STR, OBJ, HANDLE, POINTER };
+  Type type_;
+  union {
+    int u_int_;
+    double u_double_;
+    const char* u_c_str_;
+    const Vector<const uc16>* u_lc_str_;
+    Object* u_obj_;
+    Object** u_handle_;
+    void* u_pointer_;
+  } data_;
+};
+
+
+class StringStream {
+ public:
+  explicit StringStream(StringAllocator* allocator):
+    allocator_(allocator),
+    capacity_(kInitialCapacity),
+    length_(0),
+    buffer_(allocator_->allocate(kInitialCapacity)) {
+    buffer_[0] = 0;
+  }
+
+  ~StringStream() {
+  }
+
+  bool Put(char c);
+  bool Put(String* str);
+  bool Put(String* str, int start, int end);
+  void Add(Vector<const char> format, Vector<FmtElm> elms);
+  void Add(const char* format);
+  void Add(Vector<const char> format);
+  void Add(const char* format, FmtElm arg0);
+  void Add(const char* format, FmtElm arg0, FmtElm arg1);
+  void Add(const char* format, FmtElm arg0, FmtElm arg1, FmtElm arg2);
+  void Add(const char* format,
+           FmtElm arg0,
+           FmtElm arg1,
+           FmtElm arg2,
+           FmtElm arg3);
+
+  // Getting the message out.
+  void OutputToStdOut();
+  void Log();
+  Handle<String> ToString();
+  SmartPointer<const char> ToCString() const;
+
+  // Object printing support.
+  void PrintName(Object* o);
+  void PrintFixedArray(FixedArray* array, unsigned int limit);
+  void PrintByteArray(ByteArray* ba);
+  void PrintUsingMap(JSObject* js_object);
+  void PrintPrototype(JSFunction* fun, Object* receiver);
+  void PrintSecurityTokenIfChanged(Object* function);
+  // NOTE: Returns the code in the output parameter.
+  void PrintFunction(Object* function, Object* receiver, Code** code);
+
+  // Reset the stream.
+  void Reset() {
+    length_ = 0;
+    buffer_[0] = 0;
+  }
+
+  // Mentioned object cache support.
+  void PrintMentionedObjectCache();
+  static void ClearMentionedObjectCache();
+#ifdef DEBUG
+  static bool IsMentionedObjectCacheClear();
+#endif
+
+
+  static const int kInitialCapacity = 16;
+
+ private:
+  void PrintObject(Object* obj);
+
+  StringAllocator* allocator_;
+  unsigned capacity_;
+  unsigned length_;  // does not include terminating 0-character
+  char* buffer_;
+
+  bool full() const { return (capacity_ - length_) == 1; }
+  int space() const { return capacity_ - length_; }
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(StringStream);
+};
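+
+
+// Example (illustrative) of typical use of the classes declared above to
+// build a heap-allocated message; 'obj' and 'index' are hypothetical values.
+//
+//   HeapStringAllocator allocator;
+//   StringStream stream(&allocator);
+//   stream.Add("unexpected value %o at index %i\n", obj, index);
+//   SmartPointer<const char> text = stream.ToCString();
+//
+// For crash paths where no C++ heap allocation is allowed, a
+// NoAllocationStringAllocator wrapping a preallocated buffer can be used
+// instead.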
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_STRING_STREAM_H_
diff --git a/src/string.js b/src/string.js
new file mode 100644
index 0000000..fbdc307
--- /dev/null
+++ b/src/string.js
@@ -0,0 +1,876 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// const $String = global.String;
+// const $NaN = 0/0;
+
+
+// Set the String function and constructor.
+%SetCode($String, function(x) {
+  var value = %_ArgumentsLength() == 0 ? '' : ToString(x);
+  if (%_IsConstructCall()) {
+    %_SetValueOf(this, value);
+  } else {
+    return value;
+  }
+});
+
+%FunctionSetPrototype($String, new $String());
+
+// ECMA-262 section 15.5.4.2
+function StringToString() {
+  if (!IS_STRING(this) && !IS_STRING_WRAPPER(this))
+    throw new $TypeError('String.prototype.toString is not generic');
+  return %_ValueOf(this);
+}
+
+
+// ECMA-262 section 15.5.4.3
+function StringValueOf() {
+  if (!IS_STRING(this) && !IS_STRING_WRAPPER(this))
+    throw new $TypeError('String.prototype.valueOf is not generic');
+  return %_ValueOf(this);
+}
+
+
+// ECMA-262, section 15.5.4.4
+function StringCharAt(pos) {
+  var char_code = %_FastCharCodeAt(this, pos);
+  if (!%_IsSmi(char_code)) {
+    var subject = ToString(this);
+    var index = TO_INTEGER(pos);
+    if (index >= subject.length || index < 0) return "";
+    char_code = %StringCharCodeAt(subject, index);
+  }
+  return %CharFromCode(char_code);
+}
+
+
+// ECMA-262 section 15.5.4.5
+function StringCharCodeAt(pos) {
+  var fast_answer = %_FastCharCodeAt(this, pos);
+  if (%_IsSmi(fast_answer)) {
+    return fast_answer;
+  }
+  var subject = ToString(this);
+  var index = TO_INTEGER(pos);
+  return %StringCharCodeAt(subject, index);
+}
+
+
+// ECMA-262, section 15.5.4.6
+function StringConcat() {
+  var len = %_ArgumentsLength();
+  var parts = new $Array(len + 1);
+  parts[0] = ToString(this);
+  for (var i = 0; i < len; i++)
+    parts[i + 1] = ToString(%_Arguments(i));
+  return parts.join('');
+}
+
+// Match ES3 and Safari
+%FunctionSetLength(StringConcat, 1);
+
+
+// ECMA-262 section 15.5.4.7
+function StringIndexOf(searchString /* position */) {  // length == 1
+  var subject_str = ToString(this);
+  var pattern_str = ToString(searchString);
+  var subject_str_len = subject_str.length;
+  var pattern_str_len = pattern_str.length;
+  var index = 0;
+  if (%_ArgumentsLength() > 1) {
+    var arg1 = %_Arguments(1);  // position
+    index = TO_INTEGER(arg1);
+  }
+  if (index < 0) index = 0;
+  if (index > subject_str_len) index = subject_str_len;
+  if (pattern_str_len + index > subject_str_len) return -1;
+  return %StringIndexOf(subject_str, pattern_str, index);
+}
+
+
+// ECMA-262 section 15.5.4.8
+function StringLastIndexOf(searchString /* position */) {  // length == 1
+  var sub = ToString(this);
+  var subLength = sub.length;
+  var pat = ToString(searchString);
+  var patLength = pat.length;
+  var index = subLength - patLength;
+  if (%_ArgumentsLength() > 1) {
+    var position = ToNumber(%_Arguments(1));
+    if (!$isNaN(position)) {
+      position = TO_INTEGER(position);
+      if (position < 0) {
+        position = 0;
+      }
+      if (position + patLength < subLength) {
+        index = position;
+      }
+    }
+  }
+  if (index < 0) {
+    return -1;
+  }
+  return %StringLastIndexOf(sub, pat, index);
+}
+
+
+// ECMA-262 section 15.5.4.9
+//
+// This function is implementation specific.  For now, we do not
+// do anything locale specific.
+function StringLocaleCompare(other) {
+  if (%_ArgumentsLength() === 0) return 0;
+
+  var this_str = ToString(this);
+  var other_str = ToString(other);
+  return %StringLocaleCompare(this_str, other_str);
+}
+
+
+// ECMA-262 section 15.5.4.10
+function StringMatch(regexp) {
+  if (!IS_REGEXP(regexp)) regexp = new ORIGINAL_REGEXP(regexp);
+  var subject = ToString(this);
+
+  if (!regexp.global) return regexp.exec(subject);
+  %_Log('regexp', 'regexp-match,%0S,%1r', [subject, regexp]);
+  // lastMatchInfo is defined in regexp-delay.js.
+  return %StringMatch(subject, regexp, lastMatchInfo);
+}
+
+
+// SubString is an internal function that returns a substring of 'string'.
+// If the resulting string has length 1, we use the one-character cache;
+// otherwise we call the runtime system.
+function SubString(string, start, end) {
+  // Use the one character string cache.
+  if (start + 1 == end) {
+    var char_code = %_FastCharCodeAt(string, start);
+    if (!%_IsSmi(char_code)) {
+      char_code = %StringCharCodeAt(string, start);
+    }
+    return %CharFromCode(char_code);
+  }
+  return %StringSlice(string, start, end);
+}
+
+
+// This has the same size as the lastMatchInfo array, and can be used for
+// functions that expect that structure to be returned.  It is used when the
+// needle is a string rather than a regexp.  In this case we can't update
+// lastMatchInfo without erroneously affecting the properties on the global
+// RegExp object.
+var reusableMatchInfo = [2, "", "", -1, -1];
+
+
+// ECMA-262, section 15.5.4.11
+function StringReplace(search, replace) {
+  var subject = ToString(this);
+
+  // Delegate to one of the regular expression variants if necessary.
+  if (IS_REGEXP(search)) {
+    %_Log('regexp', 'regexp-replace,%0r,%1S', [search, subject]);
+    if (IS_FUNCTION(replace)) {
+      return StringReplaceRegExpWithFunction(subject, search, replace);
+    } else {
+      return StringReplaceRegExp(subject, search, replace);
+    }
+  }
+
+  // Convert the search argument to a string and search for it.
+  search = ToString(search);
+  var start = %StringIndexOf(subject, search, 0);
+  if (start < 0) return subject;
+  var end = start + search.length;
+
+  var builder = new ReplaceResultBuilder(subject);
+  // prefix
+  builder.addSpecialSlice(0, start);
+
+  // Compute the string to replace with.
+  if (IS_FUNCTION(replace)) {
+    builder.add(replace.call(null, search, start, subject));
+  } else {
+    reusableMatchInfo[CAPTURE0] = start;
+    reusableMatchInfo[CAPTURE1] = end;
+    ExpandReplacement(ToString(replace), subject, reusableMatchInfo, builder);
+  }
+
+  // suffix
+  builder.addSpecialSlice(end, subject.length);
+
+  return builder.generate();
+}
+
+
+// Helper function for regular expressions in String.prototype.replace.
+function StringReplaceRegExp(subject, regexp, replace) {
+  replace = ToString(replace);
+  return %StringReplaceRegExpWithString(subject,
+                                        regexp,
+                                        replace,
+                                        lastMatchInfo);
+};
+
+
+// Expand the $-expressions in the string and return a new string with
+// the result.
+function ExpandReplacement(string, subject, matchInfo, builder) {
+  var next = %StringIndexOf(string, '$', 0);
+  if (next < 0) {
+    builder.add(string);
+    return;
+  }
+
+  // Compute the number of captures; see ECMA-262, 15.5.4.11, p. 102.
+  var m = NUMBER_OF_CAPTURES(matchInfo) >> 1;  // Includes the match.
+
+  if (next > 0) builder.add(SubString(string, 0, next));
+  var length = string.length;
+
+  while (true) {
+    var expansion = '$';
+    var position = next + 1;
+    if (position < length) {
+      var peek = %_FastCharCodeAt(string, position);
+      if (!%_IsSmi(peek)) {
+        peek = %StringCharCodeAt(string, position);
+      }
+      if (peek == 36) {         // $$
+        ++position;
+        builder.add('$');
+      } else if (peek == 38) {  // $& - match
+        ++position;
+        builder.addSpecialSlice(matchInfo[CAPTURE0],
+                                matchInfo[CAPTURE1]);
+      } else if (peek == 96) {  // $` - prefix
+        ++position;
+        builder.addSpecialSlice(0, matchInfo[CAPTURE0]);
+      } else if (peek == 39) {  // $' - suffix
+        ++position;
+        builder.addSpecialSlice(matchInfo[CAPTURE1], subject.length);
+      } else if (peek >= 48 && peek <= 57) {  // $n, 0 <= n <= 9
+        ++position;
+        var n = peek - 48;
+        if (position < length) {
+          peek = %_FastCharCodeAt(string, position);
+          if (!%_IsSmi(peek)) {
+            peek = %StringCharCodeAt(string, position);
+          }
+          // $nn, 01 <= nn <= 99
+          if (n != 0 && peek == 48 || peek >= 49 && peek <= 57) {
+            var nn = n * 10 + (peek - 48);
+            if (nn < m) {
+              // If the two digit capture reference is within range of
+              // the captures, we use it instead of the single digit
+              // one. Otherwise, we fall back to using the single
+              // digit reference. This matches the behavior of
+              // SpiderMonkey.
+              ++position;
+              n = nn;
+            }
+          }
+        }
+        if (0 < n && n < m) {
+          addCaptureString(builder, matchInfo, n);
+        } else {
+          // Because of the captures range check in the parsing of two
+          // digit capture references, we can only enter here when a
+          // single digit capture reference is outside the range of
+          // captures.
+          builder.add('$');
+          --position;
+        }
+      } else {
+        builder.add('$');
+      }
+    } else {
+      builder.add('$');
+    }
+
+    // Go to the next $ in the string.
+    next = %StringIndexOf(string, '$', position);
+
+    // Return if there are no more $ characters in the string. If we
+    // haven't reached the end, we need to append the suffix.
+    if (next < 0) {
+      if (position < length) {
+        builder.add(SubString(string, position, length));
+      }
+      return;
+    }
+
+    // Append substring between the previous and the next $ character.
+    builder.add(SubString(string, position, next));
+  }
+};
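+
+
+// Examples (illustrative) of the $-expansion implemented above; results
+// follow standard ECMA-262 behaviour:
+//   "2009-06-01".replace(/(\d+)-(\d+)-(\d+)/, "$3/$2/$1")  // "01/06/2009"
+//   "abc".replace(/b/, "[$&]")                              // "a[b]c"
+//   "abc".replace(/b/, "$`|$'")                             // "aa|cc"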
+
+
+// Compute the string of a given regular expression capture.
+function CaptureString(string, lastCaptureInfo, index) {
+  // Scale the index.
+  var scaled = index << 1;
+  // Compute start and end.
+  var start = lastCaptureInfo[CAPTURE(scaled)];
+  var end = lastCaptureInfo[CAPTURE(scaled + 1)];
+  // If either start or end is missing return undefined.
+  if (start < 0 || end < 0) return;
+  return SubString(string, start, end);
+};
+
+
+// Add the string of a given regular expression capture to the
+// ReplaceResultBuilder
+function addCaptureString(builder, matchInfo, index) {
+  // Scale the index.
+  var scaled = index << 1;
+  // Compute start and end.
+  var start = matchInfo[CAPTURE(scaled)];
+  var end = matchInfo[CAPTURE(scaled + 1)];
+  // If either start or end is missing return.
+  if (start < 0 || end <= start) return;
+  builder.addSpecialSlice(start, end);
+};
+
+
+// Helper function for replacing regular expressions with the result of a
+// function application in String.prototype.replace.  The function application
+// must be interleaved with the regexp matching (contrary to ECMA-262
+// 15.5.4.11) to mimic SpiderMonkey and KJS behavior when the function uses
+// the static properties of the RegExp constructor.  Example:
+//     'abcd'.replace(/(.)/g, function() { return RegExp.$1; })
+// should be 'abcd' and not 'dddd' (or anything else).
+function StringReplaceRegExpWithFunction(subject, regexp, replace) {
+  var matchInfo = DoRegExpExec(regexp, subject, 0);
+  if (IS_NULL(matchInfo)) return subject;
+
+  var result = new ReplaceResultBuilder(subject);
+  // There's at least one match.  If the regexp is global, we have to loop
+  // over all matches.  The loop is not in C++ code here like the one in
+  // RegExp.prototype.exec, because of the interleaved function application.
+  // Unfortunately, that means this code is nearly duplicated, here and in
+  // jsregexp.cc.
+  if (regexp.global) {
+    var previous = 0;
+    do {
+      result.addSpecialSlice(previous, matchInfo[CAPTURE0]);
+      var startOfMatch = matchInfo[CAPTURE0];
+      previous = matchInfo[CAPTURE1];
+      result.add(ApplyReplacementFunction(replace, matchInfo, subject));
+      // Can't use matchInfo any more from here, since the function could
+      // overwrite it.
+      // Continue with the next match.
+      // Increment previous if we matched an empty string, as per ECMA-262
+      // 15.5.4.10.
+      if (previous == startOfMatch) {
+        // Add the skipped character to the output, if any.
+        if (previous < subject.length) {
+          result.addSpecialSlice(previous, previous + 1);
+        }
+        previous++;
+      }
+
+      // Per ECMA-262 15.10.6.2, if the previous index is greater than the
+      // string length, there is no match
+      matchInfo = (previous > subject.length)
+          ? null
+          : DoRegExpExec(regexp, subject, previous);
+    } while (!IS_NULL(matchInfo));
+
+    // Tack on the final right substring after the last match, if necessary.
+    if (previous < subject.length) {
+      result.addSpecialSlice(previous, subject.length);
+    }
+  } else { // Not a global regexp, no need to loop.
+    result.addSpecialSlice(0, matchInfo[CAPTURE0]);
+    var endOfMatch = matchInfo[CAPTURE1];
+    result.add(ApplyReplacementFunction(replace, matchInfo, subject));
+    // Can't use matchInfo any more from here, since the function could
+    // overwrite it.
+    result.addSpecialSlice(endOfMatch, subject.length);
+  }
+
+  return result.generate();
+}
+
+
+// Helper function to apply a string replacement function once.
+function ApplyReplacementFunction(replace, matchInfo, subject) {
+  // Compute the parameter list consisting of the match, captures, index,
+  // and subject for the replace function invocation.
+  var index = matchInfo[CAPTURE0];
+  // The number of captures plus one for the match.
+  var m = NUMBER_OF_CAPTURES(matchInfo) >> 1;
+  if (m == 1) {
+    var s = CaptureString(subject, matchInfo, 0);
+    // Don't call directly to avoid exposing the built-in global object.
+    return replace.call(null, s, index, subject);
+  }
+  var parameters = $Array(m + 2);
+  for (var j = 0; j < m; j++) {
+    parameters[j] = CaptureString(subject, matchInfo, j);
+  }
+  parameters[j] = index;
+  parameters[j + 1] = subject;
+  return replace.apply(null, parameters);
+}
+
+
+// ECMA-262 section 15.5.4.12
+function StringSearch(re) {
+  var regexp = new ORIGINAL_REGEXP(re);
+  var s = ToString(this);
+  var last_idx = regexp.lastIndex; // keep old lastIndex
+  regexp.lastIndex = 0;            // ignore re.global property
+  var result = regexp.exec(s);
+  regexp.lastIndex = last_idx;     // restore lastIndex
+  if (result == null)
+    return -1;
+  else
+    return result.index;
+}
+
+
+// ECMA-262 section 15.5.4.13
+function StringSlice(start, end) {
+  var s = ToString(this);
+  var s_len = s.length;
+  var start_i = TO_INTEGER(start);
+  var end_i = s_len;
+  if (end !== void 0)
+    end_i = TO_INTEGER(end);
+
+  if (start_i < 0) {
+    start_i += s_len;
+    if (start_i < 0)
+      start_i = 0;
+  } else {
+    if (start_i > s_len)
+      start_i = s_len;
+  }
+
+  if (end_i < 0) {
+    end_i += s_len;
+    if (end_i < 0)
+      end_i = 0;
+  } else {
+    if (end_i > s_len)
+      end_i = s_len;
+  }
+
+  var num_c = end_i - start_i;
+  if (num_c < 0)
+    num_c = 0;
+
+  return SubString(s, start_i, start_i + num_c);
+}
+
+
+// ECMA-262 section 15.5.4.14
+function StringSplit(separator, limit) {
+  var subject = ToString(this);
+  limit = (limit === void 0) ? 0xffffffff : ToUint32(limit);
+  if (limit === 0) return [];
+
+  // ECMA-262 says that if separator is undefined, the result should
+  // be an array of size 1 containing the entire string.  SpiderMonkey
+  // and KJS have this behaviour only when no separator is given.  If
+  // undefined is explicitly given, they convert it to a string and
+  // use that.  We do as SpiderMonkey and KJS.
+  if (%_ArgumentsLength() === 0) {
+    return [subject];
+  }
+
+  var length = subject.length;
+  if (IS_REGEXP(separator)) {
+    %_Log('regexp', 'regexp-split,%0S,%1r', [subject, separator]);
+  } else {
+    separator = ToString(separator);
+    // If the separator string is empty then return each character of the subject.
+    if (separator.length == 0) {
+      var result = $Array(length);
+      for (var i = 0; i < length; i++) result[i] = subject[i];
+      return result;
+    }
+  }
+
+  if (length === 0) {
+    if (splitMatch(separator, subject, 0, 0) != null) return [];
+    return [subject];
+  }
+
+  var currentIndex = 0;
+  var startIndex = 0;
+  var result = [];
+
+  while (true) {
+
+    if (startIndex === length) {
+      result[result.length] = subject.slice(currentIndex, length);
+      return result;
+    }
+
+    var matchInfo = splitMatch(separator, subject, currentIndex, startIndex);
+
+    if (IS_NULL(matchInfo)) {
+      result[result.length] = subject.slice(currentIndex, length);
+      return result;
+    }
+
+    var endIndex = matchInfo[CAPTURE1];
+
+    // We ignore a zero-length match at the currentIndex.
+    if (startIndex === endIndex && endIndex === currentIndex) {
+      startIndex++;
+      continue;
+    }
+
+    result[result.length] = SubString(subject, currentIndex, matchInfo[CAPTURE0]);
+    if (result.length === limit) return result;
+
+    for (var i = 2; i < NUMBER_OF_CAPTURES(matchInfo); i += 2) {
+      var start = matchInfo[CAPTURE(i)];
+      var end = matchInfo[CAPTURE(i + 1)];
+      if (start != -1 && end != -1) {
+        result[result.length] = SubString(subject, start, end);
+      } else {
+        result[result.length] = void 0;
+      }
+      if (result.length === limit) return result;
+    }
+
+    startIndex = currentIndex = endIndex;
+  }
+}
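+
+
+// Examples (illustrative) of the split behaviour implemented above:
+//   "a1b2c".split(/\d/)    // ["a", "b", "c"]
+//   "a1b2c".split(/(\d)/)  // ["a", "1", "b", "2", "c"]  (captures included)
+//   "abc".split("")        // ["a", "b", "c"]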
+
+
+// ECMA-262 section 15.5.4.14
+// Helper function used by split.  This version returns the matchInfo
+// instead of allocating a new array with basically the same information.
+function splitMatch(separator, subject, current_index, start_index) {
+  if (IS_REGEXP(separator)) {
+    var matchInfo = DoRegExpExec(separator, subject, start_index);
+    if (matchInfo == null) return null;
+    // Section 15.5.4.14 paragraph two says that we do not allow zero length
+    // matches at the end of the string.
+    if (matchInfo[CAPTURE0] === subject.length) return null;
+    return matchInfo;
+  }
+
+  var separatorIndex = subject.indexOf(separator, start_index);
+  if (separatorIndex === -1) return null;
+
+  reusableMatchInfo[CAPTURE0] = separatorIndex;
+  reusableMatchInfo[CAPTURE1] = separatorIndex + separator.length;
+  return reusableMatchInfo;
+};
+
+
+// ECMA-262 section 15.5.4.15
+function StringSubstring(start, end) {
+  var s = ToString(this);
+  var s_len = s.length;
+  var start_i = TO_INTEGER(start);
+  var end_i = s_len;
+  if (!IS_UNDEFINED(end))
+    end_i = TO_INTEGER(end);
+
+  if (start_i < 0) start_i = 0;
+  if (start_i > s_len) start_i = s_len;
+  if (end_i < 0) end_i = 0;
+  if (end_i > s_len) end_i = s_len;
+
+  if (start_i > end_i) {
+    var tmp = end_i;
+    end_i = start_i;
+    start_i = tmp;
+  }
+
+  return SubString(s, start_i, end_i);
+}
+
+
+// This is not a part of ECMA-262.
+function StringSubstr(start, n) {
+  var s = ToString(this);
+  var len;
+
+  // Correct n: if not given (or explicitly undefined), use the string
+  // length; if zero or negative, return the empty string.
+  if (n === void 0) {
+    len = s.length;
+  } else {
+    len = TO_INTEGER(n);
+    if (len <= 0) return '';
+  }
+
+  // Correct start: If not given (or undefined), set to zero; otherwise
+  // convert to integer and handle negative case.
+  if (start === void 0) {
+    start = 0;
+  } else {
+    start = TO_INTEGER(start);
+    // If positive, and greater than or equal to the string length,
+    // return empty string.
+    if (start >= s.length) return '';
+    // If negative and absolute value is larger than the string length,
+    // use zero.
+    if (start < 0) {
+      start += s.length;
+      if (start < 0) start = 0;
+    }
+  }
+
+  var end = start + len;
+  if (end > s.length) end = s.length;
+
+  return SubString(s, start, end);
+}
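+
+
+// Examples (illustrative):
+//   "abcdef".substr(2, 3)   // "cde"
+//   "abcdef".substr(-3, 2)  // "de"
+//   "abcdef".substr(4)      // "ef"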
+
+
+// ECMA-262, 15.5.4.16
+function StringToLowerCase() {
+  return %StringToLowerCase(ToString(this));
+}
+
+
+// ECMA-262, 15.5.4.17
+function StringToLocaleLowerCase() {
+  return %StringToLowerCase(ToString(this));
+}
+
+
+// ECMA-262, 15.5.4.18
+function StringToUpperCase() {
+  return %StringToUpperCase(ToString(this));
+}
+
+
+// ECMA-262, 15.5.4.19
+function StringToLocaleUpperCase() {
+  return %StringToUpperCase(ToString(this));
+}
+
+
+// ECMA-262, section 15.5.3.2
+function StringFromCharCode(code) {
+  var n = %_ArgumentsLength();
+  if (n == 1) return %CharFromCode(ToNumber(code) & 0xffff);
+
+  // NOTE: This is not super-efficient, but it is necessary because we
+  // want to avoid converting to numbers from within the virtual
+  // machine. Maybe we can find another way of doing this?
+  var codes = new $Array(n);
+  for (var i = 0; i < n; i++) codes[i] = ToNumber(%_Arguments(i));
+  return %StringFromCharCodeArray(codes);
+}
+
+
+// Helper function for very basic XSS protection.
+function HtmlEscape(str) {
+  return ToString(str).replace(/</g, "&lt;")
+                      .replace(/>/g, "&gt;")
+                      .replace(/"/g, "&quot;")
+                      .replace(/'/g, "&#039;");
+};
+
+
+// Compatibility support for KJS.
+// Tested by mozilla/js/tests/js1_5/Regress/regress-276103.js.
+function StringLink(s) {
+  return "<a href=\"" + HtmlEscape(s) + "\">" + this + "</a>";
+}
+
+
+function StringAnchor(name) {
+  return "<a name=\"" + HtmlEscape(name) + "\">" + this + "</a>";
+}
+
+
+function StringFontcolor(color) {
+  return "<font color=\"" + HtmlEscape(color) + "\">" + this + "</font>";
+}
+
+
+function StringFontsize(size) {
+  return "<font size=\"" + HtmlEscape(size) + "\">" + this + "</font>";
+}
+
+
+function StringBig() {
+  return "<big>" + this + "</big>";
+}
+
+
+function StringBlink() {
+  return "<blink>" + this + "</blink>";
+}
+
+
+function StringBold() {
+  return "<b>" + this + "</b>";
+}
+
+
+function StringFixed() {
+  return "<tt>" + this + "</tt>";
+}
+
+
+function StringItalics() {
+  return "<i>" + this + "</i>";
+}
+
+
+function StringSmall() {
+  return "<small>" + this + "</small>";
+}
+
+
+function StringStrike() {
+  return "<strike>" + this + "</strike>";
+}
+
+
+function StringSub() {
+  return "<sub>" + this + "</sub>";
+}
+
+
+function StringSup() {
+  return "<sup>" + this + "</sup>";
+}
+
+
+// StringBuilder support.
+
+function StringBuilder() {
+  this.elements = new $Array();
+}
+
+
+function ReplaceResultBuilder(str) {
+  this.elements = new $Array();
+  this.special_string = str;
+}
+
+
+ReplaceResultBuilder.prototype.add =
+StringBuilder.prototype.add = function(str) {
+  if (!IS_STRING(str)) str = ToString(str);
+  if (str.length > 0) {
+    var elements = this.elements;
+    elements[elements.length] = str;
+  }
+}
+
+
+ReplaceResultBuilder.prototype.addSpecialSlice = function(start, end) {
+  var len = end - start;
+  if (len == 0) return;
+  var elements = this.elements;
+  if (start >= 0 && len >= 0 && start < 0x80000 && len < 0x800) {
+    elements[elements.length] = (start << 11) + len;
+  } else {
+    elements[elements.length] = SubString(this.special_string, start, end);
+  }
+}
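+
+
+// Note (illustrative): the packed form above encodes the slice
+// [start, start + len) of special_string as the smi (start << 11) + len;
+// for example start 5, len 3 encodes as 5 * 2048 + 3 = 10243.  Such entries
+// are resolved against special_string when the final result is generated
+// by %StringBuilderConcat.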
+
+
+StringBuilder.prototype.generate = function() {
+  return %StringBuilderConcat(this.elements, "");
+}
+
+
+ReplaceResultBuilder.prototype.generate = function() {
+  return %StringBuilderConcat(this.elements, this.special_string);
+}
+
+
+function StringToJSON(key) {
+  return CheckJSONPrimitive(this.valueOf());
+}
+
+
+// -------------------------------------------------------------------
+
+function SetupString() {
+  // Setup the constructor property on the String prototype object.
+  %SetProperty($String.prototype, "constructor", $String, DONT_ENUM);
+
+
+  // Setup the non-enumerable functions on the String object.
+  InstallFunctions($String, DONT_ENUM, $Array(
+    "fromCharCode", StringFromCharCode
+  ));
+
+
+  // Setup the non-enumerable functions on the String prototype object.
+  InstallFunctionsOnHiddenPrototype($String.prototype, DONT_ENUM, $Array(
+    "valueOf", StringValueOf,
+    "toString", StringToString,
+    "charAt", StringCharAt,
+    "charCodeAt", StringCharCodeAt,
+    "concat", StringConcat,
+    "indexOf", StringIndexOf,
+    "lastIndexOf", StringLastIndexOf,
+    "localeCompare", StringLocaleCompare,
+    "match", StringMatch,
+    "replace", StringReplace,
+    "search", StringSearch,
+    "slice", StringSlice,
+    "split", StringSplit,
+    "substring", StringSubstring,
+    "substr", StringSubstr,
+    "toLowerCase", StringToLowerCase,
+    "toLocaleLowerCase", StringToLocaleLowerCase,
+    "toUpperCase", StringToUpperCase,
+    "toLocaleUpperCase", StringToLocaleUpperCase,
+    "link", StringLink,
+    "anchor", StringAnchor,
+    "fontcolor", StringFontcolor,
+    "fontsize", StringFontsize,
+    "big", StringBig,
+    "blink", StringBlink,
+    "bold", StringBold,
+    "fixed", StringFixed,
+    "italics", StringItalics,
+    "small", StringSmall,
+    "strike", StringStrike,
+    "sub", StringSub,
+    "sup", StringSup,
+    "toJSON", StringToJSON
+  ));
+}
+
+
+SetupString();
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
new file mode 100644
index 0000000..e10dc61
--- /dev/null
+++ b/src/stub-cache.cc
@@ -0,0 +1,1097 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "api.h"
+#include "arguments.h"
+#include "ic-inl.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------
+// StubCache implementation.
+
+
+StubCache::Entry StubCache::primary_[StubCache::kPrimaryTableSize];
+StubCache::Entry StubCache::secondary_[StubCache::kSecondaryTableSize];
+
+void StubCache::Initialize(bool create_heap_objects) {
+  ASSERT(IsPowerOf2(kPrimaryTableSize));
+  ASSERT(IsPowerOf2(kSecondaryTableSize));
+  if (create_heap_objects) {
+    HandleScope scope;
+    Clear();
+  }
+}
+
+
+Code* StubCache::Set(String* name, Map* map, Code* code) {
+  // Get the flags from the code.
+  Code::Flags flags = Code::RemoveTypeFromFlags(code->flags());
+
+  // Validate that the name does not move on scavenge, and that we
+  // can use identity checks instead of string equality checks.
+  ASSERT(!Heap::InNewSpace(name));
+  ASSERT(name->IsSymbol());
+
+  // The state bits are not important to the hash function because
+  // the stub cache only contains monomorphic stubs. Make sure that
+  // the bits are the least significant so they will be the ones
+  // masked out.
+  ASSERT(Code::ExtractICStateFromFlags(flags) == MONOMORPHIC);
+  ASSERT(Code::kFlagsICStateShift == 0);
+
+  // Make sure that the code type is not included in the hash.
+  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+  // Compute the primary entry.
+  int primary_offset = PrimaryOffset(name, flags, map);
+  Entry* primary = entry(primary_, primary_offset);
+  Code* hit = primary->value;
+
+  // If the primary entry has useful data in it, we retire it to the
+  // secondary cache before overwriting it.
+  if (hit != Builtins::builtin(Builtins::Illegal)) {
+    Code::Flags primary_flags = Code::RemoveTypeFromFlags(hit->flags());
+    int secondary_offset =
+        SecondaryOffset(primary->key, primary_flags, primary_offset);
+    Entry* secondary = entry(secondary_, secondary_offset);
+    *secondary = *primary;
+  }
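+
+  // (The lookup side of this table lives in the generated probe code; see
+  // StubCache::GenerateProbe in the architecture-specific files.  It is
+  // expected to check the primary table first and fall back to the
+  // secondary table, which is why the evicted primary entry is retired
+  // above instead of simply being dropped.)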
+
+  // Update primary cache.
+  primary->key = name;
+  primary->value = code;
+  return code;
+}
+
+
+Object* StubCache::ComputeLoadField(String* name,
+                                    JSObject* receiver,
+                                    JSObject* holder,
+                                    int field_index) {
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, FIELD);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    LoadStubCompiler compiler;
+    code = compiler.CompileLoadField(receiver, holder, field_index, name);
+    if (code->IsFailure()) return code;
+    LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    if (result->IsFailure()) return code;
+  }
+  return Set(name, receiver->map(), Code::cast(code));
+}
+
+
+Object* StubCache::ComputeLoadCallback(String* name,
+                                       JSObject* receiver,
+                                       JSObject* holder,
+                                       AccessorInfo* callback) {
+  ASSERT(v8::ToCData<Address>(callback->getter()) != 0);
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, CALLBACKS);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    LoadStubCompiler compiler;
+    code = compiler.CompileLoadCallback(receiver, holder, callback, name);
+    if (code->IsFailure()) return code;
+    LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    if (result->IsFailure()) return code;
+  }
+  return Set(name, receiver->map(), Code::cast(code));
+}
+
+
+Object* StubCache::ComputeLoadConstant(String* name,
+                                       JSObject* receiver,
+                                       JSObject* holder,
+                                       Object* value) {
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(Code::LOAD_IC, CONSTANT_FUNCTION);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    LoadStubCompiler compiler;
+    code = compiler.CompileLoadConstant(receiver, holder, value, name);
+    if (code->IsFailure()) return code;
+    LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    if (result->IsFailure()) return code;
+  }
+  return Set(name, receiver->map(), Code::cast(code));
+}
+
+
+Object* StubCache::ComputeLoadInterceptor(String* name,
+                                          JSObject* receiver,
+                                          JSObject* holder) {
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, INTERCEPTOR);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    LoadStubCompiler compiler;
+    code = compiler.CompileLoadInterceptor(receiver, holder, name);
+    if (code->IsFailure()) return code;
+    LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    if (result->IsFailure()) return code;
+  }
+  return Set(name, receiver->map(), Code::cast(code));
+}
+
+
+Object* StubCache::ComputeLoadNormal(String* name, JSObject* receiver) {
+  Code* code = Builtins::builtin(Builtins::LoadIC_Normal);
+  return Set(name, receiver->map(), code);
+}
+
+
+Object* StubCache::ComputeLoadGlobal(String* name,
+                                     JSObject* receiver,
+                                     GlobalObject* holder,
+                                     JSGlobalPropertyCell* cell,
+                                     bool is_dont_delete) {
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    LoadStubCompiler compiler;
+    code = compiler.CompileLoadGlobal(receiver,
+                                      holder,
+                                      cell,
+                                      name,
+                                      is_dont_delete);
+    if (code->IsFailure()) return code;
+    LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    if (result->IsFailure()) return code;
+  }
+  return Set(name, receiver->map(), Code::cast(code));
+}
+
+
+Object* StubCache::ComputeKeyedLoadField(String* name,
+                                         JSObject* receiver,
+                                         JSObject* holder,
+                                         int field_index) {
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, FIELD);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    KeyedLoadStubCompiler compiler;
+    code = compiler.CompileLoadField(name, receiver, holder, field_index);
+    if (code->IsFailure()) return code;
+    LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    if (result->IsFailure()) return result;
+  }
+  return code;
+}
+
+
+Object* StubCache::ComputeKeyedLoadConstant(String* name,
+                                            JSObject* receiver,
+                                            JSObject* holder,
+                                            Object* value) {
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CONSTANT_FUNCTION);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    KeyedLoadStubCompiler compiler;
+    code = compiler.CompileLoadConstant(name, receiver, holder, value);
+    if (code->IsFailure()) return code;
+    LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    if (result->IsFailure()) return result;
+  }
+  return code;
+}
+
+
+Object* StubCache::ComputeKeyedLoadInterceptor(String* name,
+                                               JSObject* receiver,
+                                               JSObject* holder) {
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, INTERCEPTOR);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    KeyedLoadStubCompiler compiler;
+    code = compiler.CompileLoadInterceptor(receiver, holder, name);
+    if (code->IsFailure()) return code;
+    LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    if (result->IsFailure()) return result;
+  }
+  return code;
+}
+
+
+Object* StubCache::ComputeKeyedLoadCallback(String* name,
+                                            JSObject* receiver,
+                                            JSObject* holder,
+                                            AccessorInfo* callback) {
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    KeyedLoadStubCompiler compiler;
+    code = compiler.CompileLoadCallback(name, receiver, holder, callback);
+    if (code->IsFailure()) return code;
+    LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    if (result->IsFailure()) return result;
+  }
+  return code;
+}
+
+
+Object* StubCache::ComputeKeyedLoadArrayLength(String* name,
+                                               JSArray* receiver) {
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    KeyedLoadStubCompiler compiler;
+    code = compiler.CompileLoadArrayLength(name);
+    if (code->IsFailure()) return code;
+    LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    if (result->IsFailure()) return result;
+  }
+  return code;
+}
+
+
+Object* StubCache::ComputeKeyedLoadStringLength(String* name,
+                                                String* receiver) {
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    KeyedLoadStubCompiler compiler;
+    code = compiler.CompileLoadStringLength(name);
+    if (code->IsFailure()) return code;
+    LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    if (result->IsFailure()) return result;
+  }
+  return code;
+}
+
+
+Object* StubCache::ComputeKeyedLoadFunctionPrototype(String* name,
+                                                     JSFunction* receiver) {
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    KeyedLoadStubCompiler compiler;
+    code = compiler.CompileLoadFunctionPrototype(name);
+    if (code->IsFailure()) return code;
+    LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    if (result->IsFailure()) return result;
+  }
+  return code;
+}
+
+
+Object* StubCache::ComputeStoreField(String* name,
+                                     JSObject* receiver,
+                                     int field_index,
+                                     Map* transition) {
+  PropertyType type = (transition == NULL) ? FIELD : MAP_TRANSITION;
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, type);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    StoreStubCompiler compiler;
+    code = compiler.CompileStoreField(receiver, field_index, transition, name);
+    if (code->IsFailure()) return code;
+    LOG(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
+    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    if (result->IsFailure()) return result;
+  }
+  return Set(name, receiver->map(), Code::cast(code));
+}
+
+
+Object* StubCache::ComputeStoreGlobal(String* name,
+                                      GlobalObject* receiver,
+                                      JSGlobalPropertyCell* cell) {
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, NORMAL);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    StoreStubCompiler compiler;
+    code = compiler.CompileStoreGlobal(receiver, cell, name);
+    if (code->IsFailure()) return code;
+    LOG(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
+    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    if (result->IsFailure()) return code;
+  }
+  return Set(name, receiver->map(), Code::cast(code));
+}
+
+
+Object* StubCache::ComputeStoreCallback(String* name,
+                                        JSObject* receiver,
+                                        AccessorInfo* callback) {
+  ASSERT(v8::ToCData<Address>(callback->setter()) != 0);
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, CALLBACKS);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    StoreStubCompiler compiler;
+    code = compiler.CompileStoreCallback(receiver, callback, name);
+    if (code->IsFailure()) return code;
+    LOG(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
+    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    if (result->IsFailure()) return result;
+  }
+  return Set(name, receiver->map(), Code::cast(code));
+}
+
+
+Object* StubCache::ComputeStoreInterceptor(String* name,
+                                           JSObject* receiver) {
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(Code::STORE_IC, INTERCEPTOR);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    StoreStubCompiler compiler;
+    code = compiler.CompileStoreInterceptor(receiver, name);
+    if (code->IsFailure()) return code;
+    LOG(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
+    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    if (result->IsFailure()) return result;
+  }
+  return Set(name, receiver->map(), Code::cast(code));
+}
+
+
+Object* StubCache::ComputeKeyedStoreField(String* name, JSObject* receiver,
+                                          int field_index, Map* transition) {
+  PropertyType type = (transition == NULL) ? FIELD : MAP_TRANSITION;
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, type);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    KeyedStoreStubCompiler compiler;
+    code = compiler.CompileStoreField(receiver, field_index, transition, name);
+    if (code->IsFailure()) return code;
+    LOG(CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, Code::cast(code), name));
+    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    if (result->IsFailure()) return result;
+  }
+  return code;
+}
+
+
+Object* StubCache::ComputeCallConstant(int argc,
+                                       InLoopFlag in_loop,
+                                       String* name,
+                                       Object* object,
+                                       JSObject* holder,
+                                       JSFunction* function) {
+  // Compute the check type and the map.
+  Map* map = IC::GetCodeCacheMapForObject(object);
+
+  // Compute check type based on receiver/holder.
+  StubCompiler::CheckType check = StubCompiler::RECEIVER_MAP_CHECK;
+  if (object->IsString()) {
+    check = StubCompiler::STRING_CHECK;
+  } else if (object->IsNumber()) {
+    check = StubCompiler::NUMBER_CHECK;
+  } else if (object->IsBoolean()) {
+    check = StubCompiler::BOOLEAN_CHECK;
+  }
+
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(Code::CALL_IC,
+                                    CONSTANT_FUNCTION,
+                                    in_loop,
+                                    argc);
+  Object* code = map->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    if (object->IsJSObject()) {
+      Object* opt =
+          Top::LookupSpecialFunction(JSObject::cast(object), holder, function);
+      if (opt->IsJSFunction()) {
+        check = StubCompiler::JSARRAY_HAS_FAST_ELEMENTS_CHECK;
+        function = JSFunction::cast(opt);
+      }
+    }
+    // If the function hasn't been compiled yet, we cannot do it now
+    // because it may cause GC. To avoid this issue, we return an
+    // internal error which will make sure we do not update any
+    // caches.
+    if (!function->is_compiled()) return Failure::InternalError();
+    // Compile the stub - only create stubs for fully compiled functions.
+    CallStubCompiler compiler(argc, in_loop);
+    code = compiler.CompileCallConstant(object, holder, function, name, check);
+    if (code->IsFailure()) return code;
+    ASSERT_EQ(flags, Code::cast(code)->flags());
+    LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
+    Object* result = map->UpdateCodeCache(name, Code::cast(code));
+    if (result->IsFailure()) return result;
+  }
+  return Set(name, map, Code::cast(code));
+}
+
+
+Object* StubCache::ComputeCallField(int argc,
+                                    InLoopFlag in_loop,
+                                    String* name,
+                                    Object* object,
+                                    JSObject* holder,
+                                    int index) {
+  // Compute the check type and the map.
+  Map* map = IC::GetCodeCacheMapForObject(object);
+
+  // TODO(1233596): We cannot do receiver map check for non-JS objects
+  // because they may be represented as immediates without a
+  // map. Instead, we check against the map in the holder.
+  if (object->IsNumber() || object->IsBoolean() || object->IsString()) {
+    object = holder;
+  }
+
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC,
+                                                    FIELD,
+                                                    in_loop,
+                                                    argc);
+  Object* code = map->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    CallStubCompiler compiler(argc, in_loop);
+    code = compiler.CompileCallField(object, holder, index, name);
+    if (code->IsFailure()) return code;
+    ASSERT_EQ(flags, Code::cast(code)->flags());
+    LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
+    Object* result = map->UpdateCodeCache(name, Code::cast(code));
+    if (result->IsFailure()) return result;
+  }
+  return Set(name, map, Code::cast(code));
+}
+
+
+Object* StubCache::ComputeCallInterceptor(int argc,
+                                          String* name,
+                                          Object* object,
+                                          JSObject* holder) {
+  // Compute the check type and the map.
+  // If the object is a value, we use the prototype map for the cache.
+  Map* map = IC::GetCodeCacheMapForObject(object);
+
+  // TODO(1233596): We cannot do receiver map check for non-JS objects
+  // because they may be represented as immediates without a
+  // map. Instead, we check against the map in the holder.
+  if (object->IsNumber() || object->IsBoolean() || object->IsString()) {
+    object = holder;
+  }
+
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(Code::CALL_IC,
+                                    INTERCEPTOR,
+                                    NOT_IN_LOOP,
+                                    argc);
+  Object* code = map->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    CallStubCompiler compiler(argc, NOT_IN_LOOP);
+    code = compiler.CompileCallInterceptor(object, holder, name);
+    if (code->IsFailure()) return code;
+    ASSERT_EQ(flags, Code::cast(code)->flags());
+    LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
+    Object* result = map->UpdateCodeCache(name, Code::cast(code));
+    if (result->IsFailure()) return result;
+  }
+  return Set(name, map, Code::cast(code));
+}
+
+
+Object* StubCache::ComputeCallNormal(int argc,
+                                     InLoopFlag in_loop,
+                                     String* name,
+                                     JSObject* receiver) {
+  Object* code = ComputeCallNormal(argc, in_loop);
+  if (code->IsFailure()) return code;
+  return Set(name, receiver->map(), Code::cast(code));
+}
+
+
+Object* StubCache::ComputeCallGlobal(int argc,
+                                     InLoopFlag in_loop,
+                                     String* name,
+                                     JSObject* receiver,
+                                     GlobalObject* holder,
+                                     JSGlobalPropertyCell* cell,
+                                     JSFunction* function) {
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(Code::CALL_IC, NORMAL, in_loop, argc);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    // If the function hasn't been compiled yet, we cannot do it now
+    // because it may cause GC. To avoid this issue, we return an
+    // internal error which will make sure we do not update any
+    // caches.
+    if (!function->is_compiled()) return Failure::InternalError();
+    CallStubCompiler compiler(argc, in_loop);
+    code = compiler.CompileCallGlobal(receiver, holder, cell, function, name);
+    if (code->IsFailure()) return code;
+    ASSERT_EQ(flags, Code::cast(code)->flags());
+    LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
+    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    if (result->IsFailure()) return code;
+  }
+  return Set(name, receiver->map(), Code::cast(code));
+}
+
+
+static Object* GetProbeValue(Code::Flags flags) {
+  // Use raw_unchecked... so we don't get assert failures during GC.
+  NumberDictionary* dictionary = Heap::raw_unchecked_non_monomorphic_cache();
+  int entry = dictionary->FindEntry(flags);
+  if (entry != -1) return dictionary->ValueAt(entry);
+  return Heap::raw_unchecked_undefined_value();
+}
+
+
+static Object* ProbeCache(Code::Flags flags) {
+  Object* probe = GetProbeValue(flags);
+  if (probe != Heap::undefined_value()) return probe;
+  // Seed the cache with an undefined value to make sure that any
+  // generated code object can always be inserted into the cache
+  // without causing allocation failures.
+  Object* result =
+      Heap::non_monomorphic_cache()->AtNumberPut(flags,
+                                                 Heap::undefined_value());
+  if (result->IsFailure()) return result;
+  Heap::public_set_non_monomorphic_cache(NumberDictionary::cast(result));
+  return probe;
+}
+
+
+static Object* FillCache(Object* code) {
+  if (code->IsCode()) {
+    int entry =
+        Heap::non_monomorphic_cache()->FindEntry(
+            Code::cast(code)->flags());
+    // The entry must be present; see the comment in ProbeCache.
+    ASSERT(entry != -1);
+    ASSERT(Heap::non_monomorphic_cache()->ValueAt(entry) ==
+           Heap::undefined_value());
+    Heap::non_monomorphic_cache()->ValueAtPut(entry, code);
+    CHECK(GetProbeValue(Code::cast(code)->flags()) == code);
+  }
+  return code;
+}
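+
+
+// Taken together, ProbeCache and FillCache implement the protocol used by
+// the FindCallInitialize/Compute* functions below for the non-monomorphic
+// cache: probe first; if the probe yields undefined, compile the stub and
+// let FillCache store it into the slot that ProbeCache already reserved,
+// so the store itself cannot fail with an allocation failure.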
+
+
+Code* StubCache::FindCallInitialize(int argc, InLoopFlag in_loop) {
+  Code::Flags flags =
+      Code::ComputeFlags(Code::CALL_IC, in_loop, UNINITIALIZED, NORMAL, argc);
+  Object* result = ProbeCache(flags);
+  ASSERT(!result->IsUndefined());
+  // This might be called during the marking phase of the collector,
+  // hence the unchecked cast.
+  return reinterpret_cast<Code*>(result);
+}
+
+
+Object* StubCache::ComputeCallInitialize(int argc, InLoopFlag in_loop) {
+  Code::Flags flags =
+      Code::ComputeFlags(Code::CALL_IC, in_loop, UNINITIALIZED, NORMAL, argc);
+  Object* probe = ProbeCache(flags);
+  if (!probe->IsUndefined()) return probe;
+  StubCompiler compiler;
+  return FillCache(compiler.CompileCallInitialize(flags));
+}
+
+
+Object* StubCache::ComputeCallPreMonomorphic(int argc, InLoopFlag in_loop) {
+  Code::Flags flags =
+      Code::ComputeFlags(Code::CALL_IC, in_loop, PREMONOMORPHIC, NORMAL, argc);
+  Object* probe = ProbeCache(flags);
+  if (!probe->IsUndefined()) return probe;
+  StubCompiler compiler;
+  return FillCache(compiler.CompileCallPreMonomorphic(flags));
+}
+
+
+Object* StubCache::ComputeCallNormal(int argc, InLoopFlag in_loop) {
+  Code::Flags flags =
+      Code::ComputeFlags(Code::CALL_IC, in_loop, MONOMORPHIC, NORMAL, argc);
+  Object* probe = ProbeCache(flags);
+  if (!probe->IsUndefined()) return probe;
+  StubCompiler compiler;
+  return FillCache(compiler.CompileCallNormal(flags));
+}
+
+
+Object* StubCache::ComputeCallMegamorphic(int argc, InLoopFlag in_loop) {
+  Code::Flags flags =
+      Code::ComputeFlags(Code::CALL_IC, in_loop, MEGAMORPHIC, NORMAL, argc);
+  Object* probe = ProbeCache(flags);
+  if (!probe->IsUndefined()) return probe;
+  StubCompiler compiler;
+  return FillCache(compiler.CompileCallMegamorphic(flags));
+}
+
+
+Object* StubCache::ComputeCallMiss(int argc) {
+  Code::Flags flags =
+      Code::ComputeFlags(Code::STUB, NOT_IN_LOOP, MEGAMORPHIC, NORMAL, argc);
+  Object* probe = ProbeCache(flags);
+  if (!probe->IsUndefined()) return probe;
+  StubCompiler compiler;
+  return FillCache(compiler.CompileCallMiss(flags));
+}
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+Object* StubCache::ComputeCallDebugBreak(int argc) {
+  Code::Flags flags =
+      Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, DEBUG_BREAK, NORMAL, argc);
+  Object* probe = ProbeCache(flags);
+  if (!probe->IsUndefined()) return probe;
+  StubCompiler compiler;
+  return FillCache(compiler.CompileCallDebugBreak(flags));
+}
+
+
+Object* StubCache::ComputeCallDebugPrepareStepIn(int argc) {
+  Code::Flags flags =
+      Code::ComputeFlags(Code::CALL_IC,
+                         NOT_IN_LOOP,
+                         DEBUG_PREPARE_STEP_IN,
+                         NORMAL,
+                         argc);
+  Object* probe = ProbeCache(flags);
+  if (!probe->IsUndefined()) return probe;
+  StubCompiler compiler;
+  return FillCache(compiler.CompileCallDebugPrepareStepIn(flags));
+}
+#endif
+
+
+Object* StubCache::ComputeLazyCompile(int argc) {
+  Code::Flags flags =
+      Code::ComputeFlags(Code::STUB, NOT_IN_LOOP, UNINITIALIZED, NORMAL, argc);
+  Object* probe = ProbeCache(flags);
+  if (!probe->IsUndefined()) return probe;
+  StubCompiler compiler;
+  Object* result = FillCache(compiler.CompileLazyCompile(flags));
+  if (result->IsCode()) {
+    Code* code = Code::cast(result);
+    USE(code);
+    LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG,
+                        code, code->arguments_count()));
+  }
+  return result;
+}
+
+
+void StubCache::Clear() {
+  for (int i = 0; i < kPrimaryTableSize; i++) {
+    primary_[i].key = Heap::empty_string();
+    primary_[i].value = Builtins::builtin(Builtins::Illegal);
+  }
+  for (int j = 0; j < kSecondaryTableSize; j++) {
+    secondary_[j].key = Heap::empty_string();
+    secondary_[j].value = Builtins::builtin(Builtins::Illegal);
+  }
+}
+
+
+// ------------------------------------------------------------------------
+// StubCompiler implementation.
+
+
+// Support function for computing call IC miss stubs.
+Handle<Code> ComputeCallMiss(int argc) {
+  CALL_HEAP_FUNCTION(StubCache::ComputeCallMiss(argc), Code);
+}
+
+
+Object* LoadCallbackProperty(Arguments args) {
+  AccessorInfo* callback = AccessorInfo::cast(args[2]);
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  v8::AccessorGetter fun = FUNCTION_CAST<v8::AccessorGetter>(getter_address);
+  ASSERT(fun != NULL);
+  v8::AccessorInfo info(args.arguments());
+  HandleScope scope;
+  v8::Handle<v8::Value> result;
+  {
+    // Leaving JavaScript.
+    VMState state(EXTERNAL);
+    result = fun(v8::Utils::ToLocal(args.at<String>(4)), info);
+  }
+  RETURN_IF_SCHEDULED_EXCEPTION();
+  if (result.IsEmpty()) return Heap::undefined_value();
+  return *v8::Utils::OpenHandle(*result);
+}
+
+
+Object* StoreCallbackProperty(Arguments args) {
+  JSObject* recv = JSObject::cast(args[0]);
+  AccessorInfo* callback = AccessorInfo::cast(args[1]);
+  Address setter_address = v8::ToCData<Address>(callback->setter());
+  v8::AccessorSetter fun = FUNCTION_CAST<v8::AccessorSetter>(setter_address);
+  ASSERT(fun != NULL);
+  Handle<String> name = args.at<String>(2);
+  Handle<Object> value = args.at<Object>(3);
+  HandleScope scope;
+  LOG(ApiNamedPropertyAccess("store", recv, *name));
+  CustomArguments custom_args(callback->data(), recv, recv);
+  v8::AccessorInfo info(custom_args.end());
+  {
+    // Leaving JavaScript.
+    VMState state(EXTERNAL);
+    fun(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
+  }
+  RETURN_IF_SCHEDULED_EXCEPTION();
+  return *value;
+}
+
+/**
+ * Attempts to load a property with an interceptor (which must be present),
+ * but doesn't search the prototype chain.
+ *
+ * Returns |Heap::no_interceptor_result_sentinel()| if the interceptor
+ * doesn't provide any value for the given name.
+ */
+Object* LoadPropertyWithInterceptorOnly(Arguments args) {
+  JSObject* receiver_handle = JSObject::cast(args[0]);
+  JSObject* holder_handle = JSObject::cast(args[1]);
+  Handle<String> name_handle = args.at<String>(2);
+  Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(3);
+  Object* data_handle = args[4];
+
+  Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
+  v8::NamedPropertyGetter getter =
+      FUNCTION_CAST<v8::NamedPropertyGetter>(getter_address);
+  ASSERT(getter != NULL);
+
+  {
+    // Use the interceptor getter.
+    CustomArguments args(data_handle, receiver_handle, holder_handle);
+    v8::AccessorInfo info(args.end());
+    HandleScope scope;
+    v8::Handle<v8::Value> r;
+    {
+      // Leaving JavaScript.
+      VMState state(EXTERNAL);
+      r = getter(v8::Utils::ToLocal(name_handle), info);
+    }
+    RETURN_IF_SCHEDULED_EXCEPTION();
+    if (!r.IsEmpty()) {
+      return *v8::Utils::OpenHandle(*r);
+    }
+  }
+
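+  // (Returning the sentinel, rather than undefined, lets the caller tell
+  // "interceptor produced no value" apart from a property whose value
+  // really is undefined, and continue the regular lookup in that case.)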
+  return Heap::no_interceptor_result_sentinel();
+}
+
+
+static Object* ThrowReferenceError(String* name) {
+  // If the load is non-contextual, just return the undefined result.
+  // Note that both keyed and non-keyed loads may end up here, so we
+  // can't use either LoadIC or KeyedLoadIC constructors.
+  IC ic(IC::NO_EXTRA_FRAME);
+  ASSERT(ic.target()->is_load_stub() || ic.target()->is_keyed_load_stub());
+  if (!ic.is_contextual()) return Heap::undefined_value();
+
+  // Throw a reference error.
+  HandleScope scope;
+  Handle<String> name_handle(name);
+  Handle<Object> error =
+      Factory::NewReferenceError("not_defined",
+                                  HandleVector(&name_handle, 1));
+  return Top::Throw(*error);
+}
+
+
+static Object* LoadWithInterceptor(Arguments* args,
+                                   PropertyAttributes* attrs) {
+  Handle<JSObject> receiver_handle = args->at<JSObject>(0);
+  Handle<JSObject> holder_handle = args->at<JSObject>(1);
+  Handle<String> name_handle = args->at<String>(2);
+  Handle<InterceptorInfo> interceptor_info = args->at<InterceptorInfo>(3);
+  Handle<Object> data_handle = args->at<Object>(4);
+
+  Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
+  v8::NamedPropertyGetter getter =
+      FUNCTION_CAST<v8::NamedPropertyGetter>(getter_address);
+  ASSERT(getter != NULL);
+
+  {
+    // Use the interceptor getter.
+    CustomArguments args(*data_handle, *receiver_handle, *holder_handle);
+    v8::AccessorInfo info(args.end());
+    HandleScope scope;
+    v8::Handle<v8::Value> r;
+    {
+      // Leaving JavaScript.
+      VMState state(EXTERNAL);
+      r = getter(v8::Utils::ToLocal(name_handle), info);
+    }
+    RETURN_IF_SCHEDULED_EXCEPTION();
+    if (!r.IsEmpty()) {
+      *attrs = NONE;
+      return *v8::Utils::OpenHandle(*r);
+    }
+  }
+
+  Object* result = holder_handle->GetPropertyPostInterceptor(
+      *receiver_handle,
+      *name_handle,
+      attrs);
+  RETURN_IF_SCHEDULED_EXCEPTION();
+  return result;
+}
+
+
+/**
+ * Loads a property with an interceptor performing post interceptor
+ * lookup if interceptor failed.
+ */
+Object* LoadPropertyWithInterceptorForLoad(Arguments args) {
+  PropertyAttributes attr = NONE;
+  Object* result = LoadWithInterceptor(&args, &attr);
+  if (result->IsFailure()) return result;
+
+  // If the property is present, return it.
+  if (attr != ABSENT) return result;
+  return ThrowReferenceError(String::cast(args[2]));
+}
+
+
+Object* LoadPropertyWithInterceptorForCall(Arguments args) {
+  PropertyAttributes attr;
+  Object* result = LoadWithInterceptor(&args, &attr);
+  RETURN_IF_SCHEDULED_EXCEPTION();
+  // This is a call IC. In this case, we simply return the undefined result,
+  // which will lead to an exception when the caller tries to invoke the
+  // result as a function.
+  return result;
+}
+
+
+Object* StoreInterceptorProperty(Arguments args) {
+  JSObject* recv = JSObject::cast(args[0]);
+  String* name = String::cast(args[1]);
+  Object* value = args[2];
+  ASSERT(recv->HasNamedInterceptor());
+  PropertyAttributes attr = NONE;
+  Object* result = recv->SetPropertyWithInterceptor(name, value, attr);
+  return result;
+}
+
+
+Object* StubCompiler::CompileCallInitialize(Code::Flags flags) {
+  HandleScope scope;
+  int argc = Code::ExtractArgumentsCountFromFlags(flags);
+  CallIC::GenerateInitialize(masm(), argc);
+  Object* result = GetCodeWithFlags(flags, "CompileCallInitialize");
+  if (!result->IsFailure()) {
+    Counters::call_initialize_stubs.Increment();
+    Code* code = Code::cast(result);
+    USE(code);
+    LOG(CodeCreateEvent(Logger::CALL_INITIALIZE_TAG,
+                        code, code->arguments_count()));
+  }
+  return result;
+}
+
+
+Object* StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
+  HandleScope scope;
+  int argc = Code::ExtractArgumentsCountFromFlags(flags);
+  // The code of the PreMonomorphic stub is the same as the code
+  // of the Initialized stub.  They differ only in the code object flags.
+  CallIC::GenerateInitialize(masm(), argc);
+  Object* result = GetCodeWithFlags(flags, "CompileCallPreMonomorphic");
+  if (!result->IsFailure()) {
+    Counters::call_premonomorphic_stubs.Increment();
+    Code* code = Code::cast(result);
+    USE(code);
+    LOG(CodeCreateEvent(Logger::CALL_PRE_MONOMORPHIC_TAG,
+                        code, code->arguments_count()));
+  }
+  return result;
+}
+
+
+Object* StubCompiler::CompileCallNormal(Code::Flags flags) {
+  HandleScope scope;
+  int argc = Code::ExtractArgumentsCountFromFlags(flags);
+  CallIC::GenerateNormal(masm(), argc);
+  Object* result = GetCodeWithFlags(flags, "CompileCallNormal");
+  if (!result->IsFailure()) {
+    Counters::call_normal_stubs.Increment();
+    Code* code = Code::cast(result);
+    USE(code);
+    LOG(CodeCreateEvent(Logger::CALL_NORMAL_TAG,
+                        code, code->arguments_count()));
+  }
+  return result;
+}
+
+
+Object* StubCompiler::CompileCallMegamorphic(Code::Flags flags) {
+  HandleScope scope;
+  int argc = Code::ExtractArgumentsCountFromFlags(flags);
+  CallIC::GenerateMegamorphic(masm(), argc);
+  Object* result = GetCodeWithFlags(flags, "CompileCallMegamorphic");
+  if (!result->IsFailure()) {
+    Counters::call_megamorphic_stubs.Increment();
+    Code* code = Code::cast(result);
+    USE(code);
+    LOG(CodeCreateEvent(Logger::CALL_MEGAMORPHIC_TAG,
+                        code, code->arguments_count()));
+  }
+  return result;
+}
+
+
+Object* StubCompiler::CompileCallMiss(Code::Flags flags) {
+  HandleScope scope;
+  int argc = Code::ExtractArgumentsCountFromFlags(flags);
+  CallIC::GenerateMiss(masm(), argc);
+  Object* result = GetCodeWithFlags(flags, "CompileCallMiss");
+  if (!result->IsFailure()) {
+    Counters::call_megamorphic_stubs.Increment();
+    Code* code = Code::cast(result);
+    USE(code);
+    LOG(CodeCreateEvent(Logger::CALL_MISS_TAG, code, code->arguments_count()));
+  }
+  return result;
+}
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+Object* StubCompiler::CompileCallDebugBreak(Code::Flags flags) {
+  HandleScope scope;
+  Debug::GenerateCallICDebugBreak(masm());
+  Object* result = GetCodeWithFlags(flags, "CompileCallDebugBreak");
+  if (!result->IsFailure()) {
+    Code* code = Code::cast(result);
+    USE(code);
+    LOG(CodeCreateEvent(Logger::CALL_DEBUG_BREAK_TAG,
+                        code, code->arguments_count()));
+  }
+  return result;
+}
+
+
+Object* StubCompiler::CompileCallDebugPrepareStepIn(Code::Flags flags) {
+  HandleScope scope;
+  // Use the same code for the step-in preparations as we do for
+  // the miss case.
+  int argc = Code::ExtractArgumentsCountFromFlags(flags);
+  CallIC::GenerateMiss(masm(), argc);
+  Object* result = GetCodeWithFlags(flags, "CompileCallDebugPrepareStepIn");
+  if (!result->IsFailure()) {
+    Code* code = Code::cast(result);
+    USE(code);
+    LOG(CodeCreateEvent(Logger::CALL_DEBUG_PREPARE_STEP_IN_TAG,
+                        code, code->arguments_count()));
+  }
+  return result;
+}
+#endif
+
+
+Object* StubCompiler::GetCodeWithFlags(Code::Flags flags, const char* name) {
+  // Check for allocation failures during stub compilation.
+  if (failure_->IsFailure()) return failure_;
+
+  // Create code object in the heap.
+  CodeDesc desc;
+  masm_.GetCode(&desc);
+  Object* result = Heap::CreateCode(desc, NULL, flags, masm_.CodeObject());
+#ifdef ENABLE_DISASSEMBLER
+  if (FLAG_print_code_stubs && !result->IsFailure()) {
+    Code::cast(result)->Disassemble(name);
+  }
+#endif
+  return result;
+}
+
+
+Object* StubCompiler::GetCodeWithFlags(Code::Flags flags, String* name) {
+  if (FLAG_print_code_stubs && (name != NULL)) {
+    return GetCodeWithFlags(flags, *name->ToCString());
+  }
+  return GetCodeWithFlags(flags, reinterpret_cast<char*>(NULL));
+}
+
+
+Object* LoadStubCompiler::GetCode(PropertyType type, String* name) {
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, type);
+  return GetCodeWithFlags(flags, name);
+}
+
+
+Object* KeyedLoadStubCompiler::GetCode(PropertyType type, String* name) {
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, type);
+  return GetCodeWithFlags(flags, name);
+}
+
+
+Object* StoreStubCompiler::GetCode(PropertyType type, String* name) {
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, type);
+  return GetCodeWithFlags(flags, name);
+}
+
+
+Object* KeyedStoreStubCompiler::GetCode(PropertyType type, String* name) {
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, type);
+  return GetCodeWithFlags(flags, name);
+}
+
+
+Object* CallStubCompiler::GetCode(PropertyType type, String* name) {
+  int argc = arguments_.immediate();
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC,
+                                                    type,
+                                                    in_loop_,
+                                                    argc);
+  return GetCodeWithFlags(flags, name);
+}
+
+
+Object* ConstructStubCompiler::GetCode() {
+  Code::Flags flags = Code::ComputeFlags(Code::STUB);
+  Object* result = GetCodeWithFlags(flags, "ConstructStub");
+  if (!result->IsFailure()) {
+    Code* code = Code::cast(result);
+    USE(code);
+    LOG(CodeCreateEvent(Logger::STUB_TAG, code, "ConstructStub"));
+  }
+  return result;
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/stub-cache.h b/src/stub-cache.h
new file mode 100644
index 0000000..e268920
--- /dev/null
+++ b/src/stub-cache.h
@@ -0,0 +1,577 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_STUB_CACHE_H_
+#define V8_STUB_CACHE_H_
+
+#include "macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+
+// The stub cache is used for megamorphic calls and property accesses.
+// It maps (map, name, type) -> Code*.
+
+// The table design reuses the inline cache stubs generated for
+// monomorphic calls.  The beauty of this is that we do not have to
+// invalidate the cache whenever a prototype map is changed.  The stub
+// validates the map chain just as in the monomorphic case.
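+//
+// For example (roughly): a monomorphic LOAD_IC stub compiled for property
+// "foo" on receivers with map M is cached under (M, "foo", LOAD_IC) and is
+// reused for every receiver that shares map M; receivers with any other
+// map simply miss in this cache.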
+
+class SCTableReference;
+
+class StubCache : public AllStatic {
+ public:
+  struct Entry {
+    String* key;
+    Code* value;
+  };
+
+
+  static void Initialize(bool create_heap_objects);
+
+  // Computes the stub that matches the given name and receiver, and
+  // inserts the result in the cache before returning.  This might
+  // compile a stub if needed.
+  static Object* ComputeLoadField(String* name,
+                                  JSObject* receiver,
+                                  JSObject* holder,
+                                  int field_index);
+
+  static Object* ComputeLoadCallback(String* name,
+                                     JSObject* receiver,
+                                     JSObject* holder,
+                                     AccessorInfo* callback);
+
+  static Object* ComputeLoadConstant(String* name,
+                                     JSObject* receiver,
+                                     JSObject* holder,
+                                     Object* value);
+
+  static Object* ComputeLoadInterceptor(String* name,
+                                        JSObject* receiver,
+                                        JSObject* holder);
+
+  static Object* ComputeLoadNormal(String* name, JSObject* receiver);
+
+
+  static Object* ComputeLoadGlobal(String* name,
+                                   JSObject* receiver,
+                                   GlobalObject* holder,
+                                   JSGlobalPropertyCell* cell,
+                                   bool is_dont_delete);
+
+
+  // ---
+
+  static Object* ComputeKeyedLoadField(String* name,
+                                       JSObject* receiver,
+                                       JSObject* holder,
+                                       int field_index);
+
+  static Object* ComputeKeyedLoadCallback(String* name,
+                                          JSObject* receiver,
+                                          JSObject* holder,
+                                          AccessorInfo* callback);
+
+  static Object* ComputeKeyedLoadConstant(String* name, JSObject* receiver,
+                                          JSObject* holder, Object* value);
+
+  static Object* ComputeKeyedLoadInterceptor(String* name,
+                                             JSObject* receiver,
+                                             JSObject* holder);
+
+  static Object* ComputeKeyedLoadArrayLength(String* name, JSArray* receiver);
+
+  static Object* ComputeKeyedLoadStringLength(String* name,
+                                              String* receiver);
+
+  static Object* ComputeKeyedLoadFunctionPrototype(String* name,
+                                                   JSFunction* receiver);
+
+  // ---
+
+  static Object* ComputeStoreField(String* name,
+                                   JSObject* receiver,
+                                   int field_index,
+                                   Map* transition = NULL);
+
+  static Object* ComputeStoreGlobal(String* name,
+                                    GlobalObject* receiver,
+                                    JSGlobalPropertyCell* cell);
+
+  static Object* ComputeStoreCallback(String* name,
+                                      JSObject* receiver,
+                                      AccessorInfo* callback);
+
+  static Object* ComputeStoreInterceptor(String* name, JSObject* receiver);
+
+  // ---
+
+  static Object* ComputeKeyedStoreField(String* name,
+                                        JSObject* receiver,
+                                        int field_index,
+                                        Map* transition = NULL);
+
+  // ---
+
+  static Object* ComputeCallField(int argc,
+                                  InLoopFlag in_loop,
+                                  String* name,
+                                  Object* object,
+                                  JSObject* holder,
+                                  int index);
+
+  static Object* ComputeCallConstant(int argc,
+                                     InLoopFlag in_loop,
+                                     String* name,
+                                     Object* object,
+                                     JSObject* holder,
+                                     JSFunction* function);
+
+  static Object* ComputeCallNormal(int argc,
+                                   InLoopFlag in_loop,
+                                   String* name,
+                                   JSObject* receiver);
+
+  static Object* ComputeCallInterceptor(int argc,
+                                        String* name,
+                                        Object* object,
+                                        JSObject* holder);
+
+  static Object* ComputeCallGlobal(int argc,
+                                   InLoopFlag in_loop,
+                                   String* name,
+                                   JSObject* receiver,
+                                   GlobalObject* holder,
+                                   JSGlobalPropertyCell* cell,
+                                   JSFunction* function);
+
+  // ---
+
+  static Object* ComputeCallInitialize(int argc, InLoopFlag in_loop);
+  static Object* ComputeCallPreMonomorphic(int argc, InLoopFlag in_loop);
+  static Object* ComputeCallNormal(int argc, InLoopFlag in_loop);
+  static Object* ComputeCallMegamorphic(int argc, InLoopFlag in_loop);
+  static Object* ComputeCallMiss(int argc);
+
+  // Finds the Code object stored in the Heap::non_monomorphic_cache().
+  static Code* FindCallInitialize(int argc, InLoopFlag in_loop);
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  static Object* ComputeCallDebugBreak(int argc);
+  static Object* ComputeCallDebugPrepareStepIn(int argc);
+#endif
+
+  static Object* ComputeLazyCompile(int argc);
+
+
+  // Update cache for entry hash(name, map).
+  static Code* Set(String* name, Map* map, Code* code);
+
+  // Clear the lookup table (done during mark-compact collection).
+  static void Clear();
+
+  // Functions for generating stubs at startup.
+  static void GenerateMiss(MacroAssembler* masm);
+
+  // Generate code for probing the stub cache table.
+  // If extra != no_reg it might be used as an extra scratch register.
+  static void GenerateProbe(MacroAssembler* masm,
+                            Code::Flags flags,
+                            Register receiver,
+                            Register name,
+                            Register scratch,
+                            Register extra);
+
+  enum Table {
+    kPrimary,
+    kSecondary
+  };
+
+ private:
+  friend class SCTableReference;
+  static const int kPrimaryTableSize = 2048;
+  static const int kSecondaryTableSize = 512;
+  static Entry primary_[];
+  static Entry secondary_[];
+
+  // Computes the hashed offsets for primary and secondary caches.
+  static int PrimaryOffset(String* name, Code::Flags flags, Map* map) {
+    // This works well because the heap object tag size and the hash
+    // shift are equal.  Shifting down the length field to get the
+    // hash code would effectively throw away two bits of the hash
+    // code.
+    ASSERT(kHeapObjectTagSize == String::kHashShift);
+    // Compute the hash of the name (use entire length field).
+    ASSERT(name->HasHashCode());
+    uint32_t field = name->length_field();
+    // Using only the low bits in 64-bit mode is unlikely to increase the
+    // risk of collision even if the heap is spread over an area larger than
+    // 4Gb (and not at all if it isn't).
+    uint32_t map_low32bits =
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
+    // We always set the in_loop bit to zero when generating the lookup code
+    // so do it here too so the hash codes match.
+    uint32_t iflags =
+        (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
+    // Base the offset on a simple combination of name, flags, and map.
+    uint32_t key = (map_low32bits + field) ^ iflags;
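+    // For illustration: with kPrimaryTableSize == 2048 the mask below keeps
+    // eleven bits of the key, aligned just above the heap object tag bits,
+    // so the result selects one of the 2048 primary entries (entry() below
+    // turns that offset into the actual table address).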
+    return key & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
+  }
+
+  static int SecondaryOffset(String* name, Code::Flags flags, int seed) {
+    // Use the seed from the primary cache in the secondary cache.
+    uint32_t string_low32bits =
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
+    // We always set the in_loop bit to zero when generating the lookup code
+    // so do it here too so the hash codes match.
+    uint32_t iflags =
+        (static_cast<uint32_t>(flags) & ~Code::kFlagsICInLoopMask);
+    uint32_t key = seed - string_low32bits + iflags;
+    return key & ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
+  }
+
+  // Compute the entry for a given offset in exactly the same way as
+  // we do in generated code.  We generate a hash code that already
+  // ends in String::kHashShift 0s.  Then we shift it so it is a multiple
+  // of sizeof(Entry).  This makes it easier to avoid making mistakes
+  // in the hashed offset computations.
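+  // (Concretely: offsets are multiples of 1 << String::kHashShift, and
+  // shifting left by kPointerSizeLog2 + 1 - String::kHashShift turns each
+  // step into 2 * kPointerSize bytes, i.e. sizeof(Entry): one String* key
+  // plus one Code* value.)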
+  static Entry* entry(Entry* table, int offset) {
+    const int shift_amount = kPointerSizeLog2 + 1 - String::kHashShift;
+    return reinterpret_cast<Entry*>(
+        reinterpret_cast<Address>(table) + (offset << shift_amount));
+  }
+};
+
+
+class SCTableReference {
+ public:
+  static SCTableReference keyReference(StubCache::Table table) {
+    return SCTableReference(
+        reinterpret_cast<Address>(&first_entry(table)->key));
+  }
+
+
+  static SCTableReference valueReference(StubCache::Table table) {
+    return SCTableReference(
+        reinterpret_cast<Address>(&first_entry(table)->value));
+  }
+
+  Address address() const { return address_; }
+
+ private:
+  explicit SCTableReference(Address address) : address_(address) {}
+
+  static StubCache::Entry* first_entry(StubCache::Table table) {
+    switch (table) {
+      case StubCache::kPrimary: return StubCache::primary_;
+      case StubCache::kSecondary: return StubCache::secondary_;
+    }
+    UNREACHABLE();
+    return NULL;
+  }
+
+  Address address_;
+};
+
+// ------------------------------------------------------------------------
+
+
+// Support functions for IC stubs for callbacks.
+Object* LoadCallbackProperty(Arguments args);
+Object* StoreCallbackProperty(Arguments args);
+
+
+// Support functions for IC stubs for interceptors.
+Object* LoadPropertyWithInterceptorOnly(Arguments args);
+Object* LoadPropertyWithInterceptorForLoad(Arguments args);
+Object* LoadPropertyWithInterceptorForCall(Arguments args);
+Object* StoreInterceptorProperty(Arguments args);
+Object* CallInterceptorProperty(Arguments args);
+
+
+// Support function for computing call IC miss stubs.
+Handle<Code> ComputeCallMiss(int argc);
+
+
+// The stub compiler compiles stubs for the stub cache.
+class StubCompiler BASE_EMBEDDED {
+ public:
+  enum CheckType {
+    RECEIVER_MAP_CHECK,
+    STRING_CHECK,
+    NUMBER_CHECK,
+    BOOLEAN_CHECK,
+    JSARRAY_HAS_FAST_ELEMENTS_CHECK
+  };
+
+  StubCompiler() : scope_(), masm_(NULL, 256), failure_(NULL) { }
+
+  Object* CompileCallInitialize(Code::Flags flags);
+  Object* CompileCallPreMonomorphic(Code::Flags flags);
+  Object* CompileCallNormal(Code::Flags flags);
+  Object* CompileCallMegamorphic(Code::Flags flags);
+  Object* CompileCallMiss(Code::Flags flags);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  Object* CompileCallDebugBreak(Code::Flags flags);
+  Object* CompileCallDebugPrepareStepIn(Code::Flags flags);
+#endif
+  Object* CompileLazyCompile(Code::Flags flags);
+
+  // Static functions for generating parts of stubs.
+  static void GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
+                                                  int index,
+                                                  Register prototype);
+  static void GenerateFastPropertyLoad(MacroAssembler* masm,
+                                       Register dst, Register src,
+                                       JSObject* holder, int index);
+
+  static void GenerateLoadArrayLength(MacroAssembler* masm,
+                                      Register receiver,
+                                      Register scratch,
+                                      Label* miss_label);
+  static void GenerateLoadStringLength(MacroAssembler* masm,
+                                       Register receiver,
+                                       Register scratch,
+                                       Label* miss_label);
+  static void GenerateLoadStringLength2(MacroAssembler* masm,
+                                        Register receiver,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Label* miss_label);
+  static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
+                                            Register receiver,
+                                            Register scratch1,
+                                            Register scratch2,
+                                            Label* miss_label);
+  static void GenerateStoreField(MacroAssembler* masm,
+                                 Builtins::Name storage_extend,
+                                 JSObject* object,
+                                 int index,
+                                 Map* transition,
+                                 Register receiver_reg,
+                                 Register name_reg,
+                                 Register scratch,
+                                 Label* miss_label);
+  static void GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind);
+
+  // Check the integrity of the prototype chain to make sure that the
+  // current IC is still valid.
+  Register CheckPrototypes(JSObject* object,
+                           Register object_reg,
+                           JSObject* holder,
+                           Register holder_reg,
+                           Register scratch,
+                           String* name,
+                           Label* miss);
+
+ protected:
+  Object* GetCodeWithFlags(Code::Flags flags, const char* name);
+  Object* GetCodeWithFlags(Code::Flags flags, String* name);
+
+  MacroAssembler* masm() { return &masm_; }
+  void set_failure(Failure* failure) { failure_ = failure; }
+
+  void GenerateLoadField(JSObject* object,
+                         JSObject* holder,
+                         Register receiver,
+                         Register scratch1,
+                         Register scratch2,
+                         int index,
+                         String* name,
+                         Label* miss);
+
+  void GenerateLoadCallback(JSObject* object,
+                            JSObject* holder,
+                            Register receiver,
+                            Register name_reg,
+                            Register scratch1,
+                            Register scratch2,
+                            AccessorInfo* callback,
+                            String* name,
+                            Label* miss);
+
+  void GenerateLoadConstant(JSObject* object,
+                            JSObject* holder,
+                            Register receiver,
+                            Register scratch1,
+                            Register scratch2,
+                            Object* value,
+                            String* name,
+                            Label* miss);
+
+  void GenerateLoadInterceptor(JSObject* object,
+                               JSObject* holder,
+                               LookupResult* lookup,
+                               Register receiver,
+                               Register name_reg,
+                               Register scratch1,
+                               Register scratch2,
+                               String* name,
+                               Label* miss);
+
+ private:
+  HandleScope scope_;
+  MacroAssembler masm_;
+  Failure* failure_;
+};
+
+
+class LoadStubCompiler: public StubCompiler {
+ public:
+  Object* CompileLoadField(JSObject* object,
+                           JSObject* holder,
+                           int index,
+                           String* name);
+  Object* CompileLoadCallback(JSObject* object,
+                              JSObject* holder,
+                              AccessorInfo* callback,
+                              String* name);
+  Object* CompileLoadConstant(JSObject* object,
+                              JSObject* holder,
+                              Object* value,
+                              String* name);
+  Object* CompileLoadInterceptor(JSObject* object,
+                                 JSObject* holder,
+                                 String* name);
+
+  Object* CompileLoadGlobal(JSObject* object,
+                            GlobalObject* holder,
+                            JSGlobalPropertyCell* cell,
+                            String* name,
+                            bool is_dont_delete);
+
+ private:
+  Object* GetCode(PropertyType type, String* name);
+};
+
+
+class KeyedLoadStubCompiler: public StubCompiler {
+ public:
+  Object* CompileLoadField(String* name,
+                           JSObject* object,
+                           JSObject* holder,
+                           int index);
+  Object* CompileLoadCallback(String* name,
+                              JSObject* object,
+                              JSObject* holder,
+                              AccessorInfo* callback);
+  Object* CompileLoadConstant(String* name,
+                              JSObject* object,
+                              JSObject* holder,
+                              Object* value);
+  Object* CompileLoadInterceptor(JSObject* object,
+                                 JSObject* holder,
+                                 String* name);
+  Object* CompileLoadArrayLength(String* name);
+  Object* CompileLoadStringLength(String* name);
+  Object* CompileLoadFunctionPrototype(String* name);
+
+ private:
+  Object* GetCode(PropertyType type, String* name);
+};
+
+
+class StoreStubCompiler: public StubCompiler {
+ public:
+  Object* CompileStoreField(JSObject* object,
+                            int index,
+                            Map* transition,
+                            String* name);
+  Object* CompileStoreCallback(JSObject* object,
+                               AccessorInfo* callbacks,
+                               String* name);
+  Object* CompileStoreInterceptor(JSObject* object, String* name);
+  Object* CompileStoreGlobal(GlobalObject* object,
+                             JSGlobalPropertyCell* holder,
+                             String* name);
+
+
+ private:
+  Object* GetCode(PropertyType type, String* name);
+};
+
+
+class KeyedStoreStubCompiler: public StubCompiler {
+ public:
+  Object* CompileStoreField(JSObject* object,
+                            int index,
+                            Map* transition,
+                            String* name);
+
+ private:
+  Object* GetCode(PropertyType type, String* name);
+};
+
+
+class CallStubCompiler: public StubCompiler {
+ public:
+  explicit CallStubCompiler(int argc, InLoopFlag in_loop)
+      : arguments_(argc), in_loop_(in_loop) { }
+
+  Object* CompileCallField(Object* object,
+                           JSObject* holder,
+                           int index,
+                           String* name);
+  Object* CompileCallConstant(Object* object,
+                              JSObject* holder,
+                              JSFunction* function,
+                              String* name,
+                              CheckType check);
+  Object* CompileCallInterceptor(Object* object,
+                                 JSObject* holder,
+                                 String* name);
+  Object* CompileCallGlobal(JSObject* object,
+                            GlobalObject* holder,
+                            JSGlobalPropertyCell* cell,
+                            JSFunction* function,
+                            String* name);
+
+ private:
+  const ParameterCount arguments_;
+  const InLoopFlag in_loop_;
+
+  const ParameterCount& arguments() { return arguments_; }
+
+  Object* GetCode(PropertyType type, String* name);
+};
+
+
+class ConstructStubCompiler: public StubCompiler {
+ public:
+  explicit ConstructStubCompiler() {}
+
+  Object* CompileConstructStub(SharedFunctionInfo* shared);
+
+ private:
+  Object* GetCode();
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_STUB_CACHE_H_
diff --git a/src/third_party/dtoa/COPYING b/src/third_party/dtoa/COPYING
new file mode 100644
index 0000000..c991754
--- /dev/null
+++ b/src/third_party/dtoa/COPYING
@@ -0,0 +1,15 @@
+The author of this software is David M. Gay.
+
+Copyright (c) 1991, 2000, 2001 by Lucent Technologies.
+
+Permission to use, copy, modify, and distribute this software for any
+purpose without fee is hereby granted, provided that this entire
+notice is included in all copies of any software which is or includes
+a copy or modification of this software and in all copies of the
+supporting documentation for such software.
+
+THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+IMPLIED WARRANTY.  IN PARTICULAR, NEITHER THE AUTHOR NOR LUCENT MAKES
+ANY REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
+MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
+PURPOSE.
diff --git a/src/third_party/dtoa/dtoa.c b/src/third_party/dtoa/dtoa.c
new file mode 100644
index 0000000..8917d9d
--- /dev/null
+++ b/src/third_party/dtoa/dtoa.c
@@ -0,0 +1,3330 @@
+/****************************************************************
+ *
+ * The author of this software is David M. Gay.
+ *
+ * Copyright (c) 1991, 2000, 2001 by Lucent Technologies.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose without fee is hereby granted, provided that this entire notice
+ * is included in all copies of any software which is or includes a copy
+ * or modification of this software and in all copies of the supporting
+ * documentation for such software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED
+ * WARRANTY.  IN PARTICULAR, NEITHER THE AUTHOR NOR LUCENT MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY
+ * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE.
+ *
+ ***************************************************************/
+
+/* Please send bug reports to David M. Gay (dmg at acm dot org,
+ * with " at " changed at "@" and " dot " changed to ".").	*/
+
+/* On a machine with IEEE extended-precision registers, it is
+ * necessary to specify double-precision (53-bit) rounding precision
+ * before invoking strtod or dtoa.  If the machine uses (the equivalent
+ * of) Intel 80x87 arithmetic, the call
+ *	_control87(PC_53, MCW_PC);
+ * does this with many compilers.  Whether this or another call is
+ * appropriate depends on the compiler; for this to work, it may be
+ * necessary to #include "float.h" or another system-dependent header
+ * file.
+ */
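+
+/* Editorial illustration (not part of the original dtoa.c): a minimal
+ * sketch of the advice above, assuming an x87 host whose float.h
+ * provides the MSVC-style _control87 interface.
+ *
+ *	#include <float.h>
+ *
+ *	unsigned int old_cw = _control87(PC_53, MCW_PC);
+ *	double d = strtod("0.1", NULL);
+ *	_control87(old_cw, MCW_PC);
+ *
+ * The last call restores the previously active precision setting.
+ */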
+
+/* strtod for IEEE-, VAX-, and IBM-arithmetic machines.
+ *
+ * This strtod returns a nearest machine number to the input decimal
+ * string (or sets errno to ERANGE).  With IEEE arithmetic, ties are
+ * broken by the IEEE round-even rule.  Otherwise ties are broken by
+ * biased rounding (add half and chop).
+ *
+ * Inspired loosely by William D. Clinger's paper "How to Read Floating
+ * Point Numbers Accurately" [Proc. ACM SIGPLAN '90, pp. 92-101].
+ *
+ * Modifications:
+ *
+ *	1. We only require IEEE, IBM, or VAX double-precision
+ *		arithmetic (not IEEE double-extended).
+ *	2. We get by with floating-point arithmetic in a case that
+ *		Clinger missed -- when we're computing d * 10^n
+ *		for a small integer d and the integer n is not too
+ *		much larger than 22 (the maximum integer k for which
+ *		we can represent 10^k exactly), we may be able to
+ *		compute (d*10^k) * 10^(e-k) with just one roundoff.
+ *	3. Rather than a bit-at-a-time adjustment of the binary
+ *		result in the hard case, we use floating-point
+ *		arithmetic to determine the adjustment to within
+ *		one bit; only in really hard cases do we need to
+ *		compute a second residual.
+ *	4. Because of 3., we don't need a large table of powers of 10
+ *		for ten-to-e (just some small tables, e.g. of 10^k
+ *		for 0 <= k <= 22).
+ */
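+
+/* Editorial illustration (not part of the original dtoa.c): a worked
+ * example of modification 2 above.  For "1234.5678e10" the digits are
+ * 12345678 and the effective decimal exponent is 10 - 4 = 6.  Both
+ * 12345678.0 and 1e6 are exactly representable doubles, so
+ *
+ *	double rv = 12345678.0 * 1e6;
+ *
+ * involves only a single rounding and therefore yields the correctly
+ * rounded result.  strtod below takes this fast path when, e.g., the
+ * digit count is at most DBL_DIG (15) and 0 < e <= Ten_pmax (22).
+ */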
+
+/*
+ * #define IEEE_8087 for IEEE-arithmetic machines where the least
+ *	significant byte has the lowest address.
+ * #define IEEE_MC68k for IEEE-arithmetic machines where the most
+ *	significant byte has the lowest address.
+ * #define Long int on machines with 32-bit ints and 64-bit longs.
+ * #define IBM for IBM mainframe-style floating-point arithmetic.
+ * #define VAX for VAX-style floating-point arithmetic (D_floating).
+ * #define No_leftright to omit left-right logic in fast floating-point
+ *	computation of dtoa.
+ * #define Honor_FLT_ROUNDS if FLT_ROUNDS can assume the values 2 or 3
+ *	and strtod and dtoa should round accordingly.
+ * #define Check_FLT_ROUNDS if FLT_ROUNDS can assume the values 2 or 3
+ *	and Honor_FLT_ROUNDS is not #defined.
+ * #define RND_PRODQUOT to use rnd_prod and rnd_quot (assembly routines
+ *	that use extended-precision instructions to compute rounded
+ *	products and quotients) with IBM.
+ * #define ROUND_BIASED for IEEE-format with biased rounding.
+ * #define Inaccurate_Divide for IEEE-format with correctly rounded
+ *	products but inaccurate quotients, e.g., for Intel i860.
+ * #define NO_LONG_LONG on machines that do not have a "long long"
+ *	integer type (of >= 64 bits).  On such machines, you can
+ *	#define Just_16 to store 16 bits per 32-bit Long when doing
+ *	high-precision integer arithmetic.  Whether this speeds things
+ *	up or slows things down depends on the machine and the number
+ *	being converted.  If long long is available and the name is
+ *	something other than "long long", #define Llong to be the name,
+ *	and if "unsigned Llong" does not work as an unsigned version of
+ *	Llong, #define ULLong to be the corresponding unsigned type.
+ * #define KR_headers for old-style C function headers.
+ * #define Bad_float_h if your system lacks a float.h or if it does not
+ *	define some or all of DBL_DIG, DBL_MAX_10_EXP, DBL_MAX_EXP,
+ *	FLT_RADIX, FLT_ROUNDS, and DBL_MAX.
+ * #define MALLOC your_malloc, where your_malloc(n) acts like malloc(n)
+ *	if memory is available and otherwise does something you deem
+ *	appropriate.  If MALLOC is undefined, malloc will be invoked
+ *	directly -- and assumed always to succeed.
+ * #define Omit_Private_Memory to omit logic (added Jan. 1998) for making
+ *	memory allocations from a private pool of memory when possible.
+ *	When used, the private pool is PRIVATE_MEM bytes long:  2304 bytes,
+ *	unless #defined to be a different length.  This default length
+ *	suffices to get rid of MALLOC calls except for unusual cases,
+ *	such as decimal-to-binary conversion of a very long string of
+ *	digits.  The longest string dtoa can return is about 751 bytes
+ *	long.  For conversions by strtod of strings of 800 digits and
+ *	all dtoa conversions in single-threaded executions with 8-byte
+ *	pointers, PRIVATE_MEM >= 7400 appears to suffice; with 4-byte
+ *	pointers, PRIVATE_MEM >= 7112 appears adequate.
+ * #define INFNAN_CHECK on IEEE systems to cause strtod to check for
+ *	Infinity and NaN (case insensitively).  On some systems (e.g.,
+ *	some HP systems), it may be necessary to #define NAN_WORD0
+ *	appropriately -- to the most significant word of a quiet NaN.
+ *	(On HP Series 700/800 machines, -DNAN_WORD0=0x7ff40000 works.)
+ *	When INFNAN_CHECK is #defined and No_Hex_NaN is not #defined,
+ *	strtod also accepts (case insensitively) strings of the form
+ *	NaN(x), where x is a string of hexadecimal digits and spaces;
+ *	if there is only one string of hexadecimal digits, it is taken
+ *	for the 52 fraction bits of the resulting NaN; if there are two
+ *	or more strings of hex digits, the first is for the high 20 bits,
+ *	the second and subsequent for the low 32 bits, with intervening
+ *	white space ignored; but if this results in none of the 52
+ *	fraction bits being on (an IEEE Infinity symbol), then NAN_WORD0
+ *	and NAN_WORD1 are used instead.
+ * #define MULTIPLE_THREADS if the system offers preemptively scheduled
+ *	multiple threads.  In this case, you must provide (or suitably
+ *	#define) two locks, acquired by ACQUIRE_DTOA_LOCK(n) and freed
+ *	by FREE_DTOA_LOCK(n) for n = 0 or 1.  (The second lock, accessed
+ *	in pow5mult, ensures lazy evaluation of only one copy of high
+ *	powers of 5; omitting this lock would introduce a small
+ *	probability of wasting memory, but would otherwise be harmless.)
+ *	You must also invoke freedtoa(s) to free the value s returned by
+ *	dtoa.  You may do so whether or not MULTIPLE_THREADS is #defined.
+ * #define NO_IEEE_Scale to disable new (Feb. 1997) logic in strtod that
+ *	avoids underflows on inputs whose result does not underflow.
+ *	If you #define NO_IEEE_Scale on a machine that uses IEEE-format
+ *	floating-point numbers and flushes underflows to zero rather
+ *	than implementing gradual underflow, then you must also #define
+ *	Sudden_Underflow.
+ * #define YES_ALIAS to permit aliasing certain double values with
+ *	arrays of ULongs.  This leads to slightly better code with
+ *	some compilers and was always used prior to 19990916, but it
+ *	is not strictly legal and can cause trouble with aggressively
+ *	optimizing compilers (e.g., gcc 2.95.1 under -O2).
+ * #define USE_LOCALE to use the current locale's decimal_point value.
+ * #define SET_INEXACT if IEEE arithmetic is being used and extra
+ *	computation should be done to set the inexact flag when the
+ *	result is inexact and avoid setting inexact when the result
+ *	is exact.  In this case, dtoa.c must be compiled in
+ *	an environment, perhaps provided by #include "dtoa.c" in a
+ *	suitable wrapper, that defines two functions,
+ *		int get_inexact(void);
+ *		void clear_inexact(void);
+ *	such that get_inexact() returns a nonzero value if the
+ *	inexact bit is already set, and clear_inexact() sets the
+ *	inexact bit to 0.  When SET_INEXACT is #defined, strtod
+ *	also does extra computations to set the underflow and overflow
+ *	flags when appropriate (i.e., when the result is tiny and
+ *	inexact or when it is a numeric value rounded to +-infinity).
+ * #define NO_ERRNO if strtod should not assign errno = ERANGE when
+ *	the result overflows to +-Infinity or underflows to 0.
+ */
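+
+/* Editorial illustration (not part of the original dtoa.c): one
+ * plausible way to select the options above from a wrapper file,
+ * assuming a little-endian IEEE-754 host with preemptive threads.
+ * The two lock functions named here are hypothetical placeholders
+ * that a real wrapper would have to supply.
+ *
+ *	#define IEEE_8087
+ *	#define MULTIPLE_THREADS
+ *	#define ACQUIRE_DTOA_LOCK(n)	my_dtoa_lock(n)
+ *	#define FREE_DTOA_LOCK(n)	my_dtoa_unlock(n)
+ *	#include "dtoa.c"
+ */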
+
+#ifndef Long
+#define Long long
+#endif
+#ifndef ULong
+typedef unsigned Long ULong;
+#endif
+
+#ifdef DEBUG
+#include "stdio.h"
+#define Bug(x) {fprintf(stderr, "%s\n", x); exit(1);}
+#endif
+
+#include "stdlib.h"
+#include "string.h"
+
+#ifdef USE_LOCALE
+#include "locale.h"
+#endif
+
+#ifdef MALLOC
+#ifdef KR_headers
+extern char *MALLOC();
+#else
+extern void *MALLOC(size_t);
+#endif
+#else
+#define MALLOC malloc
+#endif
+
+#ifndef Omit_Private_Memory
+#ifndef PRIVATE_MEM
+#define PRIVATE_MEM 2304
+#endif
+#define PRIVATE_mem ((PRIVATE_MEM+sizeof(double)-1)/sizeof(double))
+static double private_mem[PRIVATE_mem], *pmem_next = private_mem;
+#endif
+
+#undef IEEE_Arith
+#undef Avoid_Underflow
+#ifdef IEEE_MC68k
+#define IEEE_Arith
+#endif
+#ifdef IEEE_8087
+#define IEEE_Arith
+#endif
+
+#include "errno.h"
+
+#ifdef Bad_float_h
+
+#ifdef IEEE_Arith
+#define DBL_DIG 15
+#define DBL_MAX_10_EXP 308
+#define DBL_MAX_EXP 1024
+#define FLT_RADIX 2
+#endif /*IEEE_Arith*/
+
+#ifdef IBM
+#define DBL_DIG 16
+#define DBL_MAX_10_EXP 75
+#define DBL_MAX_EXP 63
+#define FLT_RADIX 16
+#define DBL_MAX 7.2370055773322621e+75
+#endif
+
+#ifdef VAX
+#define DBL_DIG 16
+#define DBL_MAX_10_EXP 38
+#define DBL_MAX_EXP 127
+#define FLT_RADIX 2
+#define DBL_MAX 1.7014118346046923e+38
+#endif
+
+#ifndef LONG_MAX
+#define LONG_MAX 2147483647
+#endif
+
+#else /* ifndef Bad_float_h */
+#include "float.h"
+#endif /* Bad_float_h */
+
+#ifndef __MATH_H__
+#include "math.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef CONST
+#ifdef KR_headers
+#define CONST /* blank */
+#else
+#define CONST const
+#endif
+#endif
+
+#if defined(IEEE_8087) + defined(IEEE_MC68k) + defined(VAX) + defined(IBM) != 1
+Exactly one of IEEE_8087, IEEE_MC68k, VAX, or IBM should be defined.
+#endif
+
+typedef union { double d; ULong L[2]; } U;
+
+#ifdef YES_ALIAS
+#define dval(x) x
+#ifdef IEEE_8087
+#define word0(x) ((ULong *)&x)[1]
+#define word1(x) ((ULong *)&x)[0]
+#else
+#define word0(x) ((ULong *)&x)[0]
+#define word1(x) ((ULong *)&x)[1]
+#endif
+#else
+#ifdef IEEE_8087
+#define word0(x) ((U*)&x)->L[1]
+#define word1(x) ((U*)&x)->L[0]
+#else
+#define word0(x) ((U*)&x)->L[0]
+#define word1(x) ((U*)&x)->L[1]
+#endif
+#define dval(x) ((U*)&x)->d
+#endif
+
+/* The following definition of Storeinc is appropriate for MIPS processors.
+ * An alternative that might be better on some machines is
+ * #define Storeinc(a,b,c) (*a++ = b << 16 | c & 0xffff)
+ */
+#if defined(IEEE_8087) + defined(VAX)
+#define Storeinc(a,b,c) (((unsigned short *)a)[1] = (unsigned short)b, \
+((unsigned short *)a)[0] = (unsigned short)c, a++)
+#else
+#define Storeinc(a,b,c) (((unsigned short *)a)[0] = (unsigned short)b, \
+((unsigned short *)a)[1] = (unsigned short)c, a++)
+#endif
+
+/* #define P DBL_MANT_DIG */
+/* Ten_pmax = floor(P*log(2)/log(5)) */
+/* Bletch = (highest power of 2 < DBL_MAX_10_EXP) / 16 */
+/* Quick_max = floor((P-1)*log(FLT_RADIX)/log(10) - 1) */
+/* Int_max = floor(P*log(FLT_RADIX)/log(10) - 1) */
+
+#ifdef IEEE_Arith
+#define Exp_shift  20
+#define Exp_shift1 20
+#define Exp_msk1    0x100000
+#define Exp_msk11   0x100000
+#define Exp_mask  0x7ff00000
+#define P 53
+#define Bias 1023
+#define Emin (-1022)
+#define Exp_1  0x3ff00000
+#define Exp_11 0x3ff00000
+#define Ebits 11
+#define Frac_mask  0xfffff
+#define Frac_mask1 0xfffff
+#define Ten_pmax 22
+#define Bletch 0x10
+#define Bndry_mask  0xfffff
+#define Bndry_mask1 0xfffff
+#define LSB 1
+#define Sign_bit 0x80000000
+#define Log2P 1
+#define Tiny0 0
+#define Tiny1 1
+#define Quick_max 14
+#define Int_max 14
+#ifndef NO_IEEE_Scale
+#define Avoid_Underflow
+#ifdef Flush_Denorm	/* debugging option */
+#undef Sudden_Underflow
+#endif
+#endif
+
+#ifndef Flt_Rounds
+#ifdef FLT_ROUNDS
+#define Flt_Rounds FLT_ROUNDS
+#else
+#define Flt_Rounds 1
+#endif
+#endif /*Flt_Rounds*/
+
+#ifdef Honor_FLT_ROUNDS
+#define Rounding rounding
+#undef Check_FLT_ROUNDS
+#define Check_FLT_ROUNDS
+#else
+#define Rounding Flt_Rounds
+#endif
+
+#else /* ifndef IEEE_Arith */
+#undef Check_FLT_ROUNDS
+#undef Honor_FLT_ROUNDS
+#undef SET_INEXACT
+#undef  Sudden_Underflow
+#define Sudden_Underflow
+#ifdef IBM
+#undef Flt_Rounds
+#define Flt_Rounds 0
+#define Exp_shift  24
+#define Exp_shift1 24
+#define Exp_msk1   0x1000000
+#define Exp_msk11  0x1000000
+#define Exp_mask  0x7f000000
+#define P 14
+#define Bias 65
+#define Exp_1  0x41000000
+#define Exp_11 0x41000000
+#define Ebits 8	/* exponent has 7 bits, but 8 is the right value in b2d */
+#define Frac_mask  0xffffff
+#define Frac_mask1 0xffffff
+#define Bletch 4
+#define Ten_pmax 22
+#define Bndry_mask  0xefffff
+#define Bndry_mask1 0xffffff
+#define LSB 1
+#define Sign_bit 0x80000000
+#define Log2P 4
+#define Tiny0 0x100000
+#define Tiny1 0
+#define Quick_max 14
+#define Int_max 15
+#else /* VAX */
+#undef Flt_Rounds
+#define Flt_Rounds 1
+#define Exp_shift  23
+#define Exp_shift1 7
+#define Exp_msk1    0x80
+#define Exp_msk11   0x800000
+#define Exp_mask  0x7f80
+#define P 56
+#define Bias 129
+#define Exp_1  0x40800000
+#define Exp_11 0x4080
+#define Ebits 8
+#define Frac_mask  0x7fffff
+#define Frac_mask1 0xffff007f
+#define Ten_pmax 24
+#define Bletch 2
+#define Bndry_mask  0xffff007f
+#define Bndry_mask1 0xffff007f
+#define LSB 0x10000
+#define Sign_bit 0x8000
+#define Log2P 1
+#define Tiny0 0x80
+#define Tiny1 0
+#define Quick_max 15
+#define Int_max 15
+#endif /* IBM, VAX */
+#endif /* IEEE_Arith */
+
+#ifndef IEEE_Arith
+#define ROUND_BIASED
+#endif
+
+#ifdef RND_PRODQUOT
+#define rounded_product(a,b) a = rnd_prod(a, b)
+#define rounded_quotient(a,b) a = rnd_quot(a, b)
+#ifdef KR_headers
+extern double rnd_prod(), rnd_quot();
+#else
+extern double rnd_prod(double, double), rnd_quot(double, double);
+#endif
+#else
+#define rounded_product(a,b) a *= b
+#define rounded_quotient(a,b) a /= b
+#endif
+
+#define Big0 (Frac_mask1 | Exp_msk1*(DBL_MAX_EXP+Bias-1))
+#define Big1 0xffffffff
+
+#ifndef Pack_32
+#define Pack_32
+#endif
+
+#ifdef KR_headers
+#define FFFFFFFF ((((unsigned long)0xffff)<<16)|(unsigned long)0xffff)
+#else
+#define FFFFFFFF 0xffffffffUL
+#endif
+
+#ifdef NO_LONG_LONG
+#undef ULLong
+#ifdef Just_16
+#undef Pack_32
+/* When Pack_32 is not defined, we store 16 bits per 32-bit Long.
+ * This makes some inner loops simpler and sometimes saves work
+ * during multiplications, but it often seems to make things slightly
+ * slower.  Hence the default is now to store 32 bits per Long.
+ */
+#endif
+#else	/* long long available */
+#ifndef Llong
+#define Llong long long
+#endif
+#ifndef ULLong
+#define ULLong unsigned Llong
+#endif
+#endif /* NO_LONG_LONG */
+
+#ifndef MULTIPLE_THREADS
+#define ACQUIRE_DTOA_LOCK(n)	/*nothing*/
+#define FREE_DTOA_LOCK(n)	/*nothing*/
+#endif
+
+#define Kmax 15
+
+#ifdef __cplusplus
+extern "C" double strtod(const char *s00, char **se);
+extern "C" char *dtoa(double d, int mode, int ndigits,
+			int *decpt, int *sign, char **rve);
+#endif
+
+ struct
+Bigint {
+	struct Bigint *next;
+	int k, maxwds, sign, wds;
+	ULong x[1];
+	};
+
+ typedef struct Bigint Bigint;
+
+ static Bigint *freelist[Kmax+1];
+
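+/* Allocate a Bigint with room for 2^k words, reusing an entry from
+ * freelist[k] (or the private memory pool) when possible. */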
+ static Bigint *
+Balloc
+#ifdef KR_headers
+	(k) int k;
+#else
+	(int k)
+#endif
+{
+	int x;
+	Bigint *rv;
+#ifndef Omit_Private_Memory
+	unsigned int len;
+#endif
+
+	ACQUIRE_DTOA_LOCK(0);
+        /* The k > Kmax case does not need ACQUIRE_DTOA_LOCK(0), */
+        /* but this case seems very unlikely. */
+	if (k <= Kmax && (rv = freelist[k])) {
+		freelist[k] = rv->next;
+		}
+	else {
+		x = 1 << k;
+#ifdef Omit_Private_Memory
+		rv = (Bigint *)MALLOC(sizeof(Bigint) + (x-1)*sizeof(ULong));
+#else
+		len = (sizeof(Bigint) + (x-1)*sizeof(ULong) + sizeof(double) - 1)
+			/sizeof(double);
+		if (k <= Kmax && pmem_next - private_mem + len <= PRIVATE_mem) {
+			rv = (Bigint*)pmem_next;
+			pmem_next += len;
+			}
+		else
+			rv = (Bigint*)MALLOC(len*sizeof(double));
+#endif
+		rv->k = k;
+		rv->maxwds = x;
+		}
+	FREE_DTOA_LOCK(0);
+	rv->sign = rv->wds = 0;
+	return rv;
+	}
+
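+/* Release a Bigint: return it to freelist[v->k], or free() it if its
+ * k exceeds Kmax. */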
+ static void
+Bfree
+#ifdef KR_headers
+	(v) Bigint *v;
+#else
+	(Bigint *v)
+#endif
+{
+	if (v) {
+                if (v->k > Kmax)
+                        free((void*)v);
+                else {
+         		ACQUIRE_DTOA_LOCK(0);
+         		v->next = freelist[v->k];
+        		freelist[v->k] = v;
+        		FREE_DTOA_LOCK(0);
+                        }
+		}
+	}
+
+#define Bcopy(x,y) memcpy((char *)&x->sign, (char *)&y->sign, \
+y->wds*sizeof(Long) + 2*sizeof(int))
+
+ static Bigint *
+multadd
+#ifdef KR_headers
+	(b, m, a) Bigint *b; int m, a;
+#else
+	(Bigint *b, int m, int a)	/* multiply by m and add a */
+#endif
+{
+	int i, wds;
+#ifdef ULLong
+	ULong *x;
+	ULLong carry, y;
+#else
+	ULong carry, *x, y;
+#ifdef Pack_32
+	ULong xi, z;
+#endif
+#endif
+	Bigint *b1;
+
+	wds = b->wds;
+	x = b->x;
+	i = 0;
+	carry = a;
+	do {
+#ifdef ULLong
+		y = *x * (ULLong)m + carry;
+		carry = y >> 32;
+		*x++ = y & FFFFFFFF;
+#else
+#ifdef Pack_32
+		xi = *x;
+		y = (xi & 0xffff) * m + carry;
+		z = (xi >> 16) * m + (y >> 16);
+		carry = z >> 16;
+		*x++ = (z << 16) + (y & 0xffff);
+#else
+		y = *x * m + carry;
+		carry = y >> 16;
+		*x++ = y & 0xffff;
+#endif
+#endif
+		}
+		while(++i < wds);
+	if (carry) {
+		if (wds >= b->maxwds) {
+			b1 = Balloc(b->k+1);
+			Bcopy(b1, b);
+			Bfree(b);
+			b = b1;
+			}
+		b->x[wds++] = carry;
+		b->wds = wds;
+		}
+	return b;
+	}
+
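+/* Convert the decimal digit string s (nd digits, nd0 of them before
+ * the decimal point) to a Bigint; y9 is the value of the first nine
+ * (or fewer) digits, already accumulated by the caller. */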
+ static Bigint *
+s2b
+#ifdef KR_headers
+	(s, nd0, nd, y9) CONST char *s; int nd0, nd; ULong y9;
+#else
+	(CONST char *s, int nd0, int nd, ULong y9)
+#endif
+{
+	Bigint *b;
+	int i, k;
+	Long x, y;
+
+	x = (nd + 8) / 9;
+	for(k = 0, y = 1; x > y; y <<= 1, k++) ;
+#ifdef Pack_32
+	b = Balloc(k);
+	b->x[0] = y9;
+	b->wds = 1;
+#else
+	b = Balloc(k+1);
+	b->x[0] = y9 & 0xffff;
+	b->wds = (b->x[1] = y9 >> 16) ? 2 : 1;
+#endif
+
+	i = 9;
+	if (9 < nd0) {
+		s += 9;
+		do b = multadd(b, 10, *s++ - '0');
+			while(++i < nd0);
+		s++;
+		}
+	else
+		s += 10;
+	for(; i < nd; i++)
+		b = multadd(b, 10, *s++ - '0');
+	return b;
+	}
+
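+/* Return the number of leading (high-order) zero bits in x. */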
+ static int
+hi0bits
+#ifdef KR_headers
+	(x) register ULong x;
+#else
+	(register ULong x)
+#endif
+{
+	register int k = 0;
+
+	if (!(x & 0xffff0000)) {
+		k = 16;
+		x <<= 16;
+		}
+	if (!(x & 0xff000000)) {
+		k += 8;
+		x <<= 8;
+		}
+	if (!(x & 0xf0000000)) {
+		k += 4;
+		x <<= 4;
+		}
+	if (!(x & 0xc0000000)) {
+		k += 2;
+		x <<= 2;
+		}
+	if (!(x & 0x80000000)) {
+		k++;
+		if (!(x & 0x40000000))
+			return 32;
+		}
+	return k;
+	}
+
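+/* Count the trailing zero bits of *y, shift them out of *y, and
+ * return the count. */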
+ static int
+lo0bits
+#ifdef KR_headers
+	(y) ULong *y;
+#else
+	(ULong *y)
+#endif
+{
+	register int k;
+	register ULong x = *y;
+
+	if (x & 7) {
+		if (x & 1)
+			return 0;
+		if (x & 2) {
+			*y = x >> 1;
+			return 1;
+			}
+		*y = x >> 2;
+		return 2;
+		}
+	k = 0;
+	if (!(x & 0xffff)) {
+		k = 16;
+		x >>= 16;
+		}
+	if (!(x & 0xff)) {
+		k += 8;
+		x >>= 8;
+		}
+	if (!(x & 0xf)) {
+		k += 4;
+		x >>= 4;
+		}
+	if (!(x & 0x3)) {
+		k += 2;
+		x >>= 2;
+		}
+	if (!(x & 1)) {
+		k++;
+		x >>= 1;
+		if (!x)
+			return 32;
+		}
+	*y = x;
+	return k;
+	}
+
+ static Bigint *
+i2b
+#ifdef KR_headers
+	(i) int i;
+#else
+	(int i)
+#endif
+{
+	Bigint *b;
+
+	b = Balloc(1);
+	b->x[0] = i;
+	b->wds = 1;
+	return b;
+	}
+
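+/* Return a newly allocated Bigint holding the product a*b. */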
+ static Bigint *
+mult
+#ifdef KR_headers
+	(a, b) Bigint *a, *b;
+#else
+	(Bigint *a, Bigint *b)
+#endif
+{
+	Bigint *c;
+	int k, wa, wb, wc;
+	ULong *x, *xa, *xae, *xb, *xbe, *xc, *xc0;
+	ULong y;
+#ifdef ULLong
+	ULLong carry, z;
+#else
+	ULong carry, z;
+#ifdef Pack_32
+	ULong z2;
+#endif
+#endif
+
+	if (a->wds < b->wds) {
+		c = a;
+		a = b;
+		b = c;
+		}
+	k = a->k;
+	wa = a->wds;
+	wb = b->wds;
+	wc = wa + wb;
+	if (wc > a->maxwds)
+		k++;
+	c = Balloc(k);
+	for(x = c->x, xa = x + wc; x < xa; x++)
+		*x = 0;
+	xa = a->x;
+	xae = xa + wa;
+	xb = b->x;
+	xbe = xb + wb;
+	xc0 = c->x;
+#ifdef ULLong
+	for(; xb < xbe; xc0++) {
+		if ((y = *xb++)) {
+			x = xa;
+			xc = xc0;
+			carry = 0;
+			do {
+				z = *x++ * (ULLong)y + *xc + carry;
+				carry = z >> 32;
+				*xc++ = z & FFFFFFFF;
+				}
+				while(x < xae);
+			*xc = carry;
+			}
+		}
+#else
+#ifdef Pack_32
+	for(; xb < xbe; xb++, xc0++) {
+		if (y = *xb & 0xffff) {
+			x = xa;
+			xc = xc0;
+			carry = 0;
+			do {
+				z = (*x & 0xffff) * y + (*xc & 0xffff) + carry;
+				carry = z >> 16;
+				z2 = (*x++ >> 16) * y + (*xc >> 16) + carry;
+				carry = z2 >> 16;
+				Storeinc(xc, z2, z);
+				}
+				while(x < xae);
+			*xc = carry;
+			}
+		if (y = *xb >> 16) {
+			x = xa;
+			xc = xc0;
+			carry = 0;
+			z2 = *xc;
+			do {
+				z = (*x & 0xffff) * y + (*xc >> 16) + carry;
+				carry = z >> 16;
+				Storeinc(xc, z, z2);
+				z2 = (*x++ >> 16) * y + (*xc & 0xffff) + carry;
+				carry = z2 >> 16;
+				}
+				while(x < xae);
+			*xc = z2;
+			}
+		}
+#else
+	for(; xb < xbe; xc0++) {
+		if (y = *xb++) {
+			x = xa;
+			xc = xc0;
+			carry = 0;
+			do {
+				z = *x++ * y + *xc + carry;
+				carry = z >> 16;
+				*xc++ = z & 0xffff;
+				}
+				while(x < xae);
+			*xc = carry;
+			}
+		}
+#endif
+#endif
+	for(xc0 = c->x, xc = xc0 + wc; wc > 0 && !*--xc; --wc) ;
+	c->wds = wc;
+	return c;
+	}
+
+ static Bigint *p5s;
+
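+/* Return b * 5^k, caching successive squarings of 5^4 in the shared
+ * p5s list (see the MULTIPLE_THREADS notes above). */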
+ static Bigint *
+pow5mult
+#ifdef KR_headers
+	(b, k) Bigint *b; int k;
+#else
+	(Bigint *b, int k)
+#endif
+{
+	Bigint *b1, *p5, *p51;
+	int i;
+	static int p05[3] = { 5, 25, 125 };
+
+	if ((i = k & 3))
+		b = multadd(b, p05[i-1], 0);
+
+	if (!(k >>= 2))
+		return b;
+	if (!(p5 = p5s)) {
+		/* first time */
+#ifdef MULTIPLE_THREADS
+		ACQUIRE_DTOA_LOCK(1);
+		if (!(p5 = p5s)) {
+			p5 = p5s = i2b(625);
+			p5->next = 0;
+			}
+		FREE_DTOA_LOCK(1);
+#else
+		p5 = p5s = i2b(625);
+		p5->next = 0;
+#endif
+		}
+	for(;;) {
+		if (k & 1) {
+			b1 = mult(b, p5);
+			Bfree(b);
+			b = b1;
+			}
+		if (!(k >>= 1))
+			break;
+		if (!(p51 = p5->next)) {
+#ifdef MULTIPLE_THREADS
+			ACQUIRE_DTOA_LOCK(1);
+			if (!(p51 = p5->next)) {
+				p51 = p5->next = mult(p5,p5);
+				p51->next = 0;
+				}
+			FREE_DTOA_LOCK(1);
+#else
+			p51 = p5->next = mult(p5,p5);
+			p51->next = 0;
+#endif
+			}
+		p5 = p51;
+		}
+	return b;
+	}
+
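+/* Return b shifted left by k bits; b itself is freed. */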
+ static Bigint *
+lshift
+#ifdef KR_headers
+	(b, k) Bigint *b; int k;
+#else
+	(Bigint *b, int k)
+#endif
+{
+	int i, k1, n, n1;
+	Bigint *b1;
+	ULong *x, *x1, *xe, z;
+
+#ifdef Pack_32
+	n = k >> 5;
+#else
+	n = k >> 4;
+#endif
+	k1 = b->k;
+	n1 = n + b->wds + 1;
+	for(i = b->maxwds; n1 > i; i <<= 1)
+		k1++;
+	b1 = Balloc(k1);
+	x1 = b1->x;
+	for(i = 0; i < n; i++)
+		*x1++ = 0;
+	x = b->x;
+	xe = x + b->wds;
+#ifdef Pack_32
+	if (k &= 0x1f) {
+		k1 = 32 - k;
+		z = 0;
+		do {
+			*x1++ = *x << k | z;
+			z = *x++ >> k1;
+			}
+			while(x < xe);
+		if ((*x1 = z))
+			++n1;
+		}
+#else
+	if (k &= 0xf) {
+		k1 = 16 - k;
+		z = 0;
+		do {
+			*x1++ = *x << k  & 0xffff | z;
+			z = *x++ >> k1;
+			}
+			while(x < xe);
+		if (*x1 = z)
+			++n1;
+		}
+#endif
+	else do
+		*x1++ = *x++;
+		while(x < xe);
+	b1->wds = n1 - 1;
+	Bfree(b);
+	return b1;
+	}
+
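+/* Compare two Bigints: return <0, 0, or >0 as a <, ==, or > b. */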
+ static int
+cmp
+#ifdef KR_headers
+	(a, b) Bigint *a, *b;
+#else
+	(Bigint *a, Bigint *b)
+#endif
+{
+	ULong *xa, *xa0, *xb, *xb0;
+	int i, j;
+
+	i = a->wds;
+	j = b->wds;
+#ifdef DEBUG
+	if (i > 1 && !a->x[i-1])
+		Bug("cmp called with a->x[a->wds-1] == 0");
+	if (j > 1 && !b->x[j-1])
+		Bug("cmp called with b->x[b->wds-1] == 0");
+#endif
+	if (i -= j)
+		return i;
+	xa0 = a->x;
+	xa = xa0 + j;
+	xb0 = b->x;
+	xb = xb0 + j;
+	for(;;) {
+		if (*--xa != *--xb)
+			return *xa < *xb ? -1 : 1;
+		if (xa <= xa0)
+			break;
+		}
+	return 0;
+	}
+
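+/* Return |a - b| as a new Bigint, with its sign field set when a < b. */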
+ static Bigint *
+diff
+#ifdef KR_headers
+	(a, b) Bigint *a, *b;
+#else
+	(Bigint *a, Bigint *b)
+#endif
+{
+	Bigint *c;
+	int i, wa, wb;
+	ULong *xa, *xae, *xb, *xbe, *xc;
+#ifdef ULLong
+	ULLong borrow, y;
+#else
+	ULong borrow, y;
+#ifdef Pack_32
+	ULong z;
+#endif
+#endif
+
+	i = cmp(a,b);
+	if (!i) {
+		c = Balloc(0);
+		c->wds = 1;
+		c->x[0] = 0;
+		return c;
+		}
+	if (i < 0) {
+		c = a;
+		a = b;
+		b = c;
+		i = 1;
+		}
+	else
+		i = 0;
+	c = Balloc(a->k);
+	c->sign = i;
+	wa = a->wds;
+	xa = a->x;
+	xae = xa + wa;
+	wb = b->wds;
+	xb = b->x;
+	xbe = xb + wb;
+	xc = c->x;
+	borrow = 0;
+#ifdef ULLong
+	do {
+		y = (ULLong)*xa++ - *xb++ - borrow;
+		borrow = y >> 32 & (ULong)1;
+		*xc++ = y & FFFFFFFF;
+		}
+		while(xb < xbe);
+	while(xa < xae) {
+		y = *xa++ - borrow;
+		borrow = y >> 32 & (ULong)1;
+		*xc++ = y & FFFFFFFF;
+		}
+#else
+#ifdef Pack_32
+	do {
+		y = (*xa & 0xffff) - (*xb & 0xffff) - borrow;
+		borrow = (y & 0x10000) >> 16;
+		z = (*xa++ >> 16) - (*xb++ >> 16) - borrow;
+		borrow = (z & 0x10000) >> 16;
+		Storeinc(xc, z, y);
+		}
+		while(xb < xbe);
+	while(xa < xae) {
+		y = (*xa & 0xffff) - borrow;
+		borrow = (y & 0x10000) >> 16;
+		z = (*xa++ >> 16) - borrow;
+		borrow = (z & 0x10000) >> 16;
+		Storeinc(xc, z, y);
+		}
+#else
+	do {
+		y = *xa++ - *xb++ - borrow;
+		borrow = (y & 0x10000) >> 16;
+		*xc++ = y & 0xffff;
+		}
+		while(xb < xbe);
+	while(xa < xae) {
+		y = *xa++ - borrow;
+		borrow = (y & 0x10000) >> 16;
+		*xc++ = y & 0xffff;
+		}
+#endif
+#endif
+	while(!*--xc)
+		wa--;
+	c->wds = wa;
+	return c;
+	}
+
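+/* Return one unit in the last place (ulp) of x. */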
+ static double
+ulp
+#ifdef KR_headers
+	(x) double x;
+#else
+	(double x)
+#endif
+{
+	register Long L;
+	double a;
+
+	L = (word0(x) & Exp_mask) - (P-1)*Exp_msk1;
+#ifndef Avoid_Underflow
+#ifndef Sudden_Underflow
+	if (L > 0) {
+#endif
+#endif
+#ifdef IBM
+		L |= Exp_msk1 >> 4;
+#endif
+		word0(a) = L;
+		word1(a) = 0;
+#ifndef Avoid_Underflow
+#ifndef Sudden_Underflow
+		}
+	else {
+		L = -L >> Exp_shift;
+		if (L < Exp_shift) {
+			word0(a) = 0x80000 >> L;
+			word1(a) = 0;
+			}
+		else {
+			word0(a) = 0;
+			L -= Exp_shift;
+			word1(a) = L >= 31 ? 1 : 1 << 31 - L;
+			}
+		}
+#endif
+#endif
+	return dval(a);
+	}
+
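+/* Build a double from the most significant bits of a; *e receives the
+ * bit length of a's leading word.  Helper for ratio() below. */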
+ static double
+b2d
+#ifdef KR_headers
+	(a, e) Bigint *a; int *e;
+#else
+	(Bigint *a, int *e)
+#endif
+{
+	ULong *xa, *xa0, w, y, z;
+	int k;
+	double d;
+#ifdef VAX
+	ULong d0, d1;
+#else
+#define d0 word0(d)
+#define d1 word1(d)
+#endif
+
+	xa0 = a->x;
+	xa = xa0 + a->wds;
+	y = *--xa;
+#ifdef DEBUG
+	if (!y) Bug("zero y in b2d");
+#endif
+	k = hi0bits(y);
+	*e = 32 - k;
+#ifdef Pack_32
+	if (k < Ebits) {
+		d0 = Exp_1 | (y >> (Ebits - k));
+		w = xa > xa0 ? *--xa : 0;
+		d1 = (y << ((32-Ebits) + k)) | (w >> (Ebits - k));
+		goto ret_d;
+		}
+	z = xa > xa0 ? *--xa : 0;
+	if (k -= Ebits) {
+		d0 = Exp_1 | (y << k) | (z >> (32 - k));
+		y = xa > xa0 ? *--xa : 0;
+		d1 = (z << k) | (y >> (32 - k));
+		}
+	else {
+		d0 = Exp_1 | y;
+		d1 = z;
+		}
+#else
+	if (k < Ebits + 16) {
+		z = xa > xa0 ? *--xa : 0;
+		d0 = Exp_1 | (y << (k - Ebits)) | (z >> (Ebits + 16 - k));
+		w = xa > xa0 ? *--xa : 0;
+		y = xa > xa0 ? *--xa : 0;
+		d1 = (z << (k + 16 - Ebits)) | (w << (k - Ebits)) | (y >> (16 + Ebits - k));
+		goto ret_d;
+		}
+	z = xa > xa0 ? *--xa : 0;
+	w = xa > xa0 ? *--xa : 0;
+	k -= Ebits + 16;
+	d0 = Exp_1 | y << k + 16 | z << k | w >> 16 - k;
+	y = xa > xa0 ? *--xa : 0;
+	d1 = w << k + 16 | y << k;
+#endif
+ ret_d:
+#ifdef VAX
+	word0(d) = d0 >> 16 | d0 << 16;
+	word1(d) = d1 >> 16 | d1 << 16;
+#else
+#undef d0
+#undef d1
+#endif
+	return dval(d);
+	}
+
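+/* Convert the double d to a Bigint b with d = b * 2^*e; *bits receives
+ * the number of significant bits in b. */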
+ static Bigint *
+d2b
+#ifdef KR_headers
+	(d, e, bits) double d; int *e, *bits;
+#else
+	(double d, int *e, int *bits)
+#endif
+{
+	Bigint *b;
+	int de, k;
+	ULong *x, y, z;
+#ifndef Sudden_Underflow
+	int i;
+#endif
+#ifdef VAX
+	ULong d0, d1;
+	d0 = word0(d) >> 16 | word0(d) << 16;
+	d1 = word1(d) >> 16 | word1(d) << 16;
+#else
+#define d0 word0(d)
+#define d1 word1(d)
+#endif
+
+#ifdef Pack_32
+	b = Balloc(1);
+#else
+	b = Balloc(2);
+#endif
+	x = b->x;
+
+	z = d0 & Frac_mask;
+	d0 &= 0x7fffffff;	/* clear sign bit, which we ignore */
+#ifdef Sudden_Underflow
+	de = (int)(d0 >> Exp_shift);
+#ifndef IBM
+	z |= Exp_msk11;
+#endif
+#else
+	if ((de = (int)(d0 >> Exp_shift)))
+		z |= Exp_msk1;
+#endif
+#ifdef Pack_32
+	if ((y = d1)) {
+		if ((k = lo0bits(&y))) {
+			x[0] = y | (z << (32 - k));
+			z >>= k;
+			}
+		else
+			x[0] = y;
+#ifndef Sudden_Underflow
+		i =
+#endif
+		    b->wds = (x[1] = z) ? 2 : 1;
+		}
+	else {
+               /* This assertion fails for "1e-500" and other very
+                * small numbers, but the code still produces the right
+                * result (0) in that case.  The assert has also been
+                * removed from KJS's version of dtoa.c.
+                *
+                * #ifdef DEBUG
+                *     if (!z) Bug("zero z in b2d");
+                * #endif
+                */
+		k = lo0bits(&z);
+		x[0] = z;
+#ifndef Sudden_Underflow
+		i =
+#endif
+		    b->wds = 1;
+		k += 32;
+		}
+#else
+	if (y = d1) {
+		if (k = lo0bits(&y))
+			if (k >= 16) {
+				x[0] = y | z << 32 - k & 0xffff;
+				x[1] = z >> k - 16 & 0xffff;
+				x[2] = z >> k;
+				i = 2;
+				}
+			else {
+				x[0] = y & 0xffff;
+				x[1] = y >> 16 | z << 16 - k & 0xffff;
+				x[2] = z >> k & 0xffff;
+				x[3] = z >> k+16;
+				i = 3;
+				}
+		else {
+			x[0] = y & 0xffff;
+			x[1] = y >> 16;
+			x[2] = z & 0xffff;
+			x[3] = z >> 16;
+			i = 3;
+			}
+		}
+	else {
+#ifdef DEBUG
+		if (!z)
+			Bug("Zero passed to d2b");
+#endif
+		k = lo0bits(&z);
+		if (k >= 16) {
+			x[0] = z;
+			i = 0;
+			}
+		else {
+			x[0] = z & 0xffff;
+			x[1] = z >> 16;
+			i = 1;
+			}
+		k += 32;
+		}
+	while(!x[i])
+		--i;
+	b->wds = i + 1;
+#endif
+#ifndef Sudden_Underflow
+	if (de) {
+#endif
+#ifdef IBM
+		*e = (de - Bias - (P-1) << 2) + k;
+		*bits = 4*P + 8 - k - hi0bits(word0(d) & Frac_mask);
+#else
+		*e = de - Bias - (P-1) + k;
+		*bits = P - k;
+#endif
+#ifndef Sudden_Underflow
+		}
+	else {
+		*e = de - Bias - (P-1) + 1 + k;
+#ifdef Pack_32
+		*bits = 32*i - hi0bits(x[i-1]);
+#else
+		*bits = (i+2)*16 - hi0bits(x[i]);
+#endif
+		}
+#endif
+	return b;
+	}
+#undef d0
+#undef d1
+
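+/* Return an approximation to a/b as a double. */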
+ static double
+ratio
+#ifdef KR_headers
+	(a, b) Bigint *a, *b;
+#else
+	(Bigint *a, Bigint *b)
+#endif
+{
+	double da, db;
+	int k, ka, kb;
+
+	dval(da) = b2d(a, &ka);
+	dval(db) = b2d(b, &kb);
+#ifdef Pack_32
+	k = ka - kb + 32*(a->wds - b->wds);
+#else
+	k = ka - kb + 16*(a->wds - b->wds);
+#endif
+#ifdef IBM
+	if (k > 0) {
+		word0(da) += (k >> 2)*Exp_msk1;
+		if (k &= 3)
+			dval(da) *= 1 << k;
+		}
+	else {
+		k = -k;
+		word0(db) += (k >> 2)*Exp_msk1;
+		if (k &= 3)
+			dval(db) *= 1 << k;
+		}
+#else
+	if (k > 0)
+		word0(da) += k*Exp_msk1;
+	else {
+		k = -k;
+		word0(db) += k*Exp_msk1;
+		}
+#endif
+	return dval(da) / dval(db);
+	}
+
+ static CONST double
+tens[] = {
+		1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
+		1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
+		1e20, 1e21, 1e22
+#ifdef VAX
+		, 1e23, 1e24
+#endif
+		};
+
+ static CONST double
+#ifdef IEEE_Arith
+bigtens[] = { 1e16, 1e32, 1e64, 1e128, 1e256 };
+static CONST double tinytens[] = { 1e-16, 1e-32, 1e-64, 1e-128,
+#ifdef Avoid_Underflow
+		9007199254740992.*9007199254740992.e-256
+		/* = 2^106 * 1e-256 */
+#else
+		1e-256
+#endif
+		};
+/* The factor of 2^53 in tinytens[4] helps us avoid setting the underflow */
+/* flag unnecessarily.  It leads to a song and dance at the end of strtod. */
+#define Scale_Bit 0x10
+#define n_bigtens 5
+#else
+#ifdef IBM
+bigtens[] = { 1e16, 1e32, 1e64 };
+static CONST double tinytens[] = { 1e-16, 1e-32, 1e-64 };
+#define n_bigtens 3
+#else
+bigtens[] = { 1e16, 1e32 };
+static CONST double tinytens[] = { 1e-16, 1e-32 };
+#define n_bigtens 2
+#endif
+#endif
+
+#ifndef IEEE_Arith
+#undef INFNAN_CHECK
+#endif
+
+#ifdef INFNAN_CHECK
+
+#ifndef NAN_WORD0
+#define NAN_WORD0 0x7ff80000
+#endif
+
+#ifndef NAN_WORD1
+#define NAN_WORD1 0
+#endif
+
+ static int
+match
+#ifdef KR_headers
+	(sp, t) char **sp, *t;
+#else
+	(CONST char **sp, char *t)
+#endif
+{
+	int c, d;
+	CONST char *s = *sp;
+
+	while(d = *t++) {
+		if ((c = *++s) >= 'A' && c <= 'Z')
+			c += 'a' - 'A';
+		if (c != d)
+			return 0;
+		}
+	*sp = s + 1;
+	return 1;
+	}
+
+#ifndef No_Hex_NaN
+ static void
+hexnan
+#ifdef KR_headers
+	(rvp, sp) double *rvp; CONST char **sp;
+#else
+	(double *rvp, CONST char **sp)
+#endif
+{
+	ULong c, x[2];
+	CONST char *s;
+	int havedig, udx0, xshift;
+
+	x[0] = x[1] = 0;
+	havedig = xshift = 0;
+	udx0 = 1;
+	s = *sp;
+	while(c = *(CONST unsigned char*)++s) {
+		if (c >= '0' && c <= '9')
+			c -= '0';
+		else if (c >= 'a' && c <= 'f')
+			c += 10 - 'a';
+		else if (c >= 'A' && c <= 'F')
+			c += 10 - 'A';
+		else if (c <= ' ') {
+			if (udx0 && havedig) {
+				udx0 = 0;
+				xshift = 1;
+				}
+			continue;
+			}
+		else if (/*(*/ c == ')' && havedig) {
+			*sp = s + 1;
+			break;
+			}
+		else
+			return;	/* invalid form: don't change *sp */
+		havedig = 1;
+		if (xshift) {
+			xshift = 0;
+			x[0] = x[1];
+			x[1] = 0;
+			}
+		if (udx0)
+			x[0] = (x[0] << 4) | (x[1] >> 28);
+		x[1] = (x[1] << 4) | c;
+		}
+	if ((x[0] &= 0xfffff) || x[1]) {
+		word0(*rvp) = Exp_mask | x[0];
+		word1(*rvp) = x[1];
+		}
+	}
+#endif /*No_Hex_NaN*/
+#endif /* INFNAN_CHECK */
+
+ double
+strtod
+#ifdef KR_headers
+	(s00, se) CONST char *s00; char **se;
+#else
+	(CONST char *s00, char **se)
+#endif
+{
+#ifdef Avoid_Underflow
+	int scale;
+#endif
+	int bb2, bb5, bbe, bd2, bd5, bbbits, bs2, c, dsign,
+		 e, e1, esign, i, j, k, nd, nd0, nf, nz, nz0, sign;
+	CONST char *s, *s0, *s1;
+	double aadj, aadj1, adj, rv, rv0;
+	Long L;
+	ULong y, z;
+	Bigint *bb = NULL, *bb1, *bd = NULL, *bd0, *bs = NULL, *delta = NULL;
+#ifdef SET_INEXACT
+	int inexact, oldinexact;
+#endif
+#ifdef Honor_FLT_ROUNDS
+	int rounding;
+#endif
+#ifdef USE_LOCALE
+	CONST char *s2;
+#endif
+
+	sign = nz0 = nz = 0;
+	dval(rv) = 0.;
+	for(s = s00;;s++) switch(*s) {
+		case '-':
+			sign = 1;
+			/* no break */
+		case '+':
+			if (*++s)
+				goto break2;
+			/* no break */
+		case 0:
+			goto ret0;
+		case '\t':
+		case '\n':
+		case '\v':
+		case '\f':
+		case '\r':
+		case ' ':
+			continue;
+		default:
+			goto break2;
+		}
+ break2:
+	if (*s == '0') {
+		nz0 = 1;
+		while(*++s == '0') ;
+		if (!*s)
+			goto ret;
+		}
+	s0 = s;
+	y = z = 0;
+	for(nd = nf = 0; (c = *s) >= '0' && c <= '9'; nd++, s++)
+		if (nd < 9)
+			y = 10*y + c - '0';
+		else if (nd < 16)
+			z = 10*z + c - '0';
+	nd0 = nd;
+#ifdef USE_LOCALE
+	s1 = localeconv()->decimal_point;
+	if (c == *s1) {
+		c = '.';
+		if (*++s1) {
+			s2 = s;
+			for(;;) {
+				if (*++s2 != *s1) {
+					c = 0;
+					break;
+					}
+				if (!*++s1) {
+					s = s2;
+					break;
+					}
+				}
+			}
+		}
+#endif
+	if (c == '.') {
+		c = *++s;
+		if (!nd) {
+			for(; c == '0'; c = *++s)
+				nz++;
+			if (c > '0' && c <= '9') {
+				s0 = s;
+				nf += nz;
+				nz = 0;
+				goto have_dig;
+				}
+			goto dig_done;
+			}
+		for(; c >= '0' && c <= '9'; c = *++s) {
+ have_dig:
+			nz++;
+			if (c -= '0') {
+				nf += nz;
+				for(i = 1; i < nz; i++)
+					if (nd++ < 9)
+						y *= 10;
+					else if (nd <= DBL_DIG + 1)
+						z *= 10;
+				if (nd++ < 9)
+					y = 10*y + c;
+				else if (nd <= DBL_DIG + 1)
+					z = 10*z + c;
+				nz = 0;
+				}
+			}
+		}
+ dig_done:
+	e = 0;
+	if (c == 'e' || c == 'E') {
+		if (!nd && !nz && !nz0) {
+			goto ret0;
+			}
+		s00 = s;
+		esign = 0;
+		switch(c = *++s) {
+			case '-':
+				esign = 1;
+			case '+':
+				c = *++s;
+			}
+		if (c >= '0' && c <= '9') {
+			while(c == '0')
+				c = *++s;
+			if (c > '0' && c <= '9') {
+				L = c - '0';
+				s1 = s;
+				while((c = *++s) >= '0' && c <= '9')
+					L = 10*L + c - '0';
+				if (s - s1 > 8 || L > 19999)
+					/* Avoid confusion from exponents
+					 * so large that e might overflow.
+					 */
+					e = 19999; /* safe for 16 bit ints */
+				else
+					e = (int)L;
+				if (esign)
+					e = -e;
+				}
+			else
+				e = 0;
+			}
+		else
+			s = s00;
+		}
+	if (!nd) {
+		if (!nz && !nz0) {
+#ifdef INFNAN_CHECK
+			/* Check for Nan and Infinity */
+			switch(c) {
+			  case 'i':
+			  case 'I':
+				if (match(&s,"nf")) {
+					--s;
+					if (!match(&s,"inity"))
+						++s;
+					word0(rv) = 0x7ff00000;
+					word1(rv) = 0;
+					goto ret;
+					}
+				break;
+			  case 'n':
+			  case 'N':
+				if (match(&s, "an")) {
+					word0(rv) = NAN_WORD0;
+					word1(rv) = NAN_WORD1;
+#ifndef No_Hex_NaN
+					if (*s == '(') /*)*/
+						hexnan(&rv, &s);
+#endif
+					goto ret;
+					}
+			  }
+#endif /* INFNAN_CHECK */
+ ret0:
+			s = s00;
+			sign = 0;
+			}
+		goto ret;
+		}
+	e1 = e -= nf;
+
+	/* Now we have nd0 digits, starting at s0, followed by a
+	 * decimal point, followed by nd-nd0 digits.  The number we're
+	 * after is the integer represented by those digits times
+	 * 10**e */
+
+	if (!nd0)
+		nd0 = nd;
+	k = nd < DBL_DIG + 1 ? nd : DBL_DIG + 1;
+	dval(rv) = y;
+	if (k > 9) {
+#ifdef SET_INEXACT
+		if (k > DBL_DIG)
+			oldinexact = get_inexact();
+#endif
+		dval(rv) = tens[k - 9] * dval(rv) + z;
+		}
+	bd0 = 0;
+	if (nd <= DBL_DIG
+#ifndef RND_PRODQUOT
+#ifndef Honor_FLT_ROUNDS
+		&& Flt_Rounds == 1
+#endif
+#endif
+			) {
+		if (!e)
+			goto ret;
+		if (e > 0) {
+			if (e <= Ten_pmax) {
+#ifdef VAX
+				goto vax_ovfl_check;
+#else
+#ifdef Honor_FLT_ROUNDS
+				/* round correctly FLT_ROUNDS = 2 or 3 */
+				if (sign) {
+					rv = -rv;
+					sign = 0;
+					}
+#endif
+				/* rv = */ rounded_product(dval(rv), tens[e]);
+				goto ret;
+#endif
+				}
+			i = DBL_DIG - nd;
+			if (e <= Ten_pmax + i) {
+				/* A fancier test would sometimes let us do
+				 * this for larger i values.
+				 */
+#ifdef Honor_FLT_ROUNDS
+				/* round correctly FLT_ROUNDS = 2 or 3 */
+				if (sign) {
+					rv = -rv;
+					sign = 0;
+					}
+#endif
+				e -= i;
+				dval(rv) *= tens[i];
+#ifdef VAX
+				/* VAX exponent range is so narrow we must
+				 * worry about overflow here...
+				 */
+ vax_ovfl_check:
+				word0(rv) -= P*Exp_msk1;
+				/* rv = */ rounded_product(dval(rv), tens[e]);
+				if ((word0(rv) & Exp_mask)
+				 > Exp_msk1*(DBL_MAX_EXP+Bias-1-P))
+					goto ovfl;
+				word0(rv) += P*Exp_msk1;
+#else
+				/* rv = */ rounded_product(dval(rv), tens[e]);
+#endif
+				goto ret;
+				}
+			}
+#ifndef Inaccurate_Divide
+		else if (e >= -Ten_pmax) {
+#ifdef Honor_FLT_ROUNDS
+			/* round correctly FLT_ROUNDS = 2 or 3 */
+			if (sign) {
+				rv = -rv;
+				sign = 0;
+				}
+#endif
+			/* rv = */ rounded_quotient(dval(rv), tens[-e]);
+			goto ret;
+			}
+#endif
+		}
+	e1 += nd - k;
+
+#ifdef IEEE_Arith
+#ifdef SET_INEXACT
+	inexact = 1;
+	if (k <= DBL_DIG)
+		oldinexact = get_inexact();
+#endif
+#ifdef Avoid_Underflow
+	scale = 0;
+#endif
+#ifdef Honor_FLT_ROUNDS
+	if ((rounding = Flt_Rounds) >= 2) {
+		if (sign)
+			rounding = rounding == 2 ? 0 : 2;
+		else
+			if (rounding != 2)
+				rounding = 0;
+		}
+#endif
+#endif /*IEEE_Arith*/
+
+	/* Get starting approximation = rv * 10**e1 */
+
+	if (e1 > 0) {
+		if ((i = e1 & 15))
+			dval(rv) *= tens[i];
+		if (e1 &= ~15) {
+			if (e1 > DBL_MAX_10_EXP) {
+ ovfl:
+#ifndef NO_ERRNO
+				errno = ERANGE;
+#endif
+				/* Can't trust HUGE_VAL */
+#ifdef IEEE_Arith
+#ifdef Honor_FLT_ROUNDS
+				switch(rounding) {
+				  case 0: /* toward 0 */
+				  case 3: /* toward -infinity */
+					word0(rv) = Big0;
+					word1(rv) = Big1;
+					break;
+				  default:
+					word0(rv) = Exp_mask;
+					word1(rv) = 0;
+				  }
+#else /*Honor_FLT_ROUNDS*/
+				word0(rv) = Exp_mask;
+				word1(rv) = 0;
+#endif /*Honor_FLT_ROUNDS*/
+#ifdef SET_INEXACT
+				/* set overflow bit */
+				dval(rv0) = 1e300;
+				dval(rv0) *= dval(rv0);
+#endif
+#else /*IEEE_Arith*/
+				word0(rv) = Big0;
+				word1(rv) = Big1;
+#endif /*IEEE_Arith*/
+				if (bd0)
+					goto retfree;
+				goto ret;
+				}
+			e1 >>= 4;
+			for(j = 0; e1 > 1; j++, e1 >>= 1)
+				if (e1 & 1)
+					dval(rv) *= bigtens[j];
+		/* The last multiplication could overflow. */
+			word0(rv) -= P*Exp_msk1;
+			dval(rv) *= bigtens[j];
+			if ((z = word0(rv) & Exp_mask)
+			 > Exp_msk1*(DBL_MAX_EXP+Bias-P))
+				goto ovfl;
+			if (z > Exp_msk1*(DBL_MAX_EXP+Bias-1-P)) {
+				/* set to largest number */
+				/* (Can't trust DBL_MAX) */
+				word0(rv) = Big0;
+				word1(rv) = Big1;
+				}
+			else
+				word0(rv) += P*Exp_msk1;
+			}
+		}
+	else if (e1 < 0) {
+		e1 = -e1;
+		if ((i = e1 & 15))
+			dval(rv) /= tens[i];
+		if (e1 >>= 4) {
+			if (e1 >= 1 << n_bigtens)
+				goto undfl;
+#ifdef Avoid_Underflow
+			if (e1 & Scale_Bit)
+				scale = 2*P;
+			for(j = 0; e1 > 0; j++, e1 >>= 1)
+				if (e1 & 1)
+					dval(rv) *= tinytens[j];
+			if (scale && (j = 2*P + 1 - ((word0(rv) & Exp_mask)
+						>> Exp_shift)) > 0) {
+				/* scaled rv is denormal; zap j low bits */
+				if (j >= 32) {
+					word1(rv) = 0;
+					if (j >= 53)
+					 word0(rv) = (P+2)*Exp_msk1;
+					else
+					 word0(rv) &= 0xffffffff << (j-32);
+					}
+				else
+					word1(rv) &= 0xffffffff << j;
+				}
+#else
+			for(j = 0; e1 > 1; j++, e1 >>= 1)
+				if (e1 & 1)
+					dval(rv) *= tinytens[j];
+			/* The last multiplication could underflow. */
+			dval(rv0) = dval(rv);
+			dval(rv) *= tinytens[j];
+			if (!dval(rv)) {
+				dval(rv) = 2.*dval(rv0);
+				dval(rv) *= tinytens[j];
+#endif
+				if (!dval(rv)) {
+ undfl:
+					dval(rv) = 0.;
+#ifndef NO_ERRNO
+					errno = ERANGE;
+#endif
+					if (bd0)
+						goto retfree;
+					goto ret;
+					}
+#ifndef Avoid_Underflow
+				word0(rv) = Tiny0;
+				word1(rv) = Tiny1;
+				/* The refinement below will clean
+				 * this approximation up.
+				 */
+				}
+#endif
+			}
+		}
+
+	/* Now the hard part -- adjusting rv to the correct value.*/
+
+	/* Put digits into bd: true value = bd * 10^e */
+
+	bd0 = s2b(s0, nd0, nd, y);
+
+	for(;;) {
+		bd = Balloc(bd0->k);
+		Bcopy(bd, bd0);
+		bb = d2b(dval(rv), &bbe, &bbbits);	/* rv = bb * 2^bbe */
+		bs = i2b(1);
+
+		if (e >= 0) {
+			bb2 = bb5 = 0;
+			bd2 = bd5 = e;
+			}
+		else {
+			bb2 = bb5 = -e;
+			bd2 = bd5 = 0;
+			}
+		if (bbe >= 0)
+			bb2 += bbe;
+		else
+			bd2 -= bbe;
+		bs2 = bb2;
+#ifdef Honor_FLT_ROUNDS
+		if (rounding != 1)
+			bs2++;
+#endif
+#ifdef Avoid_Underflow
+		j = bbe - scale;
+		i = j + bbbits - 1;	/* logb(rv) */
+		if (i < Emin)	/* denormal */
+			j += P - Emin;
+		else
+			j = P + 1 - bbbits;
+#else /*Avoid_Underflow*/
+#ifdef Sudden_Underflow
+#ifdef IBM
+		j = 1 + 4*P - 3 - bbbits + ((bbe + bbbits - 1) & 3);
+#else
+		j = P + 1 - bbbits;
+#endif
+#else /*Sudden_Underflow*/
+		j = bbe;
+		i = j + bbbits - 1;	/* logb(rv) */
+		if (i < Emin)	/* denormal */
+			j += P - Emin;
+		else
+			j = P + 1 - bbbits;
+#endif /*Sudden_Underflow*/
+#endif /*Avoid_Underflow*/
+		bb2 += j;
+		bd2 += j;
+#ifdef Avoid_Underflow
+		bd2 += scale;
+#endif
+		i = bb2 < bd2 ? bb2 : bd2;
+		if (i > bs2)
+			i = bs2;
+		if (i > 0) {
+			bb2 -= i;
+			bd2 -= i;
+			bs2 -= i;
+			}
+		if (bb5 > 0) {
+			bs = pow5mult(bs, bb5);
+			bb1 = mult(bs, bb);
+			Bfree(bb);
+			bb = bb1;
+			}
+		if (bb2 > 0)
+			bb = lshift(bb, bb2);
+		if (bd5 > 0)
+			bd = pow5mult(bd, bd5);
+		if (bd2 > 0)
+			bd = lshift(bd, bd2);
+		if (bs2 > 0)
+			bs = lshift(bs, bs2);
+		delta = diff(bb, bd);
+		dsign = delta->sign;
+		delta->sign = 0;
+		i = cmp(delta, bs);
+#ifdef Honor_FLT_ROUNDS
+		if (rounding != 1) {
+			if (i < 0) {
+				/* Error is less than an ulp */
+				if (!delta->x[0] && delta->wds <= 1) {
+					/* exact */
+#ifdef SET_INEXACT
+					inexact = 0;
+#endif
+					break;
+					}
+				if (rounding) {
+					if (dsign) {
+						adj = 1.;
+						goto apply_adj;
+						}
+					}
+				else if (!dsign) {
+					adj = -1.;
+					if (!word1(rv)
+					 && !(word0(rv) & Frac_mask)) {
+						y = word0(rv) & Exp_mask;
+#ifdef Avoid_Underflow
+						if (!scale || y > 2*P*Exp_msk1)
+#else
+						if (y)
+#endif
+						  {
+						  delta = lshift(delta,Log2P);
+						  if (cmp(delta, bs) <= 0)
+							adj = -0.5;
+						  }
+						}
+ apply_adj:
+#ifdef Avoid_Underflow
+					if (scale && (y = word0(rv) & Exp_mask)
+						<= 2*P*Exp_msk1)
+					  word0(adj) += (2*P+1)*Exp_msk1 - y;
+#else
+#ifdef Sudden_Underflow
+					if ((word0(rv) & Exp_mask) <=
+							P*Exp_msk1) {
+						word0(rv) += P*Exp_msk1;
+						dval(rv) += adj*ulp(dval(rv));
+						word0(rv) -= P*Exp_msk1;
+						}
+					else
+#endif /*Sudden_Underflow*/
+#endif /*Avoid_Underflow*/
+					dval(rv) += adj*ulp(dval(rv));
+					}
+				break;
+				}
+			adj = ratio(delta, bs);
+			if (adj < 1.)
+				adj = 1.;
+			if (adj <= 0x7ffffffe) {
+				/* adj = rounding ? ceil(adj) : floor(adj); */
+				y = adj;
+				if (y != adj) {
+					if (!((rounding>>1) ^ dsign))
+						y++;
+					adj = y;
+					}
+				}
+#ifdef Avoid_Underflow
+			if (scale && (y = word0(rv) & Exp_mask) <= 2*P*Exp_msk1)
+				word0(adj) += (2*P+1)*Exp_msk1 - y;
+#else
+#ifdef Sudden_Underflow
+			if ((word0(rv) & Exp_mask) <= P*Exp_msk1) {
+				word0(rv) += P*Exp_msk1;
+				adj *= ulp(dval(rv));
+				if (dsign)
+					dval(rv) += adj;
+				else
+					dval(rv) -= adj;
+				word0(rv) -= P*Exp_msk1;
+				goto cont;
+				}
+#endif /*Sudden_Underflow*/
+#endif /*Avoid_Underflow*/
+			adj *= ulp(dval(rv));
+			if (dsign)
+				dval(rv) += adj;
+			else
+				dval(rv) -= adj;
+			goto cont;
+			}
+#endif /*Honor_FLT_ROUNDS*/
+
+		if (i < 0) {
+			/* Error is less than half an ulp -- check for
+			 * special case of mantissa a power of two.
+			 */
+			if (dsign || word1(rv) || word0(rv) & Bndry_mask
+#ifdef IEEE_Arith
+#ifdef Avoid_Underflow
+			 || (word0(rv) & Exp_mask) <= (2*P+1)*Exp_msk1
+#else
+			 || (word0(rv) & Exp_mask) <= Exp_msk1
+#endif
+#endif
+				) {
+#ifdef SET_INEXACT
+				if (!delta->x[0] && delta->wds <= 1)
+					inexact = 0;
+#endif
+				break;
+				}
+			if (!delta->x[0] && delta->wds <= 1) {
+				/* exact result */
+#ifdef SET_INEXACT
+				inexact = 0;
+#endif
+				break;
+				}
+			delta = lshift(delta,Log2P);
+			if (cmp(delta, bs) > 0)
+				goto drop_down;
+			break;
+			}
+		if (i == 0) {
+			/* exactly half-way between */
+			if (dsign) {
+				if ((word0(rv) & Bndry_mask1) == Bndry_mask1
+				 &&  word1(rv) == (
+#ifdef Avoid_Underflow
+			(scale && (y = word0(rv) & Exp_mask) <= 2*P*Exp_msk1)
+		? (0xffffffff & (0xffffffff << (2*P+1-(y>>Exp_shift)))) :
+#endif
+						   0xffffffff)) {
+					/*boundary case -- increment exponent*/
+					word0(rv) = (word0(rv) & Exp_mask)
+						+ Exp_msk1
+#ifdef IBM
+						| Exp_msk1 >> 4
+#endif
+						;
+					word1(rv) = 0;
+#ifdef Avoid_Underflow
+					dsign = 0;
+#endif
+					break;
+					}
+				}
+			else if (!(word0(rv) & Bndry_mask) && !word1(rv)) {
+ drop_down:
+				/* boundary case -- decrement exponent */
+#ifdef Sudden_Underflow /*{{*/
+				L = word0(rv) & Exp_mask;
+#ifdef IBM
+				if (L <  Exp_msk1)
+#else
+#ifdef Avoid_Underflow
+				if (L <= (scale ? (2*P+1)*Exp_msk1 : Exp_msk1))
+#else
+				if (L <= Exp_msk1)
+#endif /*Avoid_Underflow*/
+#endif /*IBM*/
+					goto undfl;
+				L -= Exp_msk1;
+#else /*Sudden_Underflow}{*/
+#ifdef Avoid_Underflow
+				if (scale) {
+					L = word0(rv) & Exp_mask;
+					if (L <= (2*P+1)*Exp_msk1) {
+						if (L > (P+2)*Exp_msk1)
+							/* round even ==> */
+							/* accept rv */
+							break;
+						/* rv = smallest denormal */
+						goto undfl;
+						}
+					}
+#endif /*Avoid_Underflow*/
+				L = (word0(rv) & Exp_mask) - Exp_msk1;
+#endif /*Sudden_Underflow}}*/
+				word0(rv) = L | Bndry_mask1;
+				word1(rv) = 0xffffffff;
+#ifdef IBM
+				goto cont;
+#else
+				break;
+#endif
+				}
+#ifndef ROUND_BIASED
+			if (!(word1(rv) & LSB))
+				break;
+#endif
+			if (dsign)
+				dval(rv) += ulp(dval(rv));
+#ifndef ROUND_BIASED
+			else {
+				dval(rv) -= ulp(dval(rv));
+#ifndef Sudden_Underflow
+				if (!dval(rv))
+					goto undfl;
+#endif
+				}
+#ifdef Avoid_Underflow
+			dsign = 1 - dsign;
+#endif
+#endif
+			break;
+			}
+		if ((aadj = ratio(delta, bs)) <= 2.) {
+			if (dsign)
+				aadj = aadj1 = 1.;
+			else if (word1(rv) || word0(rv) & Bndry_mask) {
+#ifndef Sudden_Underflow
+				if (word1(rv) == Tiny1 && !word0(rv))
+					goto undfl;
+#endif
+				aadj = 1.;
+				aadj1 = -1.;
+				}
+			else {
+				/* special case -- power of FLT_RADIX to be */
+				/* rounded down... */
+
+				if (aadj < 2./FLT_RADIX)
+					aadj = 1./FLT_RADIX;
+				else
+					aadj *= 0.5;
+				aadj1 = -aadj;
+				}
+			}
+		else {
+			aadj *= 0.5;
+			aadj1 = dsign ? aadj : -aadj;
+#ifdef Check_FLT_ROUNDS
+			switch(Rounding) {
+				case 2: /* towards +infinity */
+					aadj1 -= 0.5;
+					break;
+				case 0: /* towards 0 */
+				case 3: /* towards -infinity */
+					aadj1 += 0.5;
+				}
+#else
+			if (Flt_Rounds == 0)
+				aadj1 += 0.5;
+#endif /*Check_FLT_ROUNDS*/
+			}
+		y = word0(rv) & Exp_mask;
+
+		/* Check for overflow */
+
+		if (y == Exp_msk1*(DBL_MAX_EXP+Bias-1)) {
+			dval(rv0) = dval(rv);
+			word0(rv) -= P*Exp_msk1;
+			adj = aadj1 * ulp(dval(rv));
+			dval(rv) += adj;
+			if ((word0(rv) & Exp_mask) >=
+					Exp_msk1*(DBL_MAX_EXP+Bias-P)) {
+				if (word0(rv0) == Big0 && word1(rv0) == Big1)
+					goto ovfl;
+				word0(rv) = Big0;
+				word1(rv) = Big1;
+				goto cont;
+				}
+			else
+				word0(rv) += P*Exp_msk1;
+			}
+		else {
+#ifdef Avoid_Underflow
+			if (scale && y <= 2*P*Exp_msk1) {
+				if (aadj <= 0x7fffffff) {
+					if ((z = aadj) <= 0)
+						z = 1;
+					aadj = z;
+					aadj1 = dsign ? aadj : -aadj;
+					}
+				word0(aadj1) += (2*P+1)*Exp_msk1 - y;
+				}
+			adj = aadj1 * ulp(dval(rv));
+			dval(rv) += adj;
+#else
+#ifdef Sudden_Underflow
+			if ((word0(rv) & Exp_mask) <= P*Exp_msk1) {
+				dval(rv0) = dval(rv);
+				word0(rv) += P*Exp_msk1;
+				adj = aadj1 * ulp(dval(rv));
+				dval(rv) += adj;
+#ifdef IBM
+				if ((word0(rv) & Exp_mask) <  P*Exp_msk1)
+#else
+				if ((word0(rv) & Exp_mask) <= P*Exp_msk1)
+#endif
+					{
+					if (word0(rv0) == Tiny0
+					 && word1(rv0) == Tiny1)
+						goto undfl;
+					word0(rv) = Tiny0;
+					word1(rv) = Tiny1;
+					goto cont;
+					}
+				else
+					word0(rv) -= P*Exp_msk1;
+				}
+			else {
+				adj = aadj1 * ulp(dval(rv));
+				dval(rv) += adj;
+				}
+#else /*Sudden_Underflow*/
+			/* Compute adj so that the IEEE rounding rules will
+			 * correctly round rv + adj in some half-way cases.
+			 * If rv * ulp(rv) is denormalized (i.e.,
+			 * y <= (P-1)*Exp_msk1), we must adjust aadj to avoid
+			 * trouble from bits lost to denormalization;
+			 * example: 1.2e-307 .
+			 */
+			if (y <= (P-1)*Exp_msk1 && aadj > 1.) {
+				aadj1 = (double)(int)(aadj + 0.5);
+				if (!dsign)
+					aadj1 = -aadj1;
+				}
+			adj = aadj1 * ulp(dval(rv));
+			dval(rv) += adj;
+#endif /*Sudden_Underflow*/
+#endif /*Avoid_Underflow*/
+			}
+		z = word0(rv) & Exp_mask;
+#ifndef SET_INEXACT
+#ifdef Avoid_Underflow
+		if (!scale)
+#endif
+		if (y == z) {
+			/* Can we stop now? */
+			L = (Long)aadj;
+			aadj -= L;
+			/* The tolerances below are conservative. */
+			if (dsign || word1(rv) || word0(rv) & Bndry_mask) {
+				if (aadj < .4999999 || aadj > .5000001)
+					break;
+				}
+			else if (aadj < .4999999/FLT_RADIX)
+				break;
+			}
+#endif
+ cont:
+		Bfree(bb);
+		Bfree(bd);
+		Bfree(bs);
+		Bfree(delta);
+		}
+#ifdef SET_INEXACT
+	if (inexact) {
+		if (!oldinexact) {
+			word0(rv0) = Exp_1 + (70 << Exp_shift);
+			word1(rv0) = 0;
+			dval(rv0) += 1.;
+			}
+		}
+	else if (!oldinexact)
+		clear_inexact();
+#endif
+#ifdef Avoid_Underflow
+	if (scale) {
+		word0(rv0) = Exp_1 - 2*P*Exp_msk1;
+		word1(rv0) = 0;
+		dval(rv) *= dval(rv0);
+#ifndef NO_ERRNO
+		/* try to avoid the bug of testing an 8087 register value */
+		if (word0(rv) == 0 && word1(rv) == 0)
+			errno = ERANGE;
+#endif
+		}
+#endif /* Avoid_Underflow */
+#ifdef SET_INEXACT
+	if (inexact && !(word0(rv) & Exp_mask)) {
+		/* set underflow bit */
+		dval(rv0) = 1e-300;
+		dval(rv0) *= dval(rv0);
+		}
+#endif
+ retfree:
+	Bfree(bb);
+	Bfree(bd);
+	Bfree(bs);
+	Bfree(bd0);
+	Bfree(delta);
+ ret:
+	if (se)
+		*se = (char *)s;
+	return sign ? -dval(rv) : dval(rv);
+	}
+
+ static int
+quorem
+#ifdef KR_headers
+	(b, S) Bigint *b, *S;
+#else
+	(Bigint *b, Bigint *S)
+#endif
+{
+	int n;
+	ULong *bx, *bxe, q, *sx, *sxe;
+#ifdef ULLong
+	ULLong borrow, carry, y, ys;
+#else
+	ULong borrow, carry, y, ys;
+#ifdef Pack_32
+	ULong si, z, zs;
+#endif
+#endif
+
+	n = S->wds;
+#ifdef DEBUG
+	/*debug*/ if (b->wds > n)
+	/*debug*/	Bug("oversize b in quorem");
+#endif
+	if (b->wds < n)
+		return 0;
+	sx = S->x;
+	sxe = sx + --n;
+	bx = b->x;
+	bxe = bx + n;
+	q = *bxe / (*sxe + 1);	/* ensure q <= true quotient */
+#ifdef DEBUG
+	/*debug*/ if (q > 9)
+	/*debug*/	Bug("oversized quotient in quorem");
+#endif
+	if (q) {
+		borrow = 0;
+		carry = 0;
+		do {
+#ifdef ULLong
+			ys = *sx++ * (ULLong)q + carry;
+			carry = ys >> 32;
+			y = *bx - (ys & FFFFFFFF) - borrow;
+			borrow = y >> 32 & (ULong)1;
+			*bx++ = y & FFFFFFFF;
+#else
+#ifdef Pack_32
+			si = *sx++;
+			ys = (si & 0xffff) * q + carry;
+			zs = (si >> 16) * q + (ys >> 16);
+			carry = zs >> 16;
+			y = (*bx & 0xffff) - (ys & 0xffff) - borrow;
+			borrow = (y & 0x10000) >> 16;
+			z = (*bx >> 16) - (zs & 0xffff) - borrow;
+			borrow = (z & 0x10000) >> 16;
+			Storeinc(bx, z, y);
+#else
+			ys = *sx++ * q + carry;
+			carry = ys >> 16;
+			y = *bx - (ys & 0xffff) - borrow;
+			borrow = (y & 0x10000) >> 16;
+			*bx++ = y & 0xffff;
+#endif
+#endif
+			}
+			while(sx <= sxe);
+		if (!*bxe) {
+			bx = b->x;
+			while(--bxe > bx && !*bxe)
+				--n;
+			b->wds = n;
+			}
+		}
+	if (cmp(b, S) >= 0) {
+		q++;
+		borrow = 0;
+		carry = 0;
+		bx = b->x;
+		sx = S->x;
+		do {
+#ifdef ULLong
+			ys = *sx++ + carry;
+			carry = ys >> 32;
+			y = *bx - (ys & FFFFFFFF) - borrow;
+			borrow = y >> 32 & (ULong)1;
+			*bx++ = y & FFFFFFFF;
+#else
+#ifdef Pack_32
+			si = *sx++;
+			ys = (si & 0xffff) + carry;
+			zs = (si >> 16) + (ys >> 16);
+			carry = zs >> 16;
+			y = (*bx & 0xffff) - (ys & 0xffff) - borrow;
+			borrow = (y & 0x10000) >> 16;
+			z = (*bx >> 16) - (zs & 0xffff) - borrow;
+			borrow = (z & 0x10000) >> 16;
+			Storeinc(bx, z, y);
+#else
+			ys = *sx++ + carry;
+			carry = ys >> 16;
+			y = *bx - (ys & 0xffff) - borrow;
+			borrow = (y & 0x10000) >> 16;
+			*bx++ = y & 0xffff;
+#endif
+#endif
+			}
+			while(sx <= sxe);
+		bx = b->x;
+		bxe = bx + n;
+		if (!*bxe) {
+			while(--bxe > bx && !*bxe)
+				--n;
+			b->wds = n;
+			}
+		}
+	return q;
+	}
+
+#ifndef MULTIPLE_THREADS
+ static char *dtoa_result;
+#endif
+
+ static char *
+#ifdef KR_headers
+rv_alloc(i) int i;
+#else
+rv_alloc(int i)
+#endif
+{
+	int j, k, *r;
+
+	j = sizeof(ULong);
+	for(k = 0;
+		sizeof(Bigint) - sizeof(ULong) - sizeof(int) + j <= i;
+		j <<= 1)
+			k++;
+	r = (int*)Balloc(k);
+	*r = k;
+	return
+#ifndef MULTIPLE_THREADS
+	dtoa_result =
+#endif
+		(char *)(r+1);
+	}
+
+ static char *
+#ifdef KR_headers
+nrv_alloc(s, rve, n) char *s, **rve; int n;
+#else
+nrv_alloc(const char *s, char **rve, int n)
+#endif
+{
+	char *rv, *t;
+
+	t = rv = rv_alloc(n);
+	while ((*t = *s++)) t++;
+	if (rve)
+		*rve = t;
+	return rv;
+	}
+
+/* freedtoa(s) must be used to free values s returned by dtoa
+ * when MULTIPLE_THREADS is #defined.  It should be used in all cases,
+ * but for consistency with earlier versions of dtoa, it is optional
+ * when MULTIPLE_THREADS is not defined.
+ */
+
+ void
+#ifdef KR_headers
+freedtoa(s) char *s;
+#else
+freedtoa(char *s)
+#endif
+{
+	Bigint *b = (Bigint *)((int *)s - 1);
+	b->maxwds = 1 << (b->k = *(int*)b);
+	Bfree(b);
+#ifndef MULTIPLE_THREADS
+	if (s == dtoa_result)
+		dtoa_result = 0;
+#endif
+	}
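+
+/* Illustrative sketch, added for exposition and not part of the upstream
+ * dtoa sources: a hypothetical caller pairs dtoa() with freedtoa() like
+ * this (dtoa itself is defined further below):
+ *
+ *	int decpt, sign;
+ *	char *end;
+ *	char *digits = dtoa(2.5, 0, 0, &decpt, &sign, &end);
+ *	... use digits, decpt and sign ...
+ *	freedtoa(digits);
+ *
+ * The freedtoa() call is mandatory when MULTIPLE_THREADS is defined and
+ * merely recommended otherwise, per the comment above.
+ */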
+
+/* dtoa for IEEE arithmetic (dmg): convert double to ASCII string.
+ *
+ * Inspired by "How to Print Floating-Point Numbers Accurately" by
+ * Guy L. Steele, Jr. and Jon L. White [Proc. ACM SIGPLAN '90, pp. 112-126].
+ *
+ * Modifications:
+ *	1. Rather than iterating, we use a simple numeric overestimate
+ *	   to determine k = floor(log10(d)).  We scale relevant
+ *	   quantities using O(log2(k)) rather than O(k) multiplications.
+ *	2. For some modes > 2 (corresponding to ecvt and fcvt), we don't
+ *	   try to generate digits strictly left to right.  Instead, we
+ *	   compute with fewer bits and propagate the carry if necessary
+ *	   when rounding the final digit up.  This is often faster.
+ *	3. Under the assumption that input will be rounded nearest,
+ *	   mode 0 renders 1e23 as 1e23 rather than 9.999999999999999e22.
+ *	   That is, we allow equality in stopping tests when the
+ *	   round-nearest rule will give the same floating-point value
+ *	   as would satisfaction of the stopping test with strict
+ *	   inequality.
+ *	4. We remove common factors of powers of 2 from relevant
+ *	   quantities.
+ *	5. When converting floating-point integers less than 1e16,
+ *	   we use floating-point arithmetic rather than resorting
+ *	   to multiple-precision integers.
+ *	6. When asked to produce fewer than 15 digits, we first try
+ *	   to get by with floating-point arithmetic; we resort to
+ *	   multiple-precision integer arithmetic only if we cannot
+ *	   guarantee that the floating-point calculation has given
+ *	   the correctly rounded result.  For k requested digits and
+ *	   "uniformly" distributed input, the probability is
+ *	   something like 10^(k-15) that we must resort to the Long
+ *	   calculation.
+ */
+
+ char *
+dtoa
+#ifdef KR_headers
+	(d, mode, ndigits, decpt, sign, rve)
+	double d; int mode, ndigits, *decpt, *sign; char **rve;
+#else
+	(double d, int mode, int ndigits, int *decpt, int *sign, char **rve)
+#endif
+{
+ /*	Arguments ndigits, decpt, sign are similar to those
+	of ecvt and fcvt; trailing zeros are suppressed from
+	the returned string.  If not null, *rve is set to point
+	to the end of the return value.  If d is +-Infinity or NaN,
+	then *decpt is set to 9999.
+
+	mode:
+		0 ==> shortest string that yields d when read in
+			and rounded to nearest.
+		1 ==> like 0, but with Steele & White stopping rule;
+			e.g. with IEEE P754 arithmetic, mode 0 gives
+			1e23 whereas mode 1 gives 9.999999999999999e22.
+		2 ==> max(1,ndigits) significant digits.  This gives a
+			return value similar to that of ecvt, except
+			that trailing zeros are suppressed.
+		3 ==> through ndigits past the decimal point.  This
+			gives a return value similar to that from fcvt,
+			except that trailing zeros are suppressed, and
+			ndigits can be negative.
+		4,5 ==> similar to 2 and 3, respectively, but (in
+			round-nearest mode) with the tests of mode 0 to
+			possibly return a shorter string that rounds to d.
+			With IEEE arithmetic and compilation with
+			-DHonor_FLT_ROUNDS, modes 4 and 5 behave the same
+			as modes 2 and 3 when FLT_ROUNDS != 1.
+		6-9 ==> Debugging modes similar to mode - 4:  don't try
+			fast floating-point estimate (if applicable).
+
+		Values of mode other than 0-9 are treated as mode 0.
+
+		Sufficient space is allocated to the return value
+		to hold the suppressed trailing zeros.
+	*/
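+
+	/* Worked example, added for illustration (not in the upstream
+	 * sources): for d = 1234.5 in mode 0, dtoa returns the digit
+	 * string "12345" with *decpt == 4 and *sign == 0, i.e. the
+	 * decimal point belongs after the first four digits: 1234.5.
+	 * *decpt == 9999 signals Infinity or NaN, as noted above. */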
+
+	int bbits, b2, b5, be, dig, i, ieps, ilim, ilim0, ilim1,
+		j, j1, k, k0, k_check, leftright, m2, m5, s2, s5,
+		spec_case, try_quick, bias_round_up;
+	Long L;
+#ifndef Sudden_Underflow
+	int denorm;
+	ULong x;
+#endif
+	Bigint *b, *b1, *delta, *mlo, *mhi, *S;
+	double d2, ds, eps;
+	char *s, *s0;
+#ifdef Honor_FLT_ROUNDS
+	int rounding;
+#endif
+#ifdef SET_INEXACT
+	int inexact, oldinexact;
+#endif
+
+        /* In modes 2 and 3 we bias rounding up when there are ties. */
+        bias_round_up = mode == 2 || mode == 3;
+
+        ilim = ilim1 = 0; /* to avoid Google3 compiler warnings */
+
+#ifndef MULTIPLE_THREADS
+	if (dtoa_result) {
+		freedtoa(dtoa_result);
+		dtoa_result = 0;
+		}
+#endif
+
+	if (word0(d) & Sign_bit) {
+		/* set sign for everything, including 0's and NaNs */
+		*sign = 1;
+		word0(d) &= ~Sign_bit;	/* clear sign bit */
+		}
+	else
+		*sign = 0;
+
+#if defined(IEEE_Arith) + defined(VAX)
+#ifdef IEEE_Arith
+	if ((word0(d) & Exp_mask) == Exp_mask)
+#else
+	if (word0(d)  == 0x8000)
+#endif
+		{
+		/* Infinity or NaN */
+		*decpt = 9999;
+#ifdef IEEE_Arith
+		if (!word1(d) && !(word0(d) & 0xfffff))
+			return nrv_alloc("Infinity", rve, 8);
+#endif
+		return nrv_alloc("NaN", rve, 3);
+		}
+#endif
+#ifdef IBM
+	dval(d) += 0; /* normalize */
+#endif
+	if (!dval(d)) {
+		*decpt = 1;
+		return nrv_alloc("0", rve, 1);
+		}
+
+#ifdef SET_INEXACT
+	try_quick = oldinexact = get_inexact();
+	inexact = 1;
+#endif
+#ifdef Honor_FLT_ROUNDS
+	if ((rounding = Flt_Rounds) >= 2) {
+		if (*sign)
+			rounding = rounding == 2 ? 0 : 2;
+		else
+			if (rounding != 2)
+				rounding = 0;
+		}
+#endif
+
+	b = d2b(dval(d), &be, &bbits);
+#ifdef Sudden_Underflow
+	i = (int)(word0(d) >> Exp_shift1 & (Exp_mask>>Exp_shift1));
+#else
+	if ((i = (int)(word0(d) >> Exp_shift1 & (Exp_mask>>Exp_shift1)))) {
+#endif
+		dval(d2) = dval(d);
+		word0(d2) &= Frac_mask1;
+		word0(d2) |= Exp_11;
+#ifdef IBM
+		if (j = 11 - hi0bits(word0(d2) & Frac_mask))
+			dval(d2) /= 1 << j;
+#endif
+
+		/* log(x)	~=~ log(1.5) + (x-1.5)/1.5
+		 * log10(x)	 =  log(x) / log(10)
+		 *		~=~ log(1.5)/log(10) + (x-1.5)/(1.5*log(10))
+		 * log10(d) = (i-Bias)*log(2)/log(10) + log10(d2)
+		 *
+		 * This suggests computing an approximation k to log10(d) by
+		 *
+		 * k = (i - Bias)*0.301029995663981
+		 *	+ ( (d2-1.5)*0.289529654602168 + 0.176091259055681 );
+		 *
+		 * We want k to be too large rather than too small.
+		 * The error in the first-order Taylor series approximation
+		 * is in our favor, so we just round up the constant enough
+		 * to compensate for any error in the multiplication of
+		 * (i - Bias) by 0.301029995663981; since |i - Bias| <= 1077,
+		 * and 1077 * 0.30103 * 2^-52 ~=~ 7.2e-14,
+		 * adding 1e-13 to the constant term more than suffices.
+		 * Hence we adjust the constant term to 0.1760912590558.
+		 * (We could get a more accurate k by invoking log10,
+		 *  but this is probably not worthwhile.)
+		 */
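+
+		/* Worked example, added for illustration: for d = 1e10 the
+		 * unbiased binary exponent is 33, so i - Bias = 33 and
+		 * d2 ~= 1.16415.  The estimate is then
+		 *	33*0.301029995663981
+		 *		+ (1.16415 - 1.5)*0.289529654602168
+		 *		+ 0.1760912590558  ~=  10.01,
+		 * which truncates to k = 10 = floor(log10(1e10)).  When the
+		 * (deliberately high) estimate is one too large, the
+		 * comparison with tens[k] below corrects it.
+		 */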
+
+		i -= Bias;
+#ifdef IBM
+		i <<= 2;
+		i += j;
+#endif
+#ifndef Sudden_Underflow
+		denorm = 0;
+		}
+	else {
+		/* d is denormalized */
+
+		i = bbits + be + (Bias + (P-1) - 1);
+		x = i > 32  ? (word0(d) << (64 - i)) | (word1(d) >> (i - 32))
+			    : word1(d) << (32 - i);
+		dval(d2) = x;
+		word0(d2) -= 31*Exp_msk1; /* adjust exponent */
+		i -= (Bias + (P-1) - 1) + 1;
+		denorm = 1;
+		}
+#endif
+	ds = (dval(d2)-1.5)*0.289529654602168 + 0.1760912590558 + i*0.301029995663981;
+	k = (int)ds;
+	if (ds < 0. && ds != k)
+		k--;	/* want k = floor(ds) */
+	k_check = 1;
+	if (k >= 0 && k <= Ten_pmax) {
+		if (dval(d) < tens[k])
+			k--;
+		k_check = 0;
+		}
+	j = bbits - i - 1;
+	if (j >= 0) {
+		b2 = 0;
+		s2 = j;
+		}
+	else {
+		b2 = -j;
+		s2 = 0;
+		}
+	if (k >= 0) {
+		b5 = 0;
+		s5 = k;
+		s2 += k;
+		}
+	else {
+		b2 -= k;
+		b5 = -k;
+		s5 = 0;
+		}
+	if (mode < 0 || mode > 9)
+		mode = 0;
+
+#ifndef SET_INEXACT
+#ifdef Check_FLT_ROUNDS
+	try_quick = Rounding == 1;
+#else
+	try_quick = 1;
+#endif
+#endif /*SET_INEXACT*/
+
+	if (mode > 5) {
+		mode -= 4;
+		try_quick = 0;
+		}
+	leftright = 1;
+	switch(mode) {
+		case 0:
+		case 1:
+			ilim = ilim1 = -1;
+			i = 18;
+			ndigits = 0;
+			break;
+		case 2:
+			leftright = 0;
+			/* no break */
+		case 4:
+			if (ndigits <= 0)
+				ndigits = 1;
+			ilim = ilim1 = i = ndigits;
+			break;
+		case 3:
+			leftright = 0;
+			/* no break */
+		case 5:
+			i = ndigits + k + 1;
+			ilim = i;
+			ilim1 = i - 1;
+			if (i <= 0)
+				i = 1;
+		}
+	s = s0 = rv_alloc(i);
+
+#ifdef Honor_FLT_ROUNDS
+	if (mode > 1 && rounding != 1)
+		leftright = 0;
+#endif
+
+	if (ilim >= 0 && ilim <= Quick_max && try_quick) {
+
+		/* Try to get by with floating-point arithmetic. */
+
+		i = 0;
+		dval(d2) = dval(d);
+		k0 = k;
+		ilim0 = ilim;
+		ieps = 2; /* conservative */
+		if (k > 0) {
+			ds = tens[k&0xf];
+			j = k >> 4;
+			if (j & Bletch) {
+				/* prevent overflows */
+				j &= Bletch - 1;
+				dval(d) /= bigtens[n_bigtens-1];
+				ieps++;
+				}
+			for(; j; j >>= 1, i++)
+				if (j & 1) {
+					ieps++;
+					ds *= bigtens[i];
+					}
+			dval(d) /= ds;
+			}
+		else if ((j1 = -k)) {
+			dval(d) *= tens[j1 & 0xf];
+			for(j = j1 >> 4; j; j >>= 1, i++)
+				if (j & 1) {
+					ieps++;
+					dval(d) *= bigtens[i];
+					}
+			}
+		if (k_check && dval(d) < 1. && ilim > 0) {
+			if (ilim1 <= 0)
+				goto fast_failed;
+			ilim = ilim1;
+			k--;
+			dval(d) *= 10.;
+			ieps++;
+			}
+		dval(eps) = ieps*dval(d) + 7.;
+		word0(eps) -= (P-1)*Exp_msk1;
+		if (ilim == 0) {
+			S = mhi = 0;
+			dval(d) -= 5.;
+			if (dval(d) > dval(eps))
+				goto one_digit;
+			if (dval(d) < -dval(eps))
+				goto no_digits;
+			goto fast_failed;
+			}
+#ifndef No_leftright
+		if (leftright) {
+			/* Use Steele & White method of only
+			 * generating digits needed.
+			 */
+			dval(eps) = 0.5/tens[ilim-1] - dval(eps);
+			for(i = 0;;) {
+				L = dval(d);
+				dval(d) -= L;
+				*s++ = '0' + (int)L;
+				if (dval(d) < dval(eps))
+					goto ret1;
+				if (1. - dval(d) < dval(eps))
+					goto bump_up;
+				if (++i >= ilim)
+					break;
+				dval(eps) *= 10.;
+				dval(d) *= 10.;
+				}
+			}
+		else {
+#endif
+			/* Generate ilim digits, then fix them up. */
+			dval(eps) *= tens[ilim-1];
+			for(i = 1;; i++, dval(d) *= 10.) {
+				L = (Long)(dval(d));
+				if (!(dval(d) -= L))
+					ilim = i;
+				*s++ = '0' + (int)L;
+				if (i == ilim) {
+					if (dval(d) > 0.5 + dval(eps))
+						goto bump_up;
+					else if (dval(d) < 0.5 - dval(eps)) {
+						while(*--s == '0');
+						s++;
+						goto ret1;
+						}
+					break;
+					}
+				}
+#ifndef No_leftright
+			}
+#endif
+ fast_failed:
+		s = s0;
+		dval(d) = dval(d2);
+		k = k0;
+		ilim = ilim0;
+		}
+
+	/* Do we have a "small" integer? */
+
+	if (be >= 0 && k <= Int_max) {
+		/* Yes. */
+		ds = tens[k];
+		if (ndigits < 0 && ilim <= 0) {
+			S = mhi = 0;
+			if (ilim < 0 || dval(d) < 5*ds || ((dval(d) == 5*ds) && !bias_round_up))
+				goto no_digits;
+			goto one_digit;
+			}
+
+                /* Limit looping by the number of digits to produce.
+                 * Firefox had a crash bug because some plugins reduce
+                 * the precision of double arithmetic.  With reduced
+                 * precision "dval(d) -= L*ds" might be imprecise and
+                 * d might not become zero and the loop might not
+                 * terminate.
+                 *
+                 * See https://bugzilla.mozilla.org/show_bug.cgi?id=358569
+                 */
+		for(i = 1; i <= k+1; i++, dval(d) *= 10.) {
+			L = (Long)(dval(d) / ds);
+			dval(d) -= L*ds;
+#ifdef Check_FLT_ROUNDS
+			/* If FLT_ROUNDS == 2, L will usually be high by 1 */
+			if (dval(d) < 0) {
+				L--;
+				dval(d) += ds;
+				}
+#endif
+			*s++ = '0' + (int)L;
+			if (!dval(d)) {
+#ifdef SET_INEXACT
+				inexact = 0;
+#endif
+				break;
+				}
+			if (i == ilim) {
+#ifdef Honor_FLT_ROUNDS
+				if (mode > 1)
+				switch(rounding) {
+				  case 0: goto ret1;
+				  case 2: goto bump_up;
+				  }
+#endif
+				dval(d) += dval(d);
+				if (dval(d) > ds || (dval(d) == ds && ((L & 1) || bias_round_up))) {
+ bump_up:
+					while(*--s == '9')
+						if (s == s0) {
+							k++;
+							*s = '0';
+							break;
+							}
+					++*s++;
+					}
+				break;
+				}
+			}
+		goto ret1;
+		}
+
+	m2 = b2;
+	m5 = b5;
+	mhi = mlo = 0;
+	if (leftright) {
+		i =
+#ifndef Sudden_Underflow
+			denorm ? be + (Bias + (P-1) - 1 + 1) :
+#endif
+#ifdef IBM
+			1 + 4*P - 3 - bbits + ((bbits + be - 1) & 3);
+#else
+			1 + P - bbits;
+#endif
+		b2 += i;
+		s2 += i;
+		mhi = i2b(1);
+		}
+	if (m2 > 0 && s2 > 0) {
+		i = m2 < s2 ? m2 : s2;
+		b2 -= i;
+		m2 -= i;
+		s2 -= i;
+		}
+	if (b5 > 0) {
+		if (leftright) {
+			if (m5 > 0) {
+				mhi = pow5mult(mhi, m5);
+				b1 = mult(mhi, b);
+				Bfree(b);
+				b = b1;
+				}
+			if ((j = b5 - m5))
+				b = pow5mult(b, j);
+			}
+		else
+			b = pow5mult(b, b5);
+		}
+	S = i2b(1);
+	if (s5 > 0)
+		S = pow5mult(S, s5);
+
+	/* Check for special case that d is a normalized power of 2. */
+
+	spec_case = 0;
+	if ((mode < 2 || leftright)
+#ifdef Honor_FLT_ROUNDS
+			&& rounding == 1
+#endif
+				) {
+		if (!word1(d) && !(word0(d) & Bndry_mask)
+#ifndef Sudden_Underflow
+		 && word0(d) & (Exp_mask & ~Exp_msk1)
+#endif
+				) {
+			/* The special case */
+			b2 += Log2P;
+			s2 += Log2P;
+			spec_case = 1;
+			}
+		}
+
+	/* Arrange for convenient computation of quotients:
+	 * shift left if necessary so divisor has 4 leading 0 bits.
+	 *
+	 * Perhaps we should just compute leading 28 bits of S once
+	 * and for all and pass them and a shift to quorem, so it
+	 * can do shifts and ors to compute the numerator for q.
+	 */
+#ifdef Pack_32
+	if ((i = ((s5 ? 32 - hi0bits(S->x[S->wds-1]) : 1) + s2) & 0x1f))
+		i = 32 - i;
+#else
+	if ((i = ((s5 ? 32 - hi0bits(S->x[S->wds-1]) : 1) + s2) & 0xf))
+		i = 16 - i;
+#endif
+	if (i > 4) {
+		i -= 4;
+		b2 += i;
+		m2 += i;
+		s2 += i;
+		}
+	else if (i < 4) {
+		i += 28;
+		b2 += i;
+		m2 += i;
+		s2 += i;
+		}
+	if (b2 > 0)
+		b = lshift(b, b2);
+	if (s2 > 0)
+		S = lshift(S, s2);
+	if (k_check) {
+		if (cmp(b,S) < 0) {
+			k--;
+			b = multadd(b, 10, 0);	/* we botched the k estimate */
+			if (leftright)
+				mhi = multadd(mhi, 10, 0);
+			ilim = ilim1;
+			}
+		}
+	if (ilim <= 0 && (mode == 3 || mode == 5)) {
+                S = multadd(S, 5, 0);
+		if (ilim < 0 || cmp(b, S) < 0 || ((cmp(b, S) == 0) && !bias_round_up)) {
+			/* no digits, fcvt style */
+ no_digits:
+			k = -1 - ndigits;
+			goto ret;
+			}
+ one_digit:
+		*s++ = '1';
+		k++;
+		goto ret;
+		}
+	if (leftright) {
+		if (m2 > 0)
+			mhi = lshift(mhi, m2);
+
+		/* Compute mlo -- check for special case
+		 * that d is a normalized power of 2.
+		 */
+
+		mlo = mhi;
+		if (spec_case) {
+			mhi = Balloc(mhi->k);
+			Bcopy(mhi, mlo);
+			mhi = lshift(mhi, Log2P);
+			}
+
+		for(i = 1;;i++) {
+			dig = quorem(b,S) + '0';
+			/* Do we yet have the shortest decimal string
+			 * that will round to d?
+			 */
+			j = cmp(b, mlo);
+			delta = diff(S, mhi);
+			j1 = delta->sign ? 1 : cmp(b, delta);
+			Bfree(delta);
+#ifndef ROUND_BIASED
+			if (j1 == 0 && mode != 1 && !(word1(d) & 1)
+#ifdef Honor_FLT_ROUNDS
+				&& rounding >= 1
+#endif
+								   ) {
+				if (dig == '9')
+					goto round_9_up;
+				if (j > 0)
+					dig++;
+#ifdef SET_INEXACT
+				else if (!b->x[0] && b->wds <= 1)
+					inexact = 0;
+#endif
+				*s++ = dig;
+				goto ret;
+				}
+#endif
+			if (j < 0 || (j == 0 && mode != 1
+#ifndef ROUND_BIASED
+							&& !(word1(d) & 1)
+#endif
+					)) {
+				if (!b->x[0] && b->wds <= 1) {
+#ifdef SET_INEXACT
+					inexact = 0;
+#endif
+					goto accept_dig;
+					}
+#ifdef Honor_FLT_ROUNDS
+				if (mode > 1)
+				 switch(rounding) {
+				  case 0: goto accept_dig;
+				  case 2: goto keep_dig;
+				  }
+#endif /*Honor_FLT_ROUNDS*/
+				if (j1 > 0) {
+					b = lshift(b, 1);
+					j1 = cmp(b, S);
+					if ((j1 > 0 || (j1 == 0 && ((dig & 1) || bias_round_up)))
+                                            && dig++ == '9')
+						goto round_9_up;
+					}
+ accept_dig:
+				*s++ = dig;
+				goto ret;
+				}
+			if (j1 > 0) {
+#ifdef Honor_FLT_ROUNDS
+				if (!rounding)
+					goto accept_dig;
+#endif
+				if (dig == '9') { /* possible if i == 1 */
+ round_9_up:
+					*s++ = '9';
+					goto roundoff;
+					}
+				*s++ = dig + 1;
+				goto ret;
+				}
+#ifdef Honor_FLT_ROUNDS
+ keep_dig:
+#endif
+			*s++ = dig;
+			if (i == ilim)
+				break;
+			b = multadd(b, 10, 0);
+			if (mlo == mhi)
+				mlo = mhi = multadd(mhi, 10, 0);
+			else {
+				mlo = multadd(mlo, 10, 0);
+				mhi = multadd(mhi, 10, 0);
+				}
+			}
+		}
+	else
+		for(i = 1;; i++) {
+			*s++ = dig = quorem(b,S) + '0';
+			if (!b->x[0] && b->wds <= 1) {
+#ifdef SET_INEXACT
+				inexact = 0;
+#endif
+				goto ret;
+				}
+			if (i >= ilim)
+				break;
+			b = multadd(b, 10, 0);
+			}
+
+	/* Round off last digit */
+
+#ifdef Honor_FLT_ROUNDS
+	switch(rounding) {
+	  case 0: goto trimzeros;
+	  case 2: goto roundoff;
+	  }
+#endif
+	b = lshift(b, 1);
+	j = cmp(b, S);
+	if (j > 0 || (j == 0 && ((dig & 1) || bias_round_up))) {
+ roundoff:
+		while(*--s == '9')
+			if (s == s0) {
+				k++;
+				*s++ = '1';
+				goto ret;
+				}
+		++*s++;
+		}
+	else {
+/* trimzeros:  (never used) */
+		while(*--s == '0');
+		s++;
+		}
+ ret:
+	Bfree(S);
+	if (mhi) {
+		if (mlo && mlo != mhi)
+			Bfree(mlo);
+		Bfree(mhi);
+		}
+ ret1:
+#ifdef SET_INEXACT
+	if (inexact) {
+		if (!oldinexact) {
+			word0(d) = Exp_1 + (70 << Exp_shift);
+			word1(d) = 0;
+			dval(d) += 1.;
+			}
+		}
+	else if (!oldinexact)
+		clear_inexact();
+#endif
+	Bfree(b);
+	*s = 0;
+	*decpt = k + 1;
+	if (rve)
+		*rve = s;
+	return s0;
+	}
+#ifdef __cplusplus
+}
+#endif
diff --git a/src/third_party/valgrind/valgrind.h b/src/third_party/valgrind/valgrind.h
new file mode 100644
index 0000000..47f369b
--- /dev/null
+++ b/src/third_party/valgrind/valgrind.h
@@ -0,0 +1,3924 @@
+/* -*- c -*-
+   ----------------------------------------------------------------
+
+   Notice that the following BSD-style license applies to this one
+   file (valgrind.h) only.  The rest of Valgrind is licensed under the
+   terms of the GNU General Public License, version 2, unless
+   otherwise indicated.  See the COPYING file in the source
+   distribution for details.
+
+   ----------------------------------------------------------------
+
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2000-2007 Julian Seward.  All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+   1. Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+   2. The origin of this software must not be misrepresented; you must 
+      not claim that you wrote the original software.  If you use this 
+      software in a product, an acknowledgment in the product 
+      documentation would be appreciated but is not required.
+
+   3. Altered source versions must be plainly marked as such, and must
+      not be misrepresented as being the original software.
+
+   4. The name of the author may not be used to endorse or promote 
+      products derived from this software without specific prior written 
+      permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+   OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+   ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+   DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+   GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+   WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   ----------------------------------------------------------------
+
+   Notice that the above BSD-style license applies to this one file
+   (valgrind.h) only.  The entire rest of Valgrind is licensed under
+   the terms of the GNU General Public License, version 2.  See the
+   COPYING file in the source distribution for details.
+
+   ---------------------------------------------------------------- 
+*/
+
+
+/* This file is for inclusion into client (your!) code.
+
+   You can use these macros to manipulate and query Valgrind's 
+   execution inside your own programs.
+
+   The resulting executables will still run without Valgrind, just a
+   little bit more slowly than they otherwise would, but otherwise
+   unchanged.  When not running on valgrind, each client request
+   consumes very few (e.g. 7) instructions, so the resulting performance
+   loss is negligible unless you plan to execute client requests
+   millions of times per second.  Nevertheless, if that is still a
+   problem, you can compile with the NVALGRIND symbol defined (gcc
+   -DNVALGRIND) so that client requests are not even compiled in.  */
+
+#ifndef __VALGRIND_H
+#define __VALGRIND_H
+
+#include <stdarg.h>
+
+/* Nb: this file might be included in a file compiled with -ansi.  So
+   we can't use C++ style "//" comments nor the "asm" keyword (instead
+   use "__asm__"). */
+
+/* Derive some tags indicating what the target platform is.  Note
+   that in this file we're using the compiler's CPP symbols for
+   identifying architectures, which are different to the ones we use
+   within the rest of Valgrind.  Note, __powerpc__ is active for both
+   32 and 64-bit PPC, whereas __powerpc64__ is only active for the
+   latter (on Linux, that is). */
+#undef PLAT_x86_linux
+#undef PLAT_amd64_linux
+#undef PLAT_ppc32_linux
+#undef PLAT_ppc64_linux
+#undef PLAT_ppc32_aix5
+#undef PLAT_ppc64_aix5
+
+#if !defined(_AIX) && defined(__i386__)
+#  define PLAT_x86_linux 1
+#elif !defined(_AIX) && defined(__x86_64__)
+#  define PLAT_amd64_linux 1
+#elif !defined(_AIX) && defined(__powerpc__) && !defined(__powerpc64__)
+#  define PLAT_ppc32_linux 1
+#elif !defined(_AIX) && defined(__powerpc__) && defined(__powerpc64__)
+#  define PLAT_ppc64_linux 1
+#elif defined(_AIX) && defined(__64BIT__)
+#  define PLAT_ppc64_aix5 1
+#elif defined(_AIX) && !defined(__64BIT__)
+#  define PLAT_ppc32_aix5 1
+#endif
+
+
+/* If we're not compiling for our target platform, don't generate
+   any inline asms.  */
+#if !defined(PLAT_x86_linux) && !defined(PLAT_amd64_linux) \
+    && !defined(PLAT_ppc32_linux) && !defined(PLAT_ppc64_linux) \
+    && !defined(PLAT_ppc32_aix5) && !defined(PLAT_ppc64_aix5)
+#  if !defined(NVALGRIND)
+#    define NVALGRIND 1
+#  endif
+#endif
+
+
+/* ------------------------------------------------------------------ */
+/* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS.  There is nothing */
+/* in here of use to end-users -- skip to the next section.           */
+/* ------------------------------------------------------------------ */
+
+#if defined(NVALGRIND)
+
+/* Define NVALGRIND to completely remove the Valgrind magic sequence
+   from the compiled code (analogous to NDEBUG's effects on
+   assert()) */
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+   {                                                              \
+      (_zzq_rlval) = (_zzq_default);                              \
+   }
+
+#else  /* ! NVALGRIND */
+
+/* The following defines the magic code sequences which the JITter
+   spots and handles magically.  Don't look too closely at them as
+   they will rot your brain.
+
+   The assembly code sequences for all architectures is in this one
+   file.  This is because this file must be stand-alone, and we don't
+   want to have multiple files.
+
+   For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default
+   value gets put in the return slot, so that everything works when
+   this is executed not under Valgrind.  Args are passed in a memory
+   block, and so there's no intrinsic limit to the number that could
+   be passed, but it's currently five.
+   
+   The macro args are: 
+      _zzq_rlval    result lvalue
+      _zzq_default  default value (result returned when running on real CPU)
+      _zzq_request  request code
+      _zzq_arg1..5  request params
+
+   The other two macros are used to support function wrapping, and are
+   a lot simpler.  VALGRIND_GET_NR_CONTEXT returns the value of the
+   guest's NRADDR pseudo-register and whatever other information is
+   needed to safely run the call original from the wrapper: on
+   ppc64-linux, the R2 value at the divert point is also needed.  This
+   information is abstracted into a user-visible type, OrigFn.
+
+   VALGRIND_CALL_NOREDIR_* behaves the same as the following on the
+   guest, but guarantees that the branch instruction will not be
+   redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64:
+   branch-and-link-to-r11.  VALGRIND_CALL_NOREDIR is just text, not a
+   complete inline asm, since it needs to be combined with more magic
+   inline asm stuff to be useful.
+*/
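+
+/* Illustrative sketch, added for exposition: the client requests defined
+ * later in this header (for example RUNNING_ON_VALGRIND) are built on
+ * VALGRIND_DO_CLIENT_REQUEST.  Schematically, issuing a request looks
+ * roughly like this, where the second argument is the default returned
+ * when the program is running on a real CPU:
+ *
+ *    unsigned int res;
+ *    VALGRIND_DO_CLIENT_REQUEST(res, 0,
+ *                               VG_USERREQ__RUNNING_ON_VALGRIND,
+ *                               0, 0, 0, 0, 0);
+ *
+ * Under Valgrind the JITter spots the magic preamble and fills in the
+ * real reply; elsewhere res simply stays 0.
+ */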
+
+/* ------------------------- x86-linux ------------------------- */
+
+#if defined(PLAT_x86_linux)
+
+typedef
+   struct { 
+      unsigned int nraddr; /* where's the code? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "roll $3,  %%edi ; roll $13, %%edi\n\t"      \
+                     "roll $29, %%edi ; roll $19, %%edi\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+  { volatile unsigned int _zzq_args[6];                           \
+    volatile unsigned int _zzq_result;                            \
+    _zzq_args[0] = (unsigned int)(_zzq_request);                  \
+    _zzq_args[1] = (unsigned int)(_zzq_arg1);                     \
+    _zzq_args[2] = (unsigned int)(_zzq_arg2);                     \
+    _zzq_args[3] = (unsigned int)(_zzq_arg3);                     \
+    _zzq_args[4] = (unsigned int)(_zzq_arg4);                     \
+    _zzq_args[5] = (unsigned int)(_zzq_arg5);                     \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %EDX = client_request ( %EAX ) */         \
+                     "xchgl %%ebx,%%ebx"                          \
+                     : "=d" (_zzq_result)                         \
+                     : "a" (&_zzq_args[0]), "0" (_zzq_default)    \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    volatile unsigned int __addr;                                 \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %EAX = guest_NRADDR */                    \
+                     "xchgl %%ecx,%%ecx"                          \
+                     : "=a" (__addr)                              \
+                     :                                            \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+  }
+
+#define VALGRIND_CALL_NOREDIR_EAX                                 \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* call-noredir *%EAX */                     \
+                     "xchgl %%edx,%%edx\n\t"
+#endif /* PLAT_x86_linux */
+
+/* ------------------------ amd64-linux ------------------------ */
+
+#if defined(PLAT_amd64_linux)
+
+typedef
+   struct { 
+      unsigned long long int nraddr; /* where's the code? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "rolq $3,  %%rdi ; rolq $13, %%rdi\n\t"      \
+                     "rolq $61, %%rdi ; rolq $51, %%rdi\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+  { volatile unsigned long long int _zzq_args[6];                 \
+    volatile unsigned long long int _zzq_result;                  \
+    _zzq_args[0] = (unsigned long long int)(_zzq_request);        \
+    _zzq_args[1] = (unsigned long long int)(_zzq_arg1);           \
+    _zzq_args[2] = (unsigned long long int)(_zzq_arg2);           \
+    _zzq_args[3] = (unsigned long long int)(_zzq_arg3);           \
+    _zzq_args[4] = (unsigned long long int)(_zzq_arg4);           \
+    _zzq_args[5] = (unsigned long long int)(_zzq_arg5);           \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %RDX = client_request ( %RAX ) */         \
+                     "xchgq %%rbx,%%rbx"                          \
+                     : "=d" (_zzq_result)                         \
+                     : "a" (&_zzq_args[0]), "0" (_zzq_default)    \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    volatile unsigned long long int __addr;                       \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %RAX = guest_NRADDR */                    \
+                     "xchgq %%rcx,%%rcx"                          \
+                     : "=a" (__addr)                              \
+                     :                                            \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+  }
+
+#define VALGRIND_CALL_NOREDIR_RAX                                 \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* call-noredir *%RAX */                     \
+                     "xchgq %%rdx,%%rdx\n\t"
+#endif /* PLAT_amd64_linux */
+
+/* ------------------------ ppc32-linux ------------------------ */
+
+#if defined(PLAT_ppc32_linux)
+
+typedef
+   struct { 
+      unsigned int nraddr; /* where's the code? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "rlwinm 0,0,3,0,0  ; rlwinm 0,0,13,0,0\n\t"  \
+                     "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+                                                                  \
+  {          unsigned int  _zzq_args[6];                          \
+             unsigned int  _zzq_result;                           \
+             unsigned int* _zzq_ptr;                              \
+    _zzq_args[0] = (unsigned int)(_zzq_request);                  \
+    _zzq_args[1] = (unsigned int)(_zzq_arg1);                     \
+    _zzq_args[2] = (unsigned int)(_zzq_arg2);                     \
+    _zzq_args[3] = (unsigned int)(_zzq_arg3);                     \
+    _zzq_args[4] = (unsigned int)(_zzq_arg4);                     \
+    _zzq_args[5] = (unsigned int)(_zzq_arg5);                     \
+    _zzq_ptr = _zzq_args;                                         \
+    __asm__ volatile("mr 3,%1\n\t" /*default*/                    \
+                     "mr 4,%2\n\t" /*ptr*/                        \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = client_request ( %R4 ) */           \
+                     "or 1,1,1\n\t"                               \
+                     "mr %0,3"     /*result*/                     \
+                     : "=b" (_zzq_result)                         \
+                     : "b" (_zzq_default), "b" (_zzq_ptr)         \
+                     : "cc", "memory", "r3", "r4");               \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    unsigned int __addr;                                          \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR */                     \
+                     "or 2,2,2\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (__addr)                              \
+                     :                                            \
+                     : "cc", "memory", "r3"                       \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+  }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                   \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* branch-and-link-to-noredir *%R11 */       \
+                     "or 3,3,3\n\t"
+#endif /* PLAT_ppc32_linux */
+
+/* ------------------------ ppc64-linux ------------------------ */
+
+#if defined(PLAT_ppc64_linux)
+
+typedef
+   struct { 
+      unsigned long long int nraddr; /* where's the code? */
+      unsigned long long int r2;  /* what tocptr do we need? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "rotldi 0,0,3  ; rotldi 0,0,13\n\t"          \
+                     "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+                                                                  \
+  {          unsigned long long int  _zzq_args[6];                \
+    register unsigned long long int  _zzq_result __asm__("r3");   \
+    register unsigned long long int* _zzq_ptr __asm__("r4");      \
+    _zzq_args[0] = (unsigned long long int)(_zzq_request);        \
+    _zzq_args[1] = (unsigned long long int)(_zzq_arg1);           \
+    _zzq_args[2] = (unsigned long long int)(_zzq_arg2);           \
+    _zzq_args[3] = (unsigned long long int)(_zzq_arg3);           \
+    _zzq_args[4] = (unsigned long long int)(_zzq_arg4);           \
+    _zzq_args[5] = (unsigned long long int)(_zzq_arg5);           \
+    _zzq_ptr = _zzq_args;                                         \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = client_request ( %R4 ) */           \
+                     "or 1,1,1"                                   \
+                     : "=r" (_zzq_result)                         \
+                     : "0" (_zzq_default), "r" (_zzq_ptr)         \
+                     : "cc", "memory");                           \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    register unsigned long long int __addr __asm__("r3");         \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR */                     \
+                     "or 2,2,2"                                   \
+                     : "=r" (__addr)                              \
+                     :                                            \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR_GPR2 */                \
+                     "or 4,4,4"                                   \
+                     : "=r" (__addr)                              \
+                     :                                            \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_orig->r2 = __addr;                                       \
+  }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                   \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* branch-and-link-to-noredir *%R11 */       \
+                     "or 3,3,3\n\t"
+
+#endif /* PLAT_ppc64_linux */
+
+/* ------------------------ ppc32-aix5 ------------------------- */
+
+#if defined(PLAT_ppc32_aix5)
+
+typedef
+   struct { 
+      unsigned int nraddr; /* where's the code? */
+      unsigned int r2;  /* what tocptr do we need? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "rlwinm 0,0,3,0,0  ; rlwinm 0,0,13,0,0\n\t"  \
+                     "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+                                                                  \
+  {          unsigned int  _zzq_args[7];                          \
+    register unsigned int  _zzq_result;                           \
+    register unsigned int* _zzq_ptr;                              \
+    _zzq_args[0] = (unsigned int)(_zzq_request);                  \
+    _zzq_args[1] = (unsigned int)(_zzq_arg1);                     \
+    _zzq_args[2] = (unsigned int)(_zzq_arg2);                     \
+    _zzq_args[3] = (unsigned int)(_zzq_arg3);                     \
+    _zzq_args[4] = (unsigned int)(_zzq_arg4);                     \
+    _zzq_args[5] = (unsigned int)(_zzq_arg5);                     \
+    _zzq_args[6] = (unsigned int)(_zzq_default);                  \
+    _zzq_ptr = _zzq_args;                                         \
+    __asm__ volatile("mr 4,%1\n\t"                                \
+                     "lwz 3, 24(4)\n\t"                           \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = client_request ( %R4 ) */           \
+                     "or 1,1,1\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (_zzq_result)                         \
+                     : "b" (_zzq_ptr)                             \
+                     : "r3", "r4", "cc", "memory");               \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    register unsigned int __addr;                                 \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR */                     \
+                     "or 2,2,2\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (__addr)                              \
+                     :                                            \
+                     : "r3", "cc", "memory"                       \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR_GPR2 */                \
+                     "or 4,4,4\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (__addr)                              \
+                     :                                            \
+                     : "r3", "cc", "memory"                       \
+                    );                                            \
+    _zzq_orig->r2 = __addr;                                       \
+  }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                   \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* branch-and-link-to-noredir *%R11 */       \
+                     "or 3,3,3\n\t"
+
+#endif /* PLAT_ppc32_aix5 */
+
+/* ------------------------ ppc64-aix5 ------------------------- */
+
+#if defined(PLAT_ppc64_aix5)
+
+typedef
+   struct { 
+      unsigned long long int nraddr; /* where's the code? */
+      unsigned long long int r2;  /* what tocptr do we need? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "rotldi 0,0,3  ; rotldi 0,0,13\n\t"          \
+                     "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+                                                                  \
+  {          unsigned long long int  _zzq_args[7];                \
+    register unsigned long long int  _zzq_result;                 \
+    register unsigned long long int* _zzq_ptr;                    \
+    _zzq_args[0] = (unsigned int long long)(_zzq_request);        \
+    _zzq_args[1] = (unsigned int long long)(_zzq_arg1);           \
+    _zzq_args[2] = (unsigned int long long)(_zzq_arg2);           \
+    _zzq_args[3] = (unsigned int long long)(_zzq_arg3);           \
+    _zzq_args[4] = (unsigned int long long)(_zzq_arg4);           \
+    _zzq_args[5] = (unsigned int long long)(_zzq_arg5);           \
+    _zzq_args[6] = (unsigned int long long)(_zzq_default);        \
+    _zzq_ptr = _zzq_args;                                         \
+    __asm__ volatile("mr 4,%1\n\t"                                \
+                     "ld 3, 48(4)\n\t"                            \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = client_request ( %R4 ) */           \
+                     "or 1,1,1\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (_zzq_result)                         \
+                     : "b" (_zzq_ptr)                             \
+                     : "r3", "r4", "cc", "memory");               \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    register unsigned long long int __addr;                       \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR */                     \
+                     "or 2,2,2\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (__addr)                              \
+                     :                                            \
+                     : "r3", "cc", "memory"                       \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR_GPR2 */                \
+                     "or 4,4,4\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (__addr)                              \
+                     :                                            \
+                     : "r3", "cc", "memory"                       \
+                    );                                            \
+    _zzq_orig->r2 = __addr;                                       \
+  }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                   \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* branch-and-link-to-noredir *%R11 */       \
+                     "or 3,3,3\n\t"
+
+#endif /* PLAT_ppc64_aix5 */
+
+/* Insert assembly code for other platforms here... */
+
+#endif /* NVALGRIND */
+
+
+/* ------------------------------------------------------------------ */
+/* PLATFORM SPECIFICS for FUNCTION WRAPPING.  This is all very        */
+/* ugly.  It's the least-worst tradeoff I can think of.               */
+/* ------------------------------------------------------------------ */
+
+/* This section defines magic (a.k.a appalling-hack) macros for doing
+   guaranteed-no-redirection macros, so as to get from function
+   wrappers to the functions they are wrapping.  The whole point is to
+   construct standard call sequences, but to do the call itself with a
+   special no-redirect call pseudo-instruction that the JIT
+   understands and handles specially.  This section is long and
+   repetitious, and I can't see a way to make it shorter.
+
+   The naming scheme is as follows:
+
+      CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc}
+
+   'W' stands for "word" and 'v' for "void".  Hence there are
+   different macros for calling arity 0, 1, 2, 3, 4, etc, functions,
+   and for each, the possibility of returning a word-typed result, or
+   no result.
+*/
+
+/* Use these to write the name of your wrapper.  NOTE: duplicates
+   VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. */
+
+#define I_WRAP_SONAME_FNNAME_ZU(soname,fnname)                    \
+   _vgwZU_##soname##_##fnname
+
+#define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname)                    \
+   _vgwZZ_##soname##_##fnname
+
+/* Use this macro from within a wrapper function to collect the
+   context (address and possibly other info) of the original function.
+   Once you have that you can then use it in one of the CALL_FN_
+   macros.  The type of the argument _lval is OrigFn. */
+#define VALGRIND_GET_ORIG_FN(_lval)  VALGRIND_GET_NR_CONTEXT(_lval)
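+
+/* Illustrative sketch, added for exposition: a hypothetical wrapper for
+ * a function  int foo(int, int)  (using the NONE soname token that
+ * Valgrind's own examples use for objects without a soname) could be
+ * written roughly as
+ *
+ *    int I_WRAP_SONAME_FNNAME_ZU(NONE, foo)(int x, int y)
+ *    {
+ *       int    result;
+ *       OrigFn fn;
+ *       VALGRIND_GET_ORIG_FN(fn);
+ *       CALL_FN_W_WW(result, fn, x, y);
+ *       return result;
+ *    }
+ *
+ * relying on the per-platform CALL_FN_W_WW macros defined below.
+ */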
+
+/* Derivatives of the main macros below, for calling functions
+   returning void. */
+
+#define CALL_FN_v_v(fnptr)                                        \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_v(_junk,fnptr); } while (0)
+
+#define CALL_FN_v_W(fnptr, arg1)                                  \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_W(_junk,fnptr,arg1); } while (0)
+
+#define CALL_FN_v_WW(fnptr, arg1,arg2)                            \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0)
+
+#define CALL_FN_v_WWW(fnptr, arg1,arg2,arg3)                      \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0)
+
+/* ------------------------- x86-linux ------------------------- */
+
+#if defined(PLAT_x86_linux)
+
+/* These regs are trashed by the hidden call.  No need to mention eax
+   as gcc can already see that, plus causes gcc to bomb. */
+#define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx"
+
+/* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned
+   long) == 4. */
+
+#define CALL_FN_W_v(lval, orig)                                   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[1];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      __asm__ volatile(                                           \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1)                             \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[2];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      __asm__ volatile(                                           \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $4, %%esp\n"                                       \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      __asm__ volatile(                                           \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $8, %%esp\n"                                       \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[4];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      __asm__ volatile(                                           \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $12, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[5];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      __asm__ volatile(                                           \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $16, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[6];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      __asm__ volatile(                                           \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $20, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[7];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      __asm__ volatile(                                           \
+         "pushl 24(%%eax)\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $24, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7)                            \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[8];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      __asm__ volatile(                                           \
+         "pushl 28(%%eax)\n\t"                                    \
+         "pushl 24(%%eax)\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $28, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[9];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      __asm__ volatile(                                           \
+         "pushl 32(%%eax)\n\t"                                    \
+         "pushl 28(%%eax)\n\t"                                    \
+         "pushl 24(%%eax)\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $32, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8,arg9)                  \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[10];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      __asm__ volatile(                                           \
+         "pushl 36(%%eax)\n\t"                                    \
+         "pushl 32(%%eax)\n\t"                                    \
+         "pushl 28(%%eax)\n\t"                                    \
+         "pushl 24(%%eax)\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $36, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[11];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      __asm__ volatile(                                           \
+         "pushl 40(%%eax)\n\t"                                    \
+         "pushl 36(%%eax)\n\t"                                    \
+         "pushl 32(%%eax)\n\t"                                    \
+         "pushl 28(%%eax)\n\t"                                    \
+         "pushl 24(%%eax)\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $40, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,       \
+                                  arg6,arg7,arg8,arg9,arg10,      \
+                                  arg11)                          \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[12];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      _argvec[11] = (unsigned long)(arg11);                       \
+      __asm__ volatile(                                           \
+         "pushl 44(%%eax)\n\t"                                    \
+         "pushl 40(%%eax)\n\t"                                    \
+         "pushl 36(%%eax)\n\t"                                    \
+         "pushl 32(%%eax)\n\t"                                    \
+         "pushl 28(%%eax)\n\t"                                    \
+         "pushl 24(%%eax)\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $44, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,       \
+                                  arg6,arg7,arg8,arg9,arg10,      \
+                                  arg11,arg12)                    \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[13];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      _argvec[11] = (unsigned long)(arg11);                       \
+      _argvec[12] = (unsigned long)(arg12);                       \
+      __asm__ volatile(                                           \
+         "pushl 48(%%eax)\n\t"                                    \
+         "pushl 44(%%eax)\n\t"                                    \
+         "pushl 40(%%eax)\n\t"                                    \
+         "pushl 36(%%eax)\n\t"                                    \
+         "pushl 32(%%eax)\n\t"                                    \
+         "pushl 28(%%eax)\n\t"                                    \
+         "pushl 24(%%eax)\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $48, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#endif /* PLAT_x86_linux */
+
+/* ------------------------ amd64-linux ------------------------ */
+
+#if defined(PLAT_amd64_linux)
+
+/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi",       \
+                            "rdi", "r8", "r9", "r10", "r11"
+
+/* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned
+   long) == 8. */
+
+/* NB 9 Sept 07.  There is a nasty kludge here in all these CALL_FN_
+   macros.  In order not to trash the stack redzone, we need to drop
+   %rsp by 128 before the hidden call, and restore it afterwards.
+   The nastiness is that it is only by luck that the stack still
+   appears to be unwindable during the hidden call, since the
+   behaviour of any routine using this macro then no longer matches
+   what the CFI data says.  Sigh.
+
+   Why is this important?  Imagine that a wrapper has a
+   stack-allocated local and passes a pointer to it to the hidden
+   call.  Because gcc does not know about the hidden call, it may
+   allocate that local in the redzone.  The hidden call may then
+   trash it before the wrapper comes to use it.  So we must step
+   clear of the redzone, for the duration of the hidden call, to
+   make it safe.
+
+   Probably the same problem afflicts the other redzone-style ABIs
+   too (ppc64-linux, ppc32-aix5, ppc64-aix5); but for those, the
+   stack is self-describing (none of this CFI nonsense), so at least
+   messing with the stack pointer does not risk making the stack
+   non-unwindable. */
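+
+/* A purely illustrative sketch of the hazard just described ('bar'
+   is a hypothetical wrapped function; this is not part of the
+   header).  Because gcc cannot see the hidden call, it may place
+   'local' in the redzone below %rsp, where the callee could trash
+   it if %rsp were not first dropped by 128:
+
+      int I_WRAP_SONAME_FNNAME_ZU(NONE, bar) ( int* p )
+      {
+         OrigFn fn;
+         int    result;
+         int    local = *p;           // may land in the redzone
+         VALGRIND_GET_ORIG_FN(fn);
+         CALL_FN_W_W(result, fn, &local);
+         *p = local;
+         return result;
+      }
+*/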
+
+#define CALL_FN_W_v(lval, orig)                                   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[1];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      __asm__ volatile(                                           \
+         "subq $128,%%rsp\n\t"                                    \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $128,%%rsp\n\t"                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1)                             \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[2];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      __asm__ volatile(                                           \
+         "subq $128,%%rsp\n\t"                                    \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $128,%%rsp\n\t"                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      __asm__ volatile(                                           \
+         "subq $128,%%rsp\n\t"                                    \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $128,%%rsp\n\t"                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[4];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      __asm__ volatile(                                           \
+         "subq $128,%%rsp\n\t"                                    \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $128,%%rsp\n\t"                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[5];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      __asm__ volatile(                                           \
+         "subq $128,%%rsp\n\t"                                    \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $128,%%rsp\n\t"                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[6];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      __asm__ volatile(                                           \
+         "subq $128,%%rsp\n\t"                                    \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $128,%%rsp\n\t"                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[7];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      __asm__ volatile(                                           \
+         "subq $128,%%rsp\n\t"                                    \
+         "movq 48(%%rax), %%r9\n\t"                               \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7)                            \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[8];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      __asm__ volatile(                                           \
+         "subq $128,%%rsp\n\t"                                    \
+         "pushq 56(%%rax)\n\t"                                    \
+         "movq 48(%%rax), %%r9\n\t"                               \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $8, %%rsp\n"                                       \
+         "addq $128,%%rsp\n\t"                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[9];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      __asm__ volatile(                                           \
+         "subq $128,%%rsp\n\t"                                    \
+         "pushq 64(%%rax)\n\t"                                    \
+         "pushq 56(%%rax)\n\t"                                    \
+         "movq 48(%%rax), %%r9\n\t"                               \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $16, %%rsp\n"                                      \
+         "addq $128,%%rsp\n\t"                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8,arg9)                  \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[10];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      __asm__ volatile(                                           \
+         "subq $128,%%rsp\n\t"                                    \
+         "pushq 72(%%rax)\n\t"                                    \
+         "pushq 64(%%rax)\n\t"                                    \
+         "pushq 56(%%rax)\n\t"                                    \
+         "movq 48(%%rax), %%r9\n\t"                               \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $24, %%rsp\n"                                      \
+         "addq $128,%%rsp\n\t"                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[11];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      __asm__ volatile(                                           \
+         "subq $128,%%rsp\n\t"                                    \
+         "pushq 80(%%rax)\n\t"                                    \
+         "pushq 72(%%rax)\n\t"                                    \
+         "pushq 64(%%rax)\n\t"                                    \
+         "pushq 56(%%rax)\n\t"                                    \
+         "movq 48(%%rax), %%r9\n\t"                               \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $32, %%rsp\n"                                      \
+         "addq $128,%%rsp\n\t"                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10,arg11)     \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[12];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      _argvec[11] = (unsigned long)(arg11);                       \
+      __asm__ volatile(                                           \
+         "subq $128,%%rsp\n\t"                                    \
+         "pushq 88(%%rax)\n\t"                                    \
+         "pushq 80(%%rax)\n\t"                                    \
+         "pushq 72(%%rax)\n\t"                                    \
+         "pushq 64(%%rax)\n\t"                                    \
+         "pushq 56(%%rax)\n\t"                                    \
+         "movq 48(%%rax), %%r9\n\t"                               \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $40, %%rsp\n"                                      \
+         "addq $128,%%rsp\n\t"                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                arg7,arg8,arg9,arg10,arg11,arg12) \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[13];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      _argvec[11] = (unsigned long)(arg11);                       \
+      _argvec[12] = (unsigned long)(arg12);                       \
+      __asm__ volatile(                                           \
+         "subq $128,%%rsp\n\t"                                    \
+         "pushq 96(%%rax)\n\t"                                    \
+         "pushq 88(%%rax)\n\t"                                    \
+         "pushq 80(%%rax)\n\t"                                    \
+         "pushq 72(%%rax)\n\t"                                    \
+         "pushq 64(%%rax)\n\t"                                    \
+         "pushq 56(%%rax)\n\t"                                    \
+         "movq 48(%%rax), %%r9\n\t"                               \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $48, %%rsp\n"                                      \
+         "addq $128,%%rsp\n\t"                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#endif /* PLAT_amd64_linux */
+
+/* ------------------------ ppc32-linux ------------------------ */
+
+#if defined(PLAT_ppc32_linux)
+
+/* This is useful for finding out about the on-stack stuff:
+
+   extern int f9  ( int,int,int,int,int,int,int,int,int );
+   extern int f10 ( int,int,int,int,int,int,int,int,int,int );
+   extern int f11 ( int,int,int,int,int,int,int,int,int,int,int );
+   extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int );
+
+   int g9 ( void ) {
+      return f9(11,22,33,44,55,66,77,88,99);
+   }
+   int g10 ( void ) {
+      return f10(11,22,33,44,55,66,77,88,99,110);
+   }
+   int g11 ( void ) {
+      return f11(11,22,33,44,55,66,77,88,99,110,121);
+   }
+   int g12 ( void ) {
+      return f12(11,22,33,44,55,66,77,88,99,110,121,132);
+   }
+*/
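+
+/* (Compiling g9..g12 above with -S and inspecting the generated
+   assembly shows where the arguments beyond the eight register
+   arguments land on the stack; that is the layout the larger
+   CALL_FN_W_nW macros for this platform have to reproduce.) */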
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS                                       \
+   "lr", "ctr", "xer",                                            \
+   "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7",        \
+   "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",   \
+   "r11", "r12", "r13"
+
+/* These CALL_FN_ macros assume that on ppc32-linux, 
+   sizeof(unsigned long) == 4. */
+
+#define CALL_FN_W_v(lval, orig)                                   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[1];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1)                             \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[2];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[4];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[5];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[6];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[7];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      _argvec[6] = (unsigned long)arg6;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 8,24(11)\n\t"                                       \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7)                            \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[8];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      _argvec[6] = (unsigned long)arg6;                           \
+      _argvec[7] = (unsigned long)arg7;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 8,24(11)\n\t"                                       \
+         "lwz 9,28(11)\n\t"                                       \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[9];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      _argvec[6] = (unsigned long)arg6;                           \
+      _argvec[7] = (unsigned long)arg7;                           \
+      _argvec[8] = (unsigned long)arg8;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 8,24(11)\n\t"                                       \
+         "lwz 9,28(11)\n\t"                                       \
+         "lwz 10,32(11)\n\t" /* arg8->r10 */                      \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8,arg9)                  \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[10];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      _argvec[6] = (unsigned long)arg6;                           \
+      _argvec[7] = (unsigned long)arg7;                           \
+      _argvec[8] = (unsigned long)arg8;                           \
+      _argvec[9] = (unsigned long)arg9;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "addi 1,1,-16\n\t"                                       \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,8(1)\n\t"                                         \
+         /* args1-8 */                                            \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 8,24(11)\n\t"                                       \
+         "lwz 9,28(11)\n\t"                                       \
+         "lwz 10,32(11)\n\t" /* arg8->r10 */                      \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "addi 1,1,16\n\t"                                        \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[11];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      _argvec[6] = (unsigned long)arg6;                           \
+      _argvec[7] = (unsigned long)arg7;                           \
+      _argvec[8] = (unsigned long)arg8;                           \
+      _argvec[9] = (unsigned long)arg9;                           \
+      _argvec[10] = (unsigned long)arg10;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "addi 1,1,-16\n\t"                                       \
+         /* arg10 */                                              \
+         "lwz 3,40(11)\n\t"                                       \
+         "stw 3,12(1)\n\t"                                        \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,8(1)\n\t"                                         \
+         /* args1-8 */                                            \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 8,24(11)\n\t"                                       \
+         "lwz 9,28(11)\n\t"                                       \
+         "lwz 10,32(11)\n\t" /* arg8->r10 */                      \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "addi 1,1,16\n\t"                                        \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10,arg11)     \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[12];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      _argvec[6] = (unsigned long)arg6;                           \
+      _argvec[7] = (unsigned long)arg7;                           \
+      _argvec[8] = (unsigned long)arg8;                           \
+      _argvec[9] = (unsigned long)arg9;                           \
+      _argvec[10] = (unsigned long)arg10;                         \
+      _argvec[11] = (unsigned long)arg11;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "addi 1,1,-32\n\t"                                       \
+         /* arg11 */                                              \
+         "lwz 3,44(11)\n\t"                                       \
+         "stw 3,16(1)\n\t"                                        \
+         /* arg10 */                                              \
+         "lwz 3,40(11)\n\t"                                       \
+         "stw 3,12(1)\n\t"                                        \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,8(1)\n\t"                                         \
+         /* args1-8 */                                            \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 8,24(11)\n\t"                                       \
+         "lwz 9,28(11)\n\t"                                       \
+         "lwz 10,32(11)\n\t" /* arg8->r10 */                      \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "addi 1,1,32\n\t"                                        \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                arg7,arg8,arg9,arg10,arg11,arg12) \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[13];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      _argvec[6] = (unsigned long)arg6;                           \
+      _argvec[7] = (unsigned long)arg7;                           \
+      _argvec[8] = (unsigned long)arg8;                           \
+      _argvec[9] = (unsigned long)arg9;                           \
+      _argvec[10] = (unsigned long)arg10;                         \
+      _argvec[11] = (unsigned long)arg11;                         \
+      _argvec[12] = (unsigned long)arg12;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "addi 1,1,-32\n\t"                                       \
+         /* arg12 */                                              \
+         "lwz 3,48(11)\n\t"                                       \
+         "stw 3,20(1)\n\t"                                        \
+         /* arg11 */                                              \
+         "lwz 3,44(11)\n\t"                                       \
+         "stw 3,16(1)\n\t"                                        \
+         /* arg10 */                                              \
+         "lwz 3,40(11)\n\t"                                       \
+         "stw 3,12(1)\n\t"                                        \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,8(1)\n\t"                                         \
+         /* args1-8 */                                            \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 8,24(11)\n\t"                                       \
+         "lwz 9,28(11)\n\t"                                       \
+         "lwz 10,32(11)\n\t" /* arg8->r10 */                      \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "addi 1,1,32\n\t"                                        \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#endif /* PLAT_ppc32_linux */
+
+/* ------------------------ ppc64-linux ------------------------ */
+
+#if defined(PLAT_ppc64_linux)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS                                       \
+   "lr", "ctr", "xer",                                            \
+   "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7",        \
+   "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",   \
+   "r11", "r12", "r13"
+
+/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned
+   long) == 8. */
+
+#define CALL_FN_W_v(lval, orig)                                   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+0];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1] = (unsigned long)_orig.r2;                       \
+      _argvec[2] = (unsigned long)_orig.nraddr;                   \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1)                             \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+1];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+2];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+3];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+4];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+5];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+6];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7)                            \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+7];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+8];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8,arg9)                  \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+9];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "addi 1,1,-128\n\t"  /* expand stack frame */            \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)\n\t" /* restore tocptr */                  \
+         "addi 1,1,128"     /* restore frame */                   \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+10];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "addi 1,1,-128\n\t"  /* expand stack frame */            \
+         /* arg10 */                                              \
+         "ld  3,80(11)\n\t"                                       \
+         "std 3,120(1)\n\t"                                       \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)\n\t" /* restore tocptr */                  \
+         "addi 1,1,128"     /* restore frame */                   \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10,arg11)     \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+11];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      _argvec[2+11] = (unsigned long)arg11;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "addi 1,1,-144\n\t"  /* expand stack frame */            \
+         /* arg11 */                                              \
+         "ld  3,88(11)\n\t"                                       \
+         "std 3,128(1)\n\t"                                       \
+         /* arg10 */                                              \
+         "ld  3,80(11)\n\t"                                       \
+         "std 3,120(1)\n\t"                                       \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)\n\t" /* restore tocptr */                  \
+         "addi 1,1,144"     /* restore frame */                   \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                arg7,arg8,arg9,arg10,arg11,arg12) \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+12];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      _argvec[2+11] = (unsigned long)arg11;                       \
+      _argvec[2+12] = (unsigned long)arg12;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "addi 1,1,-144\n\t"  /* expand stack frame */            \
+         /* arg12 */                                              \
+         "ld  3,96(11)\n\t"                                       \
+         "std 3,136(1)\n\t"                                       \
+         /* arg11 */                                              \
+         "ld  3,88(11)\n\t"                                       \
+         "std 3,128(1)\n\t"                                       \
+         /* arg10 */                                              \
+         "ld  3,80(11)\n\t"                                       \
+         "std 3,120(1)\n\t"                                       \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)\n\t" /* restore tocptr */                  \
+         "addi 1,1,144"     /* restore frame */                   \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#endif /* PLAT_ppc64_linux */
+
+/* ------------------------ ppc32-aix5 ------------------------- */
+
+#if defined(PLAT_ppc32_aix5)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS                                       \
+   "lr", "ctr", "xer",                                            \
+   "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7",        \
+   "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",   \
+   "r11", "r12", "r13"
+
+/* Expand the stack frame, copying enough info that unwinding
+   still works.  Trashes r3. */
+
+#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr)                      \
+         "addi 1,1,-" #_n_fr "\n\t"                               \
+         "lwz  3," #_n_fr "(1)\n\t"                               \
+         "stw  3,0(1)\n\t"
+
+#define VG_CONTRACT_FRAME_BY(_n_fr)                               \
+         "addi 1,1," #_n_fr "\n\t"
+
+/* These CALL_FN_ macros assume that on ppc32-aix5, sizeof(unsigned
+   long) == 4. */
+
+#define CALL_FN_W_v(lval, orig)                                   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+0];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1] = (unsigned long)_orig.r2;                       \
+      _argvec[2] = (unsigned long)_orig.nraddr;                   \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1)                             \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+1];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+2];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+3];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+4];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+5];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t" /* arg2->r4 */                       \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+6];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7)                            \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+7];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
+         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+8];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
+         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
+         "lwz 10, 32(11)\n\t" /* arg8->r10 */                     \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8,arg9)                  \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+9];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(64)                        \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,56(1)\n\t"                                        \
+         /* args1-8 */                                            \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
+         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
+         "lwz 10, 32(11)\n\t" /* arg8->r10 */                     \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(64)                                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+10];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(64)                        \
+         /* arg10 */                                              \
+         "lwz 3,40(11)\n\t"                                       \
+         "stw 3,60(1)\n\t"                                        \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,56(1)\n\t"                                        \
+         /* args1-8 */                                            \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
+         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
+         "lwz 10, 32(11)\n\t" /* arg8->r10 */                     \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(64)                                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10,arg11)     \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+11];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      _argvec[2+11] = (unsigned long)arg11;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(72)                        \
+         /* arg11 */                                              \
+         "lwz 3,44(11)\n\t"                                       \
+         "stw 3,64(1)\n\t"                                        \
+         /* arg10 */                                              \
+         "lwz 3,40(11)\n\t"                                       \
+         "stw 3,60(1)\n\t"                                        \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,56(1)\n\t"                                        \
+         /* args1-8 */                                            \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
+         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
+         "lwz 10, 32(11)\n\t" /* arg8->r10 */                     \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(72)                                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                arg7,arg8,arg9,arg10,arg11,arg12) \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+12];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      _argvec[2+11] = (unsigned long)arg11;                       \
+      _argvec[2+12] = (unsigned long)arg12;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(72)                        \
+         /* arg12 */                                              \
+         "lwz 3,48(11)\n\t"                                       \
+         "stw 3,68(1)\n\t"                                        \
+         /* arg11 */                                              \
+         "lwz 3,44(11)\n\t"                                       \
+         "stw 3,64(1)\n\t"                                        \
+         /* arg10 */                                              \
+         "lwz 3,40(11)\n\t"                                       \
+         "stw 3,60(1)\n\t"                                        \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,56(1)\n\t"                                        \
+         /* args1-8 */                                            \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
+         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
+         "lwz 10, 32(11)\n\t" /* arg8->r10 */                     \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(72)                                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#endif /* PLAT_ppc32_aix5 */
+
+/* ------------------------ ppc64-aix5 ------------------------- */
+
+#if defined(PLAT_ppc64_aix5)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS                                       \
+   "lr", "ctr", "xer",                                            \
+   "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7",        \
+   "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",   \
+   "r11", "r12", "r13"
+
+/* Expand the stack frame, copying enough info that unwinding
+   still works.  Trashes r3. */
+
+#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr)                      \
+         "addi 1,1,-" #_n_fr "\n\t"                               \
+         "ld   3," #_n_fr "(1)\n\t"                               \
+         "std  3,0(1)\n\t"
+
+#define VG_CONTRACT_FRAME_BY(_n_fr)                               \
+         "addi 1,1," #_n_fr "\n\t"
+
+/* These CALL_FN_ macros assume that on ppc64-aix5, sizeof(unsigned
+   long) == 8. */
+
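+/* Layout note (derived from the code below): %1/r11 points at
+   &_argvec[2], so the saved caller tocptr sits at -16(11), the
+   callee's tocptr at -8(11), the target address at 0(11), and argN at
+   N*8(11), since each _argvec slot is 8 bytes on ppc64-aix5. */
+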
+#define CALL_FN_W_v(lval, orig)                                   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+0];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1] = (unsigned long)_orig.r2;                       \
+      _argvec[2] = (unsigned long)_orig.nraddr;                   \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1)                             \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+1];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+2];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+3];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+4];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+5];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+6];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7)                            \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+7];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+8];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8,arg9)                  \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+9];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(128)                       \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(128)                                \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+10];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(128)                       \
+         /* arg10 */                                              \
+         "ld  3,80(11)\n\t"                                       \
+         "std 3,120(1)\n\t"                                       \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(128)                                \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10,arg11)     \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+11];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      _argvec[2+11] = (unsigned long)arg11;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(144)                       \
+         /* arg11 */                                              \
+         "ld  3,88(11)\n\t"                                       \
+         "std 3,128(1)\n\t"                                       \
+         /* arg10 */                                              \
+         "ld  3,80(11)\n\t"                                       \
+         "std 3,120(1)\n\t"                                       \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(144)                                \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                arg7,arg8,arg9,arg10,arg11,arg12) \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+12];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      _argvec[2+11] = (unsigned long)arg11;                       \
+      _argvec[2+12] = (unsigned long)arg12;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(144)                       \
+         /* arg12 */                                              \
+         "ld  3,96(11)\n\t"                                       \
+         "std 3,136(1)\n\t"                                       \
+         /* arg11 */                                              \
+         "ld  3,88(11)\n\t"                                       \
+         "std 3,128(1)\n\t"                                       \
+         /* arg10 */                                              \
+         "ld  3,80(11)\n\t"                                       \
+         "std 3,120(1)\n\t"                                       \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(144)                                \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#endif /* PLAT_ppc64_aix5 */
+
+
+/* ------------------------------------------------------------------ */
+/* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS.               */
+/*                                                                    */
+/* ------------------------------------------------------------------ */
+
+/* Some request codes.  There are many more of these, but most are not
+   exposed to end-user view.  These are the public ones, all of the
+   form 0x1000 + small_number.
+
+   Core ones are in the range 0x00000000--0x0000ffff.  The non-public
+   ones start at 0x2000.
+*/
+
+/* These macros are used by tools -- they must be public, but don't
+   embed them into other programs. */
+#define VG_USERREQ_TOOL_BASE(a,b) \
+   ((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16))
+#define VG_IS_TOOL_USERREQ(a, b, v) \
+   (VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000))
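+
+/* Illustrative sketch (not part of the original header): a tool identified
+   by the two-character code ('M','C') could derive its private request
+   numbers from the base, e.g.
+
+      #define MY_TOOL_REQ_DO_SOMETHING  (VG_USERREQ_TOOL_BASE('M','C') + 1)
+
+   and then test an incoming request value v with
+   VG_IS_TOOL_USERREQ('M','C', v).  MY_TOOL_REQ_DO_SOMETHING is a
+   hypothetical name. */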
+
+/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !! 
+   This enum comprises an ABI exported by Valgrind to programs
+   which use client requests.  DO NOT CHANGE THE ORDER OF THESE
+   ENTRIES, NOR DELETE ANY -- add new ones at the end. */
+typedef
+   enum { VG_USERREQ__RUNNING_ON_VALGRIND  = 0x1001,
+          VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002,
+
+          /* These allow any function to be called from the simulated
+             CPU but run on the real CPU.  Nb: the first arg passed to
+             the function is always the ThreadId of the running
+             thread!  So CLIENT_CALL0 actually requires a 1 arg
+             function, etc. */
+          VG_USERREQ__CLIENT_CALL0 = 0x1101,
+          VG_USERREQ__CLIENT_CALL1 = 0x1102,
+          VG_USERREQ__CLIENT_CALL2 = 0x1103,
+          VG_USERREQ__CLIENT_CALL3 = 0x1104,
+
+          /* Can be useful in regression testing suites -- eg. can
+             send Valgrind's output to /dev/null and still count
+             errors. */
+          VG_USERREQ__COUNT_ERRORS = 0x1201,
+
+          /* These are useful and can be interpreted by any tool that
+             tracks malloc() et al, by using vg_replace_malloc.c. */
+          VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
+          VG_USERREQ__FREELIKE_BLOCK   = 0x1302,
+          /* Memory pool support. */
+          VG_USERREQ__CREATE_MEMPOOL   = 0x1303,
+          VG_USERREQ__DESTROY_MEMPOOL  = 0x1304,
+          VG_USERREQ__MEMPOOL_ALLOC    = 0x1305,
+          VG_USERREQ__MEMPOOL_FREE     = 0x1306,
+          VG_USERREQ__MEMPOOL_TRIM     = 0x1307,
+          VG_USERREQ__MOVE_MEMPOOL     = 0x1308,
+          VG_USERREQ__MEMPOOL_CHANGE   = 0x1309,
+          VG_USERREQ__MEMPOOL_EXISTS   = 0x130a,
+
+          /* Allow printfs to valgrind log. */
+          VG_USERREQ__PRINTF           = 0x1401,
+          VG_USERREQ__PRINTF_BACKTRACE = 0x1402,
+
+          /* Stack support. */
+          VG_USERREQ__STACK_REGISTER   = 0x1501,
+          VG_USERREQ__STACK_DEREGISTER = 0x1502,
+          VG_USERREQ__STACK_CHANGE     = 0x1503
+   } Vg_ClientRequest;
+
+#if !defined(__GNUC__)
+#  define __extension__ /* */
+#endif
+
+/* Returns the number of Valgrinds this code is running under.  That
+   is, 0 if running natively, 1 if running under Valgrind, 2 if
+   running under Valgrind which is running under another Valgrind,
+   etc. */
+#define RUNNING_ON_VALGRIND  __extension__                        \
+   ({unsigned int _qzz_res;                                       \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* if not */,          \
+                               VG_USERREQ__RUNNING_ON_VALGRIND,   \
+                               0, 0, 0, 0, 0);                    \
+    _qzz_res;                                                     \
+   })
+
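+/* Usage sketch (illustrative, not from the original header): code can branch
+   on the request result to skip work that is pointless under the tool, e.g.
+
+      #include <stdio.h>
+      #include "valgrind.h"
+
+      int main(void) {
+         unsigned depth = RUNNING_ON_VALGRIND;
+         if (depth > 0)
+            printf("running under %u level(s) of Valgrind\n", depth);
+         else
+            printf("running natively\n");
+         return 0;
+      }
+*/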
+
+/* Discard translation of code in the range [_qzz_addr .. _qzz_addr +
+   _qzz_len - 1].  Useful if you are debugging a JITter or some such,
+   since it provides a way to make sure valgrind will retranslate the
+   invalidated area.  Returns no value. */
+#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len)         \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__DISCARD_TRANSLATIONS,  \
+                               _qzz_addr, _qzz_len, 0, 0, 0);     \
+   }
+
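+/* Hedged sketch (not part of the original header): a JIT that rewrites a
+   code buffer in place would typically tell Valgrind to drop any cached
+   translations of the old bytes before re-entering them, e.g.
+
+      void patch_code(unsigned char* buf, unsigned long len) {
+         emit_new_code(buf, len);                  // hypothetical JIT emitter
+         VALGRIND_DISCARD_TRANSLATIONS(buf, len);  // force retranslation
+      }
+*/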
+
+/* These requests are for getting Valgrind itself to print something.
+   Possibly with a backtrace.  This is a really ugly hack. */
+
+#if defined(NVALGRIND)
+
+#  define VALGRIND_PRINTF(...)
+#  define VALGRIND_PRINTF_BACKTRACE(...)
+
+#else /* NVALGRIND */
+
+/* Modern GCC will optimize the static routine out if unused,
+   and the 'unused' attribute will suppress warnings about it.  */
+static int VALGRIND_PRINTF(const char *format, ...)
+   __attribute__((format(__printf__, 1, 2), __unused__));
+static int
+VALGRIND_PRINTF(const char *format, ...)
+{
+   unsigned long _qzz_res;
+   va_list vargs;
+   va_start(vargs, format);
+   VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, VG_USERREQ__PRINTF,
+                              (unsigned long)format, (unsigned long)vargs, 
+                              0, 0, 0);
+   va_end(vargs);
+   return (int)_qzz_res;
+}
+
+static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
+   __attribute__((format(__printf__, 1, 2), __unused__));
+static int
+VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
+{
+   unsigned long _qzz_res;
+   va_list vargs;
+   va_start(vargs, format);
+   VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, VG_USERREQ__PRINTF_BACKTRACE,
+                              (unsigned long)format, (unsigned long)vargs, 
+                              0, 0, 0);
+   va_end(vargs);
+   return (int)_qzz_res;
+}
+
+#endif /* NVALGRIND */
+
+
+/* These requests allow control to move from the simulated CPU to the
+   real CPU, calling an arbitrary function.
+   
+   Note that the current ThreadId is inserted as the first argument.
+   So this call:
+
+     VALGRIND_NON_SIMD_CALL2(f, arg1, arg2)
+
+   requires f to have this signature:
+
+     Word f(Word tid, Word arg1, Word arg2)
+
+   where "Word" is a word-sized type.
+
+   Note that these client requests are not entirely reliable.  For example,
+   if you call a function with them that subsequently calls printf(),
+   there's a high chance Valgrind will crash.  Generally, these calls are
+   more likely to work if the called function does not refer to
+   any global variables, and does not refer to any libc or other functions
+   (printf et al).  Any kind of entanglement with libc or dynamic linking is
+   likely to have a bad outcome, for tricky reasons which we've grappled
+   with a lot in the past.
+*/
+#define VALGRIND_NON_SIMD_CALL0(_qyy_fn)                          \
+   __extension__                                                  \
+   ({unsigned long _qyy_res;                                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
+                               VG_USERREQ__CLIENT_CALL0,          \
+                               _qyy_fn,                           \
+                               0, 0, 0, 0);                       \
+    _qyy_res;                                                     \
+   })
+
+#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1)               \
+   __extension__                                                  \
+   ({unsigned long _qyy_res;                                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
+                               VG_USERREQ__CLIENT_CALL1,          \
+                               _qyy_fn,                           \
+                               _qyy_arg1, 0, 0, 0);               \
+    _qyy_res;                                                     \
+   })
+
+#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2)    \
+   __extension__                                                  \
+   ({unsigned long _qyy_res;                                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
+                               VG_USERREQ__CLIENT_CALL2,          \
+                               _qyy_fn,                           \
+                               _qyy_arg1, _qyy_arg2, 0, 0);       \
+    _qyy_res;                                                     \
+   })
+
+#define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \
+   __extension__                                                  \
+   ({unsigned long _qyy_res;                                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
+                               VG_USERREQ__CLIENT_CALL3,          \
+                               _qyy_fn,                           \
+                               _qyy_arg1, _qyy_arg2,              \
+                               _qyy_arg3, 0);                     \
+    _qyy_res;                                                     \
+   })
+
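+/* Illustrative sketch (not in the original header): because Valgrind inserts
+   the ThreadId as the first argument, a function used with
+   VALGRIND_NON_SIMD_CALL2 takes three word-sized parameters, e.g.
+
+      static long add_on_real_cpu(long tid, long a, long b) {
+         (void)tid;              // ThreadId supplied by Valgrind
+         return a + b;
+      }
+
+      long r = VALGRIND_NON_SIMD_CALL2(add_on_real_cpu, 3, 4);
+
+   Here 'long' stands in for the word-sized type, and the body deliberately
+   avoids libc, as advised above. */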
+
+/* Counts the number of errors that have been recorded by a tool.  Nb:
+   the tool must record the errors with VG_(maybe_record_error)() or
+   VG_(unique_error)() for them to be counted. */
+#define VALGRIND_COUNT_ERRORS                                     \
+   __extension__                                                  \
+   ({unsigned int _qyy_res;                                       \
+    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
+                               VG_USERREQ__COUNT_ERRORS,          \
+                               0, 0, 0, 0, 0);                    \
+    _qyy_res;                                                     \
+   })
+
+/* Mark a block of memory as having been allocated by a malloc()-like
+   function.  `addr' is the start of the usable block (i.e. after any
+   redzone).  `rzB' is the redzone size if the allocator can apply redzones;
+   use '0' if not.  Adding redzones makes it more likely Valgrind will spot
+   block overruns.  `is_zeroed' indicates if the memory is zeroed, as it is
+   for calloc().  Put it immediately after the point where a block is
+   allocated. 
+   
+   If you're using Memcheck: if you allocate memory via superblocks and then
+   hand out small chunks of each superblock without redzones on the small
+   blocks, it's worth marking the superblock with
+   VALGRIND_MAKE_MEM_NOACCESS when it's created, so that block overruns are
+   detected.  But if you can put redzones on, it's probably better to not do
+   this, so that messages for small overruns are described in terms of the
+   small block rather than the superblock (but if you have a big overrun
+   that skips over a redzone, you could miss an error this way).  See
+   memcheck/tests/custom_alloc.c for an example.
+
+   WARNING: if your allocator uses malloc() or 'new' to allocate
+   superblocks, rather than mmap() or brk(), this will not work properly --
+   you'll likely get assertion failures during leak detection.  This is
+   because Valgrind doesn't like seeing overlapping heap blocks.  Sorry.
+
+   Nb: block must be freed via a free()-like function specified
+   with VALGRIND_FREELIKE_BLOCK or mismatch errors will occur. */
+#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)    \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MALLOCLIKE_BLOCK,      \
+                               addr, sizeB, rzB, is_zeroed, 0);   \
+   }
+
+/* Mark a block of memory as having been freed by a free()-like function.
+   `rzB' is redzone size;  it must match that given to
+   VALGRIND_MALLOCLIKE_BLOCK.  Memory not freed will be detected by the leak
+   checker.  Put it immediately after the point where the block is freed. */
+#define VALGRIND_FREELIKE_BLOCK(addr, rzB)                        \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__FREELIKE_BLOCK,        \
+                               addr, rzB, 0, 0, 0);               \
+   }
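+
+/* Minimal sketch (assuming a hypothetical custom bump allocator; not part of
+   the original header) showing where the two requests go:
+
+      void* my_alloc(size_t n) {
+         void* p = bump_alloc(n);                // hypothetical allocator
+         VALGRIND_MALLOCLIKE_BLOCK(p, n, 0, 0);  // no redzone, not zeroed
+         return p;
+      }
+
+      void my_free(void* p) {
+         VALGRIND_FREELIKE_BLOCK(p, 0);          // rzB must match the above
+         bump_free(p);                           // hypothetical allocator
+      }
+*/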
+
+/* Create a memory pool. */
+#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed)             \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__CREATE_MEMPOOL,        \
+                               pool, rzB, is_zeroed, 0, 0);       \
+   }
+
+/* Destroy a memory pool. */
+#define VALGRIND_DESTROY_MEMPOOL(pool)                            \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__DESTROY_MEMPOOL,       \
+                               pool, 0, 0, 0, 0);                 \
+   }
+
+/* Associate a piece of memory with a memory pool. */
+#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size)                  \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MEMPOOL_ALLOC,         \
+                               pool, addr, size, 0, 0);           \
+   }
+
+/* Disassociate a piece of memory from a memory pool. */
+#define VALGRIND_MEMPOOL_FREE(pool, addr)                         \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MEMPOOL_FREE,          \
+                               pool, addr, 0, 0, 0);              \
+   }
+
+/* Disassociate any pieces outside a particular range. */
+#define VALGRIND_MEMPOOL_TRIM(pool, addr, size)                   \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MEMPOOL_TRIM,          \
+                               pool, addr, size, 0, 0);           \
+   }
+
+/* Notify that the pool previously anchored at poolA has moved to poolB. */
+#define VALGRIND_MOVE_MEMPOOL(poolA, poolB)                       \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MOVE_MEMPOOL,          \
+                               poolA, poolB, 0, 0, 0);            \
+   }
+
+/* Resize and/or move a piece associated with a memory pool. */
+#define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size)         \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MEMPOOL_CHANGE,        \
+                               pool, addrA, addrB, size, 0);      \
+   }
+
+/* Return 1 if a mempool exists, else 0. */
+#define VALGRIND_MEMPOOL_EXISTS(pool)                             \
+   ({unsigned int _qzz_res;                                       \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MEMPOOL_EXISTS,        \
+                               pool, 0, 0, 0, 0);                 \
+    _qzz_res;                                                     \
+   })
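+
+/* Illustrative pool lifecycle (a sketch, not from the original header),
+   where 'pool' is any address that uniquely identifies the pool, typically
+   the pool's own control structure:
+
+      VALGRIND_CREATE_MEMPOOL(pool, 0, 0);        // no redzone, not zeroed
+      VALGRIND_MEMPOOL_ALLOC(pool, chunk, size);  // hand out a chunk
+      VALGRIND_MEMPOOL_FREE(pool, chunk);         // take it back
+      VALGRIND_DESTROY_MEMPOOL(pool);             // tear the pool down
+*/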
+
+/* Mark a piece of memory as being a stack. Returns a stack id. */
+#define VALGRIND_STACK_REGISTER(start, end)                       \
+   ({unsigned int _qzz_res;                                       \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__STACK_REGISTER,        \
+                               start, end, 0, 0, 0);              \
+    _qzz_res;                                                     \
+   })
+
+/* Unmark the piece of memory associated with a stack id as being a
+   stack. */
+#define VALGRIND_STACK_DEREGISTER(id)                             \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__STACK_DEREGISTER,      \
+                               id, 0, 0, 0, 0);                   \
+   }
+
+/* Change the start and end address of the stack id. */
+#define VALGRIND_STACK_CHANGE(id, start, end)                     \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__STACK_CHANGE,          \
+                               id, start, end, 0, 0);             \
+   }
+
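+/* Hedged example (not in the original header): coroutine or user-level
+   thread libraries typically register each stack they create, e.g.
+
+      unsigned id = VALGRIND_STACK_REGISTER(stack_lo, stack_hi);
+      // ... switch to and run on the new stack ...
+      VALGRIND_STACK_DEREGISTER(id);
+
+   where stack_lo/stack_hi are the bounds of the allocated stack region. */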
+
+#undef PLAT_x86_linux
+#undef PLAT_amd64_linux
+#undef PLAT_ppc32_linux
+#undef PLAT_ppc64_linux
+#undef PLAT_ppc32_aix5
+#undef PLAT_ppc64_aix5
+
+#endif   /* __VALGRIND_H */
diff --git a/src/token.cc b/src/token.cc
new file mode 100644
index 0000000..bb42cea
--- /dev/null
+++ b/src/token.cc
@@ -0,0 +1,163 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "token.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef DEBUG
+#define T(name, string, precedence) #name,
+const char* Token::name_[NUM_TOKENS] = {
+  TOKEN_LIST(T, T, IGNORE_TOKEN)
+};
+#undef T
+#endif
+
+
+#define T(name, string, precedence) string,
+const char* Token::string_[NUM_TOKENS] = {
+  TOKEN_LIST(T, T, IGNORE_TOKEN)
+};
+#undef T
+
+
+#define T(name, string, precedence) precedence,
+int8_t Token::precedence_[NUM_TOKENS] = {
+  TOKEN_LIST(T, T, IGNORE_TOKEN)
+};
+#undef T
+
+
+// A perfect (0 collision) hash table of keyword token values.
+
+// larger N will reduce the number of collisions (power of 2 for fast %)
+const unsigned int N = 128;
+// make this small since we have <= 256 tokens
+static uint8_t Hashtable[N];
+static bool IsInitialized = false;
+
+
+static unsigned int Hash(const char* s) {
+  // The following constants have been found using trial-and-error. If the
+  // keyword set changes, they may have to be recomputed (make them flags
+  // and play with the flag values). Increasing N is the simplest way to
+  // reduce the number of collisions.
+
+  // we must use at least 4 chars ('const' and 'continue' share
+  // 'con')
+  const unsigned int L = 5;
+  // smaller S tend to reduce the number of collisions
+  const unsigned int S = 4;
+  // make this a prime, or at least an odd number
+  const unsigned int M = 3;
+
+  unsigned int h = 0;
+  for (unsigned int i = 0; s[i] != '\0' && i < L; i++) {
+    h += (h << S) + s[i];
+  }
+  // unsigned int % by a power of 2 (otherwise this will not be a bit mask)
+  return h * M % N;
+}
+
+
+Token::Value Token::Lookup(const char* str) {
+  ASSERT(IsInitialized);
+  Value k = static_cast<Value>(Hashtable[Hash(str)]);
+  const char* s = string_[k];
+  ASSERT(s != NULL || k == IDENTIFIER);
+  if (s == NULL || strcmp(s, str) == 0) {
+    return k;
+  }
+  return IDENTIFIER;
+}
+
+
+#ifdef DEBUG
+// We need this function because C++ doesn't allow the expression
+// NULL == NULL, which is a result of macro expansion below. What
+// the hell?
+static bool IsNull(const char* s) {
+  return s == NULL;
+}
+#endif
+
+
+void Token::Initialize() {
+  if (IsInitialized) return;
+
+  // A list of all keywords, terminated by ILLEGAL.
+#define T(name, string, precedence) name,
+  static Value keyword[] = {
+    TOKEN_LIST(IGNORE_TOKEN, T, IGNORE_TOKEN)
+    ILLEGAL
+  };
+#undef T
+
+  // Assert that the keyword array contains the 25 keywords, 3 future
+  // reserved words (const, debugger, and native), and the 3 named literals
+  // defined by the ECMA-262 standard.
+  ASSERT(ARRAY_SIZE(keyword) == 25 + 3 + 3 + 1);  // +1 for ILLEGAL sentinel
+
+  // Initialize Hashtable.
+  ASSERT(NUM_TOKENS <= 256);  // Hashtable contains uint8_t elements
+  for (unsigned int i = 0; i < N; i++) {
+    Hashtable[i] = IDENTIFIER;
+  }
+
+  // Insert all keywords into Hashtable.
+  int collisions = 0;
+  for (int i = 0; keyword[i] != ILLEGAL; i++) {
+    Value k = keyword[i];
+    unsigned int h = Hash(string_[k]);
+    if (Hashtable[h] != IDENTIFIER) collisions++;
+    Hashtable[h] = k;
+  }
+
+  if (collisions > 0) {
+    PrintF("%d collisions in keyword hashtable\n", collisions);
+    FATAL("Fix keyword lookup!");
+  }
+
+  IsInitialized = true;
+
+  // Verify hash table.
+#define T(name, string, precedence) \
+  ASSERT(IsNull(string) || Lookup(string) == IDENTIFIER);
+
+#define K(name, string, precedence) \
+  ASSERT(Lookup(string) == name);
+
+  TOKEN_LIST(T, K, IGNORE_TOKEN)
+
+#undef K
+#undef T
+}
+
+} }  // namespace v8::internal
diff --git a/src/token.h b/src/token.h
new file mode 100644
index 0000000..4d4df63
--- /dev/null
+++ b/src/token.h
@@ -0,0 +1,282 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_TOKEN_H_
+#define V8_TOKEN_H_
+
+namespace v8 {
+namespace internal {
+
+// TOKEN_LIST takes a list of 3 macros M, all of which satisfy the
+// same signature M(name, string, precedence), where name is the
+// symbolic token name, string is the corresponding syntactic symbol
+// (or NULL, for literals), and precedence is the precedence (or 0).
+// The parameters are invoked for token categories as follows:
+//
+//   T: Non-keyword tokens
+//   K: Keyword tokens
+//   F: Future (reserved) keyword tokens
+
+// IGNORE_TOKEN is a convenience macro that can be supplied as
+// an argument (at any position) for a TOKEN_LIST call. It does
+// nothing with tokens belonging to the respective category.
+
+#define IGNORE_TOKEN(name, string, precedence)
+
+#define TOKEN_LIST(T, K, F)                                             \
+  /* End of source indicator. */                                        \
+  T(EOS, "EOS", 0)                                                      \
+                                                                        \
+  /* Punctuators (ECMA-262, section 7.7, page 15). */                   \
+  T(LPAREN, "(", 0)                                                     \
+  T(RPAREN, ")", 0)                                                     \
+  T(LBRACK, "[", 0)                                                     \
+  T(RBRACK, "]", 0)                                                     \
+  T(LBRACE, "{", 0)                                                     \
+  T(RBRACE, "}", 0)                                                     \
+  T(COLON, ":", 0)                                                      \
+  T(SEMICOLON, ";", 0)                                                  \
+  T(PERIOD, ".", 0)                                                     \
+  T(CONDITIONAL, "?", 3)                                                \
+  T(INC, "++", 0)                                                       \
+  T(DEC, "--", 0)                                                       \
+                                                                        \
+  /* Assignment operators. */                                           \
+  /* IsAssignmentOp() relies on this block of enum values */            \
+  /* being contiguous and sorted in the same order! */                  \
+  T(INIT_VAR, "=init_var", 2)  /* AST-use only. */                      \
+  T(INIT_CONST, "=init_const", 2)  /* AST-use only. */                  \
+  T(ASSIGN, "=", 2)                                                     \
+  T(ASSIGN_BIT_OR, "|=", 2)                                             \
+  T(ASSIGN_BIT_XOR, "^=", 2)                                            \
+  T(ASSIGN_BIT_AND, "&=", 2)                                            \
+  T(ASSIGN_SHL, "<<=", 2)                                               \
+  T(ASSIGN_SAR, ">>=", 2)                                               \
+  T(ASSIGN_SHR, ">>>=", 2)                                              \
+  T(ASSIGN_ADD, "+=", 2)                                                \
+  T(ASSIGN_SUB, "-=", 2)                                                \
+  T(ASSIGN_MUL, "*=", 2)                                                \
+  T(ASSIGN_DIV, "/=", 2)                                                \
+  T(ASSIGN_MOD, "%=", 2)                                                \
+                                                                        \
+  /* Binary operators sorted by precedence. */                          \
+  /* IsBinaryOp() relies on this block of enum values */                \
+  /* being contiguous and sorted in the same order! */                  \
+  T(COMMA, ",", 1)                                                      \
+  T(OR, "||", 4)                                                        \
+  T(AND, "&&", 5)                                                       \
+  T(BIT_OR, "|", 6)                                                     \
+  T(BIT_XOR, "^", 7)                                                    \
+  T(BIT_AND, "&", 8)                                                    \
+  T(SHL, "<<", 11)                                                      \
+  T(SAR, ">>", 11)                                                      \
+  T(SHR, ">>>", 11)                                                     \
+  T(ADD, "+", 12)                                                       \
+  T(SUB, "-", 12)                                                       \
+  T(MUL, "*", 13)                                                       \
+  T(DIV, "/", 13)                                                       \
+  T(MOD, "%", 13)                                                       \
+                                                                        \
+  /* Compare operators sorted by precedence. */                         \
+  /* IsCompareOp() relies on this block of enum values */               \
+  /* being contiguous and sorted in the same order! */                  \
+  T(EQ, "==", 9)                                                        \
+  T(NE, "!=", 9)                                                        \
+  T(EQ_STRICT, "===", 9)                                                \
+  T(NE_STRICT, "!==", 9)                                                \
+  T(LT, "<", 10)                                                        \
+  T(GT, ">", 10)                                                        \
+  T(LTE, "<=", 10)                                                      \
+  T(GTE, ">=", 10)                                                      \
+  K(INSTANCEOF, "instanceof", 10)                                       \
+  K(IN, "in", 10)                                                       \
+                                                                        \
+  /* Unary operators. */                                                \
+  /* IsUnaryOp() relies on this block of enum values */                 \
+  /* being contiguous and sorted in the same order! */                  \
+  T(NOT, "!", 0)                                                        \
+  T(BIT_NOT, "~", 0)                                                    \
+  K(DELETE, "delete", 0)                                                \
+  K(TYPEOF, "typeof", 0)                                                \
+  K(VOID, "void", 0)                                                    \
+                                                                        \
+  /* Keywords (ECMA-262, section 7.5.2, page 13). */                    \
+  K(BREAK, "break", 0)                                                  \
+  K(CASE, "case", 0)                                                    \
+  K(CATCH, "catch", 0)                                                  \
+  K(CONTINUE, "continue", 0)                                            \
+  K(DEBUGGER, "debugger", 0)                                            \
+  K(DEFAULT, "default", 0)                                              \
+  /* DELETE */                                                          \
+  K(DO, "do", 0)                                                        \
+  K(ELSE, "else", 0)                                                    \
+  K(FINALLY, "finally", 0)                                              \
+  K(FOR, "for", 0)                                                      \
+  K(FUNCTION, "function", 0)                                            \
+  K(IF, "if", 0)                                                        \
+  /* IN */                                                              \
+  /* INSTANCEOF */                                                      \
+  K(NEW, "new", 0)                                                      \
+  K(RETURN, "return", 0)                                                \
+  K(SWITCH, "switch", 0)                                                \
+  K(THIS, "this", 0)                                                    \
+  K(THROW, "throw", 0)                                                  \
+  K(TRY, "try", 0)                                                      \
+  /* TYPEOF */                                                          \
+  K(VAR, "var", 0)                                                      \
+  /* VOID */                                                            \
+  K(WHILE, "while", 0)                                                  \
+  K(WITH, "with", 0)                                                    \
+                                                                        \
+  /* Future reserved words (ECMA-262, section 7.5.3, page 14). */       \
+  F(ABSTRACT, "abstract", 0)                                            \
+  F(BOOLEAN, "boolean", 0)                                              \
+  F(BYTE, "byte", 0)                                                    \
+  F(CHAR, "char", 0)                                                    \
+  F(CLASS, "class", 0)                                                  \
+  K(CONST, "const", 0)                                                  \
+  F(DOUBLE, "double", 0)                                                \
+  F(ENUM, "enum", 0)                                                    \
+  F(EXPORT, "export", 0)                                                \
+  F(EXTENDS, "extends", 0)                                              \
+  F(FINAL, "final", 0)                                                  \
+  F(FLOAT, "float", 0)                                                  \
+  F(GOTO, "goto", 0)                                                    \
+  F(IMPLEMENTS, "implements", 0)                                        \
+  F(IMPORT, "import", 0)                                                \
+  F(INT, "int", 0)                                                      \
+  F(INTERFACE, "interface", 0)                                          \
+  F(LONG, "long", 0)                                                    \
+  K(NATIVE, "native", 0)                                                \
+  F(PACKAGE, "package", 0)                                              \
+  F(PRIVATE, "private", 0)                                              \
+  F(PROTECTED, "protected", 0)                                          \
+  F(PUBLIC, "public", 0)                                                \
+  F(SHORT, "short", 0)                                                  \
+  F(STATIC, "static", 0)                                                \
+  F(SUPER, "super", 0)                                                  \
+  F(SYNCHRONIZED, "synchronized", 0)                                    \
+  F(THROWS, "throws", 0)                                                \
+  F(TRANSIENT, "transient", 0)                                          \
+  F(VOLATILE, "volatile", 0)                                            \
+                                                                        \
+  /* Literals (ECMA-262, section 7.8, page 16). */                      \
+  K(NULL_LITERAL, "null", 0)                                            \
+  K(TRUE_LITERAL, "true", 0)                                            \
+  K(FALSE_LITERAL, "false", 0)                                          \
+  T(NUMBER, NULL, 0)                                                    \
+  T(STRING, NULL, 0)                                                    \
+                                                                        \
+  /* Identifiers (not keywords or future reserved words). */            \
+  T(IDENTIFIER, NULL, 0)                                                \
+                                                                        \
+  /* Illegal token - not able to scan. */                               \
+  T(ILLEGAL, "ILLEGAL", 0)                                              \
+                                                                        \
+  /* Scanner-internal use only. */                                      \
+  T(WHITESPACE, NULL, 0)
+
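+// Illustrative expansion sketch (not part of the original source): supplying
+// a macro for one category and IGNORE_TOKEN for the others selects just that
+// category. For example, a table of keyword strings (hypothetical name
+// kKeywordStrings) could be built with:
+//
+//   #define K(name, string, precedence) string,
+//   static const char* kKeywordStrings[] = {
+//     TOKEN_LIST(IGNORE_TOKEN, K, IGNORE_TOKEN)
+//   };
+//   #undef K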
+
+class Token {
+ public:
+  // All token values.
+#define T(name, string, precedence) name,
+  enum Value {
+    TOKEN_LIST(T, T, IGNORE_TOKEN)
+    NUM_TOKENS
+  };
+#undef T
+
+#ifdef DEBUG
+  // Returns a string corresponding to the C++ token name
+  // (e.g. "LT" for the token LT).
+  static const char* Name(Value tok) {
+    ASSERT(0 <= tok && tok < NUM_TOKENS);
+    return name_[tok];
+  }
+#endif
+
+  // Predicates
+  static bool IsAssignmentOp(Value tok) {
+    return INIT_VAR <= tok && tok <= ASSIGN_MOD;
+  }
+
+  static bool IsBinaryOp(Value op) {
+    return COMMA <= op && op <= MOD;
+  }
+
+  static bool IsCompareOp(Value op) {
+    return EQ <= op && op <= IN;
+  }
+
+  static bool IsBitOp(Value op) {
+    return (BIT_OR <= op && op <= SHR) || op == BIT_NOT;
+  }
+
+  static bool IsUnaryOp(Value op) {
+    return (NOT <= op && op <= VOID) || op == ADD || op == SUB;
+  }
+
+  static bool IsCountOp(Value op) {
+    return op == INC || op == DEC;
+  }
+
+  // Returns a string corresponding to the JS token string
+  // (e.g., "<" for the token LT) or NULL if the token doesn't
+  // have a (unique) string (e.g. an IDENTIFIER).
+  static const char* String(Value tok) {
+    ASSERT(0 <= tok && tok < NUM_TOKENS);
+    return string_[tok];
+  }
+
+  // Returns the precedence > 0 for binary and compare
+  // operators; returns 0 otherwise.
+  static int Precedence(Value tok) {
+    ASSERT(0 <= tok && tok < NUM_TOKENS);
+    return precedence_[tok];
+  }
+
+  // Returns the keyword value if str is a keyword;
+  // returns IDENTIFIER otherwise. The class must
+  // have been initialized.
+  static Value Lookup(const char* str);
+
+  // Must be called once to initialize the class.
+  // Multiple calls are ignored.
+  static void Initialize();
+
+ private:
+#ifdef DEBUG
+  static const char* name_[NUM_TOKENS];
+#endif
+  static const char* string_[NUM_TOKENS];
+  static int8_t precedence_[NUM_TOKENS];
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_TOKEN_H_
diff --git a/src/top.cc b/src/top.cc
new file mode 100644
index 0000000..aa7788e
--- /dev/null
+++ b/src/top.cc
@@ -0,0 +1,983 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "api.h"
+#include "bootstrapper.h"
+#include "debug.h"
+#include "execution.h"
+#include "string-stream.h"
+#include "platform.h"
+
+namespace v8 {
+namespace internal {
+
+ThreadLocalTop Top::thread_local_;
+Mutex* Top::break_access_ = OS::CreateMutex();
+
+NoAllocationStringAllocator* preallocated_message_space = NULL;
+
+Address top_addresses[] = {
+#define C(name) reinterpret_cast<Address>(Top::name()),
+    TOP_ADDRESS_LIST(C)
+    TOP_ADDRESS_LIST_PROF(C)
+#undef C
+    NULL
+};
+
+Address Top::get_address_from_id(Top::AddressId id) {
+  return top_addresses[id];
+}
+
+char* Top::Iterate(ObjectVisitor* v, char* thread_storage) {
+  ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(thread_storage);
+  Iterate(v, thread);
+  return thread_storage + sizeof(ThreadLocalTop);
+}
+
+
+void Top::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
+  v->VisitPointer(&(thread->pending_exception_));
+  v->VisitPointer(&(thread->pending_message_obj_));
+  v->VisitPointer(
+      bit_cast<Object**, Script**>(&(thread->pending_message_script_)));
+  v->VisitPointer(bit_cast<Object**, Context**>(&(thread->context_)));
+  v->VisitPointer(&(thread->scheduled_exception_));
+
+  for (v8::TryCatch* block = thread->try_catch_handler_;
+       block != NULL;
+       block = block->next_) {
+    v->VisitPointer(bit_cast<Object**, void**>(&(block->exception_)));
+    v->VisitPointer(bit_cast<Object**, void**>(&(block->message_)));
+  }
+
+  // Iterate over pointers on native execution stack.
+  for (StackFrameIterator it(thread); !it.done(); it.Advance()) {
+    it.frame()->Iterate(v);
+  }
+}
+
+
+void Top::Iterate(ObjectVisitor* v) {
+  ThreadLocalTop* current_t = &thread_local_;
+  Iterate(v, current_t);
+}
+
+
+void Top::InitializeThreadLocal() {
+  thread_local_.c_entry_fp_ = 0;
+  thread_local_.handler_ = 0;
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  thread_local_.js_entry_sp_ = 0;
+#endif
+  thread_local_.stack_is_cooked_ = false;
+  thread_local_.try_catch_handler_ = NULL;
+  thread_local_.context_ = NULL;
+  int id = ThreadManager::CurrentId();
+  thread_local_.thread_id_ = (id == 0) ? ThreadManager::kInvalidId : id;
+  thread_local_.external_caught_exception_ = false;
+  thread_local_.failed_access_check_callback_ = NULL;
+  clear_pending_exception();
+  clear_pending_message();
+  clear_scheduled_exception();
+  thread_local_.save_context_ = NULL;
+  thread_local_.catcher_ = NULL;
+}
+
+
+// Create a dummy thread that will wait forever on a semaphore. The only
+// purpose for this thread is to have some stack area to save essential data
+// into for use by a stacks-only core dump (aka minidump).
+class PreallocatedMemoryThread: public Thread {
+ public:
+  PreallocatedMemoryThread() : keep_running_(true) {
+    wait_for_ever_semaphore_ = OS::CreateSemaphore(0);
+    data_ready_semaphore_ = OS::CreateSemaphore(0);
+  }
+
+  // When the thread starts running it will allocate a fixed number of bytes
+  // on the stack and publish the location of this memory for others to use.
+  void Run() {
+    EmbeddedVector<char, 15 * 1024> local_buffer;
+
+    // Initialize the buffer with a known good value.
+    OS::StrNCpy(local_buffer, "Trace data was not generated.\n",
+                local_buffer.length());
+
+    // Publish the local buffer and signal its availability.
+    data_ = local_buffer.start();
+    length_ = local_buffer.length();
+    data_ready_semaphore_->Signal();
+
+    while (keep_running_) {
+      // This thread will wait here until the end of time.
+      wait_for_ever_semaphore_->Wait();
+    }
+
+    // Make sure we access the buffer after the wait to remove all possibility
+    // of it being optimized away.
+    OS::StrNCpy(local_buffer, "PreallocatedMemoryThread shutting down.\n",
+                local_buffer.length());
+  }
+
+  static char* data() {
+    if (data_ready_semaphore_ != NULL) {
+      // Initial access is guarded until the data has been published.
+      data_ready_semaphore_->Wait();
+      delete data_ready_semaphore_;
+      data_ready_semaphore_ = NULL;
+    }
+    return data_;
+  }
+
+  static unsigned length() {
+    if (data_ready_semaphore_ != NULL) {
+      // Initial access is guarded until the data has been published.
+      data_ready_semaphore_->Wait();
+      delete data_ready_semaphore_;
+      data_ready_semaphore_ = NULL;
+    }
+    return length_;
+  }
+
+  static void StartThread() {
+    if (the_thread_ != NULL) return;
+
+    the_thread_ = new PreallocatedMemoryThread();
+    the_thread_->Start();
+  }
+
+  // Stop the PreallocatedMemoryThread and release its resources.
+  static void StopThread() {
+    if (the_thread_ == NULL) return;
+
+    the_thread_->keep_running_ = false;
+    wait_for_ever_semaphore_->Signal();
+
+    // Wait for the thread to terminate.
+    the_thread_->Join();
+
+    if (data_ready_semaphore_ != NULL) {
+      delete data_ready_semaphore_;
+      data_ready_semaphore_ = NULL;
+    }
+
+    delete wait_for_ever_semaphore_;
+    wait_for_ever_semaphore_ = NULL;
+
+    // Done with the thread entirely.
+    delete the_thread_;
+    the_thread_ = NULL;
+  }
+
+ private:
+  // Used to make sure that the thread keeps looping even for spurious wakeups.
+  bool keep_running_;
+
+  // The preallocated memory thread singleton.
+  static PreallocatedMemoryThread* the_thread_;
+  // This semaphore is used by the PreallocatedMemoryThread to wait forever.
+  static Semaphore* wait_for_ever_semaphore_;
+  // Semaphore to signal that the data has been initialized.
+  static Semaphore* data_ready_semaphore_;
+
+  // Location and size of the preallocated memory block.
+  static char* data_;
+  static unsigned length_;
+
+  DISALLOW_COPY_AND_ASSIGN(PreallocatedMemoryThread);
+};
+
+PreallocatedMemoryThread* PreallocatedMemoryThread::the_thread_ = NULL;
+Semaphore* PreallocatedMemoryThread::wait_for_ever_semaphore_ = NULL;
+Semaphore* PreallocatedMemoryThread::data_ready_semaphore_ = NULL;
+char* PreallocatedMemoryThread::data_ = NULL;
+unsigned PreallocatedMemoryThread::length_ = 0;
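+
+// Note: data() and length() are only safe to use once the thread has published
+// the buffer.  The first caller blocks on data_ready_semaphore_ until Run()
+// has signalled it, while wait_for_ever_semaphore_ keeps the thread (and
+// therefore its stack buffer) alive until StopThread() is called.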
+
+static bool initialized = false;
+
+void Top::Initialize() {
+  CHECK(!initialized);
+
+  InitializeThreadLocal();
+
+  // Only preallocate on the first initialization.
+  if (FLAG_preallocate_message_memory && (preallocated_message_space == NULL)) {
+    // Start the thread which will set aside some memory.
+    PreallocatedMemoryThread::StartThread();
+    preallocated_message_space =
+        new NoAllocationStringAllocator(PreallocatedMemoryThread::data(),
+                                        PreallocatedMemoryThread::length());
+    PreallocatedStorage::Init(PreallocatedMemoryThread::length() / 4);
+  }
+  initialized = true;
+}
+
+
+void Top::TearDown() {
+  if (initialized) {
+    // Remove the external reference to the preallocated stack memory.
+    if (preallocated_message_space != NULL) {
+      delete preallocated_message_space;
+      preallocated_message_space = NULL;
+    }
+
+    PreallocatedMemoryThread::StopThread();
+    initialized = false;
+  }
+}
+
+
+// There are cases where the C stack is separated from the JS stack (e.g. the
+// ARM simulator). To figure out the order of the top-most JS try-catch handler
+// and the top-most C try-catch handler, the C try-catch handler keeps a
+// reference to the top-most JS try-catch handler when it was created.
+//
+// Here is a picture to explain the idea:
+//   Top::thread_local_.handler_       Top::thread_local_.try_catch_handler_
+//
+//             |                                         |
+//             v                                         v
+//
+//      | JS handler  |                        | C try_catch handler |
+//      |    next     |--+           +-------- |    js_handler_      |
+//                       |           |         |      next_          |--+
+//                       |           |                                  |
+//      | JS handler  |--+ <---------+                                  |
+//      |    next     |
+//
+// If the top-most JS try-catch handler is not equal to
+// Top::thread_local_.try_catch_handler_.js_handler_, it means the JS handler
+// is on the top. Otherwise, it means the C try-catch handler is on the top.
+//
+void Top::RegisterTryCatchHandler(v8::TryCatch* that) {
+  StackHandler* handler =
+    reinterpret_cast<StackHandler*>(thread_local_.handler_);
+
+  // Find the top-most try-catch handler.
+  while (handler != NULL && !handler->is_try_catch()) {
+    handler = handler->next();
+  }
+
+  that->js_handler_ = handler;  // casted to void*
+  thread_local_.try_catch_handler_ = that;
+}
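+
+// Note: with the linkage established above, ShouldReturnException() below can
+// decide which handler is on top with a pointer comparison, roughly:
+//
+//   bool c_handler_on_top =
+//       try_catch != NULL &&
+//       (js_top_handler == NULL || js_top_handler == try_catch->js_handler_);
+//
+// where js_top_handler is the top-most JS try-catch handler found by walking
+// thread_local_.handler_.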
+
+
+void Top::UnregisterTryCatchHandler(v8::TryCatch* that) {
+  ASSERT(thread_local_.try_catch_handler_ == that);
+  thread_local_.try_catch_handler_ = that->next_;
+  thread_local_.catcher_ = NULL;
+}
+
+
+void Top::MarkCompactPrologue(bool is_compacting) {
+  MarkCompactPrologue(is_compacting, &thread_local_);
+}
+
+
+void Top::MarkCompactPrologue(bool is_compacting, char* data) {
+  MarkCompactPrologue(is_compacting, reinterpret_cast<ThreadLocalTop*>(data));
+}
+
+
+void Top::MarkCompactPrologue(bool is_compacting, ThreadLocalTop* thread) {
+  if (is_compacting) {
+    StackFrame::CookFramesForThread(thread);
+  }
+}
+
+
+void Top::MarkCompactEpilogue(bool is_compacting, char* data) {
+  MarkCompactEpilogue(is_compacting, reinterpret_cast<ThreadLocalTop*>(data));
+}
+
+
+void Top::MarkCompactEpilogue(bool is_compacting) {
+  MarkCompactEpilogue(is_compacting, &thread_local_);
+}
+
+
+void Top::MarkCompactEpilogue(bool is_compacting, ThreadLocalTop* thread) {
+  if (is_compacting) {
+    StackFrame::UncookFramesForThread(thread);
+  }
+}
+
+
+static int stack_trace_nesting_level = 0;
+static StringStream* incomplete_message = NULL;
+
+
+Handle<String> Top::StackTrace() {
+  if (stack_trace_nesting_level == 0) {
+    stack_trace_nesting_level++;
+    HeapStringAllocator allocator;
+    StringStream::ClearMentionedObjectCache();
+    StringStream accumulator(&allocator);
+    incomplete_message = &accumulator;
+    PrintStack(&accumulator);
+    Handle<String> stack_trace = accumulator.ToString();
+    incomplete_message = NULL;
+    stack_trace_nesting_level = 0;
+    return stack_trace;
+  } else if (stack_trace_nesting_level == 1) {
+    stack_trace_nesting_level++;
+    OS::PrintError(
+      "\n\nAttempt to print stack while printing stack (double fault)\n");
+    OS::PrintError(
+      "If you are lucky you may find a partial stack dump on stdout.\n\n");
+    incomplete_message->OutputToStdOut();
+    return Factory::empty_symbol();
+  } else {
+    OS::Abort();
+    // Unreachable
+    return Factory::empty_symbol();
+  }
+}
+
+
+void Top::PrintStack() {
+  if (stack_trace_nesting_level == 0) {
+    stack_trace_nesting_level++;
+
+    StringAllocator* allocator;
+    if (preallocated_message_space == NULL) {
+      allocator = new HeapStringAllocator();
+    } else {
+      allocator = preallocated_message_space;
+    }
+
+    NativeAllocationChecker allocation_checker(
+      !FLAG_preallocate_message_memory ?
+      NativeAllocationChecker::ALLOW :
+      NativeAllocationChecker::DISALLOW);
+
+    StringStream::ClearMentionedObjectCache();
+    StringStream accumulator(allocator);
+    incomplete_message = &accumulator;
+    PrintStack(&accumulator);
+    accumulator.OutputToStdOut();
+    accumulator.Log();
+    incomplete_message = NULL;
+    stack_trace_nesting_level = 0;
+    if (preallocated_message_space == NULL) {
+      // Remove the HeapStringAllocator created above.
+      delete allocator;
+    }
+  } else if (stack_trace_nesting_level == 1) {
+    stack_trace_nesting_level++;
+    OS::PrintError(
+      "\n\nAttempt to print stack while printing stack (double fault)\n");
+    OS::PrintError(
+      "If you are lucky you may find a partial stack dump on stdout.\n\n");
+    incomplete_message->OutputToStdOut();
+  }
+}
+
+
+static void PrintFrames(StringStream* accumulator,
+                        StackFrame::PrintMode mode) {
+  StackFrameIterator it;
+  for (int i = 0; !it.done(); it.Advance()) {
+    it.frame()->Print(accumulator, mode, i++);
+  }
+}
+
+
+void Top::PrintStack(StringStream* accumulator) {
+  // The MentionedObjectCache is not GC-proof at the moment.
+  AssertNoAllocation nogc;
+  ASSERT(StringStream::IsMentionedObjectCacheClear());
+
+  // Avoid printing anything if there are no frames.
+  if (c_entry_fp(GetCurrentThread()) == 0) return;
+
+  accumulator->Add(
+      "\n==== Stack trace ============================================\n\n");
+  PrintFrames(accumulator, StackFrame::OVERVIEW);
+
+  accumulator->Add(
+      "\n==== Details ================================================\n\n");
+  PrintFrames(accumulator, StackFrame::DETAILS);
+
+  accumulator->PrintMentionedObjectCache();
+  accumulator->Add("=====================\n\n");
+}
+
+
+void Top::SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback) {
+  ASSERT(thread_local_.failed_access_check_callback_ == NULL);
+  thread_local_.failed_access_check_callback_ = callback;
+}
+
+
+void Top::ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type) {
+  if (!thread_local_.failed_access_check_callback_) return;
+
+  ASSERT(receiver->IsAccessCheckNeeded());
+  ASSERT(Top::context());
+  // The callers of this method are not expecting a GC.
+  AssertNoAllocation no_gc;
+
+  // Get the data object from access check info.
+  JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
+  Object* info = constructor->shared()->function_data();
+  if (info == Heap::undefined_value()) return;
+
+  Object* data_obj = FunctionTemplateInfo::cast(info)->access_check_info();
+  if (data_obj == Heap::undefined_value()) return;
+
+  HandleScope scope;
+  Handle<JSObject> receiver_handle(receiver);
+  Handle<Object> data(AccessCheckInfo::cast(data_obj)->data());
+  thread_local_.failed_access_check_callback_(
+    v8::Utils::ToLocal(receiver_handle),
+    type,
+    v8::Utils::ToLocal(data));
+}
+
+
+enum MayAccessDecision {
+  YES, NO, UNKNOWN
+};
+
+
+static MayAccessDecision MayAccessPreCheck(JSObject* receiver,
+                                           v8::AccessType type) {
+  // During bootstrapping, callback functions are not enabled yet.
+  if (Bootstrapper::IsActive()) return YES;
+
+  if (receiver->IsJSGlobalProxy()) {
+    Object* receiver_context = JSGlobalProxy::cast(receiver)->context();
+    if (!receiver_context->IsContext()) return NO;
+
+    // Get the global context of the current top context.  Avoid using
+    // Top::global_context() because it uses a Handle.
+    Context* global_context = Top::context()->global()->global_context();
+    if (receiver_context == global_context) return YES;
+
+    if (Context::cast(receiver_context)->security_token() ==
+        global_context->security_token())
+      return YES;
+  }
+
+  return UNKNOWN;
+}
+
+
+bool Top::MayNamedAccess(JSObject* receiver, Object* key, v8::AccessType type) {
+  ASSERT(receiver->IsAccessCheckNeeded());
+  // Check for compatibility between the security tokens in the
+  // current lexical context and the accessed object.
+  ASSERT(Top::context());
+  // The callers of this method are not expecting a GC.
+  AssertNoAllocation no_gc;
+
+  MayAccessDecision decision = MayAccessPreCheck(receiver, type);
+  if (decision != UNKNOWN) return decision == YES;
+
+  // Get the named access check callback.
+  JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
+  Object* info = constructor->shared()->function_data();
+  if (info == Heap::undefined_value()) return false;
+
+  Object* data_obj = FunctionTemplateInfo::cast(info)->access_check_info();
+  if (data_obj == Heap::undefined_value()) return false;
+
+  Object* fun_obj = AccessCheckInfo::cast(data_obj)->named_callback();
+  v8::NamedSecurityCallback callback =
+      v8::ToCData<v8::NamedSecurityCallback>(fun_obj);
+
+  if (!callback) return false;
+
+  HandleScope scope;
+  Handle<JSObject> receiver_handle(receiver);
+  Handle<Object> key_handle(key);
+  Handle<Object> data(AccessCheckInfo::cast(data_obj)->data());
+  LOG(ApiNamedSecurityCheck(key));
+  bool result = false;
+  {
+    // Leaving JavaScript.
+    VMState state(EXTERNAL);
+    result = callback(v8::Utils::ToLocal(receiver_handle),
+                      v8::Utils::ToLocal(key_handle),
+                      type,
+                      v8::Utils::ToLocal(data));
+  }
+  return result;
+}
+
+
+bool Top::MayIndexedAccess(JSObject* receiver,
+                           uint32_t index,
+                           v8::AccessType type) {
+  ASSERT(receiver->IsAccessCheckNeeded());
+  // Check for compatibility between the security tokens in the
+  // current lexical context and the accessed object.
+  ASSERT(Top::context());
+  // The callers of this method are not expecting a GC.
+  AssertNoAllocation no_gc;
+
+  MayAccessDecision decision = MayAccessPreCheck(receiver, type);
+  if (decision != UNKNOWN) return decision == YES;
+
+  // Get the indexed access check callback.
+  JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
+  Object* info = constructor->shared()->function_data();
+  if (info == Heap::undefined_value()) return false;
+
+  Object* data_obj = FunctionTemplateInfo::cast(info)->access_check_info();
+  if (data_obj == Heap::undefined_value()) return false;
+
+  Object* fun_obj = AccessCheckInfo::cast(data_obj)->indexed_callback();
+  v8::IndexedSecurityCallback callback =
+      v8::ToCData<v8::IndexedSecurityCallback>(fun_obj);
+
+  if (!callback) return false;
+
+  HandleScope scope;
+  Handle<JSObject> receiver_handle(receiver);
+  Handle<Object> data(AccessCheckInfo::cast(data_obj)->data());
+  LOG(ApiIndexedSecurityCheck(index));
+  bool result = false;
+  {
+    // Leaving JavaScript.
+    VMState state(EXTERNAL);
+    result = callback(v8::Utils::ToLocal(receiver_handle),
+                      index,
+                      type,
+                      v8::Utils::ToLocal(data));
+  }
+  return result;
+}
+
+
+const char* Top::kStackOverflowMessage =
+  "Uncaught RangeError: Maximum call stack size exceeded";
+
+
+Failure* Top::StackOverflow() {
+  HandleScope scope;
+  Handle<String> key = Factory::stack_overflow_symbol();
+  Handle<JSObject> boilerplate =
+      Handle<JSObject>::cast(GetProperty(Top::builtins(), key));
+  Handle<Object> exception = Copy(boilerplate);
+  // TODO(1240995): To avoid having to call JavaScript code to compute
+  // the message for stack overflow exceptions, which is very likely to
+  // double fault with another stack overflow exception, we use a
+  // precomputed message. This is somewhat problematic in that it
+  // doesn't use ReportUncaughtException to determine the location
+  // from where the exception occurred. It should probably be
+  // reworked.
+  DoThrow(*exception, NULL, kStackOverflowMessage);
+  return Failure::Exception();
+}
+
+
+Failure* Top::TerminateExecution() {
+  DoThrow(Heap::termination_exception(), NULL, NULL);
+  return Failure::Exception();
+}
+
+
+Failure* Top::Throw(Object* exception, MessageLocation* location) {
+  DoThrow(exception, location, NULL);
+  return Failure::Exception();
+}
+
+
+Failure* Top::ReThrow(Object* exception, MessageLocation* location) {
+  // Set the exception being re-thrown.
+  set_pending_exception(exception);
+  return Failure::Exception();
+}
+
+
+Failure* Top::ThrowIllegalOperation() {
+  return Throw(Heap::illegal_access_symbol());
+}
+
+
+void Top::ScheduleThrow(Object* exception) {
+  // When scheduling a throw, we first throw the exception so that error
+  // reporting happens if it is uncaught, and only then reschedule it.
+  Throw(exception);
+  thread_local_.scheduled_exception_ = pending_exception();
+  thread_local_.external_caught_exception_ = false;
+  clear_pending_exception();
+}
+
+
+Object* Top::PromoteScheduledException() {
+  Object* thrown = scheduled_exception();
+  clear_scheduled_exception();
+  // Re-throw the exception to avoid getting repeated error reporting.
+  return ReThrow(thrown);
+}
+
+
+void Top::PrintCurrentStackTrace(FILE* out) {
+  StackTraceFrameIterator it;
+  while (!it.done()) {
+    HandleScope scope;
+    // Find code position if recorded in relocation info.
+    JavaScriptFrame* frame = it.frame();
+    int pos = frame->code()->SourcePosition(frame->pc());
+    Handle<Object> pos_obj(Smi::FromInt(pos));
+    // Fetch function and receiver.
+    Handle<JSFunction> fun(JSFunction::cast(frame->function()));
+    Handle<Object> recv(frame->receiver());
+    // Advance to the next JavaScript frame and determine if the
+    // current frame is the top-level frame.
+    it.Advance();
+    Handle<Object> is_top_level = it.done()
+        ? Factory::true_value()
+        : Factory::false_value();
+    // Generate and print stack trace line.
+    Handle<String> line =
+        Execution::GetStackTraceLine(recv, fun, pos_obj, is_top_level);
+    if (line->length() > 0) {
+      line->PrintOn(out);
+      fprintf(out, "\n");
+    }
+  }
+}
+
+
+void Top::ComputeLocation(MessageLocation* target) {
+  *target = MessageLocation(empty_script(), -1, -1);
+  StackTraceFrameIterator it;
+  if (!it.done()) {
+    JavaScriptFrame* frame = it.frame();
+    JSFunction* fun = JSFunction::cast(frame->function());
+    Object* script = fun->shared()->script();
+    if (script->IsScript() &&
+        !(Script::cast(script)->source()->IsUndefined())) {
+      int pos = frame->code()->SourcePosition(frame->pc());
+      // Compute the location from the function and the reloc info.
+      Handle<Script> casted_script(Script::cast(script));
+      *target = MessageLocation(casted_script, pos, pos + 1);
+    }
+  }
+}
+
+
+void Top::ReportUncaughtException(Handle<Object> exception,
+                                  MessageLocation* location,
+                                  Handle<String> stack_trace) {
+  Handle<Object> message;
+  if (!Bootstrapper::IsActive()) {
+    // It's not safe to try to make message objects while the bootstrapper
+    // is active since the infrastructure may not have been properly
+    // initialized.
+    message =
+      MessageHandler::MakeMessageObject("uncaught_exception",
+                                        location,
+                                        HandleVector<Object>(&exception, 1),
+                                        stack_trace);
+  }
+  // Report the uncaught exception.
+  MessageHandler::ReportMessage(location, message);
+}
+
+
+bool Top::ShouldReturnException(bool* is_caught_externally,
+                                bool catchable_by_javascript) {
+  // Find the top-most try-catch handler.
+  StackHandler* handler =
+      StackHandler::FromAddress(Top::handler(Top::GetCurrentThread()));
+  while (handler != NULL && !handler->is_try_catch()) {
+    handler = handler->next();
+  }
+
+  // Get the address of the external handler so we can compare the address to
+  // determine which one is closer to the top of the stack.
+  v8::TryCatch* try_catch = thread_local_.try_catch_handler_;
+
+  // The exception has been externally caught if and only if there is
+  // an external handler which is on top of the top-most try-catch
+  // handler.
+  //
+  // See comments in RegisterTryCatchHandler for details.
+  *is_caught_externally = try_catch != NULL &&
+      (handler == NULL || handler == try_catch->js_handler_ ||
+       !catchable_by_javascript);
+
+  if (*is_caught_externally) {
+    // Only report the exception if the external handler is verbose.
+    return thread_local_.try_catch_handler_->is_verbose_;
+  } else {
+    // Report the exception if it isn't caught by JavaScript code.
+    return handler == NULL;
+  }
+}
+
+
+void Top::DoThrow(Object* exception,
+                  MessageLocation* location,
+                  const char* message) {
+  ASSERT(!has_pending_exception());
+
+  HandleScope scope;
+  Handle<Object> exception_handle(exception);
+
+  // Determine reporting and whether the exception is caught externally.
+  bool is_caught_externally = false;
+  bool is_out_of_memory = exception == Failure::OutOfMemoryException();
+  bool is_termination_exception = exception == Heap::termination_exception();
+  bool catchable_by_javascript = !is_termination_exception && !is_out_of_memory;
+  bool should_return_exception =
+      ShouldReturnException(&is_caught_externally, catchable_by_javascript);
+  bool report_exception = catchable_by_javascript && should_return_exception;
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Notify debugger of exception.
+  if (catchable_by_javascript) {
+    Debugger::OnException(exception_handle, report_exception);
+  }
+#endif
+
+  // Generate the message.
+  Handle<Object> message_obj;
+  MessageLocation potential_computed_location;
+  bool try_catch_needs_message =
+      is_caught_externally &&
+      thread_local_.try_catch_handler_->capture_message_;
+  if (report_exception || try_catch_needs_message) {
+    if (location == NULL) {
+      // If no location was specified, we use a computed one instead.
+      ComputeLocation(&potential_computed_location);
+      location = &potential_computed_location;
+    }
+    if (!Bootstrapper::IsActive()) {
+      // It's not safe to try to make message objects or collect stack
+      // traces while the bootstrapper is active since the infrastructure
+      // may not have been properly initialized.
+      Handle<String> stack_trace;
+      if (FLAG_trace_exception) stack_trace = StackTrace();
+      message_obj = MessageHandler::MakeMessageObject("uncaught_exception",
+          location, HandleVector<Object>(&exception_handle, 1), stack_trace);
+    }
+  }
+
+  // Save the message for reporting if the exception remains uncaught.
+  thread_local_.has_pending_message_ = report_exception;
+  thread_local_.pending_message_ = message;
+  if (!message_obj.is_null()) {
+    thread_local_.pending_message_obj_ = *message_obj;
+    if (location != NULL) {
+      thread_local_.pending_message_script_ = *location->script();
+      thread_local_.pending_message_start_pos_ = location->start_pos();
+      thread_local_.pending_message_end_pos_ = location->end_pos();
+    }
+  }
+
+  if (is_caught_externally) {
+    thread_local_.catcher_ = thread_local_.try_catch_handler_;
+  }
+
+  // NOTE: Notifying the debugger or generating the message
+  // may have caused new exceptions. For now, we just ignore
+  // that and set the pending exception to the original one.
+  set_pending_exception(*exception_handle);
+}
+
+
+void Top::ReportPendingMessages() {
+  ASSERT(has_pending_exception());
+  setup_external_caught();
+  // If the pending exception is OutOfMemoryException, set out_of_memory in
+  // the global context.  Note: We have to mark the global context here
+  // since the GenerateThrowOutOfMemory stub cannot make a RuntimeCall to
+  // set it.
+  bool external_caught = thread_local_.external_caught_exception_;
+  HandleScope scope;
+  if (thread_local_.pending_exception_ == Failure::OutOfMemoryException()) {
+    context()->mark_out_of_memory();
+  } else if (thread_local_.pending_exception_ ==
+             Heap::termination_exception()) {
+    if (external_caught) {
+      thread_local_.try_catch_handler_->can_continue_ = false;
+      thread_local_.try_catch_handler_->exception_ = Heap::null_value();
+    }
+  } else {
+    Handle<Object> exception(pending_exception());
+    thread_local_.external_caught_exception_ = false;
+    if (external_caught) {
+      thread_local_.try_catch_handler_->can_continue_ = true;
+      thread_local_.try_catch_handler_->exception_ =
+        thread_local_.pending_exception_;
+      if (!thread_local_.pending_message_obj_->IsTheHole()) {
+        try_catch_handler()->message_ = thread_local_.pending_message_obj_;
+      }
+    }
+    if (thread_local_.has_pending_message_) {
+      thread_local_.has_pending_message_ = false;
+      if (thread_local_.pending_message_ != NULL) {
+        MessageHandler::ReportMessage(thread_local_.pending_message_);
+      } else if (!thread_local_.pending_message_obj_->IsTheHole()) {
+        Handle<Object> message_obj(thread_local_.pending_message_obj_);
+        if (thread_local_.pending_message_script_ != NULL) {
+          Handle<Script> script(thread_local_.pending_message_script_);
+          int start_pos = thread_local_.pending_message_start_pos_;
+          int end_pos = thread_local_.pending_message_end_pos_;
+          MessageLocation location(script, start_pos, end_pos);
+          MessageHandler::ReportMessage(&location, message_obj);
+        } else {
+          MessageHandler::ReportMessage(NULL, message_obj);
+        }
+      }
+    }
+    thread_local_.external_caught_exception_ = external_caught;
+    set_pending_exception(*exception);
+  }
+  clear_pending_message();
+}
+
+
+void Top::TraceException(bool flag) {
+  FLAG_trace_exception = flag;
+}
+
+
+bool Top::OptionalRescheduleException(bool is_bottom_call) {
+  // Always reschedule out-of-memory exceptions.
+  if (!is_out_of_memory()) {
+    bool is_termination_exception =
+        pending_exception() == Heap::termination_exception();
+
+    // Do not reschedule the exception if this is the bottom call.
+    bool clear_exception = is_bottom_call;
+
+    if (is_termination_exception) {
+      if (is_bottom_call) {
+        thread_local_.external_caught_exception_ = false;
+        clear_pending_exception();
+        return false;
+      }
+    } else if (thread_local_.external_caught_exception_) {
+      // If the exception is externally caught, clear it if there are no
+      // JavaScript frames on the way to the C++ frame that has the
+      // external handler.
+      ASSERT(thread_local_.try_catch_handler_ != NULL);
+      Address external_handler_address =
+          reinterpret_cast<Address>(thread_local_.try_catch_handler_);
+      JavaScriptFrameIterator it;
+      if (it.done() || (it.frame()->sp() > external_handler_address)) {
+        clear_exception = true;
+      }
+    }
+
+    // Clear the exception if needed.
+    if (clear_exception) {
+      thread_local_.external_caught_exception_ = false;
+      clear_pending_exception();
+      return false;
+    }
+  }
+
+  // Reschedule the exception.
+  thread_local_.scheduled_exception_ = pending_exception();
+  clear_pending_exception();
+  return true;
+}
+
+
+bool Top::is_out_of_memory() {
+  if (has_pending_exception()) {
+    Object* e = pending_exception();
+    if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
+      return true;
+    }
+  }
+  if (has_scheduled_exception()) {
+    Object* e = scheduled_exception();
+    if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+Handle<Context> Top::global_context() {
+  GlobalObject* global = thread_local_.context_->global();
+  return Handle<Context>(global->global_context());
+}
+
+
+Handle<Context> Top::GetCallingGlobalContext() {
+  JavaScriptFrameIterator it;
+  if (it.done()) return Handle<Context>::null();
+  JavaScriptFrame* frame = it.frame();
+  Context* context = Context::cast(frame->context());
+  return Handle<Context>(context->global_context());
+}
+
+
+Object* Top::LookupSpecialFunction(JSObject* receiver,
+                                   JSObject* prototype,
+                                   JSFunction* function) {
+  if (receiver->IsJSArray()) {
+    FixedArray* table = context()->global_context()->special_function_table();
+    for (int index = 0; index < table->length(); index += 3) {
+      if ((prototype == table->get(index)) &&
+          (function == table->get(index + 1))) {
+        return table->get(index + 2);
+      }
+    }
+  }
+  return Heap::undefined_value();
+}
+
+
+char* Top::ArchiveThread(char* to) {
+  memcpy(to, reinterpret_cast<char*>(&thread_local_), sizeof(thread_local_));
+  InitializeThreadLocal();
+  return to + sizeof(thread_local_);
+}
+
+
+char* Top::RestoreThread(char* from) {
+  memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(thread_local_));
+  return from + sizeof(thread_local_);
+}
+
+
+ExecutionAccess::ExecutionAccess() {
+  Top::break_access_->Lock();
+}
+
+
+ExecutionAccess::~ExecutionAccess() {
+  Top::break_access_->Unlock();
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/top.h b/src/top.h
new file mode 100644
index 0000000..ae94f08
--- /dev/null
+++ b/src/top.h
@@ -0,0 +1,421 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_TOP_H_
+#define V8_TOP_H_
+
+#include "frames-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define RETURN_IF_SCHEDULED_EXCEPTION() \
+  if (Top::has_scheduled_exception()) return Top::PromoteScheduledException()
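+
+// For illustration, a runtime-style helper returning Object* might use the
+// macro like this (sketch only; CallHelper is not a real function):
+//
+//   Object* CallHelper() {
+//     RETURN_IF_SCHEDULED_EXCEPTION();
+//     return Heap::undefined_value();
+//   }
+//
+// i.e. any scheduled exception is promoted back to a pending exception via
+// Top::PromoteScheduledException() before normal execution continues.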
+
+// Top has static variables used for JavaScript execution.
+
+class SaveContext;  // Forward declaration.
+
+class ThreadLocalTop BASE_EMBEDDED {
+ public:
+  // The context where the current execution method is created and for variable
+  // lookups.
+  Context* context_;
+  int thread_id_;
+  Object* pending_exception_;
+  bool has_pending_message_;
+  const char* pending_message_;
+  Object* pending_message_obj_;
+  Script* pending_message_script_;
+  int pending_message_start_pos_;
+  int pending_message_end_pos_;
+  // Use a separate value for scheduled exceptions to preserve the
+  // invariants that hold about pending_exception.  We may want to
+  // unify them later.
+  Object* scheduled_exception_;
+  bool external_caught_exception_;
+  v8::TryCatch* try_catch_handler_;
+  SaveContext* save_context_;
+  v8::TryCatch* catcher_;
+
+  // Stack.
+  Address c_entry_fp_;  // The frame pointer of the top C entry frame.
+  Address handler_;  // Try-blocks are chained through the stack.
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  Address js_entry_sp_;  // The stack pointer of the bottom JS entry frame.
+#endif
+  bool stack_is_cooked_;
+  inline bool stack_is_cooked() { return stack_is_cooked_; }
+  inline void set_stack_is_cooked(bool value) { stack_is_cooked_ = value; }
+
+  // Generated code scratch locations.
+  int32_t formal_count_;
+
+  // Call back function to report unsafe JS accesses.
+  v8::FailedAccessCheckCallback failed_access_check_callback_;
+
+  void Free() {
+    ASSERT(!has_pending_message_);
+    ASSERT(!external_caught_exception_);
+    ASSERT(try_catch_handler_ == NULL);
+  }
+};
+
+#define TOP_ADDRESS_LIST(C) \
+  C(handler_address)                   \
+  C(c_entry_fp_address)                \
+  C(context_address)                   \
+  C(pending_exception_address)         \
+  C(external_caught_exception_address)
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+#define TOP_ADDRESS_LIST_PROF(C)       \
+  C(js_entry_sp_address)
+#else
+#define TOP_ADDRESS_LIST_PROF(C)
+#endif
+
+
+class Top {
+ public:
+  enum AddressId {
+#define C(name) k_##name,
+    TOP_ADDRESS_LIST(C)
+    TOP_ADDRESS_LIST_PROF(C)
+#undef C
+    k_top_address_count
+  };
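+
+  // With the lists above this enum expands to k_handler_address,
+  // k_c_entry_fp_address, k_context_address, k_pending_exception_address,
+  // k_external_caught_exception_address (plus k_js_entry_sp_address when
+  // ENABLE_LOGGING_AND_PROFILING is defined), followed by
+  // k_top_address_count.  The order matches the top_addresses array in top.cc.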
+
+  static Address get_address_from_id(AddressId id);
+
+  // Access to top context (where the current function object was created).
+  static Context* context() { return thread_local_.context_; }
+  static void set_context(Context* context) {
+    thread_local_.context_ = context;
+  }
+  static Context** context_address() { return &thread_local_.context_; }
+
+  static SaveContext* save_context() { return thread_local_.save_context_; }
+  static void set_save_context(SaveContext* save) {
+    thread_local_.save_context_ = save;
+  }
+
+  // Access to current thread id.
+  static int thread_id() { return thread_local_.thread_id_; }
+  static void set_thread_id(int id) { thread_local_.thread_id_ = id; }
+
+  // Interface to pending exception.
+  static Object* pending_exception() {
+    ASSERT(has_pending_exception());
+    return thread_local_.pending_exception_;
+  }
+  static bool external_caught_exception() {
+    return thread_local_.external_caught_exception_;
+  }
+  static void set_pending_exception(Object* exception) {
+    thread_local_.pending_exception_ = exception;
+  }
+  static void clear_pending_exception() {
+    thread_local_.pending_exception_ = Heap::the_hole_value();
+  }
+
+  static Object** pending_exception_address() {
+    return &thread_local_.pending_exception_;
+  }
+  static bool has_pending_exception() {
+    return !thread_local_.pending_exception_->IsTheHole();
+  }
+  static void clear_pending_message() {
+    thread_local_.has_pending_message_ = false;
+    thread_local_.pending_message_ = NULL;
+    thread_local_.pending_message_obj_ = Heap::the_hole_value();
+    thread_local_.pending_message_script_ = NULL;
+  }
+  static v8::TryCatch* try_catch_handler() {
+    return thread_local_.try_catch_handler_;
+  }
+  // This method is called by the API after operations that may throw
+  // exceptions.  If an exception was thrown and not handled by an external
+  // handler, the exception is scheduled to be rethrown when we return to
+  // running JavaScript code.  If an exception is scheduled, true is returned.
+  static bool OptionalRescheduleException(bool is_bottom_call);
+
+
+  static bool* external_caught_exception_address() {
+    return &thread_local_.external_caught_exception_;
+  }
+
+  static Object* scheduled_exception() {
+    ASSERT(has_scheduled_exception());
+    return thread_local_.scheduled_exception_;
+  }
+  static bool has_scheduled_exception() {
+    return !thread_local_.scheduled_exception_->IsTheHole();
+  }
+  static void clear_scheduled_exception() {
+    thread_local_.scheduled_exception_ = Heap::the_hole_value();
+  }
+
+  static void setup_external_caught() {
+    thread_local_.external_caught_exception_ =
+        has_pending_exception() &&
+        (thread_local_.catcher_ != NULL) &&
+        (thread_local_.try_catch_handler_ == thread_local_.catcher_);
+  }
+
+  // Tells whether the current context has experienced an out of memory
+  // exception.
+  static bool is_out_of_memory();
+
+  // JS execution stack (see frames.h).
+  static Address c_entry_fp(ThreadLocalTop* thread) {
+    return thread->c_entry_fp_;
+  }
+  static Address handler(ThreadLocalTop* thread) { return thread->handler_; }
+
+  static inline Address* c_entry_fp_address() {
+    return &thread_local_.c_entry_fp_;
+  }
+  static inline Address* handler_address() { return &thread_local_.handler_; }
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // Bottom JS entry (see StackTracer::Trace in log.cc).
+  static Address js_entry_sp(ThreadLocalTop* thread) {
+    return thread->js_entry_sp_;
+  }
+  static inline Address* js_entry_sp_address() {
+    return &thread_local_.js_entry_sp_;
+  }
+#endif
+
+  // Generated code scratch locations.
+  static void* formal_count_address() { return &thread_local_.formal_count_; }
+
+  static void MarkCompactPrologue(bool is_compacting);
+  static void MarkCompactEpilogue(bool is_compacting);
+  static void MarkCompactPrologue(bool is_compacting,
+                                  char* archived_thread_data);
+  static void MarkCompactEpilogue(bool is_compacting,
+                                  char* archived_thread_data);
+  static void PrintCurrentStackTrace(FILE* out);
+  static void PrintStackTrace(FILE* out, char* thread_data);
+  static void PrintStack(StringStream* accumulator);
+  static void PrintStack();
+  static Handle<String> StackTrace();
+
+  // Returns whether the top context may access the given global object. If
+  // the result is false, the pending exception is guaranteed to be
+  // set.
+  static bool MayNamedAccess(JSObject* receiver,
+                             Object* key,
+                             v8::AccessType type);
+  static bool MayIndexedAccess(JSObject* receiver,
+                               uint32_t index,
+                               v8::AccessType type);
+
+  static void SetFailedAccessCheckCallback(
+      v8::FailedAccessCheckCallback callback);
+  static void ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type);
+
+  // Exception throwing support. The caller should use the result
+  // of Throw() as its return value.
+  static Failure* Throw(Object* exception, MessageLocation* location = NULL);
+  // Re-throw an exception.  This involves no error reporting since
+  // error reporting was handled when the exception was thrown
+  // originally.
+  static Failure* ReThrow(Object* exception, MessageLocation* location = NULL);
+  static void ScheduleThrow(Object* exception);
+  static void ReportPendingMessages();
+  static Failure* ThrowIllegalOperation();
+
+  // Promote a scheduled exception to pending. Asserts has_scheduled_exception.
+  static Object* PromoteScheduledException();
+  static void DoThrow(Object* exception,
+                      MessageLocation* location,
+                      const char* message);
+  static bool ShouldReturnException(bool* is_caught_externally,
+                                    bool catchable_by_javascript);
+  static void ReportUncaughtException(Handle<Object> exception,
+                                      MessageLocation* location,
+                                      Handle<String> stack_trace);
+
+  // Attempts to compute the current source location, storing the
+  // result in the target out parameter.
+  static void ComputeLocation(MessageLocation* target);
+
+  // Override command line flag.
+  static void TraceException(bool flag);
+
+  // Out of resource exception helpers.
+  static Failure* StackOverflow();
+  static Failure* TerminateExecution();
+
+  // Administration
+  static void Initialize();
+  static void TearDown();
+  static void Iterate(ObjectVisitor* v);
+  static void Iterate(ObjectVisitor* v, ThreadLocalTop* t);
+  static char* Iterate(ObjectVisitor* v, char* t);
+
+  // Returns the global object of the current context. It could be
+  // a builtin object or a JS global object.
+  static Handle<GlobalObject> global() {
+    return Handle<GlobalObject>(context()->global());
+  }
+
+  // Returns the global proxy object of the current context.
+  static Object* global_proxy() {
+    return context()->global_proxy();
+  }
+
+  // Returns the current global context.
+  static Handle<Context> global_context();
+
+  // Returns the global context of the calling JavaScript code.  That
+  // is, the global context of the top-most JavaScript frame.
+  static Handle<Context> GetCallingGlobalContext();
+
+  static Handle<JSBuiltinsObject> builtins() {
+    return Handle<JSBuiltinsObject>(thread_local_.context_->builtins());
+  }
+
+  static Object* LookupSpecialFunction(JSObject* receiver,
+                                       JSObject* prototype,
+                                       JSFunction* value);
+
+  static void RegisterTryCatchHandler(v8::TryCatch* that);
+  static void UnregisterTryCatchHandler(v8::TryCatch* that);
+
+#define TOP_GLOBAL_CONTEXT_FIELD_ACCESSOR(index, type, name)  \
+  static Handle<type> name() {                                \
+    return Handle<type>(context()->global_context()->name()); \
+  }
+  GLOBAL_CONTEXT_FIELDS(TOP_GLOBAL_CONTEXT_FIELD_ACCESSOR)
+#undef TOP_GLOBAL_CONTEXT_FIELD_ACCESSOR
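+
+  // For example, assuming GLOBAL_CONTEXT_FIELDS contains an entry along the
+  // lines of
+  //   V(SECURITY_TOKEN_INDEX, Object, security_token)
+  // the macro above would generate
+  //   static Handle<Object> security_token() {
+  //     return Handle<Object>(context()->global_context()->security_token());
+  //   }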
+
+  static inline ThreadLocalTop* GetCurrentThread() { return &thread_local_; }
+  static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
+  static char* ArchiveThread(char* to);
+  static char* RestoreThread(char* from);
+  static void FreeThreadResources() { thread_local_.Free(); }
+
+  static const char* kStackOverflowMessage;
+
+ private:
+  // The context that initiated this JS execution.
+  static ThreadLocalTop thread_local_;
+  static void InitializeThreadLocal();
+  static void PrintStackTrace(FILE* out, ThreadLocalTop* thread);
+  static void MarkCompactPrologue(bool is_compacting,
+                                  ThreadLocalTop* archived_thread_data);
+  static void MarkCompactEpilogue(bool is_compacting,
+                                  ThreadLocalTop* archived_thread_data);
+
+  // Debug.
+  // Mutex for serializing access to break control structures.
+  static Mutex* break_access_;
+
+  friend class SaveContext;
+  friend class AssertNoContextChange;
+  friend class ExecutionAccess;
+
+  static void FillCache();
+};
+
+
+// If the GCC version is 4.1.x or 4.2.x, an additional field is added to the
+// class as a workaround for a bug in the generated code found with these
+// versions of GCC. See V8 issue 122 for details.
+class SaveContext BASE_EMBEDDED {
+ public:
+  SaveContext()
+      : context_(Top::context()),
+#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
+        dummy_(Top::context()),
+#endif
+        prev_(Top::save_context()) {
+    Top::set_save_context(this);
+
+    // If there is no JS frame under the current C frame, use the value 0.
+    JavaScriptFrameIterator it;
+    js_sp_ = it.done() ? 0 : it.frame()->sp();
+  }
+
+  ~SaveContext() {
+    Top::set_context(*context_);
+    Top::set_save_context(prev_);
+  }
+
+  Handle<Context> context() { return context_; }
+  SaveContext* prev() { return prev_; }
+
+  // Returns true if this save context is below a given JavaScript frame.
+  bool below(JavaScriptFrame* frame) {
+    return (js_sp_ == 0) || (frame->sp() < js_sp_);
+  }
+
+ private:
+  Handle<Context> context_;
+#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
+  Handle<Context> dummy_;
+#endif
+  SaveContext* prev_;
+  Address js_sp_;  // The top JS frame's sp when saving context.
+};
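+
+// Typical usage (sketch only): SaveContext acts as an RAII guard around code
+// that switches the current context, e.g.
+//
+//   {
+//     SaveContext save;                 // remembers Top::context()
+//     Top::set_context(other_context);  // other_context is a placeholder
+//     ...                               // run with other_context current
+//   }                                   // destructor restores saved context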
+
+
+class AssertNoContextChange BASE_EMBEDDED {
+#ifdef DEBUG
+ public:
+  AssertNoContextChange() :
+      context_(Top::context()) {
+  }
+
+  ~AssertNoContextChange() {
+    ASSERT(Top::context() == *context_);
+  }
+
+ private:
+  HandleScope scope_;
+  Handle<Context> context_;
+#else
+ public:
+  AssertNoContextChange() { }
+#endif
+};
+
+
+class ExecutionAccess BASE_EMBEDDED {
+ public:
+  ExecutionAccess();
+  ~ExecutionAccess();
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_TOP_H_
diff --git a/src/unicode-inl.h b/src/unicode-inl.h
new file mode 100644
index 0000000..0ee03bd
--- /dev/null
+++ b/src/unicode-inl.h
@@ -0,0 +1,238 @@
+// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_UNICODE_INL_H_
+#define V8_UNICODE_INL_H_
+
+#include "unicode.h"
+
+namespace unibrow {
+
+template <class T, int s> bool Predicate<T, s>::get(uchar code_point) {
+  CacheEntry entry = entries_[code_point & kMask];
+  if (entry.code_point_ == code_point) return entry.value_;
+  return CalculateValue(code_point);
+}
+
+template <class T, int s> bool Predicate<T, s>::CalculateValue(
+    uchar code_point) {
+  bool result = T::Is(code_point);
+  entries_[code_point & kMask] = CacheEntry(code_point, result);
+  return result;
+}
+
+template <class T, int s> int Mapping<T, s>::get(uchar c, uchar n,
+    uchar* result) {
+  CacheEntry entry = entries_[c & kMask];
+  if (entry.code_point_ == c) {
+    if (entry.offset_ == 0) {
+      return 0;
+    } else {
+      result[0] = c + entry.offset_;
+      return 1;
+    }
+  } else {
+    return CalculateValue(c, n, result);
+  }
+}
+
+template <class T, int s> int Mapping<T, s>::CalculateValue(uchar c, uchar n,
+    uchar* result) {
+  bool allow_caching = true;
+  int length = T::Convert(c, n, result, &allow_caching);
+  if (allow_caching) {
+    if (length == 1) {
+      entries_[c & kMask] = CacheEntry(c, result[0] - c);
+      return 1;
+    } else {
+      entries_[c & kMask] = CacheEntry(c, 0);
+      return 0;
+    }
+  } else {
+    return length;
+  }
+}
+
+
+unsigned Utf8::Encode(char* str, uchar c) {
+  static const int kMask = ~(1 << 6);
+  if (c <= kMaxOneByteChar) {
+    str[0] = c;
+    return 1;
+  } else if (c <= kMaxTwoByteChar) {
+    str[0] = 0xC0 | (c >> 6);
+    str[1] = 0x80 | (c & kMask);
+    return 2;
+  } else if (c <= kMaxThreeByteChar) {
+    str[0] = 0xE0 | (c >> 12);
+    str[1] = 0x80 | ((c >> 6) & kMask);
+    str[2] = 0x80 | (c & kMask);
+    return 3;
+  } else {
+    str[0] = 0xF0 | (c >> 18);
+    str[1] = 0x80 | ((c >> 12) & kMask);
+    str[2] = 0x80 | ((c >> 6) & kMask);
+    str[3] = 0x80 | (c & kMask);
+    return 4;
+  }
+}
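+
+// Worked example: for c = 0x20AC (the euro sign) the branches above produce
+// the three bytes 0xE2 0x82 0xAC, the standard UTF-8 encoding.  Note that
+// kMask only clears bit 6; the bits above the low byte are dropped when the
+// value is stored into the char buffer, so each continuation byte ends up as
+// 0x80 | (six payload bits).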
+
+
+uchar Utf8::ValueOf(const byte* bytes, unsigned length, unsigned* cursor) {
+  if (length <= 0) return kBadChar;
+  byte first = bytes[0];
+  // Characters between 0000 and 007F are encoded as a single byte.
+  if (first <= kMaxOneByteChar) {
+    *cursor += 1;
+    return first;
+  }
+  return CalculateValue(bytes, length, cursor);
+}
+
+unsigned Utf8::Length(uchar c) {
+  if (c <= kMaxOneByteChar) {
+    return 1;
+  } else if (c <= kMaxTwoByteChar) {
+    return 2;
+  } else if (c <= kMaxThreeByteChar) {
+    return 3;
+  } else {
+    return 4;
+  }
+}
+
+uchar CharacterStream::GetNext() {
+  uchar result = DecodeCharacter(buffer_, &cursor_);
+  if (remaining_ == 1) {
+    cursor_ = 0;
+    FillBuffer();
+  } else {
+    remaining_--;
+  }
+  return result;
+}
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define IF_LITTLE(expr) expr
+#define IF_BIG(expr)    ((void) 0)
+#elif __BYTE_ORDER == __BIG_ENDIAN
+#define IF_LITTLE(expr) ((void) 0)
+#define IF_BIG(expr)    expr
+#else
+#warning Unknown byte ordering
+#endif
+
+bool CharacterStream::EncodeAsciiCharacter(uchar c, byte* buffer,
+    unsigned capacity, unsigned& offset) {
+  if (offset >= capacity) return false;
+  buffer[offset] = c;
+  offset += 1;
+  return true;
+}
+
+bool CharacterStream::EncodeNonAsciiCharacter(uchar c, byte* buffer,
+    unsigned capacity, unsigned& offset) {
+  unsigned aligned = (offset + 0x3) & ~0x3;
+  if ((aligned + sizeof(uchar)) > capacity)
+    return false;
+  if (offset == aligned) {
+    IF_LITTLE(*reinterpret_cast<uchar*>(buffer + aligned) = (c << 8) | 0x80);
+    IF_BIG(*reinterpret_cast<uchar*>(buffer + aligned) = c | (1 << 31));
+  } else {
+    buffer[offset] = 0x80;
+    IF_LITTLE(*reinterpret_cast<uchar*>(buffer + aligned) = c << 8);
+    IF_BIG(*reinterpret_cast<uchar*>(buffer + aligned) = c);
+  }
+  offset = aligned + sizeof(uchar);
+  return true;
+}
+
+bool CharacterStream::EncodeCharacter(uchar c, byte* buffer, unsigned capacity,
+    unsigned& offset) {
+  if (c <= Utf8::kMaxOneByteChar) {
+    return EncodeAsciiCharacter(c, buffer, capacity, offset);
+  } else {
+    return EncodeNonAsciiCharacter(c, buffer, capacity, offset);
+  }
+}
+
+uchar CharacterStream::DecodeCharacter(const byte* buffer, unsigned* offset) {
+  byte b = buffer[*offset];
+  if (b <= Utf8::kMaxOneByteChar) {
+    (*offset)++;
+    return b;
+  } else {
+    unsigned aligned = (*offset + 0x3) & ~0x3;
+    *offset = aligned + sizeof(uchar);
+    IF_LITTLE(return *reinterpret_cast<const uchar*>(buffer + aligned) >> 8);
+    IF_BIG(return *reinterpret_cast<const uchar*>(buffer + aligned) &
+                    ~(1 << 31));
+  }
+}
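+
+// Buffer layout shared by the Encode*/DecodeCharacter functions above:
+// one-byte (ASCII) characters are stored directly, while a non-ASCII
+// character is flagged by a byte with the high bit set and stored as a full
+// uchar at the next 4-byte-aligned position (shifted left by 8 on
+// little-endian, so the flag byte doubles as the low byte of the aligned word
+// when the offsets coincide).  DecodeCharacter uses the flag byte to pick the
+// right path.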
+
+#undef IF_LITTLE
+#undef IF_BIG
+
+template <class R, class I, unsigned s>
+void InputBuffer<R, I, s>::FillBuffer() {
+  buffer_ = R::ReadBlock(input_, util_buffer_, s, &remaining_, &offset_);
+}
+
+template <class R, class I, unsigned s>
+void InputBuffer<R, I, s>::Rewind() {
+  Reset(input_);
+}
+
+template <class R, class I, unsigned s>
+void InputBuffer<R, I, s>::Reset(unsigned position, I input) {
+  input_ = input;
+  remaining_ = 0;
+  cursor_ = 0;
+  offset_ = position;
+  buffer_ = R::ReadBlock(input_, util_buffer_, s, &remaining_, &offset_);
+}
+
+template <class R, class I, unsigned s>
+void InputBuffer<R, I, s>::Reset(I input) {
+  Reset(0, input);
+}
+
+template <class R, class I, unsigned s>
+void InputBuffer<R, I, s>::Seek(unsigned position) {
+  offset_ = position;
+  buffer_ = R::ReadBlock(input_, util_buffer_, s, &remaining_, &offset_);
+}
+
+template <unsigned s>
+Utf8InputBuffer<s>::Utf8InputBuffer(const char* data, unsigned length)
+    : InputBuffer<Utf8, Buffer<const char*>, s>(Buffer<const char*>(data,
+                                                                    length)) {
+}
+
+}  // namespace unibrow
+
+#endif  // V8_UNICODE_INL_H_
diff --git a/src/unicode.cc b/src/unicode.cc
new file mode 100644
index 0000000..ef13593
--- /dev/null
+++ b/src/unicode.cc
@@ -0,0 +1,749 @@
+// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// This file was generated at 2008-11-25 16:02:40.592795
+
+#include "unicode-inl.h"
+#include <stdlib.h>
+#include <stdio.h>
+
+namespace unibrow {
+
+static const int kStartBit = (1 << 30);
+static const int kChunkBits = (1 << 15);
+
+/**
+ * \file
+ * Implementations of functions for working with unicode.
+ */
+
+typedef signed short int16_t;  // NOLINT
+typedef unsigned short uint16_t;  // NOLINT
+typedef int int32_t;  // NOLINT
+
+// All access to the character table should go through this function.
+template <int D>
+static inline uchar TableGet(const int32_t* table, int index) {
+  return table[D * index];
+}
+
+static inline uchar GetEntry(int32_t entry) {
+  return entry & (kStartBit - 1);
+}
+
+static inline bool IsStart(int32_t entry) {
+  return (entry & kStartBit) != 0;
+}
+
+/**
+ * Look up a character in the Unicode table using a mix of binary and
+ * interpolation search.  For a uniformly distributed array,
+ * interpolation search beats binary search by a wide margin.  However,
+ * in this case interpolation search degenerates because of some very
+ * high values in the lower end of the table, so this function uses a
+ * combination.  The average number of steps to look up the information
+ * about a character is around 10, slightly higher if there is no
+ * information available about the character.
+ */
+static bool LookupPredicate(const int32_t* table, uint16_t size, uchar chr) {
+  static const int kEntryDist = 1;
+  uint16_t value = chr & (kChunkBits - 1);
+  unsigned int low = 0;
+  unsigned int high = size - 1;
+  while (high != low) {
+    unsigned int mid = low + ((high - low) >> 1);
+    uchar current_value = GetEntry(TableGet<kEntryDist>(table, mid));
+    // If we've found an entry less than or equal to this one, and the
+    // next one is not also less than this one, we've arrived.
+    if ((current_value <= value) &&
+        (mid + 1 == size ||
+         GetEntry(TableGet<kEntryDist>(table, mid + 1)) > value)) {
+      low = mid;
+      break;
+    } else if (current_value < value) {
+      low = mid + 1;
+    } else if (current_value > value) {
+      // If we've just checked the bottom-most value and it's not
+      // the one we're looking for, we're done.
+      if (mid == 0) break;
+      high = mid - 1;
+    }
+  }
+  int32_t field = TableGet<kEntryDist>(table, low);
+  uchar entry = GetEntry(field);
+  bool is_start = IsStart(field);
+  return (entry == value) ||
+          (entry < value && is_start);
+}
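+
+// Table layout assumed by LookupPredicate: each int32_t entry stores the low
+// 15 bits of a code point (chr & (kChunkBits - 1)) plus the kStartBit flag
+// marking the start of a run of characters with the property.  A character
+// matches if its entry is an exact hit, or if the closest smaller entry is a
+// run start.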
+
+template <int kW>
+struct MultiCharacterSpecialCase {
+  uint16_t length;
+  uchar chars[kW];
+};
+
+// Look up the mapping for the given character in the specified table,
+// which is of the specified length and uses the specified special case
+// mapping for multi-char mappings.  The next parameter is the character
+// following the one to map.  The result will be written in to the result
+// buffer and the number of characters written will be returned.  Finally,
+// if the allow_caching_ptr is non-null then false will be stored in
+// it if the result contains multiple characters or depends on the
+// context.
+template <int kW>
+static int LookupMapping(const int32_t* table,
+                         uint16_t size,
+                         const MultiCharacterSpecialCase<kW>* multi_chars,
+                         uchar chr,
+                         uchar next,
+                         uchar* result,
+                         bool* allow_caching_ptr) {
+  static const int kEntryDist = 2;
+  uint16_t value = chr & (kChunkBits - 1);
+  unsigned int low = 0;
+  unsigned int high = size - 1;
+  while (high != low) {
+    unsigned int mid = low + ((high - low) >> 1);
+    uchar current_value = GetEntry(TableGet<kEntryDist>(table, mid));
+    // If we've found an entry less than or equal to the value we're looking
+    // for, and the next entry is greater than it, we've arrived.
+    if ((current_value <= value) &&
+        (mid + 1 == size ||
+         GetEntry(TableGet<kEntryDist>(table, mid + 1)) > value)) {
+      low = mid;
+      break;
+    } else if (current_value < value) {
+      low = mid + 1;
+    } else if (current_value > value) {
+      // If we've just checked the bottom-most value and it's not
+      // the one we're looking for, we're done.
+      if (mid == 0) break;
+      high = mid - 1;
+    }
+  }
+  int32_t field = TableGet<kEntryDist>(table, low);
+  uchar entry = GetEntry(field);
+  bool is_start = IsStart(field);
+  bool found = (entry == value) || (entry < value && is_start);
+  if (found) {
+    int32_t value = table[2 * low + 1];
+    if (value == 0) {
+      // 0 means not present
+      return 0;
+    } else if ((value & 3) == 0) {
+      // Low bits 0 means a constant offset from the given character.
+      result[0] = chr + (value >> 2);
+      return 1;
+    } else if ((value & 3) == 1) {
+      // Low bits 1 means a special case mapping
+      if (allow_caching_ptr) *allow_caching_ptr = false;
+      const MultiCharacterSpecialCase<kW>& mapping = multi_chars[value >> 2];
+      for (int i = 0; i < mapping.length; i++)
+        result[i] = mapping.chars[i];
+      return mapping.length;
+    } else {
+      // Low bits 2 means a really really special case
+      if (allow_caching_ptr) *allow_caching_ptr = false;
+      // The cases of this switch are defined in unicode.py in the
+      // really_special_cases mapping.
+      switch (value >> 2) {
+        case 1:
+          // Really special case 1: upper case sigma.  This letter
+          // converts to two different lower case sigmas depending on
+          // whether or not it occurs at the end of a word.
+          if (next != 0 && Letter::Is(next)) {
+            result[0] = 0x03C3;
+          } else {
+            result[0] = 0x03C2;
+          }
+          return 1;
+        default:
+          return 0;
+      }
+      return -1;
+    }
+  } else {
+    return 0;
+  }
+}
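+
+// Worked example (illustrative): kToLowercaseTable0 below starts with
+// 1073741889, 128, 90, 128, i.e. the range 'A'..'Z' with payload 128.  The
+// low two bits of 128 are 0, so the mapping is a constant offset of
+// 128 >> 2 == 32, and LookupMapping maps 'B' (66) to 98 ('b'), returning 1.
+// The singleton entry 304, 1 further down has low bits 1, so U+0130 maps
+// through kToLowercaseMultiStrings0[1 >> 2], producing the two characters
+// {105, 775} ('i' followed by U+0307).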
+
+uchar Utf8::CalculateValue(const byte* str,
+                           unsigned length,
+                           unsigned* cursor) {
+  // We only get called for non-ascii characters.
+  if (length == 1) {
+    *cursor += 1;
+    return kBadChar;
+  }
+  byte first = str[0];
+  byte second = str[1] ^ 0x80;
+  if (second & 0xC0) {
+    *cursor += 1;
+    return kBadChar;
+  }
+  if (first < 0xE0) {
+    if (first < 0xC0) {
+      *cursor += 1;
+      return kBadChar;
+    }
+    uchar l = ((first << 6) | second) & kMaxTwoByteChar;
+    if (l <= kMaxOneByteChar) {
+      *cursor += 1;
+      return kBadChar;
+    }
+    *cursor += 2;
+    return l;
+  }
+  if (length == 2) {
+    *cursor += 1;
+    return kBadChar;
+  }
+  byte third = str[2] ^ 0x80;
+  if (third & 0xC0) {
+    *cursor += 1;
+    return kBadChar;
+  }
+  if (first < 0xF0) {
+    uchar l = ((((first << 6) | second) << 6) | third) & kMaxThreeByteChar;
+    if (l <= kMaxTwoByteChar) {
+      *cursor += 1;
+      return kBadChar;
+    }
+    *cursor += 3;
+    return l;
+  }
+  if (length == 3) {
+    *cursor += 1;
+    return kBadChar;
+  }
+  byte fourth = str[3] ^ 0x80;
+  if (fourth & 0xC0) {
+    *cursor += 1;
+    return kBadChar;
+  }
+  if (first < 0xF8) {
+    uchar l = (((((first << 6 | second) << 6) | third) << 6) | fourth) &
+              kMaxFourByteChar;
+    if (l <= kMaxThreeByteChar) {
+      *cursor += 1;
+      return kBadChar;
+    }
+    *cursor += 4;
+    return l;
+  }
+  *cursor += 1;
+  return kBadChar;
+}
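+
+// Example (illustrative): for the two-byte sequence 0xC3 0xA9 (U+00E9),
+// first is 0xC3 and second is 0xA9 ^ 0x80 == 0x29, so
+// ((0xC3 << 6) | 0x29) & kMaxTwoByteChar yields 0xE9, assuming
+// kMaxTwoByteChar is 0x7FF as declared in unicode.h; the mask strips the
+// 110xxxxx marker bits that the shift moved above bit 10.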
+
+const byte* Utf8::ReadBlock(Buffer<const char*> str, byte* buffer,
+    unsigned capacity, unsigned* chars_read_ptr, unsigned* offset_ptr) {
+  unsigned offset = *offset_ptr;
+  // Bail out early if we've reached the end of the string.
+  if (offset == str.length()) {
+    *chars_read_ptr = 0;
+    return NULL;
+  }
+  const byte* data = reinterpret_cast<const byte*>(str.data());
+  if (data[offset] <= kMaxOneByteChar) {
+    // The next character is an ascii char so we scan forward over
+    // the following ascii characters and return the next pure ascii
+    // substring
+    const byte* result = data + offset;
+    offset++;
+    while ((offset < str.length()) && (data[offset] <= kMaxOneByteChar))
+      offset++;
+    *chars_read_ptr = offset - *offset_ptr;
+    *offset_ptr = offset;
+    return result;
+  } else {
+    // The next character is non-ascii so we just fill the buffer
+    unsigned cursor = 0;
+    unsigned chars_read = 0;
+    while (offset < str.length()) {
+      uchar c = data[offset];
+      if (c <= kMaxOneByteChar) {
+        // Fast case for ascii characters
+        if (!CharacterStream::EncodeAsciiCharacter(c,
+                                                   buffer,
+                                                   capacity,
+                                                   cursor))
+          break;
+        offset += 1;
+      } else {
+        unsigned chars = 0;
+        c = Utf8::ValueOf(data + offset, str.length() - offset, &chars);
+        if (!CharacterStream::EncodeNonAsciiCharacter(c,
+                                                      buffer,
+                                                      capacity,
+                                                      cursor))
+          break;
+        offset += chars;
+      }
+      chars_read++;
+    }
+    *offset_ptr = offset;
+    *chars_read_ptr = chars_read;
+    return buffer;
+  }
+}
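+
+// ReadBlock therefore alternates between two modes: for a run of ASCII bytes
+// it returns a pointer straight into the source string without copying,
+// while a stretch that starts with a non-ASCII byte is decoded into the
+// caller's buffer.  In both cases *chars_read_ptr counts characters, not
+// bytes.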
+
+unsigned CharacterStream::Length() {
+  unsigned result = 0;
+  while (has_more()) {
+    result++;
+    GetNext();
+  }
+  Rewind();
+  return result;
+}
+
+void CharacterStream::Seek(unsigned position) {
+  Rewind();
+  for (unsigned i = 0; i < position; i++) {
+    GetNext();
+  }
+}
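+
+// Note that Length() counts the characters remaining from the current
+// position and then rewinds, and Seek() replays the stream from the start,
+// so seeking to position n costs n GetNext() calls.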
+
+// Uppercase:            point.category == 'Lu'
+
+static const uint16_t kUppercaseTable0Size = 509;
+static const int32_t kUppercaseTable0[509] = { 1073741889, 90, 1073742016, 214, 1073742040, 222, 256, 258, 260, 262, 264, 266, 268, 270, 272, 274, 276, 278, 280, 282, 284, 286, 288, 290, 292, 294, 296, 298, 300, 302, 304, 306, 308, 310, 313, 315, 317, 319, 321, 323, 325, 327, 330, 332, 334, 336, 338, 340, 342, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362, 364, 366, 368, 370, 372, 374, 1073742200, 377, 379, 381, 1073742209, 386, 388, 1073742214, 391, 1073742217, 395, 1073742222, 401, 1073742227, 404, 1073742230, 408, 1073742236, 413, 1073742239, 416, 418, 420, 1073742246, 423, 425, 428, 1073742254, 431, 1073742257, 435, 437, 1073742263, 440, 444, 452, 455, 458, 461, 463, 465, 467, 469, 471, 473, 475, 478, 480, 482, 484, 486, 488, 490, 492, 494, 497, 500, 1073742326, 504, 506, 508, 510, 512, 514, 516, 518, 520, 522, 524, 526, 528, 530, 532, 534, 536, 538, 540, 542, 544, 546, 548, 550, 552, 554, 556, 558, 560, 562, 1073742394, 571, 1073742397, 574, 577, 1073742403, 582, 584, 586, 588, 590, 902, 1073742728, 906, 908, 1073742734, 911, 1073742737, 929, 1073742755, 939, 1073742802, 980, 984, 986, 988, 990, 992, 994, 996, 998, 1000, 1002, 1004, 1006, 1012, 1015, 1073742841, 1018, 1073742845, 1071, 1120, 1122, 1124, 1126, 1128, 1130, 1132, 1134, 1136, 1138, 1140, 1142, 1144, 1146, 1148, 1150, 1152, 1162, 1164, 1166, 1168, 1170, 1172, 1174, 1176, 1178, 1180, 1182, 1184, 1186, 1188, 1190, 1192, 1194, 1196, 1198, 1200, 1202, 1204, 1206, 1208, 1210, 1212, 1214, 1073743040, 1217, 1219, 1221, 1223, 1225, 1227, 1229, 1232, 1234, 1236, 1238, 1240, 1242, 1244, 1246, 1248, 1250, 1252, 1254, 1256, 1258, 1260, 1262, 1264, 1266, 1268, 1270, 1272, 1274, 1276, 1278, 1280, 1282, 1284, 1286, 1288, 1290, 1292, 1294, 1296, 1298, 1073743153, 1366, 1073746080, 4293, 7680, 7682, 7684, 7686, 7688, 7690, 7692, 7694, 7696, 7698, 7700, 7702, 7704, 7706, 7708, 7710, 7712, 7714, 7716, 7718, 7720, 7722, 7724, 7726, 7728, 7730, 7732, 7734, 7736, 7738, 7740, 7742, 7744, 7746, 7748, 7750, 7752, 7754, 7756, 7758, 7760, 7762, 7764, 7766, 7768, 7770, 7772, 7774, 7776, 7778, 7780, 7782, 7784, 7786, 7788, 7790, 7792, 7794, 7796, 7798, 7800, 7802, 7804, 7806, 7808, 7810, 7812, 7814, 7816, 7818, 7820, 7822, 7824, 7826, 7828, 7840, 7842, 7844, 7846, 7848, 7850, 7852, 7854, 7856, 7858, 7860, 7862, 7864, 7866, 7868, 7870, 7872, 7874, 7876, 7878, 7880, 7882, 7884, 7886, 7888, 7890, 7892, 7894, 7896, 7898, 7900, 7902, 7904, 7906, 7908, 7910, 7912, 7914, 7916, 7918, 7920, 7922, 7924, 7926, 7928, 1073749768, 7951, 1073749784, 7965, 1073749800, 7983, 1073749816, 7999, 1073749832, 8013, 8025, 8027, 8029, 8031, 1073749864, 8047, 1073749944, 8123, 1073749960, 8139, 1073749976, 8155, 1073749992, 8172, 1073750008, 8187, 8450, 8455, 1073750283, 8461, 1073750288, 8466, 8469, 1073750297, 8477, 8484, 8486, 8488, 1073750314, 8493, 1073750320, 8499, 1073750334, 8511, 8517, 8579, 1073753088, 11310, 11360, 1073753186, 11364, 11367, 11369, 11371, 11381, 11392, 11394, 11396, 11398, 11400, 11402, 11404, 11406, 11408, 11410, 11412, 11414, 11416, 11418, 11420, 11422, 11424, 11426, 11428, 11430, 11432, 11434, 11436, 11438, 11440, 11442, 11444, 11446, 11448, 11450, 11452, 11454, 11456, 11458, 11460, 11462, 11464, 11466, 11468, 11470, 11472, 11474, 11476, 11478, 11480, 11482, 11484, 11486, 11488, 11490 }; // NOLINT
+static const uint16_t kUppercaseTable1Size = 2;
+static const int32_t kUppercaseTable1[2] = { 1073774369, 32570 }; // NOLINT
+static const uint16_t kUppercaseTable2Size = 2;
+static const int32_t kUppercaseTable2[2] = { 1073742848, 1063 }; // NOLINT
+static const uint16_t kUppercaseTable3Size = 58;
+static const int32_t kUppercaseTable3[58] = { 1073763328, 21529, 1073763380, 21581, 1073763432, 21633, 21660, 1073763486, 21663, 21666, 1073763493, 21670, 1073763497, 21676, 1073763502, 21685, 1073763536, 21737, 1073763588, 21765, 1073763591, 21770, 1073763597, 21780, 1073763606, 21788, 1073763640, 21817, 1073763643, 21822, 1073763648, 21828, 21830, 1073763658, 21840, 1073763692, 21893, 1073763744, 21945, 1073763796, 21997, 1073763848, 22049, 1073763900, 22101, 1073763952, 22153, 1073764008, 22208, 1073764066, 22266, 1073764124, 22324, 1073764182, 22382, 1073764240, 22440, 22474 }; // NOLINT
+bool Uppercase::Is(uchar c) {
+  int chunk_index = c >> 15;
+  switch (chunk_index) {
+    case 0: return LookupPredicate(kUppercaseTable0,
+                                       kUppercaseTable0Size,
+                                       c);
+    case 1: return LookupPredicate(kUppercaseTable1,
+                                       kUppercaseTable1Size,
+                                       c);
+    case 2: return LookupPredicate(kUppercaseTable2,
+                                       kUppercaseTable2Size,
+                                       c);
+    case 3: return LookupPredicate(kUppercaseTable3,
+                                       kUppercaseTable3Size,
+                                       c);
+    default: return false;
+  }
+}
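+
+// Usage sketch (illustrative): the switch above dispatches on the 32K chunk
+// a character belongs to (c >> 15), and LookupPredicate masks the character
+// with kChunkBits - 1, so each table stores chunk-relative values.  For
+// example, Uppercase::Is('Q') consults kUppercaseTable0 and returns true via
+// the 'A'..'Z' range entry, while Uppercase::Is('q') returns false.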
+
+// Lowercase:            point.category == 'Ll'
+
+static const uint16_t kLowercaseTable0Size = 528;
+static const int32_t kLowercaseTable0[528] = { 1073741921, 122, 170, 181, 186, 1073742047, 246, 1073742072, 255, 257, 259, 261, 263, 265, 267, 269, 271, 273, 275, 277, 279, 281, 283, 285, 287, 289, 291, 293, 295, 297, 299, 301, 303, 305, 307, 309, 1073742135, 312, 314, 316, 318, 320, 322, 324, 326, 1073742152, 329, 331, 333, 335, 337, 339, 341, 343, 345, 347, 349, 351, 353, 355, 357, 359, 361, 363, 365, 367, 369, 371, 373, 375, 378, 380, 1073742206, 384, 387, 389, 392, 1073742220, 397, 402, 405, 1073742233, 411, 414, 417, 419, 421, 424, 1073742250, 427, 429, 432, 436, 438, 1073742265, 442, 1073742269, 447, 454, 457, 460, 462, 464, 466, 468, 470, 472, 474, 1073742300, 477, 479, 481, 483, 485, 487, 489, 491, 493, 1073742319, 496, 499, 501, 505, 507, 509, 511, 513, 515, 517, 519, 521, 523, 525, 527, 529, 531, 533, 535, 537, 539, 541, 543, 545, 547, 549, 551, 553, 555, 557, 559, 561, 1073742387, 569, 572, 1073742399, 576, 578, 583, 585, 587, 589, 1073742415, 659, 1073742485, 687, 1073742715, 893, 912, 1073742764, 974, 1073742800, 977, 1073742805, 983, 985, 987, 989, 991, 993, 995, 997, 999, 1001, 1003, 1005, 1073742831, 1011, 1013, 1016, 1073742843, 1020, 1073742896, 1119, 1121, 1123, 1125, 1127, 1129, 1131, 1133, 1135, 1137, 1139, 1141, 1143, 1145, 1147, 1149, 1151, 1153, 1163, 1165, 1167, 1169, 1171, 1173, 1175, 1177, 1179, 1181, 1183, 1185, 1187, 1189, 1191, 1193, 1195, 1197, 1199, 1201, 1203, 1205, 1207, 1209, 1211, 1213, 1215, 1218, 1220, 1222, 1224, 1226, 1228, 1073743054, 1231, 1233, 1235, 1237, 1239, 1241, 1243, 1245, 1247, 1249, 1251, 1253, 1255, 1257, 1259, 1261, 1263, 1265, 1267, 1269, 1271, 1273, 1275, 1277, 1279, 1281, 1283, 1285, 1287, 1289, 1291, 1293, 1295, 1297, 1299, 1073743201, 1415, 1073749248, 7467, 1073749346, 7543, 1073749369, 7578, 7681, 7683, 7685, 7687, 7689, 7691, 7693, 7695, 7697, 7699, 7701, 7703, 7705, 7707, 7709, 7711, 7713, 7715, 7717, 7719, 7721, 7723, 7725, 7727, 7729, 7731, 7733, 7735, 7737, 7739, 7741, 7743, 7745, 7747, 7749, 7751, 7753, 7755, 7757, 7759, 7761, 7763, 7765, 7767, 7769, 7771, 7773, 7775, 7777, 7779, 7781, 7783, 7785, 7787, 7789, 7791, 7793, 7795, 7797, 7799, 7801, 7803, 7805, 7807, 7809, 7811, 7813, 7815, 7817, 7819, 7821, 7823, 7825, 7827, 1073749653, 7835, 7841, 7843, 7845, 7847, 7849, 7851, 7853, 7855, 7857, 7859, 7861, 7863, 7865, 7867, 7869, 7871, 7873, 7875, 7877, 7879, 7881, 7883, 7885, 7887, 7889, 7891, 7893, 7895, 7897, 7899, 7901, 7903, 7905, 7907, 7909, 7911, 7913, 7915, 7917, 7919, 7921, 7923, 7925, 7927, 7929, 1073749760, 7943, 1073749776, 7957, 1073749792, 7975, 1073749808, 7991, 1073749824, 8005, 1073749840, 8023, 1073749856, 8039, 1073749872, 8061, 1073749888, 8071, 1073749904, 8087, 1073749920, 8103, 1073749936, 8116, 1073749942, 8119, 8126, 1073749954, 8132, 1073749958, 8135, 1073749968, 8147, 1073749974, 8151, 1073749984, 8167, 1073750002, 8180, 1073750006, 8183, 8305, 8319, 8458, 1073750286, 8463, 8467, 8495, 8500, 8505, 1073750332, 8509, 1073750342, 8521, 8526, 8580, 1073753136, 11358, 11361, 1073753189, 11366, 11368, 11370, 11372, 11380, 1073753206, 11383, 11393, 11395, 11397, 11399, 11401, 11403, 11405, 11407, 11409, 11411, 11413, 11415, 11417, 11419, 11421, 11423, 11425, 11427, 11429, 11431, 11433, 11435, 11437, 11439, 11441, 11443, 11445, 11447, 11449, 11451, 11453, 11455, 11457, 11459, 11461, 11463, 11465, 11467, 11469, 11471, 11473, 11475, 11477, 11479, 11481, 11483, 11485, 11487, 11489, 1073753315, 11492, 1073753344, 11557 }; // NOLINT
+static const uint16_t kLowercaseTable1Size = 6;
+static const int32_t kLowercaseTable1[6] = { 1073773312, 31494, 1073773331, 31511, 1073774401, 32602 }; // NOLINT
+static const uint16_t kLowercaseTable2Size = 2;
+static const int32_t kLowercaseTable2[2] = { 1073742888, 1103 }; // NOLINT
+static const uint16_t kLowercaseTable3Size = 54;
+static const int32_t kLowercaseTable3[54] = { 1073763354, 21555, 1073763406, 21588, 1073763414, 21607, 1073763458, 21659, 1073763510, 21689, 21691, 1073763517, 21699, 1073763525, 21711, 1073763562, 21763, 1073763614, 21815, 1073763666, 21867, 1073763718, 21919, 1073763770, 21971, 1073763822, 22023, 1073763874, 22075, 1073763926, 22127, 1073763978, 22181, 1073764034, 22234, 1073764060, 22241, 1073764092, 22292, 1073764118, 22299, 1073764150, 22350, 1073764176, 22357, 1073764208, 22408, 1073764234, 22415, 1073764266, 22466, 1073764292, 22473, 22475 }; // NOLINT
+bool Lowercase::Is(uchar c) {
+  int chunk_index = c >> 15;
+  switch (chunk_index) {
+    case 0: return LookupPredicate(kLowercaseTable0,
+                                       kLowercaseTable0Size,
+                                       c);
+    case 1: return LookupPredicate(kLowercaseTable1,
+                                       kLowercaseTable1Size,
+                                       c);
+    case 2: return LookupPredicate(kLowercaseTable2,
+                                       kLowercaseTable2Size,
+                                       c);
+    case 3: return LookupPredicate(kLowercaseTable3,
+                                       kLowercaseTable3Size,
+                                       c);
+    default: return false;
+  }
+}
+
+// Letter:               point.category in ['Lu', 'Ll', 'Lt', 'Lm', 'Lo' ]
+
+static const uint16_t kLetterTable0Size = 476;
+static const int32_t kLetterTable0[476] = { 1073741889, 90, 1073741921, 122, 170, 181, 186, 1073742016, 214, 1073742040, 246, 1073742072, 705, 1073742534, 721, 1073742560, 740, 750, 1073742714, 893, 902, 1073742728, 906, 908, 1073742734, 929, 1073742755, 974, 1073742800, 1013, 1073742839, 1153, 1073742986, 1299, 1073743153, 1366, 1369, 1073743201, 1415, 1073743312, 1514, 1073743344, 1522, 1073743393, 1594, 1073743424, 1610, 1073743470, 1647, 1073743473, 1747, 1749, 1073743589, 1766, 1073743598, 1775, 1073743610, 1788, 1791, 1808, 1073743634, 1839, 1073743693, 1901, 1073743744, 1957, 1969, 1073743818, 2026, 1073743860, 2037, 2042, 1073744132, 2361, 2365, 2384, 1073744216, 2401, 1073744251, 2431, 1073744261, 2444, 1073744271, 2448, 1073744275, 2472, 1073744298, 2480, 2482, 1073744310, 2489, 2493, 2510, 1073744348, 2525, 1073744351, 2529, 1073744368, 2545, 1073744389, 2570, 1073744399, 2576, 1073744403, 2600, 1073744426, 2608, 1073744434, 2611, 1073744437, 2614, 1073744440, 2617, 1073744473, 2652, 2654, 1073744498, 2676, 1073744517, 2701, 1073744527, 2705, 1073744531, 2728, 1073744554, 2736, 1073744562, 2739, 1073744565, 2745, 2749, 2768, 1073744608, 2785, 1073744645, 2828, 1073744655, 2832, 1073744659, 2856, 1073744682, 2864, 1073744690, 2867, 1073744693, 2873, 2877, 1073744732, 2909, 1073744735, 2913, 2929, 2947, 1073744773, 2954, 1073744782, 2960, 1073744786, 2965, 1073744793, 2970, 2972, 1073744798, 2975, 1073744803, 2980, 1073744808, 2986, 1073744814, 3001, 1073744901, 3084, 1073744910, 3088, 1073744914, 3112, 1073744938, 3123, 1073744949, 3129, 1073744992, 3169, 1073745029, 3212, 1073745038, 3216, 1073745042, 3240, 1073745066, 3251, 1073745077, 3257, 3261, 3294, 1073745120, 3297, 1073745157, 3340, 1073745166, 3344, 1073745170, 3368, 1073745194, 3385, 1073745248, 3425, 1073745285, 3478, 1073745306, 3505, 1073745331, 3515, 3517, 1073745344, 3526, 1073745409, 3632, 1073745458, 3635, 1073745472, 3654, 1073745537, 3714, 3716, 1073745543, 3720, 3722, 3725, 1073745556, 3735, 1073745561, 3743, 1073745569, 3747, 3749, 3751, 1073745578, 3755, 1073745581, 3760, 1073745586, 3763, 3773, 1073745600, 3780, 3782, 1073745628, 3805, 3840, 1073745728, 3911, 1073745737, 3946, 1073745800, 3979, 1073745920, 4129, 1073745955, 4135, 1073745961, 4138, 1073746000, 4181, 1073746080, 4293, 1073746128, 4346, 4348, 1073746176, 4441, 1073746271, 4514, 1073746344, 4601, 1073746432, 4680, 1073746506, 4685, 1073746512, 4694, 4696, 1073746522, 4701, 1073746528, 4744, 1073746570, 4749, 1073746576, 4784, 1073746610, 4789, 1073746616, 4798, 4800, 1073746626, 4805, 1073746632, 4822, 1073746648, 4880, 1073746706, 4885, 1073746712, 4954, 1073746816, 5007, 1073746848, 5108, 1073746945, 5740, 1073747567, 5750, 1073747585, 5786, 1073747616, 5866, 1073747712, 5900, 1073747726, 5905, 1073747744, 5937, 1073747776, 5969, 1073747808, 5996, 1073747822, 6000, 1073747840, 6067, 6103, 6108, 1073748000, 6263, 1073748096, 6312, 1073748224, 6428, 1073748304, 6509, 1073748336, 6516, 1073748352, 6569, 1073748417, 6599, 1073748480, 6678, 1073748741, 6963, 1073748805, 6987, 1073749248, 7615, 1073749504, 7835, 1073749664, 7929, 1073749760, 7957, 1073749784, 7965, 1073749792, 8005, 1073749832, 8013, 1073749840, 8023, 8025, 8027, 8029, 1073749855, 8061, 1073749888, 8116, 1073749942, 8124, 8126, 1073749954, 8132, 1073749958, 8140, 1073749968, 8147, 1073749974, 8155, 1073749984, 8172, 1073750002, 8180, 1073750006, 8188, 8305, 8319, 1073750160, 8340, 8450, 8455, 1073750282, 8467, 8469, 1073750297, 8477, 8484, 8486, 8488, 1073750314, 8493, 1073750319, 
8505, 1073750332, 8511, 1073750341, 8521, 8526, 1073750403, 8580, 1073753088, 11310, 1073753136, 11358, 1073753184, 11372, 1073753204, 11383, 1073753216, 11492, 1073753344, 11557, 1073753392, 11621, 11631, 1073753472, 11670, 1073753504, 11686, 1073753512, 11694, 1073753520, 11702, 1073753528, 11710, 1073753536, 11718, 1073753544, 11726, 1073753552, 11734, 1073753560, 11742, 1073754117, 12294, 1073754161, 12341, 1073754171, 12348, 1073754177, 12438, 1073754269, 12447, 1073754273, 12538, 1073754364, 12543, 1073754373, 12588, 1073754417, 12686, 1073754528, 12727, 1073754608, 12799, 1073755136, 19893, 1073761792, 32767 }; // NOLINT
+static const uint16_t kLetterTable1Size = 68;
+static const int32_t kLetterTable1[68] = { 1073741824, 8123, 1073750016, 9356, 1073751831, 10010, 1073752064, 10241, 1073752067, 10245, 1073752071, 10250, 1073752076, 10274, 1073752128, 10355, 1073753088, 22435, 1073772800, 31277, 1073773104, 31338, 1073773168, 31449, 1073773312, 31494, 1073773331, 31511, 31517, 1073773343, 31528, 1073773354, 31542, 1073773368, 31548, 31550, 1073773376, 31553, 1073773379, 31556, 1073773382, 31665, 1073773523, 32061, 1073773904, 32143, 1073773970, 32199, 1073774064, 32251, 1073774192, 32372, 1073774198, 32508, 1073774369, 32570, 1073774401, 32602, 1073774438, 32702, 1073774530, 32711, 1073774538, 32719, 1073774546, 32727, 1073774554, 32732 }; // NOLINT
+static const uint16_t kLetterTable2Size = 48;
+static const int32_t kLetterTable2[48] = { 1073741824, 11, 1073741837, 38, 1073741864, 58, 1073741884, 61, 1073741887, 77, 1073741904, 93, 1073741952, 250, 1073742592, 798, 1073742640, 832, 1073742658, 841, 1073742720, 925, 1073742752, 963, 1073742792, 975, 1073742848, 1181, 1073743872, 2053, 2056, 1073743882, 2101, 1073743927, 2104, 2108, 2111, 1073744128, 2325, 2560, 1073744400, 2579, 1073744405, 2583, 1073744409, 2611, 1073750016, 9070 }; // NOLINT
+static const uint16_t kLetterTable3Size = 57;
+static const int32_t kLetterTable3[57] = { 1073763328, 21588, 1073763414, 21660, 1073763486, 21663, 21666, 1073763493, 21670, 1073763497, 21676, 1073763502, 21689, 21691, 1073763517, 21699, 1073763525, 21765, 1073763591, 21770, 1073763597, 21780, 1073763606, 21788, 1073763614, 21817, 1073763643, 21822, 1073763648, 21828, 21830, 1073763658, 21840, 1073763666, 22181, 1073764008, 22208, 1073764034, 22234, 1073764060, 22266, 1073764092, 22292, 1073764118, 22324, 1073764150, 22350, 1073764176, 22382, 1073764208, 22408, 1073764234, 22440, 1073764266, 22466, 1073764292, 22475 }; // NOLINT
+static const uint16_t kLetterTable4Size = 2;
+static const int32_t kLetterTable4[2] = { 1073741824, 32767 }; // NOLINT
+static const uint16_t kLetterTable5Size = 4;
+static const int32_t kLetterTable5[4] = { 1073741824, 9942, 1073772544, 31261 }; // NOLINT
+bool Letter::Is(uchar c) {
+  int chunk_index = c >> 15;
+  switch (chunk_index) {
+    case 0: return LookupPredicate(kLetterTable0,
+                                       kLetterTable0Size,
+                                       c);
+    case 1: return LookupPredicate(kLetterTable1,
+                                       kLetterTable1Size,
+                                       c);
+    case 2: return LookupPredicate(kLetterTable2,
+                                       kLetterTable2Size,
+                                       c);
+    case 3: return LookupPredicate(kLetterTable3,
+                                       kLetterTable3Size,
+                                       c);
+    case 4: return LookupPredicate(kLetterTable4,
+                                       kLetterTable4Size,
+                                       c);
+    case 5: return LookupPredicate(kLetterTable5,
+                                       kLetterTable5Size,
+                                       c);
+    default: return false;
+  }
+}
+
+// Space:                point.category == 'Zs'
+
+static const uint16_t kSpaceTable0Size = 9;
+static const int32_t kSpaceTable0[9] = { 32, 160, 5760, 6158, 1073750016, 8202, 8239, 8287, 12288 }; // NOLINT
+bool Space::Is(uchar c) {
+  int chunk_index = c >> 15;
+  switch (chunk_index) {
+    case 0: return LookupPredicate(kSpaceTable0,
+                                       kSpaceTable0Size,
+                                       c);
+    default: return false;
+  }
+}
+
+// Number:               point.category in ['Nd', 'Nl', 'No' ]
+
+static const uint16_t kNumberTable0Size = 86;
+static const int32_t kNumberTable0[86] = { 1073741872, 57, 1073742002, 179, 185, 1073742012, 190, 1073743456, 1641, 1073743600, 1785, 1073743808, 1993, 1073744230, 2415, 1073744358, 2543, 1073744372, 2553, 1073744486, 2671, 1073744614, 2799, 1073744742, 2927, 1073744870, 3058, 1073744998, 3183, 1073745126, 3311, 1073745254, 3439, 1073745488, 3673, 1073745616, 3801, 1073745696, 3891, 1073745984, 4169, 1073746793, 4988, 1073747694, 5872, 1073747936, 6121, 1073747952, 6137, 1073747984, 6169, 1073748294, 6479, 1073748432, 6617, 1073748816, 7001, 8304, 1073750132, 8313, 1073750144, 8329, 1073750355, 8578, 1073751136, 9371, 1073751274, 9471, 1073751926, 10131, 11517, 12295, 1073754145, 12329, 1073754168, 12346, 1073754514, 12693, 1073754656, 12841, 1073754705, 12895, 1073754752, 12937, 1073754801, 12991 }; // NOLINT
+static const uint16_t kNumberTable1Size = 2;
+static const int32_t kNumberTable1[2] = { 1073774352, 32537 }; // NOLINT
+static const uint16_t kNumberTable2Size = 19;
+static const int32_t kNumberTable2[19] = { 1073742087, 307, 1073742144, 376, 394, 1073742624, 803, 833, 842, 1073742801, 981, 1073743008, 1193, 1073744150, 2329, 1073744448, 2631, 1073751040, 9314 }; // NOLINT
+static const uint16_t kNumberTable3Size = 4;
+static const int32_t kNumberTable3[4] = { 1073763168, 21361, 1073764302, 22527 }; // NOLINT
+bool Number::Is(uchar c) {
+  int chunk_index = c >> 15;
+  switch (chunk_index) {
+    case 0: return LookupPredicate(kNumberTable0,
+                                       kNumberTable0Size,
+                                       c);
+    case 1: return LookupPredicate(kNumberTable1,
+                                       kNumberTable1Size,
+                                       c);
+    case 2: return LookupPredicate(kNumberTable2,
+                                       kNumberTable2Size,
+                                       c);
+    case 3: return LookupPredicate(kNumberTable3,
+                                       kNumberTable3Size,
+                                       c);
+    default: return false;
+  }
+}
+
+// WhiteSpace:           'Ws' in point.properties
+
+static const uint16_t kWhiteSpaceTable0Size = 14;
+static const int32_t kWhiteSpaceTable0[14] = { 1073741833, 13, 32, 133, 160, 5760, 6158, 1073750016, 8202, 1073750056, 8233, 8239, 8287, 12288 }; // NOLINT
+bool WhiteSpace::Is(uchar c) {
+  int chunk_index = c >> 15;
+  switch (chunk_index) {
+    case 0: return LookupPredicate(kWhiteSpaceTable0,
+                                       kWhiteSpaceTable0Size,
+                                       c);
+    default: return false;
+  }
+}
+
+// LineTerminator:       'Lt' in point.properties
+
+static const uint16_t kLineTerminatorTable0Size = 4;
+static const int32_t kLineTerminatorTable0[4] = { 10, 13, 1073750056, 8233 }; // NOLINT
+bool LineTerminator::Is(uchar c) {
+  int chunk_index = c >> 15;
+  switch (chunk_index) {
+    case 0: return LookupPredicate(kLineTerminatorTable0,
+                                       kLineTerminatorTable0Size,
+                                       c);
+    default: return false;
+  }
+}
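+
+// For example, kLineTerminatorTable0 above encodes exactly the ECMAScript
+// line terminators: the singletons 10 (LF) and 13 (CR) plus the range
+// 1073750056, 8233, i.e. U+2028 (LINE SEPARATOR) through U+2029 (PARAGRAPH
+// SEPARATOR).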
+
+// CombiningMark:        point.category in ['Mn', 'Mc']
+
+static const uint16_t kCombiningMarkTable0Size = 214;
+static const int32_t kCombiningMarkTable0[214] = { 1073742592, 879, 1073742979, 1158, 1073743249, 1469, 1471, 1073743297, 1474, 1073743300, 1477, 1479, 1073743376, 1557, 1073743435, 1630, 1648, 1073743574, 1756, 1073743583, 1764, 1073743591, 1768, 1073743594, 1773, 1809, 1073743664, 1866, 1073743782, 1968, 1073743851, 2035, 1073744129, 2307, 2364, 1073744190, 2381, 1073744209, 2388, 1073744226, 2403, 1073744257, 2435, 2492, 1073744318, 2500, 1073744327, 2504, 1073744331, 2509, 2519, 1073744354, 2531, 1073744385, 2563, 2620, 1073744446, 2626, 1073744455, 2632, 1073744459, 2637, 1073744496, 2673, 1073744513, 2691, 2748, 1073744574, 2757, 1073744583, 2761, 1073744587, 2765, 1073744610, 2787, 1073744641, 2819, 2876, 1073744702, 2883, 1073744711, 2888, 1073744715, 2893, 1073744726, 2903, 2946, 1073744830, 3010, 1073744838, 3016, 1073744842, 3021, 3031, 1073744897, 3075, 1073744958, 3140, 1073744966, 3144, 1073744970, 3149, 1073744981, 3158, 1073745026, 3203, 3260, 1073745086, 3268, 1073745094, 3272, 1073745098, 3277, 1073745109, 3286, 1073745122, 3299, 1073745154, 3331, 1073745214, 3395, 1073745222, 3400, 1073745226, 3405, 3415, 1073745282, 3459, 3530, 1073745359, 3540, 3542, 1073745368, 3551, 1073745394, 3571, 3633, 1073745460, 3642, 1073745479, 3662, 3761, 1073745588, 3769, 1073745595, 3772, 1073745608, 3789, 1073745688, 3865, 3893, 3895, 3897, 1073745726, 3903, 1073745777, 3972, 1073745798, 3975, 1073745808, 3991, 1073745817, 4028, 4038, 1073745964, 4146, 1073745974, 4153, 1073746006, 4185, 4959, 1073747730, 5908, 1073747762, 5940, 1073747794, 5971, 1073747826, 6003, 1073747894, 6099, 6109, 1073747979, 6157, 6313, 1073748256, 6443, 1073748272, 6459, 1073748400, 6592, 1073748424, 6601, 1073748503, 6683, 1073748736, 6916, 1073748788, 6980, 1073748843, 7027, 1073749440, 7626, 1073749502, 7679, 1073750224, 8412, 8417, 1073750245, 8431, 1073754154, 12335, 1073754265, 12442 }; // NOLINT
+static const uint16_t kCombiningMarkTable1Size = 10;
+static const int32_t kCombiningMarkTable1[10] = { 10242, 10246, 10251, 1073752099, 10279, 31518, 1073774080, 32271, 1073774112, 32291 }; // NOLINT
+static const uint16_t kCombiningMarkTable2Size = 9;
+static const int32_t kCombiningMarkTable2[9] = { 1073744385, 2563, 1073744389, 2566, 1073744396, 2575, 1073744440, 2618, 2623 }; // NOLINT
+static const uint16_t kCombiningMarkTable3Size = 12;
+static const int32_t kCombiningMarkTable3[12] = { 1073762661, 20841, 1073762669, 20850, 1073762683, 20866, 1073762693, 20875, 1073762730, 20909, 1073762882, 21060 }; // NOLINT
+static const uint16_t kCombiningMarkTable28Size = 2;
+static const int32_t kCombiningMarkTable28[2] = { 1073742080, 495 }; // NOLINT
+bool CombiningMark::Is(uchar c) {
+  int chunk_index = c >> 15;
+  switch (chunk_index) {
+    case 0: return LookupPredicate(kCombiningMarkTable0,
+                                       kCombiningMarkTable0Size,
+                                       c);
+    case 1: return LookupPredicate(kCombiningMarkTable1,
+                                       kCombiningMarkTable1Size,
+                                       c);
+    case 2: return LookupPredicate(kCombiningMarkTable2,
+                                       kCombiningMarkTable2Size,
+                                       c);
+    case 3: return LookupPredicate(kCombiningMarkTable3,
+                                       kCombiningMarkTable3Size,
+                                       c);
+    case 28: return LookupPredicate(kCombiningMarkTable28,
+                                       kCombiningMarkTable28Size,
+                                       c);
+    default: return false;
+  }
+}
+
+// ConnectorPunctuation: point.category == 'Pc'
+
+static const uint16_t kConnectorPunctuationTable0Size = 4;
+static const int32_t kConnectorPunctuationTable0[4] = { 95, 1073750079, 8256, 8276 }; // NOLINT
+static const uint16_t kConnectorPunctuationTable1Size = 5;
+static const int32_t kConnectorPunctuationTable1[5] = { 1073774131, 32308, 1073774157, 32335, 32575 }; // NOLINT
+bool ConnectorPunctuation::Is(uchar c) {
+  int chunk_index = c >> 15;
+  switch (chunk_index) {
+    case 0: return LookupPredicate(kConnectorPunctuationTable0,
+                                       kConnectorPunctuationTable0Size,
+                                       c);
+    case 1: return LookupPredicate(kConnectorPunctuationTable1,
+                                       kConnectorPunctuationTable1Size,
+                                       c);
+    default: return false;
+  }
+}
+
+static const MultiCharacterSpecialCase<3> kToLowercaseMultiStrings0[] = { {2, {105, 775}}, {0, {0}} }; // NOLINT
+static const uint16_t kToLowercaseTable0Size = 531;
+static const int32_t kToLowercaseTable0[1062] = { 1073741889, 128, 90, 128, 1073742016, 128, 214, 128, 1073742040, 128, 222, 128, 256, 4, 258, 4, 260, 4, 262, 4, 264, 4, 266, 4, 268, 4, 270, 4, 272, 4, 274, 4, 276, 4, 278, 4, 280, 4, 282, 4, 284, 4, 286, 4, 288, 4, 290, 4, 292, 4, 294, 4, 296, 4, 298, 4, 300, 4, 302, 4, 304, 1, 306, 4, 308, 4, 310, 4, 313, 4, 315, 4, 317, 4, 319, 4, 321, 4, 323, 4, 325, 4, 327, 4, 330, 4, 332, 4, 334, 4, 336, 4, 338, 4, 340, 4, 342, 4, 344, 4, 346, 4, 348, 4, 350, 4, 352, 4, 354, 4, 356, 4, 358, 4, 360, 4, 362, 4, 364, 4, 366, 4, 368, 4, 370, 4, 372, 4, 374, 4, 376, -484, 377, 4, 379, 4, 381, 4, 385, 840, 386, 4, 388, 4, 390, 824, 391, 4, 1073742217, 820, 394, 820, 395, 4, 398, 316, 399, 808, 400, 812, 401, 4, 403, 820, 404, 828, 406, 844, 407, 836, 408, 4, 412, 844, 413, 852, 415, 856, 416, 4, 418, 4, 420, 4, 422, 872, 423, 4, 425, 872, 428, 4, 430, 872, 431, 4, 1073742257, 868, 434, 868, 435, 4, 437, 4, 439, 876, 440, 4, 444, 4, 452, 8, 453, 4, 455, 8, 456, 4, 458, 8, 459, 4, 461, 4, 463, 4, 465, 4, 467, 4, 469, 4, 471, 4, 473, 4, 475, 4, 478, 4, 480, 4, 482, 4, 484, 4, 486, 4, 488, 4, 490, 4, 492, 4, 494, 4, 497, 8, 498, 4, 500, 4, 502, -388, 503, -224, 504, 4, 506, 4, 508, 4, 510, 4, 512, 4, 514, 4, 516, 4, 518, 4, 520, 4, 522, 4, 524, 4, 526, 4, 528, 4, 530, 4, 532, 4, 534, 4, 536, 4, 538, 4, 540, 4, 542, 4, 544, -520, 546, 4, 548, 4, 550, 4, 552, 4, 554, 4, 556, 4, 558, 4, 560, 4, 562, 4, 570, 43180, 571, 4, 573, -652, 574, 43168, 577, 4, 579, -780, 580, 276, 581, 284, 582, 4, 584, 4, 586, 4, 588, 4, 590, 4, 902, 152, 1073742728, 148, 906, 148, 908, 256, 1073742734, 252, 911, 252, 1073742737, 128, 929, 128, 1073742755, 6, 939, 128, 984, 4, 986, 4, 988, 4, 990, 4, 992, 4, 994, 4, 996, 4, 998, 4, 1000, 4, 1002, 4, 1004, 4, 1006, 4, 1012, -240, 1015, 4, 1017, -28, 1018, 4, 1073742845, -520, 1023, -520, 1073742848, 320, 1039, 320, 1073742864, 128, 1071, 128, 1120, 4, 1122, 4, 1124, 4, 1126, 4, 1128, 4, 1130, 4, 1132, 4, 1134, 4, 1136, 4, 1138, 4, 1140, 4, 1142, 4, 1144, 4, 1146, 4, 1148, 4, 1150, 4, 1152, 4, 1162, 4, 1164, 4, 1166, 4, 1168, 4, 1170, 4, 1172, 4, 1174, 4, 1176, 4, 1178, 4, 1180, 4, 1182, 4, 1184, 4, 1186, 4, 1188, 4, 1190, 4, 1192, 4, 1194, 4, 1196, 4, 1198, 4, 1200, 4, 1202, 4, 1204, 4, 1206, 4, 1208, 4, 1210, 4, 1212, 4, 1214, 4, 1216, 60, 1217, 4, 1219, 4, 1221, 4, 1223, 4, 1225, 4, 1227, 4, 1229, 4, 1232, 4, 1234, 4, 1236, 4, 1238, 4, 1240, 4, 1242, 4, 1244, 4, 1246, 4, 1248, 4, 1250, 4, 1252, 4, 1254, 4, 1256, 4, 1258, 4, 1260, 4, 1262, 4, 1264, 4, 1266, 4, 1268, 4, 1270, 4, 1272, 4, 1274, 4, 1276, 4, 1278, 4, 1280, 4, 1282, 4, 1284, 4, 1286, 4, 1288, 4, 1290, 4, 1292, 4, 1294, 4, 1296, 4, 1298, 4, 1073743153, 192, 1366, 192, 1073746080, 29056, 4293, 29056, 7680, 4, 7682, 4, 7684, 4, 7686, 4, 7688, 4, 7690, 4, 7692, 4, 7694, 4, 7696, 4, 7698, 4, 7700, 4, 7702, 4, 7704, 4, 7706, 4, 7708, 4, 7710, 4, 7712, 4, 7714, 4, 7716, 4, 7718, 4, 7720, 4, 7722, 4, 7724, 4, 7726, 4, 7728, 4, 7730, 4, 7732, 4, 7734, 4, 7736, 4, 7738, 4, 7740, 4, 7742, 4, 7744, 4, 7746, 4, 7748, 4, 7750, 4, 7752, 4, 7754, 4, 7756, 4, 7758, 4, 7760, 4, 7762, 4, 7764, 4, 7766, 4, 7768, 4, 7770, 4, 7772, 4, 7774, 4, 7776, 4, 7778, 4, 7780, 4, 7782, 4, 7784, 4, 7786, 4, 7788, 4, 7790, 4, 7792, 4, 7794, 4, 7796, 4, 7798, 4, 7800, 4, 7802, 4, 7804, 4, 7806, 4, 7808, 4, 7810, 4, 7812, 4, 7814, 4, 7816, 4, 7818, 4, 7820, 4, 7822, 4, 7824, 4, 7826, 4, 7828, 4, 7840, 4, 7842, 4, 7844, 4, 7846, 4, 7848, 4, 7850, 4, 7852, 4, 7854, 4, 7856, 4, 7858, 4, 7860, 4, 7862, 4, 7864, 4, 
7866, 4, 7868, 4, 7870, 4, 7872, 4, 7874, 4, 7876, 4, 7878, 4, 7880, 4, 7882, 4, 7884, 4, 7886, 4, 7888, 4, 7890, 4, 7892, 4, 7894, 4, 7896, 4, 7898, 4, 7900, 4, 7902, 4, 7904, 4, 7906, 4, 7908, 4, 7910, 4, 7912, 4, 7914, 4, 7916, 4, 7918, 4, 7920, 4, 7922, 4, 7924, 4, 7926, 4, 7928, 4, 1073749768, -32, 7951, -32, 1073749784, -32, 7965, -32, 1073749800, -32, 7983, -32, 1073749816, -32, 7999, -32, 1073749832, -32, 8013, -32, 8025, -32, 8027, -32, 8029, -32, 8031, -32, 1073749864, -32, 8047, -32, 1073749896, -32, 8079, -32, 1073749912, -32, 8095, -32, 1073749928, -32, 8111, -32, 1073749944, -32, 8121, -32, 1073749946, -296, 8123, -296, 8124, -36, 1073749960, -344, 8139, -344, 8140, -36, 1073749976, -32, 8153, -32, 1073749978, -400, 8155, -400, 1073749992, -32, 8169, -32, 1073749994, -448, 8171, -448, 8172, -28, 1073750008, -512, 8185, -512, 1073750010, -504, 8187, -504, 8188, -36, 8486, -30068, 8490, -33532, 8491, -33048, 8498, 112, 1073750368, 64, 8559, 64, 8579, 4, 1073751222, 104, 9423, 104, 1073753088, 192, 11310, 192, 11360, 4, 11362, -42972, 11363, -15256, 11364, -42908, 11367, 4, 11369, 4, 11371, 4, 11381, 4, 11392, 4, 11394, 4, 11396, 4, 11398, 4, 11400, 4, 11402, 4, 11404, 4, 11406, 4, 11408, 4, 11410, 4, 11412, 4, 11414, 4, 11416, 4, 11418, 4, 11420, 4, 11422, 4, 11424, 4, 11426, 4, 11428, 4, 11430, 4, 11432, 4, 11434, 4, 11436, 4, 11438, 4, 11440, 4, 11442, 4, 11444, 4, 11446, 4, 11448, 4, 11450, 4, 11452, 4, 11454, 4, 11456, 4, 11458, 4, 11460, 4, 11462, 4, 11464, 4, 11466, 4, 11468, 4, 11470, 4, 11472, 4, 11474, 4, 11476, 4, 11478, 4, 11480, 4, 11482, 4, 11484, 4, 11486, 4, 11488, 4, 11490, 4 }; // NOLINT
+static const MultiCharacterSpecialCase<3> kToLowercaseMultiStrings1[] = { {0, {0}} }; // NOLINT
+static const uint16_t kToLowercaseTable1Size = 2;
+static const int32_t kToLowercaseTable1[4] = { 1073774369, 128, 32570, 128 }; // NOLINT
+static const MultiCharacterSpecialCase<3> kToLowercaseMultiStrings2[] = { {0, {0}} }; // NOLINT
+static const uint16_t kToLowercaseTable2Size = 2;
+static const int32_t kToLowercaseTable2[4] = { 1073742848, 160, 1063, 160 }; // NOLINT
+int ToLowercase::Convert(uchar c,
+                      uchar n,
+                      uchar* result,
+                      bool* allow_caching_ptr) {
+  int chunk_index = c >> 15;
+  switch (chunk_index) {
+    case 0: return LookupMapping(kToLowercaseTable0,
+                                     kToLowercaseTable0Size,
+                                     kToLowercaseMultiStrings0,
+                                     c,
+                                     n,
+                                     result,
+                                     allow_caching_ptr);
+    case 1: return LookupMapping(kToLowercaseTable1,
+                                     kToLowercaseTable1Size,
+                                     kToLowercaseMultiStrings1,
+                                     c,
+                                     n,
+                                     result,
+                                     allow_caching_ptr);
+    case 2: return LookupMapping(kToLowercaseTable2,
+                                     kToLowercaseTable2Size,
+                                     kToLowercaseMultiStrings2,
+                                     c,
+                                     n,
+                                     result,
+                                     allow_caching_ptr);
+    default: return 0;
+  }
+}
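+
+// Usage sketch (illustrative, names as declared in unicode.h):
+//   uchar buf[3];
+//   int n = ToLowercase::Convert('A', 0, buf, NULL);    // n == 1, buf[0] == 'a'
+//   n = ToLowercase::Convert(0x130, 0, buf, NULL);      // n == 2, "i" + U+0307
+// Passing NULL for allow_caching_ptr is safe; it is only written when the
+// mapping is context dependent or expands to multiple characters.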
+
+static const MultiCharacterSpecialCase<3> kToUppercaseMultiStrings0[] = { {2, {83, 83}}, {2, {700, 78}}, {2, {74, 780}}, {3, {921, 776, 769}}, {3, {933, 776, 769}}, {2, {1333, 1362}}, {2, {72, 817}}, {2, {84, 776}}, {2, {87, 778}}, {2, {89, 778}}, {2, {65, 702}}, {2, {933, 787}}, {3, {933, 787, 768}}, {3, {933, 787, 769}}, {3, {933, 787, 834}}, {2, {7944, 921}}, {2, {7945, 921}}, {2, {7946, 921}}, {2, {7947, 921}}, {2, {7948, 921}}, {2, {7949, 921}}, {2, {7950, 921}}, {2, {7951, 921}}, {2, {7944, 921}}, {2, {7945, 921}}, {2, {7946, 921}}, {2, {7947, 921}}, {2, {7948, 921}}, {2, {7949, 921}}, {2, {7950, 921}}, {2, {7951, 921}}, {2, {7976, 921}}, {2, {7977, 921}}, {2, {7978, 921}}, {2, {7979, 921}}, {2, {7980, 921}}, {2, {7981, 921}}, {2, {7982, 921}}, {2, {7983, 921}}, {2, {7976, 921}}, {2, {7977, 921}}, {2, {7978, 921}}, {2, {7979, 921}}, {2, {7980, 921}}, {2, {7981, 921}}, {2, {7982, 921}}, {2, {7983, 921}}, {2, {8040, 921}}, {2, {8041, 921}}, {2, {8042, 921}}, {2, {8043, 921}}, {2, {8044, 921}}, {2, {8045, 921}}, {2, {8046, 921}}, {2, {8047, 921}}, {2, {8040, 921}}, {2, {8041, 921}}, {2, {8042, 921}}, {2, {8043, 921}}, {2, {8044, 921}}, {2, {8045, 921}}, {2, {8046, 921}}, {2, {8047, 921}}, {2, {8122, 921}}, {2, {913, 921}}, {2, {902, 921}}, {2, {913, 834}}, {3, {913, 834, 921}}, {2, {913, 921}}, {2, {8138, 921}}, {2, {919, 921}}, {2, {905, 921}}, {2, {919, 834}}, {3, {919, 834, 921}}, {2, {919, 921}}, {3, {921, 776, 768}}, {3, {921, 776, 769}}, {2, {921, 834}}, {3, {921, 776, 834}}, {3, {933, 776, 768}}, {3, {933, 776, 769}}, {2, {929, 787}}, {2, {933, 834}}, {3, {933, 776, 834}}, {2, {8186, 921}}, {2, {937, 921}}, {2, {911, 921}}, {2, {937, 834}}, {3, {937, 834, 921}}, {2, {937, 921}}, {0, {0}} }; // NOLINT
+static const uint16_t kToUppercaseTable0Size = 621;
+static const int32_t kToUppercaseTable0[1242] = { 1073741921, -128, 122, -128, 181, 2972, 223, 1, 1073742048, -128, 246, -128, 1073742072, -128, 254, -128, 255, 484, 257, -4, 259, -4, 261, -4, 263, -4, 265, -4, 267, -4, 269, -4, 271, -4, 273, -4, 275, -4, 277, -4, 279, -4, 281, -4, 283, -4, 285, -4, 287, -4, 289, -4, 291, -4, 293, -4, 295, -4, 297, -4, 299, -4, 301, -4, 303, -4, 305, -928, 307, -4, 309, -4, 311, -4, 314, -4, 316, -4, 318, -4, 320, -4, 322, -4, 324, -4, 326, -4, 328, -4, 329, 5, 331, -4, 333, -4, 335, -4, 337, -4, 339, -4, 341, -4, 343, -4, 345, -4, 347, -4, 349, -4, 351, -4, 353, -4, 355, -4, 357, -4, 359, -4, 361, -4, 363, -4, 365, -4, 367, -4, 369, -4, 371, -4, 373, -4, 375, -4, 378, -4, 380, -4, 382, -4, 383, -1200, 384, 780, 387, -4, 389, -4, 392, -4, 396, -4, 402, -4, 405, 388, 409, -4, 410, 652, 414, 520, 417, -4, 419, -4, 421, -4, 424, -4, 429, -4, 432, -4, 436, -4, 438, -4, 441, -4, 445, -4, 447, 224, 453, -4, 454, -8, 456, -4, 457, -8, 459, -4, 460, -8, 462, -4, 464, -4, 466, -4, 468, -4, 470, -4, 472, -4, 474, -4, 476, -4, 477, -316, 479, -4, 481, -4, 483, -4, 485, -4, 487, -4, 489, -4, 491, -4, 493, -4, 495, -4, 496, 9, 498, -4, 499, -8, 501, -4, 505, -4, 507, -4, 509, -4, 511, -4, 513, -4, 515, -4, 517, -4, 519, -4, 521, -4, 523, -4, 525, -4, 527, -4, 529, -4, 531, -4, 533, -4, 535, -4, 537, -4, 539, -4, 541, -4, 543, -4, 547, -4, 549, -4, 551, -4, 553, -4, 555, -4, 557, -4, 559, -4, 561, -4, 563, -4, 572, -4, 578, -4, 583, -4, 585, -4, 587, -4, 589, -4, 591, -4, 595, -840, 596, -824, 1073742422, -820, 599, -820, 601, -808, 603, -812, 608, -820, 611, -828, 616, -836, 617, -844, 619, 42972, 623, -844, 626, -852, 629, -856, 637, 42908, 640, -872, 643, -872, 648, -872, 649, -276, 1073742474, -868, 651, -868, 652, -284, 658, -876, 837, 336, 1073742715, 520, 893, 520, 912, 13, 940, -152, 1073742765, -148, 943, -148, 944, 17, 1073742769, -128, 961, -128, 962, -124, 1073742787, -128, 971, -128, 972, -256, 1073742797, -252, 974, -252, 976, -248, 977, -228, 981, -188, 982, -216, 985, -4, 987, -4, 989, -4, 991, -4, 993, -4, 995, -4, 997, -4, 999, -4, 1001, -4, 1003, -4, 1005, -4, 1007, -4, 1008, -344, 1009, -320, 1010, 28, 1013, -384, 1016, -4, 1019, -4, 1073742896, -128, 1103, -128, 1073742928, -320, 1119, -320, 1121, -4, 1123, -4, 1125, -4, 1127, -4, 1129, -4, 1131, -4, 1133, -4, 1135, -4, 1137, -4, 1139, -4, 1141, -4, 1143, -4, 1145, -4, 1147, -4, 1149, -4, 1151, -4, 1153, -4, 1163, -4, 1165, -4, 1167, -4, 1169, -4, 1171, -4, 1173, -4, 1175, -4, 1177, -4, 1179, -4, 1181, -4, 1183, -4, 1185, -4, 1187, -4, 1189, -4, 1191, -4, 1193, -4, 1195, -4, 1197, -4, 1199, -4, 1201, -4, 1203, -4, 1205, -4, 1207, -4, 1209, -4, 1211, -4, 1213, -4, 1215, -4, 1218, -4, 1220, -4, 1222, -4, 1224, -4, 1226, -4, 1228, -4, 1230, -4, 1231, -60, 1233, -4, 1235, -4, 1237, -4, 1239, -4, 1241, -4, 1243, -4, 1245, -4, 1247, -4, 1249, -4, 1251, -4, 1253, -4, 1255, -4, 1257, -4, 1259, -4, 1261, -4, 1263, -4, 1265, -4, 1267, -4, 1269, -4, 1271, -4, 1273, -4, 1275, -4, 1277, -4, 1279, -4, 1281, -4, 1283, -4, 1285, -4, 1287, -4, 1289, -4, 1291, -4, 1293, -4, 1295, -4, 1297, -4, 1299, -4, 1073743201, -192, 1414, -192, 1415, 21, 7549, 15256, 7681, -4, 7683, -4, 7685, -4, 7687, -4, 7689, -4, 7691, -4, 7693, -4, 7695, -4, 7697, -4, 7699, -4, 7701, -4, 7703, -4, 7705, -4, 7707, -4, 7709, -4, 7711, -4, 7713, -4, 7715, -4, 7717, -4, 7719, -4, 7721, -4, 7723, -4, 7725, -4, 7727, -4, 7729, -4, 7731, -4, 7733, -4, 7735, -4, 7737, -4, 7739, -4, 7741, -4, 7743, -4, 7745, -4, 7747, -4, 7749, -4, 7751, -4, 7753, 
-4, 7755, -4, 7757, -4, 7759, -4, 7761, -4, 7763, -4, 7765, -4, 7767, -4, 7769, -4, 7771, -4, 7773, -4, 7775, -4, 7777, -4, 7779, -4, 7781, -4, 7783, -4, 7785, -4, 7787, -4, 7789, -4, 7791, -4, 7793, -4, 7795, -4, 7797, -4, 7799, -4, 7801, -4, 7803, -4, 7805, -4, 7807, -4, 7809, -4, 7811, -4, 7813, -4, 7815, -4, 7817, -4, 7819, -4, 7821, -4, 7823, -4, 7825, -4, 7827, -4, 7829, -4, 7830, 25, 7831, 29, 7832, 33, 7833, 37, 7834, 41, 7835, -236, 7841, -4, 7843, -4, 7845, -4, 7847, -4, 7849, -4, 7851, -4, 7853, -4, 7855, -4, 7857, -4, 7859, -4, 7861, -4, 7863, -4, 7865, -4, 7867, -4, 7869, -4, 7871, -4, 7873, -4, 7875, -4, 7877, -4, 7879, -4, 7881, -4, 7883, -4, 7885, -4, 7887, -4, 7889, -4, 7891, -4, 7893, -4, 7895, -4, 7897, -4, 7899, -4, 7901, -4, 7903, -4, 7905, -4, 7907, -4, 7909, -4, 7911, -4, 7913, -4, 7915, -4, 7917, -4, 7919, -4, 7921, -4, 7923, -4, 7925, -4, 7927, -4, 7929, -4, 1073749760, 32, 7943, 32, 1073749776, 32, 7957, 32, 1073749792, 32, 7975, 32, 1073749808, 32, 7991, 32, 1073749824, 32, 8005, 32, 8016, 45, 8017, 32, 8018, 49, 8019, 32, 8020, 53, 8021, 32, 8022, 57, 8023, 32, 1073749856, 32, 8039, 32, 1073749872, 296, 8049, 296, 1073749874, 344, 8053, 344, 1073749878, 400, 8055, 400, 1073749880, 512, 8057, 512, 1073749882, 448, 8059, 448, 1073749884, 504, 8061, 504, 8064, 61, 8065, 65, 8066, 69, 8067, 73, 8068, 77, 8069, 81, 8070, 85, 8071, 89, 8072, 93, 8073, 97, 8074, 101, 8075, 105, 8076, 109, 8077, 113, 8078, 117, 8079, 121, 8080, 125, 8081, 129, 8082, 133, 8083, 137, 8084, 141, 8085, 145, 8086, 149, 8087, 153, 8088, 157, 8089, 161, 8090, 165, 8091, 169, 8092, 173, 8093, 177, 8094, 181, 8095, 185, 8096, 189, 8097, 193, 8098, 197, 8099, 201, 8100, 205, 8101, 209, 8102, 213, 8103, 217, 8104, 221, 8105, 225, 8106, 229, 8107, 233, 8108, 237, 8109, 241, 8110, 245, 8111, 249, 1073749936, 32, 8113, 32, 8114, 253, 8115, 257, 8116, 261, 8118, 265, 8119, 269, 8124, 273, 8126, -28820, 8130, 277, 8131, 281, 8132, 285, 8134, 289, 8135, 293, 8140, 297, 1073749968, 32, 8145, 32, 8146, 301, 8147, 305, 8150, 309, 8151, 313, 1073749984, 32, 8161, 32, 8162, 317, 8163, 321, 8164, 325, 8165, 28, 8166, 329, 8167, 333, 8178, 337, 8179, 341, 8180, 345, 8182, 349, 8183, 353, 8188, 357, 8526, -112, 1073750384, -64, 8575, -64, 8580, -4, 1073751248, -104, 9449, -104, 1073753136, -192, 11358, -192, 11361, -4, 11365, -43180, 11366, -43168, 11368, -4, 11370, -4, 11372, -4, 11382, -4, 11393, -4, 11395, -4, 11397, -4, 11399, -4, 11401, -4, 11403, -4, 11405, -4, 11407, -4, 11409, -4, 11411, -4, 11413, -4, 11415, -4, 11417, -4, 11419, -4, 11421, -4, 11423, -4, 11425, -4, 11427, -4, 11429, -4, 11431, -4, 11433, -4, 11435, -4, 11437, -4, 11439, -4, 11441, -4, 11443, -4, 11445, -4, 11447, -4, 11449, -4, 11451, -4, 11453, -4, 11455, -4, 11457, -4, 11459, -4, 11461, -4, 11463, -4, 11465, -4, 11467, -4, 11469, -4, 11471, -4, 11473, -4, 11475, -4, 11477, -4, 11479, -4, 11481, -4, 11483, -4, 11485, -4, 11487, -4, 11489, -4, 11491, -4, 1073753344, -29056, 11557, -29056 }; // NOLINT
+static const MultiCharacterSpecialCase<3> kToUppercaseMultiStrings1[] = { {2, {70, 70}}, {2, {70, 73}}, {2, {70, 76}}, {3, {70, 70, 73}}, {3, {70, 70, 76}}, {2, {83, 84}}, {2, {83, 84}}, {2, {1348, 1350}}, {2, {1348, 1333}}, {2, {1348, 1339}}, {2, {1358, 1350}}, {2, {1348, 1341}}, {0, {0}} }; // NOLINT
+static const uint16_t kToUppercaseTable1Size = 14;
+static const int32_t kToUppercaseTable1[28] = { 31488, 1, 31489, 5, 31490, 9, 31491, 13, 31492, 17, 31493, 21, 31494, 25, 31507, 29, 31508, 33, 31509, 37, 31510, 41, 31511, 45, 1073774401, -128, 32602, -128 }; // NOLINT
+static const MultiCharacterSpecialCase<3> kToUppercaseMultiStrings2[] = { {0, {0}} }; // NOLINT
+static const uint16_t kToUppercaseTable2Size = 2;
+static const int32_t kToUppercaseTable2[4] = { 1073742888, -160, 1103, -160 }; // NOLINT
+int ToUppercase::Convert(uchar c,
+                      uchar n,
+                      uchar* result,
+                      bool* allow_caching_ptr) {
+  int chunk_index = c >> 15;
+  switch (chunk_index) {
+    case 0: return LookupMapping(kToUppercaseTable0,
+                                     kToUppercaseTable0Size,
+                                     kToUppercaseMultiStrings0,
+                                     c,
+                                     n,
+                                     result,
+                                     allow_caching_ptr);
+    case 1: return LookupMapping(kToUppercaseTable1,
+                                     kToUppercaseTable1Size,
+                                     kToUppercaseMultiStrings1,
+                                     c,
+                                     n,
+                                     result,
+                                     allow_caching_ptr);
+    case 2: return LookupMapping(kToUppercaseTable2,
+                                     kToUppercaseTable2Size,
+                                     kToUppercaseMultiStrings2,
+                                     c,
+                                     n,
+                                     result,
+                                     allow_caching_ptr);
+    default: return 0;
+  }
+}
+
+static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings0[] = { {0, {0}} }; // NOLINT
+static const uint16_t kEcma262CanonicalizeTable0Size = 529;
+static const int32_t kEcma262CanonicalizeTable0[1058] = { 1073741921, -128, 122, -128, 181, 2972, 1073742048, -128, 246, -128, 1073742072, -128, 254, -128, 255, 484, 257, -4, 259, -4, 261, -4, 263, -4, 265, -4, 267, -4, 269, -4, 271, -4, 273, -4, 275, -4, 277, -4, 279, -4, 281, -4, 283, -4, 285, -4, 287, -4, 289, -4, 291, -4, 293, -4, 295, -4, 297, -4, 299, -4, 301, -4, 303, -4, 307, -4, 309, -4, 311, -4, 314, -4, 316, -4, 318, -4, 320, -4, 322, -4, 324, -4, 326, -4, 328, -4, 331, -4, 333, -4, 335, -4, 337, -4, 339, -4, 341, -4, 343, -4, 345, -4, 347, -4, 349, -4, 351, -4, 353, -4, 355, -4, 357, -4, 359, -4, 361, -4, 363, -4, 365, -4, 367, -4, 369, -4, 371, -4, 373, -4, 375, -4, 378, -4, 380, -4, 382, -4, 384, 780, 387, -4, 389, -4, 392, -4, 396, -4, 402, -4, 405, 388, 409, -4, 410, 652, 414, 520, 417, -4, 419, -4, 421, -4, 424, -4, 429, -4, 432, -4, 436, -4, 438, -4, 441, -4, 445, -4, 447, 224, 453, -4, 454, -8, 456, -4, 457, -8, 459, -4, 460, -8, 462, -4, 464, -4, 466, -4, 468, -4, 470, -4, 472, -4, 474, -4, 476, -4, 477, -316, 479, -4, 481, -4, 483, -4, 485, -4, 487, -4, 489, -4, 491, -4, 493, -4, 495, -4, 498, -4, 499, -8, 501, -4, 505, -4, 507, -4, 509, -4, 511, -4, 513, -4, 515, -4, 517, -4, 519, -4, 521, -4, 523, -4, 525, -4, 527, -4, 529, -4, 531, -4, 533, -4, 535, -4, 537, -4, 539, -4, 541, -4, 543, -4, 547, -4, 549, -4, 551, -4, 553, -4, 555, -4, 557, -4, 559, -4, 561, -4, 563, -4, 572, -4, 578, -4, 583, -4, 585, -4, 587, -4, 589, -4, 591, -4, 595, -840, 596, -824, 1073742422, -820, 599, -820, 601, -808, 603, -812, 608, -820, 611, -828, 616, -836, 617, -844, 619, 42972, 623, -844, 626, -852, 629, -856, 637, 42908, 640, -872, 643, -872, 648, -872, 649, -276, 1073742474, -868, 651, -868, 652, -284, 658, -876, 837, 336, 1073742715, 520, 893, 520, 940, -152, 1073742765, -148, 943, -148, 1073742769, -128, 961, -128, 962, -124, 1073742787, -128, 971, -128, 972, -256, 1073742797, -252, 974, -252, 976, -248, 977, -228, 981, -188, 982, -216, 985, -4, 987, -4, 989, -4, 991, -4, 993, -4, 995, -4, 997, -4, 999, -4, 1001, -4, 1003, -4, 1005, -4, 1007, -4, 1008, -344, 1009, -320, 1010, 28, 1013, -384, 1016, -4, 1019, -4, 1073742896, -128, 1103, -128, 1073742928, -320, 1119, -320, 1121, -4, 1123, -4, 1125, -4, 1127, -4, 1129, -4, 1131, -4, 1133, -4, 1135, -4, 1137, -4, 1139, -4, 1141, -4, 1143, -4, 1145, -4, 1147, -4, 1149, -4, 1151, -4, 1153, -4, 1163, -4, 1165, -4, 1167, -4, 1169, -4, 1171, -4, 1173, -4, 1175, -4, 1177, -4, 1179, -4, 1181, -4, 1183, -4, 1185, -4, 1187, -4, 1189, -4, 1191, -4, 1193, -4, 1195, -4, 1197, -4, 1199, -4, 1201, -4, 1203, -4, 1205, -4, 1207, -4, 1209, -4, 1211, -4, 1213, -4, 1215, -4, 1218, -4, 1220, -4, 1222, -4, 1224, -4, 1226, -4, 1228, -4, 1230, -4, 1231, -60, 1233, -4, 1235, -4, 1237, -4, 1239, -4, 1241, -4, 1243, -4, 1245, -4, 1247, -4, 1249, -4, 1251, -4, 1253, -4, 1255, -4, 1257, -4, 1259, -4, 1261, -4, 1263, -4, 1265, -4, 1267, -4, 1269, -4, 1271, -4, 1273, -4, 1275, -4, 1277, -4, 1279, -4, 1281, -4, 1283, -4, 1285, -4, 1287, -4, 1289, -4, 1291, -4, 1293, -4, 1295, -4, 1297, -4, 1299, -4, 1073743201, -192, 1414, -192, 7549, 15256, 7681, -4, 7683, -4, 7685, -4, 7687, -4, 7689, -4, 7691, -4, 7693, -4, 7695, -4, 7697, -4, 7699, -4, 7701, -4, 7703, -4, 7705, -4, 7707, -4, 7709, -4, 7711, -4, 7713, -4, 7715, -4, 7717, -4, 7719, -4, 7721, -4, 7723, -4, 7725, -4, 7727, -4, 7729, -4, 7731, -4, 7733, -4, 7735, -4, 7737, -4, 7739, -4, 7741, -4, 7743, -4, 7745, -4, 7747, -4, 7749, -4, 7751, -4, 7753, -4, 7755, -4, 7757, -4, 7759, -4, 7761, -4, 7763, -4, 7765, -4, 
7767, -4, 7769, -4, 7771, -4, 7773, -4, 7775, -4, 7777, -4, 7779, -4, 7781, -4, 7783, -4, 7785, -4, 7787, -4, 7789, -4, 7791, -4, 7793, -4, 7795, -4, 7797, -4, 7799, -4, 7801, -4, 7803, -4, 7805, -4, 7807, -4, 7809, -4, 7811, -4, 7813, -4, 7815, -4, 7817, -4, 7819, -4, 7821, -4, 7823, -4, 7825, -4, 7827, -4, 7829, -4, 7835, -236, 7841, -4, 7843, -4, 7845, -4, 7847, -4, 7849, -4, 7851, -4, 7853, -4, 7855, -4, 7857, -4, 7859, -4, 7861, -4, 7863, -4, 7865, -4, 7867, -4, 7869, -4, 7871, -4, 7873, -4, 7875, -4, 7877, -4, 7879, -4, 7881, -4, 7883, -4, 7885, -4, 7887, -4, 7889, -4, 7891, -4, 7893, -4, 7895, -4, 7897, -4, 7899, -4, 7901, -4, 7903, -4, 7905, -4, 7907, -4, 7909, -4, 7911, -4, 7913, -4, 7915, -4, 7917, -4, 7919, -4, 7921, -4, 7923, -4, 7925, -4, 7927, -4, 7929, -4, 1073749760, 32, 7943, 32, 1073749776, 32, 7957, 32, 1073749792, 32, 7975, 32, 1073749808, 32, 7991, 32, 1073749824, 32, 8005, 32, 8017, 32, 8019, 32, 8021, 32, 8023, 32, 1073749856, 32, 8039, 32, 1073749872, 296, 8049, 296, 1073749874, 344, 8053, 344, 1073749878, 400, 8055, 400, 1073749880, 512, 8057, 512, 1073749882, 448, 8059, 448, 1073749884, 504, 8061, 504, 1073749936, 32, 8113, 32, 8126, -28820, 1073749968, 32, 8145, 32, 1073749984, 32, 8161, 32, 8165, 28, 8526, -112, 1073750384, -64, 8575, -64, 8580, -4, 1073751248, -104, 9449, -104, 1073753136, -192, 11358, -192, 11361, -4, 11365, -43180, 11366, -43168, 11368, -4, 11370, -4, 11372, -4, 11382, -4, 11393, -4, 11395, -4, 11397, -4, 11399, -4, 11401, -4, 11403, -4, 11405, -4, 11407, -4, 11409, -4, 11411, -4, 11413, -4, 11415, -4, 11417, -4, 11419, -4, 11421, -4, 11423, -4, 11425, -4, 11427, -4, 11429, -4, 11431, -4, 11433, -4, 11435, -4, 11437, -4, 11439, -4, 11441, -4, 11443, -4, 11445, -4, 11447, -4, 11449, -4, 11451, -4, 11453, -4, 11455, -4, 11457, -4, 11459, -4, 11461, -4, 11463, -4, 11465, -4, 11467, -4, 11469, -4, 11471, -4, 11473, -4, 11475, -4, 11477, -4, 11479, -4, 11481, -4, 11483, -4, 11485, -4, 11487, -4, 11489, -4, 11491, -4, 1073753344, -29056, 11557, -29056 }; // NOLINT
+static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings1[] = { {0, {0}} }; // NOLINT
+static const uint16_t kEcma262CanonicalizeTable1Size = 2;
+static const int32_t kEcma262CanonicalizeTable1[4] = { 1073774401, -128, 32602, -128 }; // NOLINT
+static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings2[] = { {0, {0}} }; // NOLINT
+static const uint16_t kEcma262CanonicalizeTable2Size = 2;
+static const int32_t kEcma262CanonicalizeTable2[4] = { 1073742888, -160, 1103, -160 }; // NOLINT
+int Ecma262Canonicalize::Convert(uchar c,
+                      uchar n,
+                      uchar* result,
+                      bool* allow_caching_ptr) {
+  int chunk_index = c >> 15;
+  switch (chunk_index) {
+    case 0: return LookupMapping(kEcma262CanonicalizeTable0,
+                                     kEcma262CanonicalizeTable0Size,
+                                     kEcma262CanonicalizeMultiStrings0,
+                                     c,
+                                     n,
+                                     result,
+                                     allow_caching_ptr);
+    case 1: return LookupMapping(kEcma262CanonicalizeTable1,
+                                     kEcma262CanonicalizeTable1Size,
+                                     kEcma262CanonicalizeMultiStrings1,
+                                     c,
+                                     n,
+                                     result,
+                                     allow_caching_ptr);
+    case 2: return LookupMapping(kEcma262CanonicalizeTable2,
+                                     kEcma262CanonicalizeTable2Size,
+                                     kEcma262CanonicalizeMultiStrings2,
+                                     c,
+                                     n,
+                                     result,
+                                     allow_caching_ptr);
+    default: return 0;
+  }
+}
+
+static const MultiCharacterSpecialCase<4> kEcma262UnCanonicalizeMultiStrings0[] = { {2, {65, 97}}, {2, {66, 98}}, {2, {67, 99}}, {2, {68, 100}}, {2, {69, 101}}, {2, {70, 102}}, {2, {71, 103}}, {2, {72, 104}}, {2, {73, 105}}, {2, {74, 106}}, {2, {75, 107}}, {2, {76, 108}}, {2, {77, 109}}, {2, {78, 110}}, {2, {79, 111}}, {2, {80, 112}}, {2, {81, 113}}, {2, {82, 114}}, {2, {83, 115}}, {2, {84, 116}}, {2, {85, 117}}, {2, {86, 118}}, {2, {87, 119}}, {2, {88, 120}}, {2, {89, 121}}, {2, {90, 122}}, {2, {65, 97}}, {2, {66, 98}}, {2, {67, 99}}, {2, {68, 100}}, {2, {69, 101}}, {2, {70, 102}}, {2, {71, 103}}, {2, {72, 104}}, {2, {73, 105}}, {2, {74, 106}}, {2, {75, 107}}, {2, {76, 108}}, {2, {77, 109}}, {2, {78, 110}}, {2, {79, 111}}, {2, {80, 112}}, {2, {81, 113}}, {2, {82, 114}}, {2, {83, 115}}, {2, {84, 116}}, {2, {85, 117}}, {2, {86, 118}}, {2, {87, 119}}, {2, {88, 120}}, {2, {89, 121}}, {2, {90, 122}}, {3, {181, 924, 956}}, {2, {192, 224}}, {2, {193, 225}}, {2, {194, 226}}, {2, {195, 227}}, {2, {196, 228}}, {2, {197, 229}}, {2, {198, 230}}, {2, {199, 231}}, {2, {200, 232}}, {2, {201, 233}}, {2, {202, 234}}, {2, {203, 235}}, {2, {204, 236}}, {2, {205, 237}}, {2, {206, 238}}, {2, {207, 239}}, {2, {208, 240}}, {2, {209, 241}}, {2, {210, 242}}, {2, {211, 243}}, {2, {212, 244}}, {2, {213, 245}}, {2, {214, 246}}, {2, {216, 248}}, {2, {217, 249}}, {2, {218, 250}}, {2, {219, 251}}, {2, {220, 252}}, {2, {221, 253}}, {2, {222, 254}}, {2, {192, 224}}, {2, {193, 225}}, {2, {194, 226}}, {2, {195, 227}}, {2, {196, 228}}, {2, {197, 229}}, {2, {198, 230}}, {2, {199, 231}}, {2, {200, 232}}, {2, {201, 233}}, {2, {202, 234}}, {2, {203, 235}}, {2, {204, 236}}, {2, {205, 237}}, {2, {206, 238}}, {2, {207, 239}}, {2, {208, 240}}, {2, {209, 241}}, {2, {210, 242}}, {2, {211, 243}}, {2, {212, 244}}, {2, {213, 245}}, {2, {214, 246}}, {2, {216, 248}}, {2, {217, 249}}, {2, {218, 250}}, {2, {219, 251}}, {2, {220, 252}}, {2, {221, 253}}, {2, {222, 254}}, {2, {255, 376}}, {2, {256, 257}}, {2, {256, 257}}, {2, {258, 259}}, {2, {258, 259}}, {2, {260, 261}}, {2, {260, 261}}, {2, {262, 263}}, {2, {262, 263}}, {2, {264, 265}}, {2, {264, 265}}, {2, {266, 267}}, {2, {266, 267}}, {2, {268, 269}}, {2, {268, 269}}, {2, {270, 271}}, {2, {270, 271}}, {2, {272, 273}}, {2, {272, 273}}, {2, {274, 275}}, {2, {274, 275}}, {2, {276, 277}}, {2, {276, 277}}, {2, {278, 279}}, {2, {278, 279}}, {2, {280, 281}}, {2, {280, 281}}, {2, {282, 283}}, {2, {282, 283}}, {2, {284, 285}}, {2, {284, 285}}, {2, {286, 287}}, {2, {286, 287}}, {2, {288, 289}}, {2, {288, 289}}, {2, {290, 291}}, {2, {290, 291}}, {2, {292, 293}}, {2, {292, 293}}, {2, {294, 295}}, {2, {294, 295}}, {2, {296, 297}}, {2, {296, 297}}, {2, {298, 299}}, {2, {298, 299}}, {2, {300, 301}}, {2, {300, 301}}, {2, {302, 303}}, {2, {302, 303}}, {2, {306, 307}}, {2, {306, 307}}, {2, {308, 309}}, {2, {308, 309}}, {2, {310, 311}}, {2, {310, 311}}, {2, {313, 314}}, {2, {313, 314}}, {2, {315, 316}}, {2, {315, 316}}, {2, {317, 318}}, {2, {317, 318}}, {2, {319, 320}}, {2, {319, 320}}, {2, {321, 322}}, {2, {321, 322}}, {2, {323, 324}}, {2, {323, 324}}, {2, {325, 326}}, {2, {325, 326}}, {2, {327, 328}}, {2, {327, 328}}, {2, {330, 331}}, {2, {330, 331}}, {2, {332, 333}}, {2, {332, 333}}, {2, {334, 335}}, {2, {334, 335}}, {2, {336, 337}}, {2, {336, 337}}, {2, {338, 339}}, {2, {338, 339}}, {2, {340, 341}}, {2, {340, 341}}, {2, {342, 343}}, {2, {342, 343}}, {2, {344, 345}}, {2, {344, 345}}, {2, {346, 347}}, {2, {346, 347}}, {2, {348, 349}}, {2, {348, 349}}, {2, {350, 351}}, {2, {350, 351}}, {2, {352, 353}}, {2, 
{352, 353}}, {2, {354, 355}}, {2, {354, 355}}, {2, {356, 357}}, {2, {356, 357}}, {2, {358, 359}}, {2, {358, 359}}, {2, {360, 361}}, {2, {360, 361}}, {2, {362, 363}}, {2, {362, 363}}, {2, {364, 365}}, {2, {364, 365}}, {2, {366, 367}}, {2, {366, 367}}, {2, {368, 369}}, {2, {368, 369}}, {2, {370, 371}}, {2, {370, 371}}, {2, {372, 373}}, {2, {372, 373}}, {2, {374, 375}}, {2, {374, 375}}, {2, {255, 376}}, {2, {377, 378}}, {2, {377, 378}}, {2, {379, 380}}, {2, {379, 380}}, {2, {381, 382}}, {2, {381, 382}}, {2, {384, 579}}, {2, {385, 595}}, {2, {386, 387}}, {2, {386, 387}}, {2, {388, 389}}, {2, {388, 389}}, {2, {390, 596}}, {2, {391, 392}}, {2, {391, 392}}, {2, {393, 598}}, {2, {394, 599}}, {2, {395, 396}}, {2, {395, 396}}, {2, {398, 477}}, {2, {399, 601}}, {2, {400, 603}}, {2, {401, 402}}, {2, {401, 402}}, {2, {403, 608}}, {2, {404, 611}}, {2, {405, 502}}, {2, {406, 617}}, {2, {407, 616}}, {2, {408, 409}}, {2, {408, 409}}, {2, {410, 573}}, {2, {412, 623}}, {2, {413, 626}}, {2, {414, 544}}, {2, {415, 629}}, {2, {416, 417}}, {2, {416, 417}}, {2, {418, 419}}, {2, {418, 419}}, {2, {420, 421}}, {2, {420, 421}}, {2, {422, 640}}, {2, {423, 424}}, {2, {423, 424}}, {2, {425, 643}}, {2, {428, 429}}, {2, {428, 429}}, {2, {430, 648}}, {2, {431, 432}}, {2, {431, 432}}, {2, {433, 650}}, {2, {434, 651}}, {2, {435, 436}}, {2, {435, 436}}, {2, {437, 438}}, {2, {437, 438}}, {2, {439, 658}}, {2, {440, 441}}, {2, {440, 441}}, {2, {444, 445}}, {2, {444, 445}}, {2, {447, 503}}, {3, {452, 453, 454}}, {3, {452, 453, 454}}, {3, {452, 453, 454}}, {3, {455, 456, 457}}, {3, {455, 456, 457}}, {3, {455, 456, 457}}, {3, {458, 459, 460}}, {3, {458, 459, 460}}, {3, {458, 459, 460}}, {2, {461, 462}}, {2, {461, 462}}, {2, {463, 464}}, {2, {463, 464}}, {2, {465, 466}}, {2, {465, 466}}, {2, {467, 468}}, {2, {467, 468}}, {2, {469, 470}}, {2, {469, 470}}, {2, {471, 472}}, {2, {471, 472}}, {2, {473, 474}}, {2, {473, 474}}, {2, {475, 476}}, {2, {475, 476}}, {2, {398, 477}}, {2, {478, 479}}, {2, {478, 479}}, {2, {480, 481}}, {2, {480, 481}}, {2, {482, 483}}, {2, {482, 483}}, {2, {484, 485}}, {2, {484, 485}}, {2, {486, 487}}, {2, {486, 487}}, {2, {488, 489}}, {2, {488, 489}}, {2, {490, 491}}, {2, {490, 491}}, {2, {492, 493}}, {2, {492, 493}}, {2, {494, 495}}, {2, {494, 495}}, {3, {497, 498, 499}}, {3, {497, 498, 499}}, {3, {497, 498, 499}}, {2, {500, 501}}, {2, {500, 501}}, {2, {405, 502}}, {2, {447, 503}}, {2, {504, 505}}, {2, {504, 505}}, {2, {506, 507}}, {2, {506, 507}}, {2, {508, 509}}, {2, {508, 509}}, {2, {510, 511}}, {2, {510, 511}}, {2, {512, 513}}, {2, {512, 513}}, {2, {514, 515}}, {2, {514, 515}}, {2, {516, 517}}, {2, {516, 517}}, {2, {518, 519}}, {2, {518, 519}}, {2, {520, 521}}, {2, {520, 521}}, {2, {522, 523}}, {2, {522, 523}}, {2, {524, 525}}, {2, {524, 525}}, {2, {526, 527}}, {2, {526, 527}}, {2, {528, 529}}, {2, {528, 529}}, {2, {530, 531}}, {2, {530, 531}}, {2, {532, 533}}, {2, {532, 533}}, {2, {534, 535}}, {2, {534, 535}}, {2, {536, 537}}, {2, {536, 537}}, {2, {538, 539}}, {2, {538, 539}}, {2, {540, 541}}, {2, {540, 541}}, {2, {542, 543}}, {2, {542, 543}}, {2, {414, 544}}, {2, {546, 547}}, {2, {546, 547}}, {2, {548, 549}}, {2, {548, 549}}, {2, {550, 551}}, {2, {550, 551}}, {2, {552, 553}}, {2, {552, 553}}, {2, {554, 555}}, {2, {554, 555}}, {2, {556, 557}}, {2, {556, 557}}, {2, {558, 559}}, {2, {558, 559}}, {2, {560, 561}}, {2, {560, 561}}, {2, {562, 563}}, {2, {562, 563}}, {2, {570, 11365}}, {2, {571, 572}}, {2, {571, 572}}, {2, {410, 573}}, {2, {574, 11366}}, {2, {577, 578}}, {2, {577, 578}}, {2, {384, 579}}, {2, {580, 
649}}, {2, {581, 652}}, {2, {582, 583}}, {2, {582, 583}}, {2, {584, 585}}, {2, {584, 585}}, {2, {586, 587}}, {2, {586, 587}}, {2, {588, 589}}, {2, {588, 589}}, {2, {590, 591}}, {2, {590, 591}}, {2, {385, 595}}, {2, {390, 596}}, {2, {393, 598}}, {2, {394, 599}}, {2, {399, 601}}, {2, {400, 603}}, {2, {403, 608}}, {2, {404, 611}}, {2, {407, 616}}, {2, {406, 617}}, {2, {619, 11362}}, {2, {412, 623}}, {2, {413, 626}}, {2, {415, 629}}, {2, {637, 11364}}, {2, {422, 640}}, {2, {425, 643}}, {2, {430, 648}}, {2, {580, 649}}, {2, {433, 650}}, {2, {434, 651}}, {2, {581, 652}}, {2, {439, 658}}, {4, {837, 921, 953, 8126}}, {2, {891, 1021}}, {2, {892, 1022}}, {2, {893, 1023}}, {2, {902, 940}}, {2, {904, 941}}, {2, {905, 942}}, {2, {906, 943}}, {2, {908, 972}}, {2, {910, 973}}, {2, {911, 974}}, {2, {913, 945}}, {3, {914, 946, 976}}, {2, {915, 947}}, {2, {916, 948}}, {3, {917, 949, 1013}}, {2, {918, 950}}, {2, {919, 951}}, {3, {920, 952, 977}}, {4, {837, 921, 953, 8126}}, {3, {922, 954, 1008}}, {2, {923, 955}}, {3, {181, 924, 956}}, {2, {925, 957}}, {2, {926, 958}}, {2, {927, 959}}, {3, {928, 960, 982}}, {3, {929, 961, 1009}}, {3, {931, 962, 963}}, {2, {932, 964}}, {2, {933, 965}}, {3, {934, 966, 981}}, {2, {935, 967}}, {2, {936, 968}}, {2, {937, 969}}, {2, {938, 970}}, {2, {939, 971}}, {2, {902, 940}}, {2, {904, 941}}, {2, {905, 942}}, {2, {906, 943}}, {2, {913, 945}}, {3, {914, 946, 976}}, {2, {915, 947}}, {2, {916, 948}}, {3, {917, 949, 1013}}, {2, {918, 950}}, {2, {919, 951}}, {3, {920, 952, 977}}, {4, {837, 921, 953, 8126}}, {3, {922, 954, 1008}}, {2, {923, 955}}, {3, {181, 924, 956}}, {2, {925, 957}}, {2, {926, 958}}, {2, {927, 959}}, {3, {928, 960, 982}}, {3, {929, 961, 1009}}, {3, {931, 962, 963}}, {3, {931, 962, 963}}, {2, {932, 964}}, {2, {933, 965}}, {3, {934, 966, 981}}, {2, {935, 967}}, {2, {936, 968}}, {2, {937, 969}}, {2, {938, 970}}, {2, {939, 971}}, {2, {908, 972}}, {2, {910, 973}}, {2, {911, 974}}, {3, {914, 946, 976}}, {3, {920, 952, 977}}, {3, {934, 966, 981}}, {3, {928, 960, 982}}, {2, {984, 985}}, {2, {984, 985}}, {2, {986, 987}}, {2, {986, 987}}, {2, {988, 989}}, {2, {988, 989}}, {2, {990, 991}}, {2, {990, 991}}, {2, {992, 993}}, {2, {992, 993}}, {2, {994, 995}}, {2, {994, 995}}, {2, {996, 997}}, {2, {996, 997}}, {2, {998, 999}}, {2, {998, 999}}, {2, {1000, 1001}}, {2, {1000, 1001}}, {2, {1002, 1003}}, {2, {1002, 1003}}, {2, {1004, 1005}}, {2, {1004, 1005}}, {2, {1006, 1007}}, {2, {1006, 1007}}, {3, {922, 954, 1008}}, {3, {929, 961, 1009}}, {2, {1010, 1017}}, {3, {917, 949, 1013}}, {2, {1015, 1016}}, {2, {1015, 1016}}, {2, {1010, 1017}}, {2, {1018, 1019}}, {2, {1018, 1019}}, {2, {891, 1021}}, {2, {892, 1022}}, {2, {893, 1023}}, {2, {1024, 1104}}, {2, {1025, 1105}}, {2, {1026, 1106}}, {2, {1027, 1107}}, {2, {1028, 1108}}, {2, {1029, 1109}}, {2, {1030, 1110}}, {2, {1031, 1111}}, {2, {1032, 1112}}, {2, {1033, 1113}}, {2, {1034, 1114}}, {2, {1035, 1115}}, {2, {1036, 1116}}, {2, {1037, 1117}}, {2, {1038, 1118}}, {2, {1039, 1119}}, {2, {1040, 1072}}, {2, {1041, 1073}}, {2, {1042, 1074}}, {2, {1043, 1075}}, {2, {1044, 1076}}, {2, {1045, 1077}}, {2, {1046, 1078}}, {2, {1047, 1079}}, {2, {1048, 1080}}, {2, {1049, 1081}}, {2, {1050, 1082}}, {2, {1051, 1083}}, {2, {1052, 1084}}, {2, {1053, 1085}}, {2, {1054, 1086}}, {2, {1055, 1087}}, {2, {1056, 1088}}, {2, {1057, 1089}}, {2, {1058, 1090}}, {2, {1059, 1091}}, {2, {1060, 1092}}, {2, {1061, 1093}}, {2, {1062, 1094}}, {2, {1063, 1095}}, {2, {1064, 1096}}, {2, {1065, 1097}}, {2, {1066, 1098}}, {2, {1067, 1099}}, {2, {1068, 1100}}, {2, {1069, 
1101}}, {2, {1070, 1102}}, {2, {1071, 1103}}, {2, {1040, 1072}}, {2, {1041, 1073}}, {2, {1042, 1074}}, {2, {1043, 1075}}, {2, {1044, 1076}}, {2, {1045, 1077}}, {2, {1046, 1078}}, {2, {1047, 1079}}, {2, {1048, 1080}}, {2, {1049, 1081}}, {2, {1050, 1082}}, {2, {1051, 1083}}, {2, {1052, 1084}}, {2, {1053, 1085}}, {2, {1054, 1086}}, {2, {1055, 1087}}, {2, {1056, 1088}}, {2, {1057, 1089}}, {2, {1058, 1090}}, {2, {1059, 1091}}, {2, {1060, 1092}}, {2, {1061, 1093}}, {2, {1062, 1094}}, {2, {1063, 1095}}, {2, {1064, 1096}}, {2, {1065, 1097}}, {2, {1066, 1098}}, {2, {1067, 1099}}, {2, {1068, 1100}}, {2, {1069, 1101}}, {2, {1070, 1102}}, {2, {1071, 1103}}, {2, {1024, 1104}}, {2, {1025, 1105}}, {2, {1026, 1106}}, {2, {1027, 1107}}, {2, {1028, 1108}}, {2, {1029, 1109}}, {2, {1030, 1110}}, {2, {1031, 1111}}, {2, {1032, 1112}}, {2, {1033, 1113}}, {2, {1034, 1114}}, {2, {1035, 1115}}, {2, {1036, 1116}}, {2, {1037, 1117}}, {2, {1038, 1118}}, {2, {1039, 1119}}, {2, {1120, 1121}}, {2, {1120, 1121}}, {2, {1122, 1123}}, {2, {1122, 1123}}, {2, {1124, 1125}}, {2, {1124, 1125}}, {2, {1126, 1127}}, {2, {1126, 1127}}, {2, {1128, 1129}}, {2, {1128, 1129}}, {2, {1130, 1131}}, {2, {1130, 1131}}, {2, {1132, 1133}}, {2, {1132, 1133}}, {2, {1134, 1135}}, {2, {1134, 1135}}, {2, {1136, 1137}}, {2, {1136, 1137}}, {2, {1138, 1139}}, {2, {1138, 1139}}, {2, {1140, 1141}}, {2, {1140, 1141}}, {2, {1142, 1143}}, {2, {1142, 1143}}, {2, {1144, 1145}}, {2, {1144, 1145}}, {2, {1146, 1147}}, {2, {1146, 1147}}, {2, {1148, 1149}}, {2, {1148, 1149}}, {2, {1150, 1151}}, {2, {1150, 1151}}, {2, {1152, 1153}}, {2, {1152, 1153}}, {2, {1162, 1163}}, {2, {1162, 1163}}, {2, {1164, 1165}}, {2, {1164, 1165}}, {2, {1166, 1167}}, {2, {1166, 1167}}, {2, {1168, 1169}}, {2, {1168, 1169}}, {2, {1170, 1171}}, {2, {1170, 1171}}, {2, {1172, 1173}}, {2, {1172, 1173}}, {2, {1174, 1175}}, {2, {1174, 1175}}, {2, {1176, 1177}}, {2, {1176, 1177}}, {2, {1178, 1179}}, {2, {1178, 1179}}, {2, {1180, 1181}}, {2, {1180, 1181}}, {2, {1182, 1183}}, {2, {1182, 1183}}, {2, {1184, 1185}}, {2, {1184, 1185}}, {2, {1186, 1187}}, {2, {1186, 1187}}, {2, {1188, 1189}}, {2, {1188, 1189}}, {2, {1190, 1191}}, {2, {1190, 1191}}, {2, {1192, 1193}}, {2, {1192, 1193}}, {2, {1194, 1195}}, {2, {1194, 1195}}, {2, {1196, 1197}}, {2, {1196, 1197}}, {2, {1198, 1199}}, {2, {1198, 1199}}, {2, {1200, 1201}}, {2, {1200, 1201}}, {2, {1202, 1203}}, {2, {1202, 1203}}, {2, {1204, 1205}}, {2, {1204, 1205}}, {2, {1206, 1207}}, {2, {1206, 1207}}, {2, {1208, 1209}}, {2, {1208, 1209}}, {2, {1210, 1211}}, {2, {1210, 1211}}, {2, {1212, 1213}}, {2, {1212, 1213}}, {2, {1214, 1215}}, {2, {1214, 1215}}, {2, {1216, 1231}}, {2, {1217, 1218}}, {2, {1217, 1218}}, {2, {1219, 1220}}, {2, {1219, 1220}}, {2, {1221, 1222}}, {2, {1221, 1222}}, {2, {1223, 1224}}, {2, {1223, 1224}}, {2, {1225, 1226}}, {2, {1225, 1226}}, {2, {1227, 1228}}, {2, {1227, 1228}}, {2, {1229, 1230}}, {2, {1229, 1230}}, {2, {1216, 1231}}, {2, {1232, 1233}}, {2, {1232, 1233}}, {2, {1234, 1235}}, {2, {1234, 1235}}, {2, {1236, 1237}}, {2, {1236, 1237}}, {2, {1238, 1239}}, {2, {1238, 1239}}, {2, {1240, 1241}}, {2, {1240, 1241}}, {2, {1242, 1243}}, {2, {1242, 1243}}, {2, {1244, 1245}}, {2, {1244, 1245}}, {2, {1246, 1247}}, {2, {1246, 1247}}, {2, {1248, 1249}}, {2, {1248, 1249}}, {2, {1250, 1251}}, {2, {1250, 1251}}, {2, {1252, 1253}}, {2, {1252, 1253}}, {2, {1254, 1255}}, {2, {1254, 1255}}, {2, {1256, 1257}}, {2, {1256, 1257}}, {2, {1258, 1259}}, {2, {1258, 1259}}, {2, {1260, 1261}}, {2, {1260, 1261}}, {2, {1262, 1263}}, {2, {1262, 1263}}, {2, {1264, 
1265}}, {2, {1264, 1265}}, {2, {1266, 1267}}, {2, {1266, 1267}}, {2, {1268, 1269}}, {2, {1268, 1269}}, {2, {1270, 1271}}, {2, {1270, 1271}}, {2, {1272, 1273}}, {2, {1272, 1273}}, {2, {1274, 1275}}, {2, {1274, 1275}}, {2, {1276, 1277}}, {2, {1276, 1277}}, {2, {1278, 1279}}, {2, {1278, 1279}}, {2, {1280, 1281}}, {2, {1280, 1281}}, {2, {1282, 1283}}, {2, {1282, 1283}}, {2, {1284, 1285}}, {2, {1284, 1285}}, {2, {1286, 1287}}, {2, {1286, 1287}}, {2, {1288, 1289}}, {2, {1288, 1289}}, {2, {1290, 1291}}, {2, {1290, 1291}}, {2, {1292, 1293}}, {2, {1292, 1293}}, {2, {1294, 1295}}, {2, {1294, 1295}}, {2, {1296, 1297}}, {2, {1296, 1297}}, {2, {1298, 1299}}, {2, {1298, 1299}}, {2, {1329, 1377}}, {2, {1330, 1378}}, {2, {1331, 1379}}, {2, {1332, 1380}}, {2, {1333, 1381}}, {2, {1334, 1382}}, {2, {1335, 1383}}, {2, {1336, 1384}}, {2, {1337, 1385}}, {2, {1338, 1386}}, {2, {1339, 1387}}, {2, {1340, 1388}}, {2, {1341, 1389}}, {2, {1342, 1390}}, {2, {1343, 1391}}, {2, {1344, 1392}}, {2, {1345, 1393}}, {2, {1346, 1394}}, {2, {1347, 1395}}, {2, {1348, 1396}}, {2, {1349, 1397}}, {2, {1350, 1398}}, {2, {1351, 1399}}, {2, {1352, 1400}}, {2, {1353, 1401}}, {2, {1354, 1402}}, {2, {1355, 1403}}, {2, {1356, 1404}}, {2, {1357, 1405}}, {2, {1358, 1406}}, {2, {1359, 1407}}, {2, {1360, 1408}}, {2, {1361, 1409}}, {2, {1362, 1410}}, {2, {1363, 1411}}, {2, {1364, 1412}}, {2, {1365, 1413}}, {2, {1366, 1414}}, {2, {1329, 1377}}, {2, {1330, 1378}}, {2, {1331, 1379}}, {2, {1332, 1380}}, {2, {1333, 1381}}, {2, {1334, 1382}}, {2, {1335, 1383}}, {2, {1336, 1384}}, {2, {1337, 1385}}, {2, {1338, 1386}}, {2, {1339, 1387}}, {2, {1340, 1388}}, {2, {1341, 1389}}, {2, {1342, 1390}}, {2, {1343, 1391}}, {2, {1344, 1392}}, {2, {1345, 1393}}, {2, {1346, 1394}}, {2, {1347, 1395}}, {2, {1348, 1396}}, {2, {1349, 1397}}, {2, {1350, 1398}}, {2, {1351, 1399}}, {2, {1352, 1400}}, {2, {1353, 1401}}, {2, {1354, 1402}}, {2, {1355, 1403}}, {2, {1356, 1404}}, {2, {1357, 1405}}, {2, {1358, 1406}}, {2, {1359, 1407}}, {2, {1360, 1408}}, {2, {1361, 1409}}, {2, {1362, 1410}}, {2, {1363, 1411}}, {2, {1364, 1412}}, {2, {1365, 1413}}, {2, {1366, 1414}}, {2, {4256, 11520}}, {2, {4257, 11521}}, {2, {4258, 11522}}, {2, {4259, 11523}}, {2, {4260, 11524}}, {2, {4261, 11525}}, {2, {4262, 11526}}, {2, {4263, 11527}}, {2, {4264, 11528}}, {2, {4265, 11529}}, {2, {4266, 11530}}, {2, {4267, 11531}}, {2, {4268, 11532}}, {2, {4269, 11533}}, {2, {4270, 11534}}, {2, {4271, 11535}}, {2, {4272, 11536}}, {2, {4273, 11537}}, {2, {4274, 11538}}, {2, {4275, 11539}}, {2, {4276, 11540}}, {2, {4277, 11541}}, {2, {4278, 11542}}, {2, {4279, 11543}}, {2, {4280, 11544}}, {2, {4281, 11545}}, {2, {4282, 11546}}, {2, {4283, 11547}}, {2, {4284, 11548}}, {2, {4285, 11549}}, {2, {4286, 11550}}, {2, {4287, 11551}}, {2, {4288, 11552}}, {2, {4289, 11553}}, {2, {4290, 11554}}, {2, {4291, 11555}}, {2, {4292, 11556}}, {2, {4293, 11557}}, {2, {7549, 11363}}, {2, {7680, 7681}}, {2, {7680, 7681}}, {2, {7682, 7683}}, {2, {7682, 7683}}, {2, {7684, 7685}}, {2, {7684, 7685}}, {2, {7686, 7687}}, {2, {7686, 7687}}, {2, {7688, 7689}}, {2, {7688, 7689}}, {2, {7690, 7691}}, {2, {7690, 7691}}, {2, {7692, 7693}}, {2, {7692, 7693}}, {2, {7694, 7695}}, {2, {7694, 7695}}, {2, {7696, 7697}}, {2, {7696, 7697}}, {2, {7698, 7699}}, {2, {7698, 7699}}, {2, {7700, 7701}}, {2, {7700, 7701}}, {2, {7702, 7703}}, {2, {7702, 7703}}, {2, {7704, 7705}}, {2, {7704, 7705}}, {2, {7706, 7707}}, {2, {7706, 7707}}, {2, {7708, 7709}}, {2, {7708, 7709}}, {2, {7710, 7711}}, {2, {7710, 7711}}, {2, {7712, 7713}}, {2, {7712, 7713}}, {2, {7714, 
7715}}, {2, {7714, 7715}}, {2, {7716, 7717}}, {2, {7716, 7717}}, {2, {7718, 7719}}, {2, {7718, 7719}}, {2, {7720, 7721}}, {2, {7720, 7721}}, {2, {7722, 7723}}, {2, {7722, 7723}}, {2, {7724, 7725}}, {2, {7724, 7725}}, {2, {7726, 7727}}, {2, {7726, 7727}}, {2, {7728, 7729}}, {2, {7728, 7729}}, {2, {7730, 7731}}, {2, {7730, 7731}}, {2, {7732, 7733}}, {2, {7732, 7733}}, {2, {7734, 7735}}, {2, {7734, 7735}}, {2, {7736, 7737}}, {2, {7736, 7737}}, {2, {7738, 7739}}, {2, {7738, 7739}}, {2, {7740, 7741}}, {2, {7740, 7741}}, {2, {7742, 7743}}, {2, {7742, 7743}}, {2, {7744, 7745}}, {2, {7744, 7745}}, {2, {7746, 7747}}, {2, {7746, 7747}}, {2, {7748, 7749}}, {2, {7748, 7749}}, {2, {7750, 7751}}, {2, {7750, 7751}}, {2, {7752, 7753}}, {2, {7752, 7753}}, {2, {7754, 7755}}, {2, {7754, 7755}}, {2, {7756, 7757}}, {2, {7756, 7757}}, {2, {7758, 7759}}, {2, {7758, 7759}}, {2, {7760, 7761}}, {2, {7760, 7761}}, {2, {7762, 7763}}, {2, {7762, 7763}}, {2, {7764, 7765}}, {2, {7764, 7765}}, {2, {7766, 7767}}, {2, {7766, 7767}}, {2, {7768, 7769}}, {2, {7768, 7769}}, {2, {7770, 7771}}, {2, {7770, 7771}}, {2, {7772, 7773}}, {2, {7772, 7773}}, {2, {7774, 7775}}, {2, {7774, 7775}}, {3, {7776, 7777, 7835}}, {3, {7776, 7777, 7835}}, {2, {7778, 7779}}, {2, {7778, 7779}}, {2, {7780, 7781}}, {2, {7780, 7781}}, {2, {7782, 7783}}, {2, {7782, 7783}}, {2, {7784, 7785}}, {2, {7784, 7785}}, {2, {7786, 7787}}, {2, {7786, 7787}}, {2, {7788, 7789}}, {2, {7788, 7789}}, {2, {7790, 7791}}, {2, {7790, 7791}}, {2, {7792, 7793}}, {2, {7792, 7793}}, {2, {7794, 7795}}, {2, {7794, 7795}}, {2, {7796, 7797}}, {2, {7796, 7797}}, {2, {7798, 7799}}, {2, {7798, 7799}}, {2, {7800, 7801}}, {2, {7800, 7801}}, {2, {7802, 7803}}, {2, {7802, 7803}}, {2, {7804, 7805}}, {2, {7804, 7805}}, {2, {7806, 7807}}, {2, {7806, 7807}}, {2, {7808, 7809}}, {2, {7808, 7809}}, {2, {7810, 7811}}, {2, {7810, 7811}}, {2, {7812, 7813}}, {2, {7812, 7813}}, {2, {7814, 7815}}, {2, {7814, 7815}}, {2, {7816, 7817}}, {2, {7816, 7817}}, {2, {7818, 7819}}, {2, {7818, 7819}}, {2, {7820, 7821}}, {2, {7820, 7821}}, {2, {7822, 7823}}, {2, {7822, 7823}}, {2, {7824, 7825}}, {2, {7824, 7825}}, {2, {7826, 7827}}, {2, {7826, 7827}}, {2, {7828, 7829}}, {2, {7828, 7829}}, {3, {7776, 7777, 7835}}, {2, {7840, 7841}}, {2, {7840, 7841}}, {2, {7842, 7843}}, {2, {7842, 7843}}, {2, {7844, 7845}}, {2, {7844, 7845}}, {2, {7846, 7847}}, {2, {7846, 7847}}, {2, {7848, 7849}}, {2, {7848, 7849}}, {2, {7850, 7851}}, {2, {7850, 7851}}, {2, {7852, 7853}}, {2, {7852, 7853}}, {2, {7854, 7855}}, {2, {7854, 7855}}, {2, {7856, 7857}}, {2, {7856, 7857}}, {2, {7858, 7859}}, {2, {7858, 7859}}, {2, {7860, 7861}}, {2, {7860, 7861}}, {2, {7862, 7863}}, {2, {7862, 7863}}, {2, {7864, 7865}}, {2, {7864, 7865}}, {2, {7866, 7867}}, {2, {7866, 7867}}, {2, {7868, 7869}}, {2, {7868, 7869}}, {2, {7870, 7871}}, {2, {7870, 7871}}, {2, {7872, 7873}}, {2, {7872, 7873}}, {2, {7874, 7875}}, {2, {7874, 7875}}, {2, {7876, 7877}}, {2, {7876, 7877}}, {2, {7878, 7879}}, {2, {7878, 7879}}, {2, {7880, 7881}}, {2, {7880, 7881}}, {2, {7882, 7883}}, {2, {7882, 7883}}, {2, {7884, 7885}}, {2, {7884, 7885}}, {2, {7886, 7887}}, {2, {7886, 7887}}, {2, {7888, 7889}}, {2, {7888, 7889}}, {2, {7890, 7891}}, {2, {7890, 7891}}, {2, {7892, 7893}}, {2, {7892, 7893}}, {2, {7894, 7895}}, {2, {7894, 7895}}, {2, {7896, 7897}}, {2, {7896, 7897}}, {2, {7898, 7899}}, {2, {7898, 7899}}, {2, {7900, 7901}}, {2, {7900, 7901}}, {2, {7902, 7903}}, {2, {7902, 7903}}, {2, {7904, 7905}}, {2, {7904, 7905}}, {2, {7906, 7907}}, {2, {7906, 7907}}, {2, {7908, 7909}}, {2, {7908, 
7909}}, {2, {7910, 7911}}, {2, {7910, 7911}}, {2, {7912, 7913}}, {2, {7912, 7913}}, {2, {7914, 7915}}, {2, {7914, 7915}}, {2, {7916, 7917}}, {2, {7916, 7917}}, {2, {7918, 7919}}, {2, {7918, 7919}}, {2, {7920, 7921}}, {2, {7920, 7921}}, {2, {7922, 7923}}, {2, {7922, 7923}}, {2, {7924, 7925}}, {2, {7924, 7925}}, {2, {7926, 7927}}, {2, {7926, 7927}}, {2, {7928, 7929}}, {2, {7928, 7929}}, {2, {7936, 7944}}, {2, {7937, 7945}}, {2, {7938, 7946}}, {2, {7939, 7947}}, {2, {7940, 7948}}, {2, {7941, 7949}}, {2, {7942, 7950}}, {2, {7943, 7951}}, {2, {7936, 7944}}, {2, {7937, 7945}}, {2, {7938, 7946}}, {2, {7939, 7947}}, {2, {7940, 7948}}, {2, {7941, 7949}}, {2, {7942, 7950}}, {2, {7943, 7951}}, {2, {7952, 7960}}, {2, {7953, 7961}}, {2, {7954, 7962}}, {2, {7955, 7963}}, {2, {7956, 7964}}, {2, {7957, 7965}}, {2, {7952, 7960}}, {2, {7953, 7961}}, {2, {7954, 7962}}, {2, {7955, 7963}}, {2, {7956, 7964}}, {2, {7957, 7965}}, {2, {7968, 7976}}, {2, {7969, 7977}}, {2, {7970, 7978}}, {2, {7971, 7979}}, {2, {7972, 7980}}, {2, {7973, 7981}}, {2, {7974, 7982}}, {2, {7975, 7983}}, {2, {7968, 7976}}, {2, {7969, 7977}}, {2, {7970, 7978}}, {2, {7971, 7979}}, {2, {7972, 7980}}, {2, {7973, 7981}}, {2, {7974, 7982}}, {2, {7975, 7983}}, {2, {7984, 7992}}, {2, {7985, 7993}}, {2, {7986, 7994}}, {2, {7987, 7995}}, {2, {7988, 7996}}, {2, {7989, 7997}}, {2, {7990, 7998}}, {2, {7991, 7999}}, {2, {7984, 7992}}, {2, {7985, 7993}}, {2, {7986, 7994}}, {2, {7987, 7995}}, {2, {7988, 7996}}, {2, {7989, 7997}}, {2, {7990, 7998}}, {2, {7991, 7999}}, {2, {8000, 8008}}, {2, {8001, 8009}}, {2, {8002, 8010}}, {2, {8003, 8011}}, {2, {8004, 8012}}, {2, {8005, 8013}}, {2, {8000, 8008}}, {2, {8001, 8009}}, {2, {8002, 8010}}, {2, {8003, 8011}}, {2, {8004, 8012}}, {2, {8005, 8013}}, {2, {8017, 8025}}, {2, {8019, 8027}}, {2, {8021, 8029}}, {2, {8023, 8031}}, {2, {8017, 8025}}, {2, {8019, 8027}}, {2, {8021, 8029}}, {2, {8023, 8031}}, {2, {8032, 8040}}, {2, {8033, 8041}}, {2, {8034, 8042}}, {2, {8035, 8043}}, {2, {8036, 8044}}, {2, {8037, 8045}}, {2, {8038, 8046}}, {2, {8039, 8047}}, {2, {8032, 8040}}, {2, {8033, 8041}}, {2, {8034, 8042}}, {2, {8035, 8043}}, {2, {8036, 8044}}, {2, {8037, 8045}}, {2, {8038, 8046}}, {2, {8039, 8047}}, {2, {8048, 8122}}, {2, {8049, 8123}}, {2, {8050, 8136}}, {2, {8051, 8137}}, {2, {8052, 8138}}, {2, {8053, 8139}}, {2, {8054, 8154}}, {2, {8055, 8155}}, {2, {8056, 8184}}, {2, {8057, 8185}}, {2, {8058, 8170}}, {2, {8059, 8171}}, {2, {8060, 8186}}, {2, {8061, 8187}}, {2, {8112, 8120}}, {2, {8113, 8121}}, {2, {8112, 8120}}, {2, {8113, 8121}}, {2, {8048, 8122}}, {2, {8049, 8123}}, {4, {837, 921, 953, 8126}}, {2, {8050, 8136}}, {2, {8051, 8137}}, {2, {8052, 8138}}, {2, {8053, 8139}}, {2, {8144, 8152}}, {2, {8145, 8153}}, {2, {8144, 8152}}, {2, {8145, 8153}}, {2, {8054, 8154}}, {2, {8055, 8155}}, {2, {8160, 8168}}, {2, {8161, 8169}}, {2, {8165, 8172}}, {2, {8160, 8168}}, {2, {8161, 8169}}, {2, {8058, 8170}}, {2, {8059, 8171}}, {2, {8165, 8172}}, {2, {8056, 8184}}, {2, {8057, 8185}}, {2, {8060, 8186}}, {2, {8061, 8187}}, {2, {8498, 8526}}, {2, {8498, 8526}}, {2, {8544, 8560}}, {2, {8545, 8561}}, {2, {8546, 8562}}, {2, {8547, 8563}}, {2, {8548, 8564}}, {2, {8549, 8565}}, {2, {8550, 8566}}, {2, {8551, 8567}}, {2, {8552, 8568}}, {2, {8553, 8569}}, {2, {8554, 8570}}, {2, {8555, 8571}}, {2, {8556, 8572}}, {2, {8557, 8573}}, {2, {8558, 8574}}, {2, {8559, 8575}}, {2, {8544, 8560}}, {2, {8545, 8561}}, {2, {8546, 8562}}, {2, {8547, 8563}}, {2, {8548, 8564}}, {2, {8549, 8565}}, {2, {8550, 8566}}, {2, {8551, 8567}}, {2, {8552, 8568}}, {2, 
{8553, 8569}}, {2, {8554, 8570}}, {2, {8555, 8571}}, {2, {8556, 8572}}, {2, {8557, 8573}}, {2, {8558, 8574}}, {2, {8559, 8575}}, {2, {8579, 8580}}, {2, {8579, 8580}}, {2, {9398, 9424}}, {2, {9399, 9425}}, {2, {9400, 9426}}, {2, {9401, 9427}}, {2, {9402, 9428}}, {2, {9403, 9429}}, {2, {9404, 9430}}, {2, {9405, 9431}}, {2, {9406, 9432}}, {2, {9407, 9433}}, {2, {9408, 9434}}, {2, {9409, 9435}}, {2, {9410, 9436}}, {2, {9411, 9437}}, {2, {9412, 9438}}, {2, {9413, 9439}}, {2, {9414, 9440}}, {2, {9415, 9441}}, {2, {9416, 9442}}, {2, {9417, 9443}}, {2, {9418, 9444}}, {2, {9419, 9445}}, {2, {9420, 9446}}, {2, {9421, 9447}}, {2, {9422, 9448}}, {2, {9423, 9449}}, {2, {9398, 9424}}, {2, {9399, 9425}}, {2, {9400, 9426}}, {2, {9401, 9427}}, {2, {9402, 9428}}, {2, {9403, 9429}}, {2, {9404, 9430}}, {2, {9405, 9431}}, {2, {9406, 9432}}, {2, {9407, 9433}}, {2, {9408, 9434}}, {2, {9409, 9435}}, {2, {9410, 9436}}, {2, {9411, 9437}}, {2, {9412, 9438}}, {2, {9413, 9439}}, {2, {9414, 9440}}, {2, {9415, 9441}}, {2, {9416, 9442}}, {2, {9417, 9443}}, {2, {9418, 9444}}, {2, {9419, 9445}}, {2, {9420, 9446}}, {2, {9421, 9447}}, {2, {9422, 9448}}, {2, {9423, 9449}}, {2, {11264, 11312}}, {2, {11265, 11313}}, {2, {11266, 11314}}, {2, {11267, 11315}}, {2, {11268, 11316}}, {2, {11269, 11317}}, {2, {11270, 11318}}, {2, {11271, 11319}}, {2, {11272, 11320}}, {2, {11273, 11321}}, {2, {11274, 11322}}, {2, {11275, 11323}}, {2, {11276, 11324}}, {2, {11277, 11325}}, {2, {11278, 11326}}, {2, {11279, 11327}}, {2, {11280, 11328}}, {2, {11281, 11329}}, {2, {11282, 11330}}, {2, {11283, 11331}}, {2, {11284, 11332}}, {2, {11285, 11333}}, {2, {11286, 11334}}, {2, {11287, 11335}}, {2, {11288, 11336}}, {2, {11289, 11337}}, {2, {11290, 11338}}, {2, {11291, 11339}}, {2, {11292, 11340}}, {2, {11293, 11341}}, {2, {11294, 11342}}, {2, {11295, 11343}}, {2, {11296, 11344}}, {2, {11297, 11345}}, {2, {11298, 11346}}, {2, {11299, 11347}}, {2, {11300, 11348}}, {2, {11301, 11349}}, {2, {11302, 11350}}, {2, {11303, 11351}}, {2, {11304, 11352}}, {2, {11305, 11353}}, {2, {11306, 11354}}, {2, {11307, 11355}}, {2, {11308, 11356}}, {2, {11309, 11357}}, {2, {11310, 11358}}, {2, {11264, 11312}}, {2, {11265, 11313}}, {2, {11266, 11314}}, {2, {11267, 11315}}, {2, {11268, 11316}}, {2, {11269, 11317}}, {2, {11270, 11318}}, {2, {11271, 11319}}, {2, {11272, 11320}}, {2, {11273, 11321}}, {2, {11274, 11322}}, {2, {11275, 11323}}, {2, {11276, 11324}}, {2, {11277, 11325}}, {2, {11278, 11326}}, {2, {11279, 11327}}, {2, {11280, 11328}}, {2, {11281, 11329}}, {2, {11282, 11330}}, {2, {11283, 11331}}, {2, {11284, 11332}}, {2, {11285, 11333}}, {2, {11286, 11334}}, {2, {11287, 11335}}, {2, {11288, 11336}}, {2, {11289, 11337}}, {2, {11290, 11338}}, {2, {11291, 11339}}, {2, {11292, 11340}}, {2, {11293, 11341}}, {2, {11294, 11342}}, {2, {11295, 11343}}, {2, {11296, 11344}}, {2, {11297, 11345}}, {2, {11298, 11346}}, {2, {11299, 11347}}, {2, {11300, 11348}}, {2, {11301, 11349}}, {2, {11302, 11350}}, {2, {11303, 11351}}, {2, {11304, 11352}}, {2, {11305, 11353}}, {2, {11306, 11354}}, {2, {11307, 11355}}, {2, {11308, 11356}}, {2, {11309, 11357}}, {2, {11310, 11358}}, {2, {11360, 11361}}, {2, {11360, 11361}}, {2, {619, 11362}}, {2, {7549, 11363}}, {2, {637, 11364}}, {2, {570, 11365}}, {2, {574, 11366}}, {2, {11367, 11368}}, {2, {11367, 11368}}, {2, {11369, 11370}}, {2, {11369, 11370}}, {2, {11371, 11372}}, {2, {11371, 11372}}, {2, {11381, 11382}}, {2, {11381, 11382}}, {2, {11392, 11393}}, {2, {11392, 11393}}, {2, {11394, 11395}}, {2, {11394, 11395}}, {2, {11396, 11397}}, {2, {11396, 
11397}}, {2, {11398, 11399}}, {2, {11398, 11399}}, {2, {11400, 11401}}, {2, {11400, 11401}}, {2, {11402, 11403}}, {2, {11402, 11403}}, {2, {11404, 11405}}, {2, {11404, 11405}}, {2, {11406, 11407}}, {2, {11406, 11407}}, {2, {11408, 11409}}, {2, {11408, 11409}}, {2, {11410, 11411}}, {2, {11410, 11411}}, {2, {11412, 11413}}, {2, {11412, 11413}}, {2, {11414, 11415}}, {2, {11414, 11415}}, {2, {11416, 11417}}, {2, {11416, 11417}}, {2, {11418, 11419}}, {2, {11418, 11419}}, {2, {11420, 11421}}, {2, {11420, 11421}}, {2, {11422, 11423}}, {2, {11422, 11423}}, {2, {11424, 11425}}, {2, {11424, 11425}}, {2, {11426, 11427}}, {2, {11426, 11427}}, {2, {11428, 11429}}, {2, {11428, 11429}}, {2, {11430, 11431}}, {2, {11430, 11431}}, {2, {11432, 11433}}, {2, {11432, 11433}}, {2, {11434, 11435}}, {2, {11434, 11435}}, {2, {11436, 11437}}, {2, {11436, 11437}}, {2, {11438, 11439}}, {2, {11438, 11439}}, {2, {11440, 11441}}, {2, {11440, 11441}}, {2, {11442, 11443}}, {2, {11442, 11443}}, {2, {11444, 11445}}, {2, {11444, 11445}}, {2, {11446, 11447}}, {2, {11446, 11447}}, {2, {11448, 11449}}, {2, {11448, 11449}}, {2, {11450, 11451}}, {2, {11450, 11451}}, {2, {11452, 11453}}, {2, {11452, 11453}}, {2, {11454, 11455}}, {2, {11454, 11455}}, {2, {11456, 11457}}, {2, {11456, 11457}}, {2, {11458, 11459}}, {2, {11458, 11459}}, {2, {11460, 11461}}, {2, {11460, 11461}}, {2, {11462, 11463}}, {2, {11462, 11463}}, {2, {11464, 11465}}, {2, {11464, 11465}}, {2, {11466, 11467}}, {2, {11466, 11467}}, {2, {11468, 11469}}, {2, {11468, 11469}}, {2, {11470, 11471}}, {2, {11470, 11471}}, {2, {11472, 11473}}, {2, {11472, 11473}}, {2, {11474, 11475}}, {2, {11474, 11475}}, {2, {11476, 11477}}, {2, {11476, 11477}}, {2, {11478, 11479}}, {2, {11478, 11479}}, {2, {11480, 11481}}, {2, {11480, 11481}}, {2, {11482, 11483}}, {2, {11482, 11483}}, {2, {11484, 11485}}, {2, {11484, 11485}}, {2, {11486, 11487}}, {2, {11486, 11487}}, {2, {11488, 11489}}, {2, {11488, 11489}}, {2, {11490, 11491}}, {2, {11490, 11491}}, {2, {4256, 11520}}, {2, {4257, 11521}}, {2, {4258, 11522}}, {2, {4259, 11523}}, {2, {4260, 11524}}, {2, {4261, 11525}}, {2, {4262, 11526}}, {2, {4263, 11527}}, {2, {4264, 11528}}, {2, {4265, 11529}}, {2, {4266, 11530}}, {2, {4267, 11531}}, {2, {4268, 11532}}, {2, {4269, 11533}}, {2, {4270, 11534}}, {2, {4271, 11535}}, {2, {4272, 11536}}, {2, {4273, 11537}}, {2, {4274, 11538}}, {2, {4275, 11539}}, {2, {4276, 11540}}, {2, {4277, 11541}}, {2, {4278, 11542}}, {2, {4279, 11543}}, {2, {4280, 11544}}, {2, {4281, 11545}}, {2, {4282, 11546}}, {2, {4283, 11547}}, {2, {4284, 11548}}, {2, {4285, 11549}}, {2, {4286, 11550}}, {2, {4287, 11551}}, {2, {4288, 11552}}, {2, {4289, 11553}}, {2, {4290, 11554}}, {2, {4291, 11555}}, {2, {4292, 11556}}, {2, {4293, 11557}}, {0, {0}} }; // NOLINT
+static const uint16_t kEcma262UnCanonicalizeTable0Size = 1656;
+static const int32_t kEcma262UnCanonicalizeTable0[3312] = { 65, 1, 66, 5, 67, 9, 68, 13, 69, 17, 70, 21, 71, 25, 72, 29, 73, 33, 74, 37, 75, 41, 76, 45, 77, 49, 78, 53, 79, 57, 80, 61, 81, 65, 82, 69, 83, 73, 84, 77, 85, 81, 86, 85, 87, 89, 88, 93, 89, 97, 90, 101, 97, 105, 98, 109, 99, 113, 100, 117, 101, 121, 102, 125, 103, 129, 104, 133, 105, 137, 106, 141, 107, 145, 108, 149, 109, 153, 110, 157, 111, 161, 112, 165, 113, 169, 114, 173, 115, 177, 116, 181, 117, 185, 118, 189, 119, 193, 120, 197, 121, 201, 122, 205, 181, 209, 192, 213, 193, 217, 194, 221, 195, 225, 196, 229, 197, 233, 198, 237, 199, 241, 200, 245, 201, 249, 202, 253, 203, 257, 204, 261, 205, 265, 206, 269, 207, 273, 208, 277, 209, 281, 210, 285, 211, 289, 212, 293, 213, 297, 214, 301, 216, 305, 217, 309, 218, 313, 219, 317, 220, 321, 221, 325, 222, 329, 224, 333, 225, 337, 226, 341, 227, 345, 228, 349, 229, 353, 230, 357, 231, 361, 232, 365, 233, 369, 234, 373, 235, 377, 236, 381, 237, 385, 238, 389, 239, 393, 240, 397, 241, 401, 242, 405, 243, 409, 244, 413, 245, 417, 246, 421, 248, 425, 249, 429, 250, 433, 251, 437, 252, 441, 253, 445, 254, 449, 255, 453, 256, 457, 257, 461, 258, 465, 259, 469, 260, 473, 261, 477, 262, 481, 263, 485, 264, 489, 265, 493, 266, 497, 267, 501, 268, 505, 269, 509, 270, 513, 271, 517, 272, 521, 273, 525, 274, 529, 275, 533, 276, 537, 277, 541, 278, 545, 279, 549, 280, 553, 281, 557, 282, 561, 283, 565, 284, 569, 285, 573, 286, 577, 287, 581, 288, 585, 289, 589, 290, 593, 291, 597, 292, 601, 293, 605, 294, 609, 295, 613, 296, 617, 297, 621, 298, 625, 299, 629, 300, 633, 301, 637, 302, 641, 303, 645, 306, 649, 307, 653, 308, 657, 309, 661, 310, 665, 311, 669, 313, 673, 314, 677, 315, 681, 316, 685, 317, 689, 318, 693, 319, 697, 320, 701, 321, 705, 322, 709, 323, 713, 324, 717, 325, 721, 326, 725, 327, 729, 328, 733, 330, 737, 331, 741, 332, 745, 333, 749, 334, 753, 335, 757, 336, 761, 337, 765, 338, 769, 339, 773, 340, 777, 341, 781, 342, 785, 343, 789, 344, 793, 345, 797, 346, 801, 347, 805, 348, 809, 349, 813, 350, 817, 351, 821, 352, 825, 353, 829, 354, 833, 355, 837, 356, 841, 357, 845, 358, 849, 359, 853, 360, 857, 361, 861, 362, 865, 363, 869, 364, 873, 365, 877, 366, 881, 367, 885, 368, 889, 369, 893, 370, 897, 371, 901, 372, 905, 373, 909, 374, 913, 375, 917, 376, 921, 377, 925, 378, 929, 379, 933, 380, 937, 381, 941, 382, 945, 384, 949, 385, 953, 386, 957, 387, 961, 388, 965, 389, 969, 390, 973, 391, 977, 392, 981, 393, 985, 394, 989, 395, 993, 396, 997, 398, 1001, 399, 1005, 400, 1009, 401, 1013, 402, 1017, 403, 1021, 404, 1025, 405, 1029, 406, 1033, 407, 1037, 408, 1041, 409, 1045, 410, 1049, 412, 1053, 413, 1057, 414, 1061, 415, 1065, 416, 1069, 417, 1073, 418, 1077, 419, 1081, 420, 1085, 421, 1089, 422, 1093, 423, 1097, 424, 1101, 425, 1105, 428, 1109, 429, 1113, 430, 1117, 431, 1121, 432, 1125, 433, 1129, 434, 1133, 435, 1137, 436, 1141, 437, 1145, 438, 1149, 439, 1153, 440, 1157, 441, 1161, 444, 1165, 445, 1169, 447, 1173, 452, 1177, 453, 1181, 454, 1185, 455, 1189, 456, 1193, 457, 1197, 458, 1201, 459, 1205, 460, 1209, 461, 1213, 462, 1217, 463, 1221, 464, 1225, 465, 1229, 466, 1233, 467, 1237, 468, 1241, 469, 1245, 470, 1249, 471, 1253, 472, 1257, 473, 1261, 474, 1265, 475, 1269, 476, 1273, 477, 1277, 478, 1281, 479, 1285, 480, 1289, 481, 1293, 482, 1297, 483, 1301, 484, 1305, 485, 1309, 486, 1313, 487, 1317, 488, 1321, 489, 1325, 490, 1329, 491, 1333, 492, 1337, 493, 1341, 494, 1345, 495, 1349, 497, 1353, 498, 1357, 499, 1361, 500, 1365, 501, 1369, 502, 1373, 503, 1377, 504, 
1381, 505, 1385, 506, 1389, 507, 1393, 508, 1397, 509, 1401, 510, 1405, 511, 1409, 512, 1413, 513, 1417, 514, 1421, 515, 1425, 516, 1429, 517, 1433, 518, 1437, 519, 1441, 520, 1445, 521, 1449, 522, 1453, 523, 1457, 524, 1461, 525, 1465, 526, 1469, 527, 1473, 528, 1477, 529, 1481, 530, 1485, 531, 1489, 532, 1493, 533, 1497, 534, 1501, 535, 1505, 536, 1509, 537, 1513, 538, 1517, 539, 1521, 540, 1525, 541, 1529, 542, 1533, 543, 1537, 544, 1541, 546, 1545, 547, 1549, 548, 1553, 549, 1557, 550, 1561, 551, 1565, 552, 1569, 553, 1573, 554, 1577, 555, 1581, 556, 1585, 557, 1589, 558, 1593, 559, 1597, 560, 1601, 561, 1605, 562, 1609, 563, 1613, 570, 1617, 571, 1621, 572, 1625, 573, 1629, 574, 1633, 577, 1637, 578, 1641, 579, 1645, 580, 1649, 581, 1653, 582, 1657, 583, 1661, 584, 1665, 585, 1669, 586, 1673, 587, 1677, 588, 1681, 589, 1685, 590, 1689, 591, 1693, 595, 1697, 596, 1701, 598, 1705, 599, 1709, 601, 1713, 603, 1717, 608, 1721, 611, 1725, 616, 1729, 617, 1733, 619, 1737, 623, 1741, 626, 1745, 629, 1749, 637, 1753, 640, 1757, 643, 1761, 648, 1765, 649, 1769, 650, 1773, 651, 1777, 652, 1781, 658, 1785, 837, 1789, 891, 1793, 892, 1797, 893, 1801, 902, 1805, 904, 1809, 905, 1813, 906, 1817, 908, 1821, 910, 1825, 911, 1829, 913, 1833, 914, 1837, 915, 1841, 916, 1845, 917, 1849, 918, 1853, 919, 1857, 920, 1861, 921, 1865, 922, 1869, 923, 1873, 924, 1877, 925, 1881, 926, 1885, 927, 1889, 928, 1893, 929, 1897, 931, 1901, 932, 1905, 933, 1909, 934, 1913, 935, 1917, 936, 1921, 937, 1925, 938, 1929, 939, 1933, 940, 1937, 941, 1941, 942, 1945, 943, 1949, 945, 1953, 946, 1957, 947, 1961, 948, 1965, 949, 1969, 950, 1973, 951, 1977, 952, 1981, 953, 1985, 954, 1989, 955, 1993, 956, 1997, 957, 2001, 958, 2005, 959, 2009, 960, 2013, 961, 2017, 962, 2021, 963, 2025, 964, 2029, 965, 2033, 966, 2037, 967, 2041, 968, 2045, 969, 2049, 970, 2053, 971, 2057, 972, 2061, 973, 2065, 974, 2069, 976, 2073, 977, 2077, 981, 2081, 982, 2085, 984, 2089, 985, 2093, 986, 2097, 987, 2101, 988, 2105, 989, 2109, 990, 2113, 991, 2117, 992, 2121, 993, 2125, 994, 2129, 995, 2133, 996, 2137, 997, 2141, 998, 2145, 999, 2149, 1000, 2153, 1001, 2157, 1002, 2161, 1003, 2165, 1004, 2169, 1005, 2173, 1006, 2177, 1007, 2181, 1008, 2185, 1009, 2189, 1010, 2193, 1013, 2197, 1015, 2201, 1016, 2205, 1017, 2209, 1018, 2213, 1019, 2217, 1021, 2221, 1022, 2225, 1023, 2229, 1024, 2233, 1025, 2237, 1026, 2241, 1027, 2245, 1028, 2249, 1029, 2253, 1030, 2257, 1031, 2261, 1032, 2265, 1033, 2269, 1034, 2273, 1035, 2277, 1036, 2281, 1037, 2285, 1038, 2289, 1039, 2293, 1040, 2297, 1041, 2301, 1042, 2305, 1043, 2309, 1044, 2313, 1045, 2317, 1046, 2321, 1047, 2325, 1048, 2329, 1049, 2333, 1050, 2337, 1051, 2341, 1052, 2345, 1053, 2349, 1054, 2353, 1055, 2357, 1056, 2361, 1057, 2365, 1058, 2369, 1059, 2373, 1060, 2377, 1061, 2381, 1062, 2385, 1063, 2389, 1064, 2393, 1065, 2397, 1066, 2401, 1067, 2405, 1068, 2409, 1069, 2413, 1070, 2417, 1071, 2421, 1072, 2425, 1073, 2429, 1074, 2433, 1075, 2437, 1076, 2441, 1077, 2445, 1078, 2449, 1079, 2453, 1080, 2457, 1081, 2461, 1082, 2465, 1083, 2469, 1084, 2473, 1085, 2477, 1086, 2481, 1087, 2485, 1088, 2489, 1089, 2493, 1090, 2497, 1091, 2501, 1092, 2505, 1093, 2509, 1094, 2513, 1095, 2517, 1096, 2521, 1097, 2525, 1098, 2529, 1099, 2533, 1100, 2537, 1101, 2541, 1102, 2545, 1103, 2549, 1104, 2553, 1105, 2557, 1106, 2561, 1107, 2565, 1108, 2569, 1109, 2573, 1110, 2577, 1111, 2581, 1112, 2585, 1113, 2589, 1114, 2593, 1115, 2597, 1116, 2601, 1117, 2605, 1118, 2609, 1119, 2613, 1120, 2617, 1121, 2621, 1122, 2625, 1123, 
2629, 1124, 2633, 1125, 2637, 1126, 2641, 1127, 2645, 1128, 2649, 1129, 2653, 1130, 2657, 1131, 2661, 1132, 2665, 1133, 2669, 1134, 2673, 1135, 2677, 1136, 2681, 1137, 2685, 1138, 2689, 1139, 2693, 1140, 2697, 1141, 2701, 1142, 2705, 1143, 2709, 1144, 2713, 1145, 2717, 1146, 2721, 1147, 2725, 1148, 2729, 1149, 2733, 1150, 2737, 1151, 2741, 1152, 2745, 1153, 2749, 1162, 2753, 1163, 2757, 1164, 2761, 1165, 2765, 1166, 2769, 1167, 2773, 1168, 2777, 1169, 2781, 1170, 2785, 1171, 2789, 1172, 2793, 1173, 2797, 1174, 2801, 1175, 2805, 1176, 2809, 1177, 2813, 1178, 2817, 1179, 2821, 1180, 2825, 1181, 2829, 1182, 2833, 1183, 2837, 1184, 2841, 1185, 2845, 1186, 2849, 1187, 2853, 1188, 2857, 1189, 2861, 1190, 2865, 1191, 2869, 1192, 2873, 1193, 2877, 1194, 2881, 1195, 2885, 1196, 2889, 1197, 2893, 1198, 2897, 1199, 2901, 1200, 2905, 1201, 2909, 1202, 2913, 1203, 2917, 1204, 2921, 1205, 2925, 1206, 2929, 1207, 2933, 1208, 2937, 1209, 2941, 1210, 2945, 1211, 2949, 1212, 2953, 1213, 2957, 1214, 2961, 1215, 2965, 1216, 2969, 1217, 2973, 1218, 2977, 1219, 2981, 1220, 2985, 1221, 2989, 1222, 2993, 1223, 2997, 1224, 3001, 1225, 3005, 1226, 3009, 1227, 3013, 1228, 3017, 1229, 3021, 1230, 3025, 1231, 3029, 1232, 3033, 1233, 3037, 1234, 3041, 1235, 3045, 1236, 3049, 1237, 3053, 1238, 3057, 1239, 3061, 1240, 3065, 1241, 3069, 1242, 3073, 1243, 3077, 1244, 3081, 1245, 3085, 1246, 3089, 1247, 3093, 1248, 3097, 1249, 3101, 1250, 3105, 1251, 3109, 1252, 3113, 1253, 3117, 1254, 3121, 1255, 3125, 1256, 3129, 1257, 3133, 1258, 3137, 1259, 3141, 1260, 3145, 1261, 3149, 1262, 3153, 1263, 3157, 1264, 3161, 1265, 3165, 1266, 3169, 1267, 3173, 1268, 3177, 1269, 3181, 1270, 3185, 1271, 3189, 1272, 3193, 1273, 3197, 1274, 3201, 1275, 3205, 1276, 3209, 1277, 3213, 1278, 3217, 1279, 3221, 1280, 3225, 1281, 3229, 1282, 3233, 1283, 3237, 1284, 3241, 1285, 3245, 1286, 3249, 1287, 3253, 1288, 3257, 1289, 3261, 1290, 3265, 1291, 3269, 1292, 3273, 1293, 3277, 1294, 3281, 1295, 3285, 1296, 3289, 1297, 3293, 1298, 3297, 1299, 3301, 1329, 3305, 1330, 3309, 1331, 3313, 1332, 3317, 1333, 3321, 1334, 3325, 1335, 3329, 1336, 3333, 1337, 3337, 1338, 3341, 1339, 3345, 1340, 3349, 1341, 3353, 1342, 3357, 1343, 3361, 1344, 3365, 1345, 3369, 1346, 3373, 1347, 3377, 1348, 3381, 1349, 3385, 1350, 3389, 1351, 3393, 1352, 3397, 1353, 3401, 1354, 3405, 1355, 3409, 1356, 3413, 1357, 3417, 1358, 3421, 1359, 3425, 1360, 3429, 1361, 3433, 1362, 3437, 1363, 3441, 1364, 3445, 1365, 3449, 1366, 3453, 1377, 3457, 1378, 3461, 1379, 3465, 1380, 3469, 1381, 3473, 1382, 3477, 1383, 3481, 1384, 3485, 1385, 3489, 1386, 3493, 1387, 3497, 1388, 3501, 1389, 3505, 1390, 3509, 1391, 3513, 1392, 3517, 1393, 3521, 1394, 3525, 1395, 3529, 1396, 3533, 1397, 3537, 1398, 3541, 1399, 3545, 1400, 3549, 1401, 3553, 1402, 3557, 1403, 3561, 1404, 3565, 1405, 3569, 1406, 3573, 1407, 3577, 1408, 3581, 1409, 3585, 1410, 3589, 1411, 3593, 1412, 3597, 1413, 3601, 1414, 3605, 4256, 3609, 4257, 3613, 4258, 3617, 4259, 3621, 4260, 3625, 4261, 3629, 4262, 3633, 4263, 3637, 4264, 3641, 4265, 3645, 4266, 3649, 4267, 3653, 4268, 3657, 4269, 3661, 4270, 3665, 4271, 3669, 4272, 3673, 4273, 3677, 4274, 3681, 4275, 3685, 4276, 3689, 4277, 3693, 4278, 3697, 4279, 3701, 4280, 3705, 4281, 3709, 4282, 3713, 4283, 3717, 4284, 3721, 4285, 3725, 4286, 3729, 4287, 3733, 4288, 3737, 4289, 3741, 4290, 3745, 4291, 3749, 4292, 3753, 4293, 3757, 7549, 3761, 7680, 3765, 7681, 3769, 7682, 3773, 7683, 3777, 7684, 3781, 7685, 3785, 7686, 3789, 7687, 3793, 7688, 3797, 7689, 3801, 7690, 3805, 7691, 3809, 7692, 
3813, 7693, 3817, 7694, 3821, 7695, 3825, 7696, 3829, 7697, 3833, 7698, 3837, 7699, 3841, 7700, 3845, 7701, 3849, 7702, 3853, 7703, 3857, 7704, 3861, 7705, 3865, 7706, 3869, 7707, 3873, 7708, 3877, 7709, 3881, 7710, 3885, 7711, 3889, 7712, 3893, 7713, 3897, 7714, 3901, 7715, 3905, 7716, 3909, 7717, 3913, 7718, 3917, 7719, 3921, 7720, 3925, 7721, 3929, 7722, 3933, 7723, 3937, 7724, 3941, 7725, 3945, 7726, 3949, 7727, 3953, 7728, 3957, 7729, 3961, 7730, 3965, 7731, 3969, 7732, 3973, 7733, 3977, 7734, 3981, 7735, 3985, 7736, 3989, 7737, 3993, 7738, 3997, 7739, 4001, 7740, 4005, 7741, 4009, 7742, 4013, 7743, 4017, 7744, 4021, 7745, 4025, 7746, 4029, 7747, 4033, 7748, 4037, 7749, 4041, 7750, 4045, 7751, 4049, 7752, 4053, 7753, 4057, 7754, 4061, 7755, 4065, 7756, 4069, 7757, 4073, 7758, 4077, 7759, 4081, 7760, 4085, 7761, 4089, 7762, 4093, 7763, 4097, 7764, 4101, 7765, 4105, 7766, 4109, 7767, 4113, 7768, 4117, 7769, 4121, 7770, 4125, 7771, 4129, 7772, 4133, 7773, 4137, 7774, 4141, 7775, 4145, 7776, 4149, 7777, 4153, 7778, 4157, 7779, 4161, 7780, 4165, 7781, 4169, 7782, 4173, 7783, 4177, 7784, 4181, 7785, 4185, 7786, 4189, 7787, 4193, 7788, 4197, 7789, 4201, 7790, 4205, 7791, 4209, 7792, 4213, 7793, 4217, 7794, 4221, 7795, 4225, 7796, 4229, 7797, 4233, 7798, 4237, 7799, 4241, 7800, 4245, 7801, 4249, 7802, 4253, 7803, 4257, 7804, 4261, 7805, 4265, 7806, 4269, 7807, 4273, 7808, 4277, 7809, 4281, 7810, 4285, 7811, 4289, 7812, 4293, 7813, 4297, 7814, 4301, 7815, 4305, 7816, 4309, 7817, 4313, 7818, 4317, 7819, 4321, 7820, 4325, 7821, 4329, 7822, 4333, 7823, 4337, 7824, 4341, 7825, 4345, 7826, 4349, 7827, 4353, 7828, 4357, 7829, 4361, 7835, 4365, 7840, 4369, 7841, 4373, 7842, 4377, 7843, 4381, 7844, 4385, 7845, 4389, 7846, 4393, 7847, 4397, 7848, 4401, 7849, 4405, 7850, 4409, 7851, 4413, 7852, 4417, 7853, 4421, 7854, 4425, 7855, 4429, 7856, 4433, 7857, 4437, 7858, 4441, 7859, 4445, 7860, 4449, 7861, 4453, 7862, 4457, 7863, 4461, 7864, 4465, 7865, 4469, 7866, 4473, 7867, 4477, 7868, 4481, 7869, 4485, 7870, 4489, 7871, 4493, 7872, 4497, 7873, 4501, 7874, 4505, 7875, 4509, 7876, 4513, 7877, 4517, 7878, 4521, 7879, 4525, 7880, 4529, 7881, 4533, 7882, 4537, 7883, 4541, 7884, 4545, 7885, 4549, 7886, 4553, 7887, 4557, 7888, 4561, 7889, 4565, 7890, 4569, 7891, 4573, 7892, 4577, 7893, 4581, 7894, 4585, 7895, 4589, 7896, 4593, 7897, 4597, 7898, 4601, 7899, 4605, 7900, 4609, 7901, 4613, 7902, 4617, 7903, 4621, 7904, 4625, 7905, 4629, 7906, 4633, 7907, 4637, 7908, 4641, 7909, 4645, 7910, 4649, 7911, 4653, 7912, 4657, 7913, 4661, 7914, 4665, 7915, 4669, 7916, 4673, 7917, 4677, 7918, 4681, 7919, 4685, 7920, 4689, 7921, 4693, 7922, 4697, 7923, 4701, 7924, 4705, 7925, 4709, 7926, 4713, 7927, 4717, 7928, 4721, 7929, 4725, 7936, 4729, 7937, 4733, 7938, 4737, 7939, 4741, 7940, 4745, 7941, 4749, 7942, 4753, 7943, 4757, 7944, 4761, 7945, 4765, 7946, 4769, 7947, 4773, 7948, 4777, 7949, 4781, 7950, 4785, 7951, 4789, 7952, 4793, 7953, 4797, 7954, 4801, 7955, 4805, 7956, 4809, 7957, 4813, 7960, 4817, 7961, 4821, 7962, 4825, 7963, 4829, 7964, 4833, 7965, 4837, 7968, 4841, 7969, 4845, 7970, 4849, 7971, 4853, 7972, 4857, 7973, 4861, 7974, 4865, 7975, 4869, 7976, 4873, 7977, 4877, 7978, 4881, 7979, 4885, 7980, 4889, 7981, 4893, 7982, 4897, 7983, 4901, 7984, 4905, 7985, 4909, 7986, 4913, 7987, 4917, 7988, 4921, 7989, 4925, 7990, 4929, 7991, 4933, 7992, 4937, 7993, 4941, 7994, 4945, 7995, 4949, 7996, 4953, 7997, 4957, 7998, 4961, 7999, 4965, 8000, 4969, 8001, 4973, 8002, 4977, 8003, 4981, 8004, 4985, 8005, 4989, 8008, 4993, 8009, 
4997, 8010, 5001, 8011, 5005, 8012, 5009, 8013, 5013, 8017, 5017, 8019, 5021, 8021, 5025, 8023, 5029, 8025, 5033, 8027, 5037, 8029, 5041, 8031, 5045, 8032, 5049, 8033, 5053, 8034, 5057, 8035, 5061, 8036, 5065, 8037, 5069, 8038, 5073, 8039, 5077, 8040, 5081, 8041, 5085, 8042, 5089, 8043, 5093, 8044, 5097, 8045, 5101, 8046, 5105, 8047, 5109, 8048, 5113, 8049, 5117, 8050, 5121, 8051, 5125, 8052, 5129, 8053, 5133, 8054, 5137, 8055, 5141, 8056, 5145, 8057, 5149, 8058, 5153, 8059, 5157, 8060, 5161, 8061, 5165, 8112, 5169, 8113, 5173, 8120, 5177, 8121, 5181, 8122, 5185, 8123, 5189, 8126, 5193, 8136, 5197, 8137, 5201, 8138, 5205, 8139, 5209, 8144, 5213, 8145, 5217, 8152, 5221, 8153, 5225, 8154, 5229, 8155, 5233, 8160, 5237, 8161, 5241, 8165, 5245, 8168, 5249, 8169, 5253, 8170, 5257, 8171, 5261, 8172, 5265, 8184, 5269, 8185, 5273, 8186, 5277, 8187, 5281, 8498, 5285, 8526, 5289, 8544, 5293, 8545, 5297, 8546, 5301, 8547, 5305, 8548, 5309, 8549, 5313, 8550, 5317, 8551, 5321, 8552, 5325, 8553, 5329, 8554, 5333, 8555, 5337, 8556, 5341, 8557, 5345, 8558, 5349, 8559, 5353, 8560, 5357, 8561, 5361, 8562, 5365, 8563, 5369, 8564, 5373, 8565, 5377, 8566, 5381, 8567, 5385, 8568, 5389, 8569, 5393, 8570, 5397, 8571, 5401, 8572, 5405, 8573, 5409, 8574, 5413, 8575, 5417, 8579, 5421, 8580, 5425, 9398, 5429, 9399, 5433, 9400, 5437, 9401, 5441, 9402, 5445, 9403, 5449, 9404, 5453, 9405, 5457, 9406, 5461, 9407, 5465, 9408, 5469, 9409, 5473, 9410, 5477, 9411, 5481, 9412, 5485, 9413, 5489, 9414, 5493, 9415, 5497, 9416, 5501, 9417, 5505, 9418, 5509, 9419, 5513, 9420, 5517, 9421, 5521, 9422, 5525, 9423, 5529, 9424, 5533, 9425, 5537, 9426, 5541, 9427, 5545, 9428, 5549, 9429, 5553, 9430, 5557, 9431, 5561, 9432, 5565, 9433, 5569, 9434, 5573, 9435, 5577, 9436, 5581, 9437, 5585, 9438, 5589, 9439, 5593, 9440, 5597, 9441, 5601, 9442, 5605, 9443, 5609, 9444, 5613, 9445, 5617, 9446, 5621, 9447, 5625, 9448, 5629, 9449, 5633, 11264, 5637, 11265, 5641, 11266, 5645, 11267, 5649, 11268, 5653, 11269, 5657, 11270, 5661, 11271, 5665, 11272, 5669, 11273, 5673, 11274, 5677, 11275, 5681, 11276, 5685, 11277, 5689, 11278, 5693, 11279, 5697, 11280, 5701, 11281, 5705, 11282, 5709, 11283, 5713, 11284, 5717, 11285, 5721, 11286, 5725, 11287, 5729, 11288, 5733, 11289, 5737, 11290, 5741, 11291, 5745, 11292, 5749, 11293, 5753, 11294, 5757, 11295, 5761, 11296, 5765, 11297, 5769, 11298, 5773, 11299, 5777, 11300, 5781, 11301, 5785, 11302, 5789, 11303, 5793, 11304, 5797, 11305, 5801, 11306, 5805, 11307, 5809, 11308, 5813, 11309, 5817, 11310, 5821, 11312, 5825, 11313, 5829, 11314, 5833, 11315, 5837, 11316, 5841, 11317, 5845, 11318, 5849, 11319, 5853, 11320, 5857, 11321, 5861, 11322, 5865, 11323, 5869, 11324, 5873, 11325, 5877, 11326, 5881, 11327, 5885, 11328, 5889, 11329, 5893, 11330, 5897, 11331, 5901, 11332, 5905, 11333, 5909, 11334, 5913, 11335, 5917, 11336, 5921, 11337, 5925, 11338, 5929, 11339, 5933, 11340, 5937, 11341, 5941, 11342, 5945, 11343, 5949, 11344, 5953, 11345, 5957, 11346, 5961, 11347, 5965, 11348, 5969, 11349, 5973, 11350, 5977, 11351, 5981, 11352, 5985, 11353, 5989, 11354, 5993, 11355, 5997, 11356, 6001, 11357, 6005, 11358, 6009, 11360, 6013, 11361, 6017, 11362, 6021, 11363, 6025, 11364, 6029, 11365, 6033, 11366, 6037, 11367, 6041, 11368, 6045, 11369, 6049, 11370, 6053, 11371, 6057, 11372, 6061, 11381, 6065, 11382, 6069, 11392, 6073, 11393, 6077, 11394, 6081, 11395, 6085, 11396, 6089, 11397, 6093, 11398, 6097, 11399, 6101, 11400, 6105, 11401, 6109, 11402, 6113, 11403, 6117, 11404, 6121, 11405, 6125, 11406, 6129, 11407, 6133, 11408, 6137, 
11409, 6141, 11410, 6145, 11411, 6149, 11412, 6153, 11413, 6157, 11414, 6161, 11415, 6165, 11416, 6169, 11417, 6173, 11418, 6177, 11419, 6181, 11420, 6185, 11421, 6189, 11422, 6193, 11423, 6197, 11424, 6201, 11425, 6205, 11426, 6209, 11427, 6213, 11428, 6217, 11429, 6221, 11430, 6225, 11431, 6229, 11432, 6233, 11433, 6237, 11434, 6241, 11435, 6245, 11436, 6249, 11437, 6253, 11438, 6257, 11439, 6261, 11440, 6265, 11441, 6269, 11442, 6273, 11443, 6277, 11444, 6281, 11445, 6285, 11446, 6289, 11447, 6293, 11448, 6297, 11449, 6301, 11450, 6305, 11451, 6309, 11452, 6313, 11453, 6317, 11454, 6321, 11455, 6325, 11456, 6329, 11457, 6333, 11458, 6337, 11459, 6341, 11460, 6345, 11461, 6349, 11462, 6353, 11463, 6357, 11464, 6361, 11465, 6365, 11466, 6369, 11467, 6373, 11468, 6377, 11469, 6381, 11470, 6385, 11471, 6389, 11472, 6393, 11473, 6397, 11474, 6401, 11475, 6405, 11476, 6409, 11477, 6413, 11478, 6417, 11479, 6421, 11480, 6425, 11481, 6429, 11482, 6433, 11483, 6437, 11484, 6441, 11485, 6445, 11486, 6449, 11487, 6453, 11488, 6457, 11489, 6461, 11490, 6465, 11491, 6469, 11520, 6473, 11521, 6477, 11522, 6481, 11523, 6485, 11524, 6489, 11525, 6493, 11526, 6497, 11527, 6501, 11528, 6505, 11529, 6509, 11530, 6513, 11531, 6517, 11532, 6521, 11533, 6525, 11534, 6529, 11535, 6533, 11536, 6537, 11537, 6541, 11538, 6545, 11539, 6549, 11540, 6553, 11541, 6557, 11542, 6561, 11543, 6565, 11544, 6569, 11545, 6573, 11546, 6577, 11547, 6581, 11548, 6585, 11549, 6589, 11550, 6593, 11551, 6597, 11552, 6601, 11553, 6605, 11554, 6609, 11555, 6613, 11556, 6617, 11557, 6621 }; // NOLINT
+static const MultiCharacterSpecialCase<4> kEcma262UnCanonicalizeMultiStrings1[] = { {2, {65313, 65345}}, {2, {65314, 65346}}, {2, {65315, 65347}}, {2, {65316, 65348}}, {2, {65317, 65349}}, {2, {65318, 65350}}, {2, {65319, 65351}}, {2, {65320, 65352}}, {2, {65321, 65353}}, {2, {65322, 65354}}, {2, {65323, 65355}}, {2, {65324, 65356}}, {2, {65325, 65357}}, {2, {65326, 65358}}, {2, {65327, 65359}}, {2, {65328, 65360}}, {2, {65329, 65361}}, {2, {65330, 65362}}, {2, {65331, 65363}}, {2, {65332, 65364}}, {2, {65333, 65365}}, {2, {65334, 65366}}, {2, {65335, 65367}}, {2, {65336, 65368}}, {2, {65337, 65369}}, {2, {65338, 65370}}, {2, {65313, 65345}}, {2, {65314, 65346}}, {2, {65315, 65347}}, {2, {65316, 65348}}, {2, {65317, 65349}}, {2, {65318, 65350}}, {2, {65319, 65351}}, {2, {65320, 65352}}, {2, {65321, 65353}}, {2, {65322, 65354}}, {2, {65323, 65355}}, {2, {65324, 65356}}, {2, {65325, 65357}}, {2, {65326, 65358}}, {2, {65327, 65359}}, {2, {65328, 65360}}, {2, {65329, 65361}}, {2, {65330, 65362}}, {2, {65331, 65363}}, {2, {65332, 65364}}, {2, {65333, 65365}}, {2, {65334, 65366}}, {2, {65335, 65367}}, {2, {65336, 65368}}, {2, {65337, 65369}}, {2, {65338, 65370}}, {0, {0}} }; // NOLINT
+static const uint16_t kEcma262UnCanonicalizeTable1Size = 52;
+static const int32_t kEcma262UnCanonicalizeTable1[104] = { 32545, 1, 32546, 5, 32547, 9, 32548, 13, 32549, 17, 32550, 21, 32551, 25, 32552, 29, 32553, 33, 32554, 37, 32555, 41, 32556, 45, 32557, 49, 32558, 53, 32559, 57, 32560, 61, 32561, 65, 32562, 69, 32563, 73, 32564, 77, 32565, 81, 32566, 85, 32567, 89, 32568, 93, 32569, 97, 32570, 101, 32577, 105, 32578, 109, 32579, 113, 32580, 117, 32581, 121, 32582, 125, 32583, 129, 32584, 133, 32585, 137, 32586, 141, 32587, 145, 32588, 149, 32589, 153, 32590, 157, 32591, 161, 32592, 165, 32593, 169, 32594, 173, 32595, 177, 32596, 181, 32597, 185, 32598, 189, 32599, 193, 32600, 197, 32601, 201, 32602, 205 }; // NOLINT
+static const MultiCharacterSpecialCase<4> kEcma262UnCanonicalizeMultiStrings2[] = { {2, {66560, 66600}}, {2, {66561, 66601}}, {2, {66562, 66602}}, {2, {66563, 66603}}, {2, {66564, 66604}}, {2, {66565, 66605}}, {2, {66566, 66606}}, {2, {66567, 66607}}, {2, {66568, 66608}}, {2, {66569, 66609}}, {2, {66570, 66610}}, {2, {66571, 66611}}, {2, {66572, 66612}}, {2, {66573, 66613}}, {2, {66574, 66614}}, {2, {66575, 66615}}, {2, {66576, 66616}}, {2, {66577, 66617}}, {2, {66578, 66618}}, {2, {66579, 66619}}, {2, {66580, 66620}}, {2, {66581, 66621}}, {2, {66582, 66622}}, {2, {66583, 66623}}, {2, {66584, 66624}}, {2, {66585, 66625}}, {2, {66586, 66626}}, {2, {66587, 66627}}, {2, {66588, 66628}}, {2, {66589, 66629}}, {2, {66590, 66630}}, {2, {66591, 66631}}, {2, {66592, 66632}}, {2, {66593, 66633}}, {2, {66594, 66634}}, {2, {66595, 66635}}, {2, {66596, 66636}}, {2, {66597, 66637}}, {2, {66598, 66638}}, {2, {66599, 66639}}, {2, {66560, 66600}}, {2, {66561, 66601}}, {2, {66562, 66602}}, {2, {66563, 66603}}, {2, {66564, 66604}}, {2, {66565, 66605}}, {2, {66566, 66606}}, {2, {66567, 66607}}, {2, {66568, 66608}}, {2, {66569, 66609}}, {2, {66570, 66610}}, {2, {66571, 66611}}, {2, {66572, 66612}}, {2, {66573, 66613}}, {2, {66574, 66614}}, {2, {66575, 66615}}, {2, {66576, 66616}}, {2, {66577, 66617}}, {2, {66578, 66618}}, {2, {66579, 66619}}, {2, {66580, 66620}}, {2, {66581, 66621}}, {2, {66582, 66622}}, {2, {66583, 66623}}, {2, {66584, 66624}}, {2, {66585, 66625}}, {2, {66586, 66626}}, {2, {66587, 66627}}, {2, {66588, 66628}}, {2, {66589, 66629}}, {2, {66590, 66630}}, {2, {66591, 66631}}, {2, {66592, 66632}}, {2, {66593, 66633}}, {2, {66594, 66634}}, {2, {66595, 66635}}, {2, {66596, 66636}}, {2, {66597, 66637}}, {2, {66598, 66638}}, {2, {66599, 66639}}, {0, {0}} }; // NOLINT
+static const uint16_t kEcma262UnCanonicalizeTable2Size = 80;
+static const int32_t kEcma262UnCanonicalizeTable2[160] = { 1024, 1, 1025, 5, 1026, 9, 1027, 13, 1028, 17, 1029, 21, 1030, 25, 1031, 29, 1032, 33, 1033, 37, 1034, 41, 1035, 45, 1036, 49, 1037, 53, 1038, 57, 1039, 61, 1040, 65, 1041, 69, 1042, 73, 1043, 77, 1044, 81, 1045, 85, 1046, 89, 1047, 93, 1048, 97, 1049, 101, 1050, 105, 1051, 109, 1052, 113, 1053, 117, 1054, 121, 1055, 125, 1056, 129, 1057, 133, 1058, 137, 1059, 141, 1060, 145, 1061, 149, 1062, 153, 1063, 157, 1064, 161, 1065, 165, 1066, 169, 1067, 173, 1068, 177, 1069, 181, 1070, 185, 1071, 189, 1072, 193, 1073, 197, 1074, 201, 1075, 205, 1076, 209, 1077, 213, 1078, 217, 1079, 221, 1080, 225, 1081, 229, 1082, 233, 1083, 237, 1084, 241, 1085, 245, 1086, 249, 1087, 253, 1088, 257, 1089, 261, 1090, 265, 1091, 269, 1092, 273, 1093, 277, 1094, 281, 1095, 285, 1096, 289, 1097, 293, 1098, 297, 1099, 301, 1100, 305, 1101, 309, 1102, 313, 1103, 317 }; // NOLINT
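+// The UnCanonicalize tables map a character to its case-equivalence class:
+// the table values appear to index entries in the MultiStrings arrays above,
+// so all characters that canonicalize to the same form can be enumerated
+// (presumably for case-insensitive regexp matching).  As with Canonicalize,
+// Convert() selects the chunk table from c >> 15 and defers to LookupMapping.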
+int Ecma262UnCanonicalize::Convert(uchar c,
+                      uchar n,
+                      uchar* result,
+                      bool* allow_caching_ptr) {
+  int chunk_index = c >> 15;
+  switch (chunk_index) {
+    case 0: return LookupMapping(kEcma262UnCanonicalizeTable0,
+                                     kEcma262UnCanonicalizeTable0Size,
+                                     kEcma262UnCanonicalizeMultiStrings0,
+                                     c,
+                                     n,
+                                     result,
+                                     allow_caching_ptr);
+    case 1: return LookupMapping(kEcma262UnCanonicalizeTable1,
+                                     kEcma262UnCanonicalizeTable1Size,
+                                     kEcma262UnCanonicalizeMultiStrings1,
+                                     c,
+                                     n,
+                                     result,
+                                     allow_caching_ptr);
+    case 2: return LookupMapping(kEcma262UnCanonicalizeTable2,
+                                     kEcma262UnCanonicalizeTable2Size,
+                                     kEcma262UnCanonicalizeMultiStrings2,
+                                     c,
+                                     n,
+                                     result,
+                                     allow_caching_ptr);
+    default: return 0;
+  }
+}
+
+static const MultiCharacterSpecialCase<1> kCanonicalizationRangeMultiStrings0[] = { {0, {0}} }; // NOLINT
+static const uint16_t kCanonicalizationRangeTable0Size = 1831;
+static const int32_t kCanonicalizationRangeTable0[3662] = { 0, 67109124, 1073741825, 0, 64, 0, 65, 67108708, 1073741890, -260, 90, -260, 91, 67108524, 1073741916, -364, 96, -364, 97, 67108580, 1073741922, -388, 122, -388, 123, 67108604, 1073741948, -492, 180, -492, 181, 67108144, 182, 67108176, 1073742007, -728, 191, -728, 192, 67108188, 1073742017, -768, 214, -768, 215, 67108008, 216, 67108028, 1073742041, -864, 222, -864, 223, 67107976, 224, 67108060, 1073742049, -896, 246, -896, 247, 67107880, 248, 67107900, 1073742073, -992, 254, -992, 255, 67107848, 256, 67107844, 257, 67107840, 258, 67107836, 259, 67107832, 260, 67107828, 261, 67107824, 262, 67107820, 263, 67107816, 264, 67107812, 265, 67107808, 266, 67107804, 267, 67107800, 268, 67107796, 269, 67107792, 270, 67107788, 271, 67107784, 272, 67107780, 273, 67107776, 274, 67107772, 275, 67107768, 276, 67107764, 277, 67107760, 278, 67107756, 279, 67107752, 280, 67107748, 281, 67107744, 282, 67107740, 283, 67107736, 284, 67107732, 285, 67107728, 286, 67107724, 287, 67107720, 288, 67107716, 289, 67107712, 290, 67107708, 291, 67107704, 292, 67107700, 293, 67107696, 294, 67107692, 295, 67107688, 296, 67107684, 297, 67107680, 298, 67107676, 299, 67107672, 300, 67107668, 301, 67107664, 302, 67107660, 1073742127, 67107656, 304, 67107656, 305, -1216, 306, 67107644, 307, 67107640, 308, 67107636, 309, 67107632, 310, 67107628, 311, 67107624, 312, 67107620, 313, 67107616, 314, 67107612, 315, 67107608, 316, 67107604, 317, 67107600, 318, 67107596, 319, 67107592, 320, 67107588, 321, 67107584, 322, 67107580, 323, 67107576, 324, 67107572, 325, 67107568, 326, 67107564, 327, 67107560, 328, 67107556, 329, 67107552, 330, 67107548, 331, 67107544, 332, 67107540, 333, 67107536, 334, 67107532, 335, 67107528, 336, 67107524, 337, 67107520, 338, 67107516, 339, 67107512, 340, 67107508, 341, 67107504, 342, 67107500, 343, 67107496, 344, 67107492, 345, 67107488, 346, 67107484, 347, 67107480, 348, 67107476, 349, 67107472, 350, 67107468, 351, 67107464, 352, 67107460, 353, 67107456, 354, 67107452, 355, 67107448, 356, 67107444, 357, 67107440, 358, 67107436, 359, 67107432, 360, 67107428, 361, 67107424, 362, 67107420, 363, 67107416, 364, 67107412, 365, 67107408, 366, 67107404, 367, 67107400, 368, 67107396, 369, 67107392, 370, 67107388, 371, 67107384, 372, 67107380, 373, 67107376, 374, 67107372, 375, 67107368, 376, 67107364, 377, 67107360, 378, 67107356, 379, 67107352, 380, 67107348, 381, 67107344, 382, 67107340, 383, 67107336, 384, 67107332, 385, 67107328, 386, 67107324, 387, 67107320, 388, 67107316, 389, 67107312, 390, 67107308, 391, 67107304, 1073742216, 67107300, 393, 67107300, 394, -1572, 395, 67107288, 396, 67107284, 397, 67107280, 398, 67107276, 399, 67107272, 400, 67107268, 401, 67107264, 402, 67107260, 403, 67107256, 404, 67107252, 405, 67107248, 406, 67107244, 407, 67107240, 408, 67107236, 409, 67107232, 410, 67107228, 411, 67107224, 412, 67107220, 413, 67107216, 414, 67107212, 415, 67107208, 416, 67107204, 417, 67107200, 418, 67107196, 419, 67107192, 420, 67107188, 421, 67107184, 422, 67107180, 423, 67107176, 424, 67107172, 1073742249, 67107168, 426, 67107168, 427, -1704, 428, 67107156, 429, 67107152, 430, 67107148, 431, 67107144, 1073742256, 67107140, 433, 67107140, 434, -1732, 435, 67107128, 436, 67107124, 437, 67107120, 438, 67107116, 439, 67107112, 440, 67107108, 1073742265, 67107104, 442, 67107104, 443, -1768, 444, 67107092, 445, 67107088, 446, 67107084, 447, 67107080, 448, 67107088, 1073742273, -1792, 451, -1792, 452, 67107060, 453, 67107056, 454, 67107052, 
455, 67107048, 456, 67107044, 457, 67107040, 458, 67107036, 459, 67107032, 460, 67107028, 461, 67107024, 462, 67107020, 463, 67107016, 464, 67107012, 465, 67107008, 466, 67107004, 467, 67107000, 468, 67106996, 469, 67106992, 470, 67106988, 471, 67106984, 472, 67106980, 473, 67106976, 474, 67106972, 475, 67106968, 476, 67106964, 477, 67106960, 478, 67106956, 479, 67106952, 480, 67106948, 481, 67106944, 482, 67106940, 483, 67106936, 484, 67106932, 485, 67106928, 486, 67106924, 487, 67106920, 488, 67106916, 489, 67106912, 490, 67106908, 491, 67106904, 492, 67106900, 493, 67106896, 494, 67106892, 495, 67106888, 496, 67106884, 497, 67106880, 498, 67106876, 499, 67106872, 500, 67106868, 501, 67106864, 502, 67106860, 503, 67106856, 504, 67106852, 505, 67106848, 506, 67106844, 507, 67106840, 508, 67106836, 509, 67106832, 510, 67106828, 511, 67106824, 512, 67106820, 513, 67106816, 514, 67106812, 515, 67106808, 516, 67106804, 517, 67106800, 518, 67106796, 519, 67106792, 520, 67106788, 521, 67106784, 522, 67106780, 523, 67106776, 524, 67106772, 525, 67106768, 526, 67106764, 527, 67106760, 528, 67106756, 529, 67106752, 530, 67106748, 531, 67106744, 532, 67106740, 533, 67106736, 534, 67106732, 535, 67106728, 536, 67106724, 537, 67106720, 538, 67106716, 539, 67106712, 540, 67106708, 541, 67106704, 542, 67106700, 543, 67106696, 544, 67106692, 545, 67106688, 546, 67106684, 547, 67106680, 548, 67106676, 549, 67106672, 550, 67106668, 551, 67106664, 552, 67106660, 553, 67106656, 554, 67106652, 555, 67106648, 556, 67106644, 557, 67106640, 558, 67106636, 559, 67106632, 560, 67106628, 561, 67106624, 562, 67106620, 563, 67106616, 564, 67106632, 1073742389, -2256, 569, -2256, 570, 67106588, 571, 67106584, 572, 67106580, 573, 67106576, 1073742398, 67106572, 575, 67106572, 576, -2300, 577, 67106560, 578, 67106556, 579, 67106552, 580, 67106548, 581, 67106544, 582, 67106540, 583, 67106536, 584, 67106532, 585, 67106528, 586, 67106524, 587, 67106520, 588, 67106516, 589, 67106512, 590, 67106508, 591, 67106504, 592, 67106508, 1073742417, -2368, 594, -2368, 595, 67106488, 596, 67106484, 1073742421, 67106480, 598, 67106480, 599, -2392, 600, 67106468, 601, 67106464, 602, 67106460, 603, 67106456, 604, 67106464, 1073742429, -2416, 607, -2416, 1073742432, 67106436, 609, 67106436, 610, -2436, 611, 67106424, 612, 67106432, 1073742437, -2448, 615, -2448, 616, 67106404, 617, 67106400, 618, 67106396, 619, 67106392, 620, 67106396, 1073742445, -2480, 622, -2480, 1073742447, 67106376, 624, 67106376, 625, -2496, 1073742450, 67106364, 627, 67106364, 628, -2508, 629, 67106352, 630, 67106372, 1073742455, -2520, 636, -2520, 1073742461, 67106320, 638, 67106320, 639, -2552, 1073742464, 67106308, 641, 67106308, 642, -2564, 643, 67106296, 644, 67106304, 1073742469, -2576, 647, -2576, 648, 67106276, 1073742473, 67106272, 650, 67106272, 651, -2600, 652, 67106260, 653, 67106272, 1073742478, -2612, 657, -2612, 658, 67106236, 659, 67106940, 1073742484, -2636, 836, -2636, 837, 67105520, 838, 67105724, 1073742663, -3352, 879, -3352, 1073742708, -3352, 885, -3352, 890, -3352, 891, 67105312, 1073742716, -3564, 893, -3564, 894, 67105320, 1073742724, -3576, 901, -3576, 902, 67105260, 903, 67105256, 904, 67105260, 1073742729, -3616, 906, -3616, 908, 67105236, 910, 67105232, 911, -3640, 912, 67105220, 913, 67105216, 1073742738, 67105212, 915, 67105212, 916, -3660, 1073742741, 67105200, 918, 67105200, 919, -3672, 920, 67105188, 921, 67105184, 922, 67105180, 923, 67105176, 924, 67105172, 925, 67105176, 1073742750, -3700, 927, -3700, 928, 67105156, 929, 
67105152, 1073742755, 67105144, 932, 67105144, 933, -3728, 934, 67105132, 935, 67105144, 1073742760, -3740, 939, -3740, 940, 67105108, 941, 67105112, 1073742766, -3764, 943, -3764, 944, 67105092, 945, 67105088, 1073742770, 67105084, 947, 67105084, 948, -3788, 1073742773, 67105072, 950, 67105072, 951, -3800, 952, 67105060, 953, 67105056, 954, 67105052, 955, 67105048, 956, 67105044, 957, 67105048, 1073742782, -3828, 959, -3828, 960, 67105028, 961, 67105024, 962, 67105020, 1073742787, 67105016, 964, 67105016, 965, -3856, 966, 67105004, 967, 67105016, 1073742792, -3868, 971, -3868, 1073742796, 67104980, 973, 67104980, 974, -3892, 976, 67104964, 977, 67104960, 978, 67104964, 1073742803, -3912, 980, -3912, 981, 67104944, 982, 67104940, 983, 67104936, 984, 67104932, 985, 67104928, 986, 67104924, 987, 67104920, 988, 67104916, 989, 67104912, 990, 67104908, 991, 67104904, 992, 67104900, 993, 67104896, 994, 67104892, 995, 67104888, 996, 67104884, 997, 67104880, 998, 67104876, 999, 67104872, 1000, 67104868, 1001, 67104864, 1002, 67104860, 1003, 67104856, 1004, 67104852, 1005, 67104848, 1006, 67104844, 1007, 67104840, 1008, 67104836, 1009, 67104832, 1073742834, 67104828, 1011, 67104828, 1012, -4044, 1013, 67104816, 1014, 67104812, 1015, 67104808, 1016, 67104804, 1017, 67104800, 1018, 67104796, 1019, 67104792, 1020, 67104788, 1021, 67104792, 1073742846, -4084, 1023, -4084, 1024, 67104832, 1073742849, -4096, 1039, -4096, 1040, 67104832, 1073742865, -4160, 1071, -4160, 1072, 67104704, 1073742897, -4288, 1103, -4288, 1104, 67104512, 1073742929, -4416, 1119, -4416, 1120, 67104388, 1121, 67104384, 1122, 67104380, 1123, 67104376, 1124, 67104372, 1125, 67104368, 1126, 67104364, 1127, 67104360, 1128, 67104356, 1129, 67104352, 1130, 67104348, 1131, 67104344, 1132, 67104340, 1133, 67104336, 1134, 67104332, 1135, 67104328, 1136, 67104324, 1137, 67104320, 1138, 67104316, 1139, 67104312, 1140, 67104308, 1141, 67104304, 1142, 67104300, 1143, 67104296, 1144, 67104292, 1145, 67104288, 1146, 67104284, 1147, 67104280, 1148, 67104276, 1149, 67104272, 1150, 67104268, 1151, 67104264, 1152, 67104260, 1153, 67104256, 1154, 67104280, 1073742979, -4616, 1158, -4616, 1073742984, -4616, 1161, -4616, 1162, 67104220, 1163, 67104216, 1164, 67104212, 1165, 67104208, 1166, 67104204, 1167, 67104200, 1168, 67104196, 1169, 67104192, 1170, 67104188, 1171, 67104184, 1172, 67104180, 1173, 67104176, 1174, 67104172, 1175, 67104168, 1176, 67104164, 1177, 67104160, 1178, 67104156, 1179, 67104152, 1180, 67104148, 1181, 67104144, 1182, 67104140, 1183, 67104136, 1184, 67104132, 1185, 67104128, 1186, 67104124, 1187, 67104120, 1188, 67104116, 1189, 67104112, 1190, 67104108, 1191, 67104104, 1192, 67104100, 1193, 67104096, 1194, 67104092, 1195, 67104088, 1196, 67104084, 1197, 67104080, 1198, 67104076, 1199, 67104072, 1200, 67104068, 1201, 67104064, 1202, 67104060, 1203, 67104056, 1204, 67104052, 1205, 67104048, 1206, 67104044, 1207, 67104040, 1208, 67104036, 1209, 67104032, 1210, 67104028, 1211, 67104024, 1212, 67104020, 1213, 67104016, 1214, 67104012, 1215, 67104008, 1216, 67104004, 1217, 67104000, 1218, 67103996, 1219, 67103992, 1220, 67103988, 1221, 67103984, 1222, 67103980, 1223, 67103976, 1224, 67103972, 1225, 67103968, 1226, 67103964, 1227, 67103960, 1228, 67103956, 1229, 67103952, 1230, 67103948, 1231, 67103944, 1232, 67103940, 1233, 67103936, 1234, 67103932, 1235, 67103928, 1236, 67103924, 1237, 67103920, 1238, 67103916, 1239, 67103912, 1240, 67103908, 1241, 67103904, 1242, 67103900, 1243, 67103896, 1244, 67103892, 1245, 67103888, 1246, 
67103884, 1247, 67103880, 1248, 67103876, 1249, 67103872, 1250, 67103868, 1251, 67103864, 1252, 67103860, 1253, 67103856, 1254, 67103852, 1255, 67103848, 1256, 67103844, 1257, 67103840, 1258, 67103836, 1259, 67103832, 1260, 67103828, 1261, 67103824, 1262, 67103820, 1263, 67103816, 1264, 67103812, 1265, 67103808, 1266, 67103804, 1267, 67103800, 1268, 67103796, 1269, 67103792, 1270, 67103788, 1271, 67103784, 1272, 67103780, 1273, 67103776, 1274, 67103772, 1275, 67103768, 1276, 67103764, 1277, 67103760, 1278, 67103756, 1279, 67103752, 1280, 67103748, 1281, 67103744, 1282, 67103740, 1283, 67103736, 1284, 67103732, 1285, 67103728, 1286, 67103724, 1287, 67103720, 1288, 67103716, 1289, 67103712, 1290, 67103708, 1291, 67103704, 1292, 67103700, 1293, 67103696, 1294, 67103692, 1295, 67103688, 1296, 67103684, 1297, 67103680, 1298, 67103676, 1299, 67103672, 1329, 67103700, 1073743154, -5316, 1366, -5316, 1073743193, -5468, 1375, -5468, 1377, 67103508, 1073743202, -5508, 1414, -5508, 1415, 67114568, 1073743241, -5660, 1418, -5660, 1073743249, -5660, 1479, -5660, 1073743312, -5660, 1514, -5660, 1073743344, -5660, 1524, -5660, 1073743360, -5660, 1539, -5660, 1073743371, -5660, 1557, -5660, 1563, -5660, 1073743390, -5660, 1567, -5660, 1073743393, -5660, 1594, -5660, 1073743424, -5660, 1630, -5660, 1073743456, -5660, 1805, -5660, 1073743631, -5660, 1866, -5660, 1073743693, -5660, 1901, -5660, 1073743744, -5660, 1969, -5660, 1073743808, -5660, 2042, -5660, 1073744129, -5660, 2361, -5660, 1073744188, -5660, 2381, -5660, 1073744208, -5660, 2388, -5660, 1073744216, -5660, 2416, -5660, 1073744251, -5660, 2431, -5660, 1073744257, -5660, 2435, -5660, 1073744261, -5660, 2444, -5660, 1073744271, -5660, 2448, -5660, 1073744275, -5660, 2472, -5660, 1073744298, -5660, 2480, -5660, 2482, -5660, 1073744310, -5660, 2489, -5660, 1073744316, -5660, 2500, -5660, 1073744327, -5660, 2504, -5660, 1073744331, -5660, 2510, -5660, 2519, -5660, 1073744348, -5660, 2525, -5660, 1073744351, -5660, 2531, -5660, 1073744358, -5660, 2554, -5660, 1073744385, -5660, 2563, -5660, 1073744389, -5660, 2570, -5660, 1073744399, -5660, 2576, -5660, 1073744403, -5660, 2600, -5660, 1073744426, -5660, 2608, -5660, 1073744434, -5660, 2611, -5660, 1073744437, -5660, 2614, -5660, 1073744440, -5660, 2617, -5660, 2620, -5660, 1073744446, -5660, 2626, -5660, 1073744455, -5660, 2632, -5660, 1073744459, -5660, 2637, -5660, 1073744473, -5660, 2652, -5660, 2654, -5660, 1073744486, -5660, 2676, -5660, 1073744513, -5660, 2691, -5660, 1073744517, -5660, 2701, -5660, 1073744527, -5660, 2705, -5660, 1073744531, -5660, 2728, -5660, 1073744554, -5660, 2736, -5660, 1073744562, -5660, 2739, -5660, 1073744565, -5660, 2745, -5660, 1073744572, -5660, 2757, -5660, 1073744583, -5660, 2761, -5660, 1073744587, -5660, 2765, -5660, 2768, -5660, 1073744608, -5660, 2787, -5660, 1073744614, -5660, 2799, -5660, 2801, -5660, 1073744641, -5660, 2819, -5660, 1073744645, -5660, 2828, -5660, 1073744655, -5660, 2832, -5660, 1073744659, -5660, 2856, -5660, 1073744682, -5660, 2864, -5660, 1073744690, -5660, 2867, -5660, 1073744693, -5660, 2873, -5660, 1073744700, -5660, 2883, -5660, 1073744711, -5660, 2888, -5660, 1073744715, -5660, 2893, -5660, 1073744726, -5660, 2903, -5660, 1073744732, -5660, 2909, -5660, 1073744735, -5660, 2913, -5660, 1073744742, -5660, 2929, -5660, 1073744770, -5660, 2947, -5660, 1073744773, -5660, 2954, -5660, 1073744782, -5660, 2960, -5660, 1073744786, -5660, 2965, -5660, 1073744793, -5660, 2970, -5660, 2972, -5660, 1073744798, -5660, 2975, -5660, 1073744803, 
-5660, 2980, -5660, 1073744808, -5660, 2986, -5660, 1073744814, -5660, 3001, -5660, 1073744830, -5660, 3010, -5660, 1073744838, -5660, 3016, -5660, 1073744842, -5660, 3021, -5660, 3031, -5660, 1073744870, -5660, 3066, -5660, 1073744897, -5660, 3075, -5660, 1073744901, -5660, 3084, -5660, 1073744910, -5660, 3088, -5660, 1073744914, -5660, 3112, -5660, 1073744938, -5660, 3123, -5660, 1073744949, -5660, 3129, -5660, 1073744958, -5660, 3140, -5660, 1073744966, -5660, 3144, -5660, 1073744970, -5660, 3149, -5660, 1073744981, -5660, 3158, -5660, 1073744992, -5660, 3169, -5660, 1073744998, -5660, 3183, -5660, 1073745026, -5660, 3203, -5660, 1073745029, -5660, 3212, -5660, 1073745038, -5660, 3216, -5660, 1073745042, -5660, 3240, -5660, 1073745066, -5660, 3251, -5660, 1073745077, -5660, 3257, -5660, 1073745084, -5660, 3268, -5660, 1073745094, -5660, 3272, -5660, 1073745098, -5660, 3277, -5660, 1073745109, -5660, 3286, -5660, 3294, -5660, 1073745120, -5660, 3299, -5660, 1073745126, -5660, 3311, -5660, 1073745137, -5660, 3314, -5660, 1073745154, -5660, 3331, -5660, 1073745157, -5660, 3340, -5660, 1073745166, -5660, 3344, -5660, 1073745170, -5660, 3368, -5660, 1073745194, -5660, 3385, -5660, 1073745214, -5660, 3395, -5660, 1073745222, -5660, 3400, -5660, 1073745226, -5660, 3405, -5660, 3415, -5660, 1073745248, -5660, 3425, -5660, 1073745254, -5660, 3439, -5660, 1073745282, -5660, 3459, -5660, 1073745285, -5660, 3478, -5660, 1073745306, -5660, 3505, -5660, 1073745331, -5660, 3515, -5660, 3517, -5660, 1073745344, -5660, 3526, -5660, 3530, -5660, 1073745359, -5660, 3540, -5660, 3542, -5660, 1073745368, -5660, 3551, -5660, 1073745394, -5660, 3572, -5660, 1073745409, -5660, 3642, -5660, 1073745471, -5660, 3675, -5660, 1073745537, -5660, 3714, -5660, 3716, -5660, 1073745543, -5660, 3720, -5660, 3722, -5660, 3725, -5660, 1073745556, -5660, 3735, -5660, 1073745561, -5660, 3743, -5660, 1073745569, -5660, 3747, -5660, 3749, -5660, 3751, -5660, 1073745578, -5660, 3755, -5660, 1073745581, -5660, 3769, -5660, 1073745595, -5660, 3773, -5660, 1073745600, -5660, 3780, -5660, 3782, -5660, 1073745608, -5660, 3789, -5660, 1073745616, -5660, 3801, -5660, 1073745628, -5660, 3805, -5660, 1073745664, -5660, 3911, -5660, 1073745737, -5660, 3946, -5660, 1073745777, -5660, 3979, -5660, 1073745808, -5660, 3991, -5660, 1073745817, -5660, 4028, -5660, 1073745854, -5660, 4044, -5660, 1073745871, -5660, 4049, -5660, 1073745920, -5660, 4129, -5660, 1073745955, -5660, 4135, -5660, 1073745961, -5660, 4138, -5660, 1073745964, -5660, 4146, -5660, 1073745974, -5660, 4153, -5660, 1073745984, -5660, 4185, -5660, 4256, 67091992, 1073746081, -17024, 4293, -17024, 1073746128, -17176, 4348, -17176, 1073746176, -17176, 4441, -17176, 1073746271, -17176, 4514, -17176, 1073746344, -17176, 4601, -17176, 1073746432, -17176, 4680, -17176, 1073746506, -17176, 4685, -17176, 1073746512, -17176, 4694, -17176, 4696, -17176, 1073746522, -17176, 4701, -17176, 1073746528, -17176, 4744, -17176, 1073746570, -17176, 4749, -17176, 1073746576, -17176, 4784, -17176, 1073746610, -17176, 4789, -17176, 1073746616, -17176, 4798, -17176, 4800, -17176, 1073746626, -17176, 4805, -17176, 1073746632, -17176, 4822, -17176, 1073746648, -17176, 4880, -17176, 1073746706, -17176, 4885, -17176, 1073746712, -17176, 4954, -17176, 1073746783, -17176, 4988, -17176, 1073746816, -17176, 5017, -17176, 1073746848, -17176, 5108, -17176, 1073746945, -17176, 5750, -17176, 1073747584, -17176, 5788, -17176, 1073747616, -17176, 5872, -17176, 1073747712, -17176, 5900, -17176, 1073747726, 
-17176, 5908, -17176, 1073747744, -17176, 5942, -17176, 1073747776, -17176, 5971, -17176, 1073747808, -17176, 5996, -17176, 1073747822, -17176, 6000, -17176, 1073747826, -17176, 6003, -17176, 1073747840, -17176, 6109, -17176, 1073747936, -17176, 6121, -17176, 1073747952, -17176, 6137, -17176, 1073747968, -17176, 6158, -17176, 1073747984, -17176, 6169, -17176, 1073748000, -17176, 6263, -17176, 1073748096, -17176, 6313, -17176, 1073748224, -17176, 6428, -17176, 1073748256, -17176, 6443, -17176, 1073748272, -17176, 6459, -17176, 6464, -17176, 1073748292, -17176, 6509, -17176, 1073748336, -17176, 6516, -17176, 1073748352, -17176, 6569, -17176, 1073748400, -17176, 6601, -17176, 1073748432, -17176, 6617, -17176, 1073748446, -17176, 6683, -17176, 1073748510, -17176, 6687, -17176, 1073748736, -17176, 6987, -17176, 1073748816, -17176, 7036, -17176, 1073749248, -17176, 7548, -17176, 7549, 67078672, 7550, 67079184, 1073749375, -30200, 7626, -30200, 1073749502, -30200, 7679, -30200, 7680, 67078148, 7681, 67078144, 7682, 67078140, 7683, 67078136, 7684, 67078132, 7685, 67078128, 7686, 67078124, 7687, 67078120, 7688, 67078116, 7689, 67078112, 7690, 67078108, 7691, 67078104, 7692, 67078100, 7693, 67078096, 7694, 67078092, 7695, 67078088, 7696, 67078084, 7697, 67078080, 7698, 67078076, 7699, 67078072, 7700, 67078068, 7701, 67078064, 7702, 67078060, 7703, 67078056, 7704, 67078052, 7705, 67078048, 7706, 67078044, 7707, 67078040, 7708, 67078036, 7709, 67078032, 7710, 67078028, 7711, 67078024, 7712, 67078020, 7713, 67078016, 7714, 67078012, 7715, 67078008, 7716, 67078004, 7717, 67078000, 7718, 67077996, 7719, 67077992, 7720, 67077988, 7721, 67077984, 7722, 67077980, 7723, 67077976, 7724, 67077972, 7725, 67077968, 7726, 67077964, 7727, 67077960, 7728, 67077956, 7729, 67077952, 7730, 67077948, 7731, 67077944, 7732, 67077940, 7733, 67077936, 7734, 67077932, 7735, 67077928, 7736, 67077924, 7737, 67077920, 7738, 67077916, 7739, 67077912, 7740, 67077908, 7741, 67077904, 7742, 67077900, 7743, 67077896, 7744, 67077892, 7745, 67077888, 7746, 67077884, 7747, 67077880, 7748, 67077876, 7749, 67077872, 7750, 67077868, 7751, 67077864, 7752, 67077860, 7753, 67077856, 7754, 67077852, 7755, 67077848, 7756, 67077844, 7757, 67077840, 7758, 67077836, 7759, 67077832, 7760, 67077828, 7761, 67077824, 7762, 67077820, 7763, 67077816, 7764, 67077812, 7765, 67077808, 7766, 67077804, 7767, 67077800, 7768, 67077796, 7769, 67077792, 7770, 67077788, 7771, 67077784, 7772, 67077780, 7773, 67077776, 7774, 67077772, 7775, 67077768, 7776, 67077764, 7777, 67077760, 7778, 67077756, 7779, 67077752, 7780, 67077748, 7781, 67077744, 7782, 67077740, 7783, 67077736, 7784, 67077732, 7785, 67077728, 7786, 67077724, 7787, 67077720, 7788, 67077716, 7789, 67077712, 7790, 67077708, 7791, 67077704, 7792, 67077700, 7793, 67077696, 7794, 67077692, 7795, 67077688, 7796, 67077684, 7797, 67077680, 7798, 67077676, 7799, 67077672, 7800, 67077668, 7801, 67077664, 7802, 67077660, 7803, 67077656, 7804, 67077652, 7805, 67077648, 7806, 67077644, 7807, 67077640, 7808, 67077636, 7809, 67077632, 7810, 67077628, 7811, 67077624, 7812, 67077620, 7813, 67077616, 7814, 67077612, 7815, 67077608, 7816, 67077604, 7817, 67077600, 7818, 67077596, 7819, 67077592, 7820, 67077588, 7821, 67077584, 7822, 67077580, 7823, 67077576, 7824, 67077572, 7825, 67077568, 7826, 67077564, 7827, 67077560, 7828, 67077556, 7829, 67077552, 7830, 67077564, 1073749655, -31320, 7834, -31320, 7835, 67077528, 7840, 67077508, 7841, 67077504, 7842, 67077500, 7843, 67077496, 7844, 67077492, 7845, 67077488, 7846, 
67077484, 7847, 67077480, 7848, 67077476, 7849, 67077472, 7850, 67077468, 7851, 67077464, 7852, 67077460, 7853, 67077456, 7854, 67077452, 7855, 67077448, 7856, 67077444, 7857, 67077440, 7858, 67077436, 7859, 67077432, 7860, 67077428, 7861, 67077424, 7862, 67077420, 7863, 67077416, 7864, 67077412, 7865, 67077408, 7866, 67077404, 7867, 67077400, 7868, 67077396, 7869, 67077392, 7870, 67077388, 7871, 67077384, 7872, 67077380, 7873, 67077376, 7874, 67077372, 7875, 67077368, 7876, 67077364, 7877, 67077360, 7878, 67077356, 7879, 67077352, 7880, 67077348, 7881, 67077344, 7882, 67077340, 7883, 67077336, 7884, 67077332, 7885, 67077328, 7886, 67077324, 7887, 67077320, 7888, 67077316, 7889, 67077312, 7890, 67077308, 7891, 67077304, 7892, 67077300, 7893, 67077296, 7894, 67077292, 7895, 67077288, 7896, 67077284, 7897, 67077280, 7898, 67077276, 7899, 67077272, 7900, 67077268, 7901, 67077264, 7902, 67077260, 7903, 67077256, 7904, 67077252, 7905, 67077248, 7906, 67077244, 7907, 67077240, 7908, 67077236, 7909, 67077232, 7910, 67077228, 7911, 67077224, 7912, 67077220, 7913, 67077216, 7914, 67077212, 7915, 67077208, 7916, 67077204, 7917, 67077200, 7918, 67077196, 7919, 67077192, 7920, 67077188, 7921, 67077184, 7922, 67077180, 7923, 67077176, 7924, 67077172, 7925, 67077168, 7926, 67077164, 7927, 67077160, 7928, 67077156, 7929, 67077152, 7936, 67077152, 1073749761, -31744, 7943, -31744, 7944, 67077120, 1073749769, -31776, 7951, -31776, 7952, 67077080, 1073749777, -31808, 7957, -31808, 7960, 67077048, 1073749785, -31840, 7965, -31840, 7968, 67077024, 1073749793, -31872, 7975, -31872, 7976, 67076992, 1073749801, -31904, 7983, -31904, 7984, 67076960, 1073749809, -31936, 7991, -31936, 7992, 67076928, 1073749817, -31968, 7999, -31968, 8000, 67076888, 1073749825, -32000, 8005, -32000, 8008, 67076856, 1073749833, -32032, 8013, -32032, 8016, -32056, 8017, 67076800, 8018, 67076796, 8019, 67076792, 8020, 67076788, 8021, 67076784, 8022, 67076780, 8023, 67076776, 8025, 67076768, 8027, 67076760, 8029, 67076752, 8031, 67076744, 8032, 67076768, 1073749857, -32128, 8039, -32128, 8040, 67076736, 1073749865, -32160, 8047, -32160, 8048, 67076680, 8049, -32192, 8050, 67076680, 1073749875, -32200, 8053, -32200, 8054, 67076656, 8055, -32216, 8056, 67076648, 8057, -32224, 8058, 67076640, 8059, -32232, 8060, 67076632, 8061, -32240, 1073749888, -32248, 8111, -32248, 8112, 67076424, 8113, -32448, 8114, 67076432, 1073749939, -32456, 8116, -32456, 1073749942, -32456, 8119, -32456, 8120, 67076392, 8121, -32480, 8122, 67076384, 8123, -32488, 8124, 67076376, 8125, -32496, 8126, 67076364, 8127, 67076392, 1073749952, -32508, 8132, -32508, 1073749958, -32508, 8135, -32508, 8136, 67076336, 1073749961, -32544, 8139, -32544, 8140, 67076320, 1073749965, -32560, 8143, -32560, 8144, 67076296, 8145, -32576, 8146, 67076304, 8147, -32584, 1073749974, -32584, 8151, -32584, 8152, 67076264, 8153, -32608, 8154, 67076256, 8155, -32616, 1073749981, -32624, 8159, -32624, 8160, 67076232, 8161, -32640, 8162, 67076228, 1073749987, -32648, 8164, -32648, 1073749989, 67076208, 8166, 67076208, 8167, -32664, 8168, 67076200, 8169, -32672, 8170, 67076192, 8171, -32680, 8172, 67076180, 8173, 67076216, 1073749998, -32692, 8175, -32692, 1073750002, -32692, 8180, -32692, 1073750006, -32692, 8183, -32692, 8184, 67076136, 8185, -32736, 8186, 67076128, 8187, -32744, 8188, 67077352, 1073750013, -32752, 8190, -32752, 1073750016, -32752, 8291, -32752, 1073750122, -32752, 8305, -32752, 1073750132, -32752, 8334, -32752, 1073750160, -32752, 8340, -32752, 1073750176, -32752, 8373, 
-32752, 1073750224, -32752, 8431, -32752, 1073750272, -32752, 8497, -32752, 8498, 67074876, 8499, 67074976, 1073750324, -33996, 8525, -33996, 8526, 67074764, 1073750355, -34108, 8543, -34108, 8544, 67074752, 1073750369, -34176, 8559, -34176, 8560, 67074688, 1073750385, -34240, 8575, -34240, 8576, 67074572, 1073750401, -34304, 8578, -34304, 8579, 67074552, 8580, 67074548, 1073750416, -34324, 9191, -34324, 1073751040, -34324, 9254, -34324, 1073751104, -34324, 9290, -34324, 1073751136, -34324, 9397, -34324, 9398, 67071376, 1073751223, -37592, 9423, -37592, 9424, 67071272, 1073751249, -37696, 9449, -37696, 9450, 67078320, 1073751275, -37800, 9884, -37800, 1073751712, -37800, 9906, -37800, 1073751809, -37800, 9988, -37800, 1073751814, -37800, 9993, -37800, 1073751820, -37800, 10023, -37800, 1073751849, -37800, 10059, -37800, 10061, -37800, 1073751887, -37800, 10066, -37800, 10070, -37800, 1073751896, -37800, 10078, -37800, 1073751905, -37800, 10132, -37800, 1073751960, -37800, 10159, -37800, 1073751985, -37800, 10174, -37800, 1073752000, -37800, 10186, -37800, 1073752016, -37800, 10219, -37800, 1073752048, -37800, 11034, -37800, 1073752864, -37800, 11043, -37800, 11264, 67063996, 1073753089, -45056, 11310, -45056, 11312, 67063804, 1073753137, -45248, 11358, -45248, 11360, 67063428, 11361, 67063424, 11362, 67063420, 11363, 67063416, 11364, 67063412, 11365, 67063408, 11366, 67063404, 11367, 67063400, 11368, 67063396, 11369, 67063392, 11370, 67063388, 11371, 67063384, 11372, 67063380, 11380, -45492, 11381, 67063344, 11382, 67063340, 11383, 67063368, 11392, 67063300, 11393, 67063296, 11394, 67063292, 11395, 67063288, 11396, 67063284, 11397, 67063280, 11398, 67063276, 11399, 67063272, 11400, 67063268, 11401, 67063264, 11402, 67063260, 11403, 67063256, 11404, 67063252, 11405, 67063248, 11406, 67063244, 11407, 67063240, 11408, 67063236, 11409, 67063232, 11410, 67063228, 11411, 67063224, 11412, 67063220, 11413, 67063216, 11414, 67063212, 11415, 67063208, 11416, 67063204, 11417, 67063200, 11418, 67063196, 11419, 67063192, 11420, 67063188, 11421, 67063184, 11422, 67063180, 11423, 67063176, 11424, 67063172, 11425, 67063168, 11426, 67063164, 11427, 67063160, 11428, 67063156, 11429, 67063152, 11430, 67063148, 11431, 67063144, 11432, 67063140, 11433, 67063136, 11434, 67063132, 11435, 67063128, 11436, 67063124, 11437, 67063120, 11438, 67063116, 11439, 67063112, 11440, 67063108, 11441, 67063104, 11442, 67063100, 11443, 67063096, 11444, 67063092, 11445, 67063088, 11446, 67063084, 11447, 67063080, 11448, 67063076, 11449, 67063072, 11450, 67063068, 11451, 67063064, 11452, 67063060, 11453, 67063056, 11454, 67063052, 11455, 67063048, 11456, 67063044, 11457, 67063040, 11458, 67063036, 11459, 67063032, 11460, 67063028, 11461, 67063024, 11462, 67063020, 11463, 67063016, 11464, 67063012, 11465, 67063008, 11466, 67063004, 11467, 67063000, 11468, 67062996, 11469, 67062992, 11470, 67062988, 11471, 67062984, 11472, 67062980, 11473, 67062976, 11474, 67062972, 11475, 67062968, 11476, 67062964, 11477, 67062960, 11478, 67062956, 11479, 67062952, 11480, 67062948, 11481, 67062944, 11482, 67062940, 11483, 67062936, 11484, 67062932, 11485, 67062928, 11486, 67062924, 11487, 67062920, 11488, 67062916, 11489, 67062912, 11490, 67062908, 11491, 67062904, 11492, 67063008, 1073753317, -45968, 11498, -45968, 1073753337, -45968, 11519, -45968, 11520, 67062936, 1073753345, -46080, 11557, -46080, 1073753392, -46232, 11621, -46232, 11631, -46232, 1073753472, -46232, 11670, -46232, 1073753504, -46232, 11686, -46232, 1073753512, -46232, 11694, 
-46232, 1073753520, -46232, 11702, -46232, 1073753528, -46232, 11710, -46232, 1073753536, -46232, 11718, -46232, 1073753544, -46232, 11726, -46232, 1073753552, -46232, 11734, -46232, 1073753560, -46232, 11742, -46232, 1073753600, -46232, 11799, -46232, 1073753628, -46232, 11805, -46232, 1073753728, -46232, 11929, -46232, 1073753755, -46232, 12019, -46232, 1073753856, -46232, 12245, -46232, 1073754096, -46232, 12283, -46232, 1073754112, -46232, 12351, -46232, 1073754177, -46232, 12438, -46232, 1073754265, -46232, 12543, -46232, 1073754373, -46232, 12588, -46232, 1073754417, -46232, 12686, -46232, 1073754512, -46232, 12727, -46232, 1073754560, -46232, 12751, -46232, 1073754608, -46232, 12830, -46232, 1073754656, -46232, 12867, -46232, 1073754704, -46232, 13054, -46232, 1073754880, -46232, 19893, -46232, 1073761728, -46232, 32767, -46232 }; // NOLINT
+static const MultiCharacterSpecialCase<1> kCanonicalizationRangeMultiStrings1[] = { {0, {0}} }; // NOLINT
+static const uint16_t kCanonicalizationRangeTable1Size = 88;
+static const int32_t kCanonicalizationRangeTable1[176] = { 1073741824, -46232, 8123, -46232, 1073750016, -46232, 9356, -46232, 1073751184, -46232, 9414, -46232, 1073751808, -46232, 10010, -46232, 1073751840, -46232, 10017, -46232, 1073752064, -46232, 10283, -46232, 1073752128, -46232, 10359, -46232, 1073753088, -46232, 22435, -46232, 1073764352, -46232, 31277, -46232, 1073773104, -46232, 31338, -46232, 1073773168, -46232, 31449, -46232, 1073773312, -46232, 31494, -46232, 1073773331, -46232, 31511, -46232, 1073773341, -46232, 31542, -46232, 1073773368, -46232, 31548, -46232, 31550, -46232, 1073773376, -46232, 31553, -46232, 1073773379, -46232, 31556, -46232, 1073773382, -46232, 31665, -46232, 1073773523, -46232, 32063, -46232, 1073773904, -46232, 32143, -46232, 1073773970, -46232, 32199, -46232, 1073774064, -46232, 32253, -46232, 1073774080, -46232, 32281, -46232, 1073774112, -46232, 32291, -46232, 1073774128, -46232, 32338, -46232, 1073774164, -46232, 32358, -46232, 1073774184, -46232, 32363, -46232, 1073774192, -46232, 32372, -46232, 1073774198, -46232, 32508, -46232, 32511, -46232, 1073774337, -46232, 32544, -46232, 32545, 66847716, 1073774370, -261252, 32570, -261252, 32571, 66847532, 1073774396, -261356, 32576, -261356, 32577, 66847588, 1073774402, -261380, 32602, -261380, 32603, 66848040, 1073774428, -261484, 32702, -261484, 1073774530, -261484, 32711, -261484, 1073774538, -261484, 32719, -261484, 1073774546, -261484, 32727, -261484, 1073774554, -261484, 32732, -261484, 1073774560, -261484, 32742, -261484, 1073774568, -261484, 32750, -261484, 1073774585, -261484, 32765, -261484 }; // NOLINT
+int CanonicalizationRange::Convert(uchar c,
+                      uchar n,
+                      uchar* result,
+                      bool* allow_caching_ptr) {
+  int chunk_index = c >> 15;
+  switch (chunk_index) {
+    case 0: return LookupMapping(kCanonicalizationRangeTable0,
+                                     kCanonicalizationRangeTable0Size,
+                                     kCanonicalizationRangeMultiStrings0,
+                                     c,
+                                     n,
+                                     result,
+                                     allow_caching_ptr);
+    case 1: return LookupMapping(kCanonicalizationRangeTable1,
+                                     kCanonicalizationRangeTable1Size,
+                                     kCanonicalizationRangeMultiStrings1,
+                                     c,
+                                     n,
+                                     result,
+                                     allow_caching_ptr);
+    default: return 0;
+  }
+}
+
+
+uchar UnicodeData::kMaxCodePoint = 1114109;
+
+int UnicodeData::GetByteCount() {
+  return 0 + (sizeof(uint16_t) * kUppercaseTable0Size) + (sizeof(uint16_t) * kUppercaseTable1Size) + (sizeof(uint16_t) * kUppercaseTable2Size) + (sizeof(uint16_t) * kUppercaseTable3Size) + (sizeof(uint16_t) * kLowercaseTable0Size) + (sizeof(uint16_t) * kLowercaseTable1Size) + (sizeof(uint16_t) * kLowercaseTable2Size) + (sizeof(uint16_t) * kLowercaseTable3Size) + (sizeof(uint16_t) * kLetterTable0Size) + (sizeof(uint16_t) * kLetterTable1Size) + (sizeof(uint16_t) * kLetterTable2Size) + (sizeof(uint16_t) * kLetterTable3Size) + (sizeof(uint16_t) * kLetterTable4Size) + (sizeof(uint16_t) * kLetterTable5Size) + (sizeof(uint16_t) * kSpaceTable0Size) + (sizeof(uint16_t) * kNumberTable0Size) + (sizeof(uint16_t) * kNumberTable1Size) + (sizeof(uint16_t) * kNumberTable2Size) + (sizeof(uint16_t) * kNumberTable3Size) + (sizeof(uint16_t) * kWhiteSpaceTable0Size) + (sizeof(uint16_t) * kLineTerminatorTable0Size) + (sizeof(uint16_t) * kCombiningMarkTable0Size) + (sizeof(uint16_t) * kCombiningMarkTable1Size) + (sizeof(uint16_t) * kCombiningMarkTable2Size) + (sizeof(uint16_t) * kCombiningMarkTable3Size) + (sizeof(uint16_t) * kCombiningMarkTable28Size) + (sizeof(uint16_t) * kConnectorPunctuationTable0Size) + (sizeof(uint16_t) * kConnectorPunctuationTable1Size) + (sizeof(uint16_t) * kToLowercaseTable0Size) + (sizeof(uint16_t) * kToLowercaseTable1Size) + (sizeof(uint16_t) * kToLowercaseTable2Size) + (sizeof(uint16_t) * kToUppercaseTable0Size) + (sizeof(uint16_t) * kToUppercaseTable1Size) + (sizeof(uint16_t) * kToUppercaseTable2Size) + (sizeof(uint16_t) * kEcma262CanonicalizeTable0Size) + (sizeof(uint16_t) * kEcma262CanonicalizeTable1Size) + (sizeof(uint16_t) * kEcma262CanonicalizeTable2Size) + (sizeof(uint16_t) * kEcma262UnCanonicalizeTable0Size) + (sizeof(uint16_t) * kEcma262UnCanonicalizeTable1Size) + (sizeof(uint16_t) * kEcma262UnCanonicalizeTable2Size) + (sizeof(uint16_t) * kCanonicalizationRangeTable0Size) + (sizeof(uint16_t) * kCanonicalizationRangeTable1Size); // NOLINT
+}
+
+}  // namespace unicode
diff --git a/src/unicode.h b/src/unicode.h
new file mode 100644
index 0000000..f5e4210
--- /dev/null
+++ b/src/unicode.h
@@ -0,0 +1,279 @@
+// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_UNICODE_H_
+#define V8_UNICODE_H_
+
+#include <sys/types.h>
+
+/**
+ * \file
+ * Definitions and convenience functions for working with Unicode.
+ */
+
+namespace unibrow {
+
+typedef unsigned int uchar;
+typedef unsigned char byte;
+
+/**
+ * The max length of the result of converting the case of a single
+ * character.
+ */
+static const int kMaxMappingSize = 4;
+
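+// A cache for a boolean property of characters, e.g. Uppercase::Is.
+// Values are computed on demand by CalculateValue and stored in a
+// direct-mapped table of kSize entries keyed by the low bits of the
+// code point.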
+template <class T, int size = 256>
+class Predicate {
+ public:
+  inline Predicate() { }
+  inline bool get(uchar c);
+ private:
+  friend class Test;
+  bool CalculateValue(uchar c);
+  struct CacheEntry {
+    inline CacheEntry() : code_point_(0), value_(0) { }
+    inline CacheEntry(uchar code_point, bool value)
+      : code_point_(code_point),
+        value_(value) { }
+    uchar code_point_ : 21;
+    bool value_ : 1;
+  };
+  static const int kSize = size;
+  static const int kMask = kSize - 1;
+  CacheEntry entries_[kSize];
+};
+
+// A cache used in case conversion.  It caches the value for characters
+// that either have no mapping or map to a single character independent
+// of context.  Characters that map to more than one character or that
+// map differently depending on context are always looked up.
+template <class T, int size = 256>
+class Mapping {
+ public:
+  inline Mapping() { }
+  inline int get(uchar c, uchar n, uchar* result);
+ private:
+  friend class Test;
+  int CalculateValue(uchar c, uchar n, uchar* result);
+  struct CacheEntry {
+    inline CacheEntry() : code_point_(kNoChar), offset_(0) { }
+    inline CacheEntry(uchar code_point, signed offset)
+      : code_point_(code_point),
+        offset_(offset) { }
+    uchar code_point_;
+    signed offset_;
+    static const int kNoChar = (1 << 21) - 1;
+  };
+  static const int kSize = size;
+  static const int kMask = kSize - 1;
+  CacheEntry entries_[kSize];
+};
+
+class UnicodeData {
+ private:
+  friend class Test;
+  static int GetByteCount();
+  static uchar kMaxCodePoint;
+};
+
+// --- U t f   8 ---
+
+template <typename Data>
+class Buffer {
+ public:
+  inline Buffer(Data data, unsigned length) : data_(data), length_(length) { }
+  inline Buffer() : data_(0), length_(0) { }
+  Data data() { return data_; }
+  unsigned length() { return length_; }
+ private:
+  Data data_;
+  unsigned length_;
+};
+
+class Utf8 {
+ public:
+  static inline uchar Length(uchar chr);
+  static inline unsigned Encode(char* out, uchar c);
+  static const byte* ReadBlock(Buffer<const char*> str, byte* buffer,
+      unsigned capacity, unsigned* chars_read, unsigned* offset);
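+  // U+FFFD, the Unicode replacement character.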
+  static const uchar kBadChar = 0xFFFD;
+  static const unsigned kMaxEncodedSize   = 4;
+  static const unsigned kMaxOneByteChar   = 0x7f;
+  static const unsigned kMaxTwoByteChar   = 0x7ff;
+  static const unsigned kMaxThreeByteChar = 0xffff;
+  static const unsigned kMaxFourByteChar  = 0x1fffff;
+
+ private:
+  template <unsigned s> friend class Utf8InputBuffer;
+  friend class Test;
+  static inline uchar ValueOf(const byte* str,
+                              unsigned length,
+                              unsigned* cursor);
+  static uchar CalculateValue(const byte* str,
+                              unsigned length,
+                              unsigned* cursor);
+};
+
+// --- C h a r a c t e r   S t r e a m ---
+
+class CharacterStream {
+ public:
+  inline uchar GetNext();
+  inline bool has_more() { return remaining_ != 0; }
+  // Note that the default implementation is not efficient.
+  virtual void Seek(unsigned);
+  unsigned Length();
+  virtual ~CharacterStream() { }
+  static inline bool EncodeCharacter(uchar c, byte* buffer, unsigned capacity,
+      unsigned& offset);
+  static inline bool EncodeAsciiCharacter(uchar c, byte* buffer,
+      unsigned capacity, unsigned& offset);
+  static inline bool EncodeNonAsciiCharacter(uchar c, byte* buffer,
+      unsigned capacity, unsigned& offset);
+  static inline uchar DecodeCharacter(const byte* buffer, unsigned* offset);
+  virtual void Rewind() = 0;
+ protected:
+  virtual void FillBuffer() = 0;
+  // The number of characters left in the current buffer.
+  unsigned remaining_;
+  // The current offset within the buffer.
+  unsigned cursor_;
+  // The buffer containing the decoded characters.
+  const byte* buffer_;
+};
+
+// --- I n p u t   B u f f e r ---
+
+/**
+ * Provides efficient access to encoded characters in strings.  It
+ * does so by reading characters one block at a time, rather than one
+ * character at a time, which gives string implementations an
+ * opportunity to optimize the decoding.
+ */
+template <class Reader, class Input = Reader*, unsigned kSize = 256>
+class InputBuffer : public CharacterStream {
+ public:
+  virtual void Rewind();
+  inline void Reset(Input input);
+  void Seek(unsigned position);
+  inline void Reset(unsigned position, Input input);
+ protected:
+  InputBuffer() { }
+  explicit InputBuffer(Input input) { Reset(input); }
+  virtual void FillBuffer();
+
+  // A custom offset that can be used by the string implementation to
+  // mark progress within the encoded string.
+  unsigned offset_;
+  // The input string.
+  Input input_;
+  // To avoid heap allocation, we keep an internal buffer to which
+  // the encoded string can write its characters.  The string
+  // implementation is free to decide whether it wants to use this
+  // buffer or not.
+  byte util_buffer_[kSize];
+};
+
+// --- U t f 8   I n p u t   B u f f e r ---
+
+template <unsigned s = 256>
+class Utf8InputBuffer : public InputBuffer<Utf8, Buffer<const char*>, s> {
+ public:
+  inline Utf8InputBuffer() { }
+  inline Utf8InputBuffer(const char* data, unsigned length);
+  inline void Reset(const char* data, unsigned length) {
+    InputBuffer<Utf8, Buffer<const char*>, s>::Reset(
+        Buffer<const char*>(data, length));
+  }
+};
+
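+// Character class predicates; Is() is backed by the generated tables
+// in unicode.cc.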
+struct Uppercase {
+  static bool Is(uchar c);
+};
+struct Lowercase {
+  static bool Is(uchar c);
+};
+struct Letter {
+  static bool Is(uchar c);
+};
+struct Space {
+  static bool Is(uchar c);
+};
+struct Number {
+  static bool Is(uchar c);
+};
+struct WhiteSpace {
+  static bool Is(uchar c);
+};
+struct LineTerminator {
+  static bool Is(uchar c);
+};
+struct CombiningMark {
+  static bool Is(uchar c);
+};
+struct ConnectorPunctuation {
+  static bool Is(uchar c);
+};
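+// Case conversion and canonicalization mappings.  Convert() writes the
+// mapping of character c into result and returns the number of code
+// points written; n is the next character, used by the few mappings
+// that depend on context, and kMaxWidth is the largest number of code
+// points a single mapping can produce.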
+struct ToLowercase {
+  static const int kMaxWidth = 3;
+  static int Convert(uchar c,
+                     uchar n,
+                     uchar* result,
+                     bool* allow_caching_ptr);
+};
+struct ToUppercase {
+  static const int kMaxWidth = 3;
+  static int Convert(uchar c,
+                     uchar n,
+                     uchar* result,
+                     bool* allow_caching_ptr);
+};
+struct Ecma262Canonicalize {
+  static const int kMaxWidth = 1;
+  static int Convert(uchar c,
+                     uchar n,
+                     uchar* result,
+                     bool* allow_caching_ptr);
+};
+struct Ecma262UnCanonicalize {
+  static const int kMaxWidth = 4;
+  static int Convert(uchar c,
+                     uchar n,
+                     uchar* result,
+                     bool* allow_caching_ptr);
+};
+struct CanonicalizationRange {
+  static const int kMaxWidth = 1;
+  static int Convert(uchar c,
+                     uchar n,
+                     uchar* result,
+                     bool* allow_caching_ptr);
+};
+
+}  // namespace unibrow
+
+#endif  // V8_UNICODE_H_
diff --git a/src/uri.js b/src/uri.js
new file mode 100644
index 0000000..5af71b6
--- /dev/null
+++ b/src/uri.js
@@ -0,0 +1,415 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file contains support for URI manipulations written in
+// JavaScript.
+
+// Expect $String = global.String;
+
+// Lazily initialized.
+var hexCharArray = 0;
+var hexCharCodeArray = 0;
+
+
+function URIAddEncodedOctetToBuffer(octet, result, index) {
+  result[index++] = 37; // Char code of '%'.
+  result[index++] = hexCharCodeArray[octet >> 4];
+  result[index++] = hexCharCodeArray[octet & 0x0F];
+  return index;
+}
+
+
+function URIEncodeOctets(octets, result, index) {
+  if (hexCharCodeArray === 0) {
+    hexCharCodeArray = [48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+                        65, 66, 67, 68, 69, 70];
+  }
+  index = URIAddEncodedOctetToBuffer(octets[0], result, index);
+  if (octets[1]) index = URIAddEncodedOctetToBuffer(octets[1], result, index);
+  if (octets[2]) index = URIAddEncodedOctetToBuffer(octets[2], result, index);
+  if (octets[3]) index = URIAddEncodedOctetToBuffer(octets[3], result, index);
+  return index;
+}
+
+
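+// Encodes a code unit that is not part of a surrogate pair as one to
+// three UTF-8 octets and appends them to result as %XX escapes.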
+function URIEncodeSingle(cc, result, index) {
+  var x = (cc >> 12) & 0xF;
+  var y = (cc >> 6) & 63;
+  var z = cc & 63;
+  var octets = new $Array(3);
+  if (cc <= 0x007F) {
+    octets[0] = cc;
+  } else if (cc <= 0x07FF) {
+    octets[0] = y + 192;
+    octets[1] = z + 128;
+  } else {
+    octets[0] = x + 224;
+    octets[1] = y + 128;
+    octets[2] = z + 128;
+  }
+  return URIEncodeOctets(octets, result, index);
+}
+
+
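+// Encodes a surrogate pair (cc1 = lead, cc2 = trail) as the four-octet
+// UTF-8 form of the combined code point and appends it to result as
+// %XX escapes.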
+function URIEncodePair(cc1, cc2, result, index) {
+  var u = ((cc1 >> 6) & 0xF) + 1;
+  var w = (cc1 >> 2) & 0xF;
+  var x = cc1 & 3;
+  var y = (cc2 >> 6) & 0xF;
+  var z = cc2 & 63;
+  var octets = new $Array(4);
+  octets[0] = (u >> 2) + 240;
+  octets[1] = (((u & 3) << 4) | w) + 128;
+  octets[2] = ((x << 4) | y) + 128;
+  octets[3] = z + 128;
+  return URIEncodeOctets(octets, result, index);
+}
+
+
+function URIHexCharsToCharCode(ch1, ch2) {
+  if (HexValueOf(ch1) == -1 || HexValueOf(ch2) == -1) {
+    throw new $URIError("URI malformed");
+  }
+  return HexStrToCharCode(ch1 + ch2);
+}
+
+
+function URIDecodeOctets(octets, result, index) {
+  var value;
+  var o0 = octets[0];
+  if (o0 < 0x80) {
+    value = o0;
+  } else if (o0 < 0xc2) {
+    throw new $URIError("URI malformed");
+  } else {
+    var o1 = octets[1];
+    if (o0 < 0xe0) {
+      var a = o0 & 0x1f;
+      if ((o1 < 0x80) || (o1 > 0xbf))
+        throw new $URIError("URI malformed");
+      var b = o1 & 0x3f;
+      value = (a << 6) + b;
+      if (value < 0x80 || value > 0x7ff)
+        throw new $URIError("URI malformed");
+    } else {
+      var o2 = octets[2];
+      if (o0 < 0xf0) {
+        var a = o0 & 0x0f;
+        if ((o1 < 0x80) || (o1 > 0xbf))
+          throw new $URIError("URI malformed");
+        var b = o1 & 0x3f;
+        if ((o2 < 0x80) || (o2 > 0xbf))
+          throw new $URIError("URI malformed");
+        var c = o2 & 0x3f;
+        value = (a << 12) + (b << 6) + c;
+        if ((value < 0x800) || (value > 0xffff))
+          throw new $URIError("URI malformed");
+      } else {
+        var o3 = octets[3];
+        if (o0 < 0xf8) {
+          var a = (o0 & 0x07);
+          if ((o1 < 0x80) || (o1 > 0xbf))
+            throw new $URIError("URI malformed");
+          var b = (o1 & 0x3f);
+          if ((o2 < 0x80) || (o2 > 0xbf))
+            throw new $URIError("URI malformed");
+          var c = (o2 & 0x3f);
+          if ((o3 < 0x80) || (o3 > 0xbf))
+            throw new $URIError("URI malformed");
+          var d = (o3 & 0x3f);
+          value = (a << 18) + (b << 12) + (c << 6) + d;
+          if ((value < 0x10000) || (value > 0x10ffff))
+            throw new $URIError("URI malformed");
+        } else {
+          throw new $URIError("URI malformed");
+        }
+      }
+    }
+  }
+  if (value < 0x10000) {
+    result[index++] = value;
+    return index;
+  } else {
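+    // Code points outside the BMP are emitted as a UTF-16 surrogate
+    // pair: lead = 0xD800 + ((value - 0x10000) >> 10), and
+    // trail = 0xDC00 + (value & 0x3FF).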
+    result[index++] = (value >> 10) + 0xd7c0;
+    result[index++] = (value & 0x3ff) + 0xdc00;
+    return index;
+  }
+}
+
+
+// ECMA-262, section 15.1.3
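+// Encode walks the string one UTF-16 code unit at a time.  Code units
+// accepted by the unescape predicate are copied through unchanged;
+// everything else is UTF-8 encoded (pairing surrogates first) and
+// written as %XX escapes.  Lone or misordered surrogates throw a
+// URIError.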
+function Encode(uri, unescape) {
+  var uriLength = uri.length;
+  var result = new $Array(uriLength);
+  var index = 0;
+  for (var k = 0; k < uriLength; k++) {
+    var cc1 = uri.charCodeAt(k);
+    if (unescape(cc1)) {
+      result[index++] = cc1;
+    } else {
+      if (cc1 >= 0xDC00 && cc1 <= 0xDFFF) throw new $URIError("URI malformed");
+      if (cc1 < 0xD800 || cc1 > 0xDBFF) {
+        index = URIEncodeSingle(cc1, result, index);
+      } else {
+        k++;
+        if (k == uriLength) throw new $URIError("URI malformed");
+        var cc2 = uri.charCodeAt(k);
+        if (cc2 < 0xDC00 || cc2 > 0xDFFF) throw new $URIError("URI malformed");
+        index = URIEncodePair(cc1, cc2, result, index);
+      }
+    }
+  }
+  return %StringFromCharCodeArray(result);
+}
+
+
+// ECMA-262, section 15.1.3
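+// Decode copies characters through until it sees '%', then reads the
+// escaped octets, reassembles the UTF-8 sequence and emits the decoded
+// code point (as a surrogate pair when it lies outside the BMP).
+// Single-octet escapes whose value is in the reserved set are kept in
+// their escaped form.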
+function Decode(uri, reserved) {
+  var uriLength = uri.length;
+  var result = new $Array(uriLength);
+  var index = 0;
+  for (var k = 0; k < uriLength; k++) {
+    var ch = uri.charAt(k);
+    if (ch == '%') {
+      if (k + 2 >= uriLength) throw new $URIError("URI malformed");
+      var cc = URIHexCharsToCharCode(uri.charAt(++k), uri.charAt(++k));
+      if (cc >> 7) {
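+        // The number of leading 1-bits in the first octet gives the
+        // total number of octets in the UTF-8 sequence; anything other
+        // than 2-4 is malformed.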
+        var n = 0;
+        while (((cc << ++n) & 0x80) != 0) ;
+        if (n == 1 || n > 4) throw new $URIError("URI malformed");
+        var octets = new $Array(n);
+        octets[0] = cc;
+        if (k + 3 * (n - 1) >= uriLength) throw new $URIError("URI malformed");
+        for (var i = 1; i < n; i++) {
+          k++;
+          octets[i] = URIHexCharsToCharCode(uri.charAt(++k), uri.charAt(++k));
+        }
+        index = URIDecodeOctets(octets, result, index);
+      } else {
+        if (reserved(cc)) {
+          result[index++] = 37; // Char code of '%'.
+          result[index++] = uri.charCodeAt(k - 1);
+          result[index++] = uri.charCodeAt(k);
+        } else {
+          result[index++] = cc;
+        }
+      }
+    } else {
+      result[index++] = ch.charCodeAt(0);
+    }
+  }
+  result.length = index;
+  return %StringFromCharCodeArray(result);
+}
+
+
+// ECMA-262 - 15.1.3.1.
+function URIDecode(uri) {
+  function reservedPredicate(cc) {
+    // #$
+    if (35 <= cc && cc <= 36) return true;
+    // &
+    if (cc == 38) return true;
+    // +,
+    if (43 <= cc && cc <= 44) return true;
+    // /
+    if (cc == 47) return true;
+    // :;
+    if (58 <= cc && cc <= 59) return true;
+    // =
+    if (cc == 61) return true;
+    // ?@
+    if (63 <= cc && cc <= 64) return true;
+    
+    return false;
+  };
+  var string = ToString(uri);
+  return Decode(string, reservedPredicate);
+}
+
+
+// ECMA-262 - 15.1.3.2.
+function URIDecodeComponent(component) {
+  function reservedPredicate(cc) { return false; };
+  var string = ToString(component);
+  return Decode(string, reservedPredicate);
+}
+
+
+// Does the char code correspond to an alpha-numeric char.
+function isAlphaNumeric(cc) {
+  // a - z
+  if (97 <= cc && cc <= 122) return true;
+  // A - Z
+  if (65 <= cc && cc <= 90) return true;
+  // 0 - 9
+  if (48 <= cc && cc <= 57) return true;
+  
+  return false;
+}
+
+
+// ECMA-262 - 15.1.3.3.
+function URIEncode(uri) {
+  function unescapePredicate(cc) {
+    if (isAlphaNumeric(cc)) return true;
+    // !
+    if (cc == 33) return true;
+    // #$
+    if (35 <= cc && cc <= 36) return true;
+    // &'()*+,-./
+    if (38 <= cc && cc <= 47) return true;
+    // :;
+    if (58 <= cc && cc <= 59) return true;
+    // =
+    if (cc == 61) return true;
+    // ?@
+    if (63 <= cc && cc <= 64) return true;
+    // _
+    if (cc == 95) return true;
+    // ~
+    if (cc == 126) return true;
+    
+    return false;
+  };
+
+  var string = ToString(uri);
+  return Encode(string, unescapePredicate);
+}
+
+
+// ECMA-262 - 15.1.3.4
+function URIEncodeComponent(component) {
+  function unescapePredicate(cc) {
+    if (isAlphaNumeric(cc)) return true;
+    // !
+    if (cc == 33) return true;
+    // '()*
+    if (39 <= cc && cc <= 42) return true;
+    // -.
+    if (45 <= cc && cc <= 46) return true;
+    // _
+    if (cc == 95) return true;
+    // ~
+    if (cc == 126) return true;
+    
+    return false;
+  };
+
+  var string = ToString(component);
+  return Encode(string, unescapePredicate);
+}
+
+
+function HexValueOf(c) {
+  var code = c.charCodeAt(0);
+  
+  // 0-9
+  if (code >= 48 && code <= 57) return code - 48;
+  // A-F
+  if (code >= 65 && code <= 70) return code - 55;
+  // a-f
+  if (code >= 97 && code <= 102) return code - 87;
+  
+  return -1;
+}
+
+
+// Convert a character code to 4-digit hex string representation
+// 64 -> 0040, 62234 -> F31A.
+function CharCodeToHex4Str(cc) {
+  var r = "";
+  if (hexCharArray === 0) {
+    hexCharArray = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
+                    "A", "B", "C", "D", "E", "F"];
+  }
+  for (var i = 0; i < 4; ++i) {
+    var c = hexCharArray[cc & 0x0F];
+    r = c + r;
+    cc = cc >>> 4;
+  }
+  return r;
+}
+
+
+// Converts hex string to char code. Not efficient.
+function HexStrToCharCode(s) {
+  var m = 0;
+  var r = 0;
+  for (var i = s.length - 1; i >= 0; --i) {
+    r = r + (HexValueOf(s.charAt(i)) << m);
+    m = m + 4;
+  }
+  return r;
+}
+
+
+// Returns true if all characters in string s are valid hex digits.
+function IsValidHex(s) {
+  for (var i = 0; i < s.length; ++i) {
+    var cc = s.charCodeAt(i);
+    if ((48 <= cc && cc <= 57) || (65 <= cc && cc <= 70) || (97 <= cc && cc <= 102)) {
+      // '0'..'9', 'A'..'F' and 'a'..'f'.
+    } else {
+      return false;
+    }
+  }
+  return true;
+}
+
+
+// ECMA-262 - B.2.1.
+function URIEscape(str) {
+  var s = ToString(str);
+  return %URIEscape(s);
+}
+
+
+// ECMA-262 - B.2.2.
+function URIUnescape(str) {
+  var s = ToString(str);
+  return %URIUnescape(s);
+}
+
+
+// -------------------------------------------------------------------
+
+function SetupURI() {
+  // Set up non-enumerable URI functions on the global object and set
+  // their names.
+  InstallFunctions(global, DONT_ENUM, $Array(
+    "escape", URIEscape,
+    "unescape", URIUnescape,
+    "decodeURI", URIDecode,
+    "decodeURIComponent", URIDecodeComponent,
+    "encodeURI", URIEncode,
+    "encodeURIComponent", URIEncodeComponent
+  ));
+}
+
+SetupURI();
+
diff --git a/src/usage-analyzer.cc b/src/usage-analyzer.cc
new file mode 100644
index 0000000..23a4d9f
--- /dev/null
+++ b/src/usage-analyzer.cc
@@ -0,0 +1,415 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "ast.h"
+#include "scopes.h"
+#include "usage-analyzer.h"
+
+namespace v8 {
+namespace internal {
+
+// Weight boundaries
+static const int MinWeight = 1;
+static const int MaxWeight = 1000000;
+static const int InitialWeight = 100;
+
+
+class UsageComputer: public AstVisitor {
+ public:
+  static bool Traverse(AstNode* node);
+
+  // AST node visit functions.
+#define DECLARE_VISIT(type) void Visit##type(type* node);
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+  void VisitVariable(Variable* var);
+
+ private:
+  int weight_;
+  bool is_write_;
+
+  UsageComputer(int weight, bool is_write);
+  virtual ~UsageComputer();
+
+  // Helper functions
+  void RecordUses(UseCount* uses);
+  void Read(Expression* x);
+  void Write(Expression* x);
+  void ReadList(ZoneList<Expression*>* list);
+  void ReadList(ZoneList<ObjectLiteral::Property*>* list);
+
+  friend class WeightScaler;
+};
+
+
+class WeightScaler BASE_EMBEDDED {
+ public:
+  WeightScaler(UsageComputer* uc, float scale);
+  ~WeightScaler();
+
+ private:
+  UsageComputer* uc_;
+  int old_weight_;
+};
+
+
+// ----------------------------------------------------------------------------
+// Implementation of UsageComputer
+
+bool UsageComputer::Traverse(AstNode* node) {
+  UsageComputer uc(InitialWeight, false);
+  uc.Visit(node);
+  return !uc.HasStackOverflow();
+}
+
+
+void UsageComputer::VisitBlock(Block* node) {
+  VisitStatements(node->statements());
+}
+
+
+void UsageComputer::VisitDeclaration(Declaration* node) {
+  Write(node->proxy());
+  if (node->fun() != NULL)
+    VisitFunctionLiteral(node->fun());
+}
+
+
+void UsageComputer::VisitExpressionStatement(ExpressionStatement* node) {
+  Visit(node->expression());
+}
+
+
+void UsageComputer::VisitEmptyStatement(EmptyStatement* node) {
+  // nothing to do
+}
+
+
+void UsageComputer::VisitIfStatement(IfStatement* node) {
+  Read(node->condition());
+  { WeightScaler ws(this, 0.5);  // executed 50% of the time
+    Visit(node->then_statement());
+    Visit(node->else_statement());
+  }
+}
+
+
+void UsageComputer::VisitContinueStatement(ContinueStatement* node) {
+  // nothing to do
+}
+
+
+void UsageComputer::VisitBreakStatement(BreakStatement* node) {
+  // nothing to do
+}
+
+
+void UsageComputer::VisitReturnStatement(ReturnStatement* node) {
+  Read(node->expression());
+}
+
+
+void UsageComputer::VisitWithEnterStatement(WithEnterStatement* node) {
+  Read(node->expression());
+}
+
+
+void UsageComputer::VisitWithExitStatement(WithExitStatement* node) {
+  // nothing to do
+}
+
+
+void UsageComputer::VisitSwitchStatement(SwitchStatement* node) {
+  Read(node->tag());
+  ZoneList<CaseClause*>* cases = node->cases();
+  for (int i = cases->length(); i-- > 0;) {
+    WeightScaler ws(this, static_cast<float>(1.0 / cases->length()));
+    CaseClause* clause = cases->at(i);
+    if (!clause->is_default())
+      Read(clause->label());
+    VisitStatements(clause->statements());
+  }
+}
+
+
+void UsageComputer::VisitLoopStatement(LoopStatement* node) {
+  if (node->init() != NULL)
+    Visit(node->init());
+  { WeightScaler ws(this, 10.0);  // executed in each iteration
+    if (node->cond() != NULL)
+      Read(node->cond());
+    if (node->next() != NULL)
+      Visit(node->next());
+    Visit(node->body());
+  }
+}
+
+
+void UsageComputer::VisitForInStatement(ForInStatement* node) {
+  WeightScaler ws(this, 10.0);
+  Write(node->each());
+  Read(node->enumerable());
+  Visit(node->body());
+}
+
+
+void UsageComputer::VisitTryCatch(TryCatch* node) {
+  Visit(node->try_block());
+  { WeightScaler ws(this, 0.25);
+    Write(node->catch_var());
+    Visit(node->catch_block());
+  }
+}
+
+
+void UsageComputer::VisitTryFinally(TryFinally* node) {
+  Visit(node->try_block());
+  Visit(node->finally_block());
+}
+
+
+void UsageComputer::VisitDebuggerStatement(DebuggerStatement* node) {
+}
+
+
+void UsageComputer::VisitFunctionLiteral(FunctionLiteral* node) {
+  ZoneList<Declaration*>* decls = node->scope()->declarations();
+  for (int i = 0; i < decls->length(); i++) VisitDeclaration(decls->at(i));
+  VisitStatements(node->body());
+}
+
+
+void UsageComputer::VisitFunctionBoilerplateLiteral(
+    FunctionBoilerplateLiteral* node) {
+  // Do nothing.
+}
+
+
+void UsageComputer::VisitConditional(Conditional* node) {
+  Read(node->condition());
+  { WeightScaler ws(this, 0.5);
+    Read(node->then_expression());
+    Read(node->else_expression());
+  }
+}
+
+
+void UsageComputer::VisitSlot(Slot* node) {
+  UNREACHABLE();
+}
+
+
+void UsageComputer::VisitVariable(Variable* node) {
+  RecordUses(node->var_uses());
+}
+
+
+void UsageComputer::VisitVariableProxy(VariableProxy* node) {
+  // The proxy may refer to a variable in which case it was bound via
+  // VariableProxy::BindTo.
+  RecordUses(node->var_uses());
+}
+
+
+void UsageComputer::VisitLiteral(Literal* node) {
+  // nothing to do
+}
+
+void UsageComputer::VisitRegExpLiteral(RegExpLiteral* node) {
+  // nothing to do
+}
+
+
+void UsageComputer::VisitObjectLiteral(ObjectLiteral* node) {
+  ReadList(node->properties());
+}
+
+
+void UsageComputer::VisitArrayLiteral(ArrayLiteral* node) {
+  ReadList(node->values());
+}
+
+
+void UsageComputer::VisitCatchExtensionObject(CatchExtensionObject* node) {
+  Read(node->value());
+}
+
+
+void UsageComputer::VisitAssignment(Assignment* node) {
+  if (node->op() != Token::ASSIGN)
+    Read(node->target());
+  Write(node->target());
+  Read(node->value());
+}
+
+
+void UsageComputer::VisitThrow(Throw* node) {
+  Read(node->exception());
+}
+
+
+void UsageComputer::VisitProperty(Property* node) {
+  // In any case (read or write) we read both the
+  // node's object and the key.
+  Read(node->obj());
+  Read(node->key());
+  // If the node's object is a variable proxy,
+  // we have a 'simple' object property access. We count
+  // the access via the variable or proxy's object uses.
+  VariableProxy* proxy = node->obj()->AsVariableProxy();
+  if (proxy != NULL) {
+    RecordUses(proxy->obj_uses());
+  }
+}
+
+
+void UsageComputer::VisitCall(Call* node) {
+  Read(node->expression());
+  ReadList(node->arguments());
+}
+
+
+void UsageComputer::VisitCallNew(CallNew* node) {
+  Read(node->expression());
+  ReadList(node->arguments());
+}
+
+
+void UsageComputer::VisitCallRuntime(CallRuntime* node) {
+  ReadList(node->arguments());
+}
+
+
+void UsageComputer::VisitUnaryOperation(UnaryOperation* node) {
+  Read(node->expression());
+}
+
+
+void UsageComputer::VisitCountOperation(CountOperation* node) {
+  Read(node->expression());
+  Write(node->expression());
+}
+
+
+void UsageComputer::VisitBinaryOperation(BinaryOperation* node) {
+  Read(node->left());
+  Read(node->right());
+}
+
+
+void UsageComputer::VisitCompareOperation(CompareOperation* node) {
+  Read(node->left());
+  Read(node->right());
+}
+
+
+void UsageComputer::VisitThisFunction(ThisFunction* node) {
+}
+
+
+UsageComputer::UsageComputer(int weight, bool is_write) {
+  weight_ = weight;
+  is_write_ = is_write;
+}
+
+
+UsageComputer::~UsageComputer() {
+  // nothing to do
+}
+
+
+void UsageComputer::RecordUses(UseCount* uses) {
+  if (is_write_)
+    uses->RecordWrite(weight_);
+  else
+    uses->RecordRead(weight_);
+}
+
+
+void UsageComputer::Read(Expression* x) {
+  if (is_write_) {
+    UsageComputer uc(weight_, false);
+    uc.Visit(x);
+  } else {
+    Visit(x);
+  }
+}
+
+
+void UsageComputer::Write(Expression* x) {
+  if (!is_write_) {
+    UsageComputer uc(weight_, true);
+    uc.Visit(x);
+  } else {
+    Visit(x);
+  }
+}
+
+
+void UsageComputer::ReadList(ZoneList<Expression*>* list) {
+  for (int i = list->length(); i-- > 0; )
+    Read(list->at(i));
+}
+
+
+void UsageComputer::ReadList(ZoneList<ObjectLiteral::Property*>* list) {
+  for (int i = list->length(); i-- > 0; )
+    Read(list->at(i)->value());
+}
+
+
+// ----------------------------------------------------------------------------
+// Implementation of WeightScaler
+
+WeightScaler::WeightScaler(UsageComputer* uc, float scale) {
+  uc_ = uc;
+  old_weight_ = uc->weight_;
+  int new_weight = static_cast<int>(uc->weight_ * scale);
+  if (new_weight <= 0) new_weight = MinWeight;
+  else if (new_weight > MaxWeight) new_weight = MaxWeight;
+  uc->weight_ = new_weight;
+}
+
+
+WeightScaler::~WeightScaler() {
+  uc_->weight_ = old_weight_;
+}
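WeightScaler is what makes the per-construct scale factors used above (0.5 for conditional branches, 10.0 for loop bodies, 0.25 for catch blocks) compose across nesting: the constructor scales the visitor's current weight and clamps it to [MinWeight, MaxWeight], and the destructor restores the previous weight when the scope ends. A minimal standalone sketch of that RAII pattern, using a hypothetical Visitor struct in place of the real UsageComputer:

```cpp
#include <cassert>

struct Visitor {
  int weight = 100;  // plays the role of InitialWeight
};

// RAII scope that scales the visitor's weight and restores it on exit,
// mirroring WeightScaler's clamp to [MinWeight, MaxWeight].
class ScopedWeight {
 public:
  ScopedWeight(Visitor* v, float scale) : v_(v), old_weight_(v->weight) {
    int w = static_cast<int>(v->weight * scale);
    if (w <= 0) w = 1;                    // MinWeight
    else if (w > 1000000) w = 1000000;    // MaxWeight
    v->weight = w;
  }
  ~ScopedWeight() { v_->weight = old_weight_; }

 private:
  Visitor* v_;
  int old_weight_;
};

int main() {
  Visitor v;
  {
    ScopedWeight loop(&v, 10.0f);      // a loop body counts 10x
    assert(v.weight == 1000);
    {
      ScopedWeight branch(&v, 0.5f);   // an if-branch inside it counts 5x overall
      assert(v.weight == 500);
    }
    assert(v.weight == 1000);          // restored when the inner scope ends
  }
  assert(v.weight == 100);
  return 0;
}
```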
+
+
+// ----------------------------------------------------------------------------
+// Interface to variable usage analysis
+
+bool AnalyzeVariableUsage(FunctionLiteral* lit) {
+  if (!FLAG_usage_computation) return true;
+  HistogramTimerScope timer(&Counters::usage_analysis);
+  return UsageComputer::Traverse(lit);
+}
+
+} }  // namespace v8::internal
diff --git a/src/usage-analyzer.h b/src/usage-analyzer.h
new file mode 100644
index 0000000..1b0ea4a
--- /dev/null
+++ b/src/usage-analyzer.h
@@ -0,0 +1,40 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_USAGE_ANALYZER_H_
+#define V8_USAGE_ANALYZER_H_
+
+namespace v8 {
+namespace internal {
+
+// Compute usage counts for all variables.
+// Used for variable allocation.
+bool AnalyzeVariableUsage(FunctionLiteral* lit);
+
+} }  // namespace v8::internal
+
+#endif  // V8_USAGE_ANALYZER_H_
diff --git a/src/utils.cc b/src/utils.cc
new file mode 100644
index 0000000..3c684b8
--- /dev/null
+++ b/src/utils.cc
@@ -0,0 +1,312 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdarg.h>
+
+#include "v8.h"
+
+#include "platform.h"
+
+#include "sys/stat.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
+// figure 3-3, page 48, where the function is called clp2.
+uint32_t RoundUpToPowerOf2(uint32_t x) {
+  x = x - 1;
+  x = x | (x >> 1);
+  x = x | (x >> 2);
+  x = x | (x >> 4);
+  x = x | (x >> 8);
+  x = x | (x >> 16);
+  return x + 1;
+}
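This is Warren's clp2 trick: subtracting one and OR-ing in progressively wider shifted copies smears the highest set bit of x - 1 into every lower bit position, so adding one lands exactly on the next power of two. A small self-contained check of that behavior (a re-typed copy for illustration, not a call into the V8 build):

```cpp
#include <cassert>
#include <cstdint>

// Same bit-smearing as RoundUpToPowerOf2 above: after the shifts, every bit
// below the highest set bit of (x - 1) is set, so adding one yields the
// next power of two.
static uint32_t NextPow2(uint32_t x) {
  x = x - 1;
  x |= x >> 1;
  x |= x >> 2;
  x |= x >> 4;
  x |= x >> 8;
  x |= x >> 16;
  return x + 1;
}

int main() {
  assert(NextPow2(1) == 1);
  assert(NextPow2(5) == 8);
  assert(NextPow2(64) == 64);    // exact powers of two come back unchanged
  assert(NextPow2(65) == 128);
  assert(NextPow2(1000000) == 1048576);
  return 0;
}
```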
+
+
+byte* EncodeInt(byte* p, int x) {
+  while (x < -64 || x >= 64) {
+    *p++ = static_cast<byte>(x & 127);
+    x = ArithmeticShiftRight(x, 7);
+  }
+  // -64 <= x && x < 64
+  *p++ = static_cast<byte>(x + 192);
+  return p;
+}
+
+
+byte* DecodeInt(byte* p, int* x) {
+  int r = 0;
+  unsigned int s = 0;
+  byte b = *p++;
+  while (b < 128) {
+    r |= static_cast<int>(b) << s;
+    s += 7;
+    b = *p++;
+  }
+  // b >= 128
+  *x = r | ((static_cast<int>(b) - 192) << s);
+  return p;
+}
+
+
+byte* EncodeUnsignedIntBackward(byte* p, unsigned int x) {
+  while (x >= 128) {
+    *--p = static_cast<byte>(x & 127);
+    x = x >> 7;
+  }
+  // x < 128
+  *--p = static_cast<byte>(x + 128);
+  return p;
+}
+
+
+// Thomas Wang, Integer Hash Functions.
+// http://www.concentric.net/~Ttwang/tech/inthash.htm
+uint32_t ComputeIntegerHash(uint32_t key) {
+  uint32_t hash = key;
+  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1;
+  hash = hash ^ (hash >> 12);
+  hash = hash + (hash << 2);
+  hash = hash ^ (hash >> 4);
+  hash = hash * 2057;  // hash = (hash + (hash << 3)) + (hash << 11);
+  hash = hash ^ (hash >> 16);
+  return hash;
+}
+
+
+void PrintF(const char* format, ...) {
+  va_list arguments;
+  va_start(arguments, format);
+  OS::VPrint(format, arguments);
+  va_end(arguments);
+}
+
+
+void Flush() {
+  fflush(stdout);
+}
+
+
+char* ReadLine(const char* prompt) {
+  char* result = NULL;
+  char line_buf[256];
+  int offset = 0;
+  bool keep_going = true;
+  fprintf(stdout, "%s", prompt);
+  fflush(stdout);
+  while (keep_going) {
+    if (fgets(line_buf, sizeof(line_buf), stdin) == NULL) {
+      // fgets got an error. Just give up.
+      if (result != NULL) {
+        DeleteArray(result);
+      }
+      return NULL;
+    }
+    int len = strlen(line_buf);
+    if (len > 1 &&
+        line_buf[len - 2] == '\\' &&
+        line_buf[len - 1] == '\n') {
+      // When we read a line that ends with a "\" we remove the escape and
+      // append the remainder.
+      line_buf[len - 2] = '\n';
+      line_buf[len - 1] = 0;
+      len -= 1;
+    } else if ((len > 0) && (line_buf[len - 1] == '\n')) {
+      // Since we read a newline we are done reading the line. This
+      // will exit the loop after copying this buffer into the result.
+      keep_going = false;
+    }
+    if (result == NULL) {
+      // Allocate the initial result and make room for the terminating '\0'
+      result = NewArray<char>(len + 1);
+    } else {
+      // Allocate a new result with enough room for the new addition.
+      int new_len = offset + len + 1;
+      char* new_result = NewArray<char>(new_len);
+      // Copy the existing input into the new array and set the new
+      // array as the result.
+      memcpy(new_result, result, offset * kCharSize);
+      DeleteArray(result);
+      result = new_result;
+    }
+    // Copy the newly read line into the result.
+    memcpy(result + offset, line_buf, len * kCharSize);
+    offset += len;
+  }
+  ASSERT(result != NULL);
+  result[offset] = '\0';
+  return result;
+}
+
+
+char* ReadCharsFromFile(const char* filename,
+                        int* size,
+                        int extra_space,
+                        bool verbose) {
+  FILE* file = OS::FOpen(filename, "rb");
+  if (file == NULL || fseek(file, 0, SEEK_END) != 0) {
+    if (verbose) {
+      OS::PrintError("Cannot read from file %s.\n", filename);
+    }
+    // Close the handle if the open succeeded but the seek failed.
+    if (file != NULL) fclose(file);
+    return NULL;
+  }
+
+  // Get the size of the file and rewind it.
+  *size = ftell(file);
+  rewind(file);
+
+  char* result = NewArray<char>(*size + extra_space);
+  for (int i = 0; i < *size;) {
+    int read = fread(&result[i], 1, *size - i, file);
+    if (read <= 0) {
+      fclose(file);
+      DeleteArray(result);
+      return NULL;
+    }
+    i += read;
+  }
+  fclose(file);
+  return result;
+}
+
+
+byte* ReadBytes(const char* filename, int* size, bool verbose) {
+  char* chars = ReadCharsFromFile(filename, size, 0, verbose);
+  return reinterpret_cast<byte*>(chars);
+}
+
+
+Vector<const char> ReadFile(const char* filename,
+                            bool* exists,
+                            bool verbose) {
+  int size;
+  char* result = ReadCharsFromFile(filename, &size, 1, verbose);
+  if (!result) {
+    *exists = false;
+    return Vector<const char>::empty();
+  }
+  result[size] = '\0';
+  *exists = true;
+  return Vector<const char>(result, size);
+}
+
+
+int WriteCharsToFile(const char* str, int size, FILE* f) {
+  int total = 0;
+  while (total < size) {
+    int write = fwrite(str, 1, size - total, f);
+    if (write == 0) {
+      return total;
+    }
+    total += write;
+    str += write;
+  }
+  return total;
+}
+
+
+int WriteChars(const char* filename,
+               const char* str,
+               int size,
+               bool verbose) {
+  FILE* f = OS::FOpen(filename, "wb");
+  if (f == NULL) {
+    if (verbose) {
+      OS::PrintError("Cannot open file %s for writing.\n", filename);
+    }
+    return 0;
+  }
+  int written = WriteCharsToFile(str, size, f);
+  fclose(f);
+  return written;
+}
+
+
+int WriteBytes(const char* filename,
+               const byte* bytes,
+               int size,
+               bool verbose) {
+  const char* str = reinterpret_cast<const char*>(bytes);
+  return WriteChars(filename, str, size, verbose);
+}
+
+
+StringBuilder::StringBuilder(int size) {
+  buffer_ = Vector<char>::New(size);
+  position_ = 0;
+}
+
+
+void StringBuilder::AddString(const char* s) {
+  AddSubstring(s, strlen(s));
+}
+
+
+void StringBuilder::AddSubstring(const char* s, int n) {
+  ASSERT(!is_finalized() && position_ + n < buffer_.length());
+  ASSERT(static_cast<size_t>(n) <= strlen(s));
+  memcpy(&buffer_[position_], s, n * kCharSize);
+  position_ += n;
+}
+
+
+void StringBuilder::AddFormatted(const char* format, ...) {
+  ASSERT(!is_finalized() && position_ < buffer_.length());
+  va_list args;
+  va_start(args, format);
+  int n = OS::VSNPrintF(buffer_ + position_, format, args);
+  va_end(args);
+  if (n < 0 || n >= (buffer_.length() - position_)) {
+    position_ = buffer_.length();
+  } else {
+    position_ += n;
+  }
+}
+
+
+void StringBuilder::AddPadding(char c, int count) {
+  for (int i = 0; i < count; i++) {
+    AddCharacter(c);
+  }
+}
+
+
+char* StringBuilder::Finalize() {
+  ASSERT(!is_finalized() && position_ < buffer_.length());
+  buffer_[position_] = '\0';
+  // Make sure nobody managed to add a 0-character to the
+  // buffer while building the string.
+  ASSERT(strlen(buffer_.start()) == static_cast<size_t>(position_));
+  position_ = -1;
+  ASSERT(is_finalized());
+  return buffer_.start();
+}
+
+} }  // namespace v8::internal
diff --git a/src/utils.h b/src/utils.h
new file mode 100644
index 0000000..275dbb5
--- /dev/null
+++ b/src/utils.h
@@ -0,0 +1,581 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_UTILS_H_
+#define V8_UTILS_H_
+
+#include <stdlib.h>
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// General helper functions
+
+// Returns true iff x is a power of 2.  Does not work for zero.
+template <typename T>
+static inline bool IsPowerOf2(T x) {
+  return (x & (x - 1)) == 0;
+}
+
+
+// The C++ standard leaves the semantics of '>>' undefined for
+// negative signed operands. Most implementations do the right thing,
+// though.
+static inline int ArithmeticShiftRight(int x, int s) {
+  return x >> s;
+}
+
+
+// Compute the 0-relative offset of some absolute value x of type T.
+// This allows conversion of Addresses and integral types into
+// 0-relative int offsets.
+template <typename T>
+static inline intptr_t OffsetFrom(T x) {
+  return x - static_cast<T>(0);
+}
+
+
+// Compute the absolute value of type T for some 0-relative offset x.
+// This allows conversion of 0-relative int offsets into Addresses and
+// integral types.
+template <typename T>
+static inline T AddressFrom(intptr_t x) {
+  return static_cast<T>(0) + x;
+}
+
+
+// Return the largest multiple of m which is <= x.
+template <typename T>
+static inline T RoundDown(T x, int m) {
+  ASSERT(IsPowerOf2(m));
+  return AddressFrom<T>(OffsetFrom(x) & -m);
+}
+
+
+// Return the smallest multiple of m which is >= x.
+template <typename T>
+static inline T RoundUp(T x, int m) {
+  return RoundDown(x + m - 1, m);
+}
+
+
+template <typename T>
+static int Compare(const T& a, const T& b) {
+  if (a == b)
+    return 0;
+  else if (a < b)
+    return -1;
+  else
+    return 1;
+}
+
+
+template <typename T>
+static int PointerValueCompare(const T* a, const T* b) {
+  return Compare<T>(*a, *b);
+}
+
+
+// Returns the smallest power of two which is >= x. If you pass in a
+// number that is already a power of two, it is returned as is.
+uint32_t RoundUpToPowerOf2(uint32_t x);
+
+
+template <typename T>
+static inline bool IsAligned(T value, T alignment) {
+  ASSERT(IsPowerOf2(alignment));
+  return (value & (alignment - 1)) == 0;
+}
+
+
+// Returns true if (addr + offset) is aligned.
+static inline bool IsAddressAligned(Address addr,
+                                    intptr_t alignment,
+                                    int offset) {
+  intptr_t offs = OffsetFrom(addr + offset);
+  return IsAligned(offs, alignment);
+}
+
+
+// Returns the maximum of the two parameters.
+template <typename T>
+static T Max(T a, T b) {
+  return a < b ? b : a;
+}
+
+
+// Returns the minimum of the two parameters.
+template <typename T>
+static T Min(T a, T b) {
+  return a < b ? a : b;
+}
+
+
+// ----------------------------------------------------------------------------
+// BitField is a helper template for encoding and decoding bit fields with
+// unsigned content.
+template<class T, int shift, int size>
+class BitField {
+ public:
+  // Tells whether the provided value fits into the bit field.
+  static bool is_valid(T value) {
+    return (static_cast<uint32_t>(value) & ~((1U << (size)) - 1)) == 0;
+  }
+
+  // Returns a uint32_t mask of bit field.
+  static uint32_t mask() {
+    return (1U << (size + shift)) - (1U << shift);
+  }
+
+  // Returns a uint32_t with the bit field value encoded.
+  static uint32_t encode(T value) {
+    ASSERT(is_valid(value));
+    return static_cast<uint32_t>(value) << shift;
+  }
+
+  // Extracts the bit field from the value.
+  static T decode(uint32_t value) {
+    return static_cast<T>((value >> shift) & ((1U << (size)) - 1));
+  }
+};
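BitField packs a value into bits [shift, shift + size) of a uint32_t so that several independent fields can share a single word; encode() positions the value, decode() extracts it, and mask() reports which bits the field occupies. A self-contained sketch with a made-up two-field layout (the template body is repeated here only so the example compiles on its own, with the ASSERT dropped):

```cpp
#include <cassert>
#include <cstdint>

template<class T, int shift, int size>
class BitField {
 public:
  static bool is_valid(T value) {
    return (static_cast<uint32_t>(value) & ~((1U << (size)) - 1)) == 0;
  }
  static uint32_t mask() { return (1U << (size + shift)) - (1U << shift); }
  static uint32_t encode(T value) { return static_cast<uint32_t>(value) << shift; }
  static T decode(uint32_t value) {
    return static_cast<T>((value >> shift) & ((1U << (size)) - 1));
  }
};

// Hypothetical layout: three flag bits in bits 0..2, a five-bit count in bits 3..7.
typedef BitField<uint32_t, 0, 3> FlagsField;
typedef BitField<uint32_t, 3, 5> CountField;

int main() {
  assert(FlagsField::is_valid(0x5) && CountField::is_valid(17));
  uint32_t word = FlagsField::encode(0x5) | CountField::encode(17);
  assert(FlagsField::decode(word) == 0x5);
  assert(CountField::decode(word) == 17);
  assert(CountField::mask() == 0xF8);  // bits 3..7
  return 0;
}
```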
+
+
+// ----------------------------------------------------------------------------
+// Support for compressed, machine-independent encoding
+// and decoding of integer values of arbitrary size.
+
+// Encoding and decoding from/to a buffer at position p;
+// the result is the position after the encoded integer.
+// Small signed integers in the range -64 <= x && x < 64
+// are encoded in 1 byte; larger values are encoded in 2
+// or more bytes. At most sizeof(int) + 1 bytes are used
+// in the worst case.
+byte* EncodeInt(byte* p, int x);
+byte* DecodeInt(byte* p, int* x);
+
+
+// Encoding and decoding from/to a buffer at position p - 1
+// moving backward; the result is the position of the last
+// byte written. These routines are useful to read/write
+// into a buffer starting at the end of the buffer.
+byte* EncodeUnsignedIntBackward(byte* p, unsigned int x);
+
+// The decoding function is inlined since its performance is
+// important to mark-sweep garbage collection.
+inline byte* DecodeUnsignedIntBackward(byte* p, unsigned int* x) {
+  byte b = *--p;
+  if (b >= 128) {
+    *x = static_cast<unsigned int>(b) - 128;
+    return p;
+  }
+  unsigned int r = static_cast<unsigned int>(b);
+  unsigned int s = 7;
+  b = *--p;
+  while (b < 128) {
+    r |= static_cast<unsigned int>(b) << s;
+    s += 7;
+    b = *--p;
+  }
+  // b >= 128
+  *x = r | ((static_cast<unsigned int>(b) - 128) << s);
+  return p;
+}
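The scheme works in 7-bit groups: continuation bytes are below 128, and the final byte carries the remaining value from [-64, 64) offset by 192 so it is always at least 128. The sketch below re-types the forward encoder and decoder from utils.cc in standalone form and round-trips a few values; it mirrors the original logic (including the two's-complement shift behavior it relies on for negative numbers) rather than being the canonical implementation:

```cpp
#include <cassert>
#include <cstdint>

typedef uint8_t byte;

static byte* Encode(byte* p, int x) {
  while (x < -64 || x >= 64) {
    *p++ = static_cast<byte>(x & 127);   // continuation byte, < 128
    x = x >> 7;                          // arithmetic shift, as in ArithmeticShiftRight
  }
  *p++ = static_cast<byte>(x + 192);     // terminator byte, >= 128
  return p;
}

static byte* Decode(byte* p, int* x) {
  int r = 0;
  unsigned int s = 0;
  byte b = *p++;
  while (b < 128) {
    r |= static_cast<int>(b) << s;
    s += 7;
    b = *p++;
  }
  *x = r | ((static_cast<int>(b) - 192) << s);
  return p;
}

int main() {
  const int samples[] = { 0, 1, -1, 63, -64, 64, 1000000, -1000000 };
  for (int value : samples) {
    byte buf[sizeof(int) + 1];           // worst case per the comment above
    byte* end = Encode(buf, value);
    int decoded = 0;
    byte* read_end = Decode(buf, &decoded);
    assert(decoded == value);
    assert(read_end == end);
  }
  return 0;
}
```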
+
+
+// ----------------------------------------------------------------------------
+// Hash function.
+
+uint32_t ComputeIntegerHash(uint32_t key);
+
+
+// ----------------------------------------------------------------------------
+// I/O support.
+
+// Our version of printf(). Avoids compilation errors that we get
+// with standard printf when attempting to print pointers, etc.
+// (the errors are due to the extra compilation flags, which we
+// want elsewhere).
+void PrintF(const char* format, ...);
+
+// Our version of fflush.
+void Flush();
+
+
+// Read a line of characters after printing the prompt to stdout. The resulting
+// char* needs to be disposed of with DeleteArray by the caller.
+char* ReadLine(const char* prompt);
+
+
+// Read and return the raw bytes in a file. The size of the buffer is returned
+// in size.
+// The returned buffer must be freed by the caller.
+byte* ReadBytes(const char* filename, int* size, bool verbose = true);
+
+
+// Write size chars from str to the file given by filename.
+// The file is overwritten. Returns the number of chars written.
+int WriteChars(const char* filename,
+               const char* str,
+               int size,
+               bool verbose = true);
+
+
+// Write size bytes to the file given by filename.
+// The file is overwritten. Returns the number of bytes written.
+int WriteBytes(const char* filename,
+               const byte* bytes,
+               int size,
+               bool verbose = true);
+
+
+// Write the C code
+// const char* <varname> = "<str>";
+// const int <varname>_len = <len>;
+// to the file given by filename. Only the first len chars are written.
+int WriteAsCFile(const char* filename, const char* varname,
+                 const char* str, int size, bool verbose = true);
+
+
+// ----------------------------------------------------------------------------
+// Miscellaneous
+
+// A static resource holds a static instance that can be reserved in
+// a local scope using an instance of Access.  Attempts to re-reserve
+// the instance will cause an error.
+template <typename T>
+class StaticResource {
+ public:
+  StaticResource() : is_reserved_(false)  {}
+
+ private:
+  template <typename S> friend class Access;
+  T instance_;
+  bool is_reserved_;
+};
+
+
+// Locally scoped access to a static resource.
+template <typename T>
+class Access {
+ public:
+  explicit Access(StaticResource<T>* resource)
+    : resource_(resource)
+    , instance_(&resource->instance_) {
+    ASSERT(!resource->is_reserved_);
+    resource->is_reserved_ = true;
+  }
+
+  ~Access() {
+    resource_->is_reserved_ = false;
+    resource_ = NULL;
+    instance_ = NULL;
+  }
+
+  T* value()  { return instance_; }
+  T* operator -> ()  { return instance_; }
+
+ private:
+  StaticResource<T>* resource_;
+  T* instance_;
+};
+
+
+template <typename T>
+class Vector {
+ public:
+  Vector() : start_(NULL), length_(0) {}
+  Vector(T* data, int length) : start_(data), length_(length) {
+    ASSERT(length == 0 || (length > 0 && data != NULL));
+  }
+
+  static Vector<T> New(int length) {
+    return Vector<T>(NewArray<T>(length), length);
+  }
+
+  // Returns a vector using the same backing storage as this one,
+  // spanning from and including 'from', to but not including 'to'.
+  Vector<T> SubVector(int from, int to) {
+    ASSERT(from < length_);
+    ASSERT(to <= length_);
+    ASSERT(from < to);
+    return Vector<T>(start() + from, to - from);
+  }
+
+  // Returns the length of the vector.
+  int length() const { return length_; }
+
+  // Returns whether or not the vector is empty.
+  bool is_empty() const { return length_ == 0; }
+
+  // Returns the pointer to the start of the data in the vector.
+  T* start() const { return start_; }
+
+  // Access individual vector elements - checks bounds in debug mode.
+  T& operator[](int index) const {
+    ASSERT(0 <= index && index < length_);
+    return start_[index];
+  }
+
+  T& first() { return start_[0]; }
+
+  T& last() { return start_[length_ - 1]; }
+
+  // Returns a clone of this vector with a new backing store.
+  Vector<T> Clone() const {
+    T* result = NewArray<T>(length_);
+    for (int i = 0; i < length_; i++) result[i] = start_[i];
+    return Vector<T>(result, length_);
+  }
+
+  void Sort(int (*cmp)(const T*, const T*)) {
+    typedef int (*RawComparer)(const void*, const void*);
+    qsort(start(),
+          length(),
+          sizeof(T),
+          reinterpret_cast<RawComparer>(cmp));
+  }
+
+  void Sort() {
+    Sort(PointerValueCompare<T>);
+  }
+
+  void Truncate(int length) {
+    ASSERT(length <= length_);
+    length_ = length;
+  }
+
+  // Releases the array underlying this vector. Once disposed the
+  // vector is empty.
+  void Dispose() {
+    if (is_empty()) return;
+    DeleteArray(start_);
+    start_ = NULL;
+    length_ = 0;
+  }
+
+  inline Vector<T> operator+(int offset) {
+    ASSERT(offset < length_);
+    return Vector<T>(start_ + offset, length_ - offset);
+  }
+
+  // Factory method for creating empty vectors.
+  static Vector<T> empty() { return Vector<T>(NULL, 0); }
+
+ protected:
+  void set_start(T* start) { start_ = start; }
+
+ private:
+  T* start_;
+  int length_;
+};
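Vector is a pointer-plus-length view with explicit ownership: New() allocates the backing array, SubVector() hands out another view over the same storage, and Dispose() frees it. A compile-on-its-own sketch using a stripped-down copy of the class (only the members exercised below) and stand-in NewArray/DeleteArray helpers:

```cpp
#include <cassert>
#include <cstring>

// Stand-ins for the allocation helpers the real Vector relies on.
template <typename T> T* NewArray(int n) { return new T[n]; }
template <typename T> void DeleteArray(T* p) { delete[] p; }

template <typename T>
class Vector {
 public:
  Vector(T* data, int length) : start_(data), length_(length) {}
  static Vector<T> New(int length) { return Vector<T>(NewArray<T>(length), length); }
  int length() const { return length_; }
  T* start() const { return start_; }
  T& operator[](int i) const { return start_[i]; }
  Vector<T> SubVector(int from, int to) { return Vector<T>(start_ + from, to - from); }
  void Dispose() { DeleteArray(start_); start_ = NULL; length_ = 0; }

 private:
  T* start_;
  int length_;
};

int main() {
  Vector<char> buf = Vector<char>::New(16);
  memcpy(buf.start(), "hello, world", 12);
  Vector<char> word = buf.SubVector(0, 5);   // a view into the same storage
  assert(word.length() == 5);
  assert(word[0] == 'h' && word[4] == 'o');
  buf.Dispose();                             // frees the backing store; views dangle
  return 0;
}
```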
+
+
+// A temporary assignment sets a (non-local) variable to a value on
+// construction and resets it to its original value on destruction.
+template <typename T>
+class TempAssign {
+ public:
+  TempAssign(T* var, T value): var_(var), old_value_(*var) {
+    *var = value;
+  }
+
+  ~TempAssign() { *var_ = old_value_; }
+
+ private:
+  T* var_;
+  T old_value_;
+};
+
+
+template <typename T, int kSize>
+class EmbeddedVector : public Vector<T> {
+ public:
+  EmbeddedVector() : Vector<T>(buffer_, kSize) { }
+
+  // When copying, make the underlying Vector reference our buffer.
+  EmbeddedVector(const EmbeddedVector& rhs)
+      : Vector<T>(rhs) {
+    memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
+    set_start(buffer_);
+  }
+
+  EmbeddedVector& operator=(const EmbeddedVector& rhs) {
+    if (this == &rhs) return *this;
+    Vector<T>::operator=(rhs);
+    memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
+    set_start(buffer_);
+    return *this;
+  }
+
+ private:
+  T buffer_[kSize];
+};
+
+
+template <typename T>
+class ScopedVector : public Vector<T> {
+ public:
+  explicit ScopedVector(int length) : Vector<T>(NewArray<T>(length), length) { }
+  ~ScopedVector() {
+    DeleteArray(this->start());
+  }
+};
+
+
+inline Vector<const char> CStrVector(const char* data) {
+  return Vector<const char>(data, static_cast<int>(strlen(data)));
+}
+
+inline Vector<char> MutableCStrVector(char* data) {
+  return Vector<char>(data, static_cast<int>(strlen(data)));
+}
+
+inline Vector<char> MutableCStrVector(char* data, int max) {
+  int length = static_cast<int>(strlen(data));
+  return Vector<char>(data, (length < max) ? length : max);
+}
+
+template <typename T>
+inline Vector< Handle<Object> > HandleVector(v8::internal::Handle<T>* elms,
+                                             int length) {
+  return Vector< Handle<Object> >(
+      reinterpret_cast<v8::internal::Handle<Object>*>(elms), length);
+}
+
+
+// Simple support to read a file into a 0-terminated C-string.
+// The returned buffer must be freed by the caller.
+// On return, *exists tells whether the file existed.
+Vector<const char> ReadFile(const char* filename,
+                            bool* exists,
+                            bool verbose = true);
+
+
+// Simple wrapper that allows an ExternalString to refer to a
+// Vector<const char>. Doesn't assume ownership of the data.
+class AsciiStringAdapter: public v8::String::ExternalAsciiStringResource {
+ public:
+  explicit AsciiStringAdapter(Vector<const char> data) : data_(data) {}
+
+  virtual const char* data() const { return data_.start(); }
+
+  virtual size_t length() const { return data_.length(); }
+
+ private:
+  Vector<const char> data_;
+};
+
+
+// Helper class for building result strings in a character buffer. The
+// purpose of the class is to use safe operations that checks the
+// buffer bounds on all operations in debug mode.
+class StringBuilder {
+ public:
+  // Create a string builder with a buffer of the given size. The
+  // buffer is allocated through NewArray<char> and must be
+  // deallocated by the caller of Finalize().
+  explicit StringBuilder(int size);
+
+  StringBuilder(char* buffer, int size)
+      : buffer_(buffer, size), position_(0) { }
+
+  ~StringBuilder() { if (!is_finalized()) Finalize(); }
+
+  int size() const { return buffer_.length(); }
+
+  // Get the current position in the builder.
+  int position() const {
+    ASSERT(!is_finalized());
+    return position_;
+  }
+
+  // Reset the position.
+  void Reset() { position_ = 0; }
+
+  // Add a single character to the builder. It is not allowed to add
+  // 0-characters; use the Finalize() method to terminate the string
+  // instead.
+  void AddCharacter(char c) {
+    ASSERT(c != '\0');
+    ASSERT(!is_finalized() && position_ < buffer_.length());
+    buffer_[position_++] = c;
+  }
+
+  // Add an entire string to the builder. Uses strlen() internally to
+  // compute the length of the input string.
+  void AddString(const char* s);
+
+  // Add the first 'n' characters of the given string 's' to the
+  // builder. The input string must have enough characters.
+  void AddSubstring(const char* s, int n);
+
+  // Add formatted contents to the builder just like printf().
+  void AddFormatted(const char* format, ...);
+
+  // Add character padding to the builder. If count is non-positive,
+  // nothing is added to the builder.
+  void AddPadding(char c, int count);
+
+  // Finalize the string by 0-terminating it and returning the buffer.
+  char* Finalize();
+
+ private:
+  Vector<char> buffer_;
+  int position_;
+
+  bool is_finalized() const { return position_ < 0; }
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
+};
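A sketch of how the builder is meant to be driven: a fixed-size buffer, append operations that bounds-check in debug mode, and a Finalize() call that 0-terminates and hands the buffer back for the caller to free. This assumes it is compiled inside the V8 tree (v8.h provides StringBuilder, PrintF and DeleteArray); the function and its arguments are made up for illustration:

```cpp
#include "v8.h"

namespace v8 {
namespace internal {

void DescribeCounter(const char* name, int hits, int misses) {
  StringBuilder builder(128);          // buffer comes from NewArray<char>(128)
  builder.AddString(name);
  builder.AddString(": ");
  builder.AddFormatted("%d hits, %d misses", hits, misses);
  builder.AddPadding('.', 3);
  char* text = builder.Finalize();     // 0-terminated; caller now owns the buffer
  PrintF("%s\n", text);
  DeleteArray(text);
}

} }  // namespace v8::internal
```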
+
+
+// Copy from ASCII/16bit chars to ASCII/16bit chars.
+template <typename sourcechar, typename sinkchar>
+static inline void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
+  sinkchar* limit = dest + chars;
+#ifdef V8_HOST_CAN_READ_UNALIGNED
+  if (sizeof(*dest) == sizeof(*src)) {
+    // Number of characters in a uint32_t.
+    static const int kStepSize = sizeof(uint32_t) / sizeof(*dest);  // NOLINT
+    while (dest <= limit - kStepSize) {
+      *reinterpret_cast<uint32_t*>(dest) =
+          *reinterpret_cast<const uint32_t*>(src);
+      dest += kStepSize;
+      src += kStepSize;
+    }
+  }
+#endif
+  while (dest < limit) {
+    *dest++ = static_cast<sinkchar>(*src++);
+  }
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_UTILS_H_
diff --git a/src/v8-counters.cc b/src/v8-counters.cc
new file mode 100644
index 0000000..de2ce66
--- /dev/null
+++ b/src/v8-counters.cc
@@ -0,0 +1,55 @@
+// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "v8-counters.h"
+
+namespace v8 {
+namespace internal {
+
+#define HT(name, caption) \
+  HistogramTimer Counters::name = { #caption, NULL, false, 0, 0 }; \
+
+  HISTOGRAM_TIMER_LIST(HT)
+#undef HT
+
+#define SC(name, caption) \
+  StatsCounter Counters::name = { "c:" #caption, NULL, false };
+
+  STATS_COUNTER_LIST_1(SC)
+  STATS_COUNTER_LIST_2(SC)
+#undef SC
+
+StatsCounter Counters::state_counters[] = {
+#define COUNTER_NAME(name) \
+  { "c:V8.State" #name, NULL, false },
+  STATE_TAG_LIST(COUNTER_NAME)
+#undef COUNTER_NAME
+};
+
+} }  // namespace v8::internal
diff --git a/src/v8-counters.h b/src/v8-counters.h
new file mode 100644
index 0000000..e360b55
--- /dev/null
+++ b/src/v8-counters.h
@@ -0,0 +1,190 @@
+// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_V8_COUNTERS_H_
+#define V8_V8_COUNTERS_H_
+
+#include "counters.h"
+
+namespace v8 {
+namespace internal {
+
+#define HISTOGRAM_TIMER_LIST(HT)                                      \
+  /* Garbage collection timers. */                                    \
+  HT(gc_compactor, V8.GCCompactor)                                    \
+  HT(gc_scavenger, V8.GCScavenger)                                    \
+  HT(gc_context, V8.GCContext) /* GC context cleanup time */          \
+  /* Parsing timers. */                                               \
+  HT(parse, V8.Parse)                                                 \
+  HT(parse_lazy, V8.ParseLazy)                                        \
+  HT(pre_parse, V8.PreParse)                                          \
+  /* Total compilation times. */                                      \
+  HT(compile, V8.Compile)                                             \
+  HT(compile_eval, V8.CompileEval)                                    \
+  HT(compile_lazy, V8.CompileLazy)                                    \
+  /* Individual compiler passes. */                                   \
+  HT(rewriting, V8.Rewriting)                                         \
+  HT(usage_analysis, V8.UsageAnalysis)                                \
+  HT(variable_allocation, V8.VariableAllocation)                      \
+  HT(ast_optimization, V8.ASTOptimization)                            \
+  HT(code_generation, V8.CodeGeneration)                              \
+  HT(deferred_code_generation, V8.DeferredCodeGeneration)             \
+  HT(code_creation, V8.CodeCreation)
+
+// WARNING: STATS_COUNTER_LIST_* is a very large macro that is causing MSVC
+// Intellisense to crash.  It was broken into two macros (each of length 40
+// lines) rather than one macro (of length about 80 lines) to work around
+// this problem.  Please avoid using recursive macros of this length when
+// possible.
+#define STATS_COUNTER_LIST_1(SC)                                 \
+  /* Global Handle Count*/                                       \
+  SC(global_handles, V8.GlobalHandles)                           \
+  /* Mallocs from PCRE */                                        \
+  SC(pcre_mallocs, V8.PcreMallocCount)                           \
+  /* OS Memory allocated */                                      \
+  SC(memory_allocated, V8.OsMemoryAllocated)                     \
+  SC(props_to_dictionary, V8.ObjectPropertiesToDictionary)       \
+  SC(elements_to_dictionary, V8.ObjectElementsToDictionary)      \
+  SC(alive_after_last_gc, V8.AliveAfterLastGC)                   \
+  SC(objs_since_last_young, V8.ObjsSinceLastYoung)               \
+  SC(objs_since_last_full, V8.ObjsSinceLastFull)                 \
+  SC(symbol_table_capacity, V8.SymbolTableCapacity)              \
+  SC(number_of_symbols, V8.NumberOfSymbols)                      \
+  /* Current amount of memory in external string buffers. */     \
+  SC(total_external_string_memory, V8.TotalExternalStringMemory) \
+  SC(script_wrappers, V8.ScriptWrappers)                         \
+  SC(call_initialize_stubs, V8.CallInitializeStubs)              \
+  SC(call_premonomorphic_stubs, V8.CallPreMonomorphicStubs)      \
+  SC(call_normal_stubs, V8.CallNormalStubs)                      \
+  SC(call_megamorphic_stubs, V8.CallMegamorphicStubs)            \
+  SC(arguments_adaptors, V8.ArgumentsAdaptors)                   \
+  SC(compilation_cache_hits, V8.CompilationCacheHits)            \
+  SC(compilation_cache_misses, V8.CompilationCacheMisses)        \
+  SC(regexp_cache_hits, V8.RegExpCacheHits)                      \
+  SC(regexp_cache_misses, V8.RegExpCacheMisses)                  \
+  /* Amount of evaled source code. */                            \
+  SC(total_eval_size, V8.TotalEvalSize)                          \
+  /* Amount of loaded source code. */                            \
+  SC(total_load_size, V8.TotalLoadSize)                          \
+  /* Amount of parsed source code. */                            \
+  SC(total_parse_size, V8.TotalParseSize)                        \
+  /* Amount of source code skipped over using preparsing. */     \
+  SC(total_preparse_skipped, V8.TotalPreparseSkipped)            \
+  /* Amount of compiled source code. */                          \
+  SC(total_compile_size, V8.TotalCompileSize)
+
+
+#define STATS_COUNTER_LIST_2(SC)                                    \
+  /* Number of code stubs. */                                       \
+  SC(code_stubs, V8.CodeStubs)                                      \
+  /* Amount of stub code. */                                        \
+  SC(total_stubs_code_size, V8.TotalStubsCodeSize)                  \
+  /* Amount of (JS) compiled code. */                               \
+  SC(total_compiled_code_size, V8.TotalCompiledCodeSize)            \
+  SC(gc_compactor_caused_by_request, V8.GCCompactorCausedByRequest) \
+  SC(gc_compactor_caused_by_promoted_data,                          \
+     V8.GCCompactorCausedByPromotedData)                            \
+  SC(gc_compactor_caused_by_oldspace_exhaustion,                    \
+     V8.GCCompactorCausedByOldspaceExhaustion)                      \
+  SC(gc_compactor_caused_by_weak_handles,                           \
+     V8.GCCompactorCausedByWeakHandles)                             \
+  SC(gc_last_resort_from_js, V8.GCLastResortFromJS)                 \
+  SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles)       \
+  /* How is the generic keyed-load stub used? */                    \
+  SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi)                \
+  SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol)          \
+  SC(keyed_load_generic_slow, V8.KeyedLoadGenericSlow)              \
+  /* Count how much the monomorphic keyed-load stubs are hit. */    \
+  SC(keyed_load_function_prototype, V8.KeyedLoadFunctionPrototype)  \
+  SC(keyed_load_string_length, V8.KeyedLoadStringLength)            \
+  SC(keyed_load_array_length, V8.KeyedLoadArrayLength)              \
+  SC(keyed_load_constant_function, V8.KeyedLoadConstantFunction)    \
+  SC(keyed_load_field, V8.KeyedLoadField)                           \
+  SC(keyed_load_callback, V8.KeyedLoadCallback)                     \
+  SC(keyed_load_interceptor, V8.KeyedLoadInterceptor)               \
+  SC(keyed_load_inline, V8.KeyedLoadInline)                         \
+  SC(keyed_load_inline_miss, V8.KeyedLoadInlineMiss)                \
+  SC(named_load_inline, V8.NamedLoadInline)                         \
+  SC(named_load_inline_miss, V8.NamedLoadInlineMiss)                \
+  SC(named_load_global_inline, V8.NamedLoadGlobalInline)            \
+  SC(named_load_global_inline_miss, V8.NamedLoadGlobalInlineMiss)   \
+  SC(keyed_store_field, V8.KeyedStoreField)                         \
+  SC(keyed_store_inline, V8.KeyedStoreInline)                       \
+  SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss)              \
+  SC(named_store_global_inline, V8.NamedStoreGlobalInline)          \
+  SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss) \
+  SC(call_global_inline, V8.CallGlobalInline)                       \
+  SC(call_global_inline_miss, V8.CallGlobalInlineMiss)              \
+  SC(constructed_objects, V8.ConstructedObjects)                    \
+  SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime)     \
+  SC(constructed_objects_stub, V8.ConstructedObjectsStub)           \
+  SC(array_function_runtime, V8.ArrayFunctionRuntime)               \
+  SC(array_function_native, V8.ArrayFunctionNative)                 \
+  SC(for_in, V8.ForIn)                                              \
+  SC(enum_cache_hits, V8.EnumCacheHits)                             \
+  SC(enum_cache_misses, V8.EnumCacheMisses)                         \
+  SC(reloc_info_count, V8.RelocInfoCount)                           \
+  SC(reloc_info_size, V8.RelocInfoSize)                             \
+  SC(zone_segment_bytes, V8.ZoneSegmentBytes)                       \
+  SC(compute_entry_frame, V8.ComputeEntryFrame)
+
+
+// This file contains all the v8 counters that are in use.
+class Counters : AllStatic {
+ public:
+#define HT(name, caption) \
+  static HistogramTimer name;
+  HISTOGRAM_TIMER_LIST(HT)
+#undef HT
+
+#define SC(name, caption) \
+  static StatsCounter name;
+  STATS_COUNTER_LIST_1(SC)
+  STATS_COUNTER_LIST_2(SC)
+#undef SC
+
+  enum Id {
+#define RATE_ID(name, caption) k_##name,
+    HISTOGRAM_TIMER_LIST(RATE_ID)
+#undef RATE_ID
+#define COUNTER_ID(name, caption) k_##name,
+  STATS_COUNTER_LIST_1(COUNTER_ID)
+  STATS_COUNTER_LIST_2(COUNTER_ID)
+#undef COUNTER_ID
+#define COUNTER_ID(name) k_##name,
+  STATE_TAG_LIST(COUNTER_ID)
+#undef COUNTER_ID
+    stats_counter_count
+  };
+
+  // Sliding state window counters.
+  static StatsCounter state_counters[];
+};
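The counter lists above are X-macros: each *_LIST macro takes another macro as its argument and applies it to every (name, caption) pair, so a single list drives the static member declarations here, the definitions in v8-counters.cc, and the Id enum. A tiny self-contained sketch of the same pattern with demo names (not real V8 counters):

```cpp
#include <cstdio>

// A two-entry list in the style of STATS_COUNTER_LIST_1.
#define DEMO_COUNTER_LIST(SC)        \
  SC(cache_hits, Demo.CacheHits)     \
  SC(cache_misses, Demo.CacheMisses)

// Expansion 1: one variable per counter.
#define DECLARE_COUNTER(name, caption) static int name = 0;
DEMO_COUNTER_LIST(DECLARE_COUNTER)
#undef DECLARE_COUNTER

// Expansion 2: one enum id per counter, as Counters::Id does.
enum DemoId {
#define COUNTER_ID(name, caption) k_##name,
  DEMO_COUNTER_LIST(COUNTER_ID)
#undef COUNTER_ID
  demo_counter_count
};

int main() {
  cache_hits += 3;
  cache_misses += 1;
  printf("%d counters, hits=%d, misses=%d\n",
         static_cast<int>(demo_counter_count), cache_hits, cache_misses);
  return 0;
}
```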
+
+} }  // namespace v8::internal
+
+#endif  // V8_V8_COUNTERS_H_
diff --git a/src/v8.cc b/src/v8.cc
new file mode 100644
index 0000000..f0115ec
--- /dev/null
+++ b/src/v8.cc
@@ -0,0 +1,190 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "debug.h"
+#include "serialize.h"
+#include "stub-cache.h"
+#include "oprofile-agent.h"
+
+#if V8_TARGET_ARCH_ARM
+#include "arm/simulator-arm.h"
+#endif
+
+namespace v8 {
+namespace internal {
+
+bool V8::is_running_ = false;
+bool V8::has_been_setup_ = false;
+bool V8::has_been_disposed_ = false;
+bool V8::has_fatal_error_ = false;
+
+bool V8::Initialize(Deserializer *des) {
+  bool create_heap_objects = des == NULL;
+  if (has_been_disposed_ || has_fatal_error_) return false;
+  if (IsRunning()) return true;
+
+  is_running_ = true;
+  has_been_setup_ = true;
+  has_fatal_error_ = false;
+  has_been_disposed_ = false;
+#ifdef DEBUG
+  // The initialization process does not handle memory exhaustion.
+  DisallowAllocationFailure disallow_allocation_failure;
+#endif
+
+  // Enable logging before setting up the heap
+  Logger::Setup();
+  if (des) des->GetLog();
+
+  // Setup the platform OS support.
+  OS::Setup();
+
+  // Initialize other runtime facilities
+#if !V8_HOST_ARCH_ARM && V8_TARGET_ARCH_ARM
+  ::assembler::arm::Simulator::Initialize();
+#endif
+
+  { // NOLINT
+    // Ensure that the thread has a valid stack guard.  The v8::Locker object
+    // will ensure this too, but we don't have to use lockers if we are only
+    // using one thread.
+    ExecutionAccess lock;
+    StackGuard::InitThread(lock);
+  }
+
+  // Setup the object heap
+  ASSERT(!Heap::HasBeenSetup());
+  if (!Heap::Setup(create_heap_objects)) {
+    SetFatalError();
+    return false;
+  }
+
+  Bootstrapper::Initialize(create_heap_objects);
+  Builtins::Setup(create_heap_objects);
+  Top::Initialize();
+
+  if (FLAG_preemption) {
+    v8::Locker locker;
+    v8::Locker::StartPreemption(100);
+  }
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  Debug::Setup(create_heap_objects);
+#endif
+  StubCache::Initialize(create_heap_objects);
+
+  // If we are deserializing, read the state into the now-empty heap.
+  if (des != NULL) {
+    des->Deserialize();
+    StubCache::Clear();
+  }
+
+  // Deserializing may put strange things in the root array's copy of the
+  // stack guard.
+  Heap::SetStackLimit(StackGuard::jslimit());
+
+  // Setup the CPU support. Must be done after heap setup and after
+  // any deserialization because we have to have the initial heap
+  // objects in place for creating the code object used for probing.
+  CPU::Setup();
+
+  OProfileAgent::Initialize();
+
+  return true;
+}
+
+
+void V8::SetFatalError() {
+  is_running_ = false;
+  has_fatal_error_ = true;
+}
+
+
+void V8::TearDown() {
+  if (!has_been_setup_ || has_been_disposed_) return;
+
+  OProfileAgent::TearDown();
+
+  if (FLAG_preemption) {
+    v8::Locker locker;
+    v8::Locker::StopPreemption();
+  }
+
+  Builtins::TearDown();
+  Bootstrapper::TearDown();
+
+  Top::TearDown();
+
+  Heap::TearDown();
+  Logger::TearDown();
+
+  is_running_ = false;
+  has_been_disposed_ = true;
+}
+
+
+uint32_t V8::Random() {
+  // Random number generator using George Marsaglia's MWC algorithm.
+  static uint32_t hi = 0;
+  static uint32_t lo = 0;
+
+  // Initialize the seeds using the system random(). If one of the seeds
+  // should ever become zero again, or if random() returns zero, we
+  // avoid getting stuck with zero bits in hi or lo by re-initializing
+  // them on demand.
+  if (hi == 0) hi = random();
+  if (lo == 0) lo = random();
+
+  // Mix the bits.
+  hi = 36969 * (hi & 0xFFFF) + (hi >> 16);
+  lo = 18273 * (lo & 0xFFFF) + (lo >> 16);
+  return (hi << 16) + (lo & 0xFFFF);
+}
+
+
+bool V8::IdleNotification(bool is_high_priority) {
+  // Returning true tells the caller that there is no need to call
+  // IdleNotification again.
+  if (!FLAG_use_idle_notification) return true;
+  // Ignore high priority instances of V8.
+  if (is_high_priority) return true;
+
+  // Tell the heap that it may want to adjust.
+  return Heap::IdleNotification();
+}
+
+
+Smi* V8::RandomPositiveSmi() {
+  uint32_t random = Random();
+  ASSERT(IsPowerOf2(Smi::kMaxValue + 1));
+  return Smi::FromInt(random & Smi::kMaxValue);
+}
+
+} }  // namespace v8::internal
diff --git a/src/v8.h b/src/v8.h
new file mode 100644
index 0000000..7786d66
--- /dev/null
+++ b/src/v8.h
@@ -0,0 +1,117 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Top include for all V8 .cc files.
+//
+
+#ifndef V8_V8_H_
+#define V8_V8_H_
+
+#if defined(GOOGLE3)
+// Google3 special flag handling.
+#if defined(DEBUG) && defined(NDEBUG)
+// If both are defined in Google3, then we are building an optimized v8 with
+// assertions enabled.
+#undef NDEBUG
+#elif !defined(DEBUG) && !defined(NDEBUG)
+// If neither is defined in Google3, then we are building a debug v8. Mark it
+// as such.
+#define DEBUG
+#endif
+#endif  // defined(GOOGLE3)
+
+// V8 only uses DEBUG, but included external files
+// may use NDEBUG - make sure they are consistent.
+#if defined(DEBUG) && defined(NDEBUG)
+#error both DEBUG and NDEBUG are set
+#endif
+
+// Basic includes
+#include "../include/v8.h"
+#include "globals.h"
+#include "checks.h"
+#include "allocation.h"
+#include "utils.h"
+#include "flags.h"
+
+// Objects & heap
+#include "objects.h"
+#include "spaces.h"
+#include "heap.h"
+#include "objects-inl.h"
+#include "spaces-inl.h"
+#include "heap-inl.h"
+#include "log-inl.h"
+#include "messages.h"
+
+namespace v8 {
+namespace internal {
+
+class V8 : public AllStatic {
+ public:
+  // Global actions.
+
+  // If Initialize is called with des == NULL, the initial state is
+  // created from scratch. If a non-null Deserializer is given, the
+  // initial state is created by reading the deserialized data into an
+  // empty heap.
+  static bool Initialize(Deserializer* des);
+  static void TearDown();
+  static bool IsRunning() { return is_running_; }
+  // To be dead you have to have lived
+  static bool IsDead() { return has_fatal_error_ || has_been_disposed_; }
+  static void SetFatalError();
+
+  // Report process out of memory. Implementation found in api.cc.
+  static void FatalProcessOutOfMemory(const char* location);
+
+  // Random number generation support. Not cryptographically safe.
+  static uint32_t Random();
+  static Smi* RandomPositiveSmi();
+
+  // Idle notification directly from the API.
+  static bool IdleNotification(bool is_high_priority);
+
+ private:
+  // True if engine is currently running
+  static bool is_running_;
+  // True if V8 has ever been run
+  static bool has_been_setup_;
+  // True if error has been signaled for current engine
+  // (reset to false if engine is restarted)
+  static bool has_fatal_error_;
+  // True if engine has been shut down
+  // (reset if engine is restarted)
+  static bool has_been_disposed_;
+};
+
+} }  // namespace v8::internal
+
+namespace i = v8::internal;
+
+#endif  // V8_V8_H_
diff --git a/src/v8natives.js b/src/v8natives.js
new file mode 100644
index 0000000..2fecee8
--- /dev/null
+++ b/src/v8natives.js
@@ -0,0 +1,582 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file relies on the fact that the following declarations have been made
+//
+// in runtime.js:
+// const $Object = global.Object;
+// const $Boolean = global.Boolean;
+// const $Number = global.Number;
+// const $Function = global.Function;
+// const $Array = global.Array;
+// const $NaN = 0/0;
+//
+// in math.js:
+// const $floor = MathFloor
+
+const $isNaN = GlobalIsNaN;
+const $isFinite = GlobalIsFinite;
+
+// ----------------------------------------------------------------------------
+
+
+// Helper function used to install functions on objects.
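+// The 'functions' argument is a flat array of alternating name/function
+// pairs.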
+function InstallFunctions(object, attributes, functions) {
+  if (functions.length >= 8) {
+    %OptimizeObjectForAddingMultipleProperties(object, functions.length >> 1);
+  }
+  for (var i = 0; i < functions.length; i += 2) {
+    var key = functions[i];
+    var f = functions[i + 1];
+    %FunctionSetName(f, key);
+    %SetProperty(object, key, f, attributes);
+  }
+  %TransformToFastProperties(object);
+}
+
+// Emulates JSC by installing functions on a hidden prototype that
+// lies above the current object/prototype.  This lets you override
+// functions on String.prototype etc. and then restore the old function
+// with delete.  See http://code.google.com/p/chromium/issues/detail?id=1717
+function InstallFunctionsOnHiddenPrototype(object, attributes, functions) {
+  var hidden_prototype = new $Object();
+  %SetHiddenPrototype(object, hidden_prototype);
+  InstallFunctions(hidden_prototype, attributes, functions);
+}
+
+
+// ----------------------------------------------------------------------------
+
+
+// ECMA-262 - 15.1.2.4
+function GlobalIsNaN(number) {
+  var n = ToNumber(number);
+  return NUMBER_IS_NAN(n);
+}
+
+
+// ECMA-262 - 15.1.2.5
+function GlobalIsFinite(number) {
+  return %NumberIsFinite(ToNumber(number));
+}
+
+
+// ECMA-262 - 15.1.2.2
+function GlobalParseInt(string, radix) {
+  if (radix === void 0) {
+    // Some people use parseInt instead of Math.floor.  This
+    // optimization makes parseInt on a Smi 12 times faster (60ns
+    // vs 800ns).  The following optimization makes parseInt on a
+    // non-Smi number 9 times faster (230ns vs 2070ns).  Together
+    // they make parseInt on a string 1.4% slower (274ns vs 270ns).
+    if (%_IsSmi(string)) return string;
+    if (IS_NUMBER(string) &&
+        ((string < -0.01 && -1e9 < string) ||
+            (0.01 < string && string < 1e9))) {
+      // Truncate number.
+      return string | 0;
+    }
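+    // Leave the radix unspecified (0) so the runtime can detect it from the
+    // string.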
+    radix = 0;
+  } else {
+    radix = TO_INT32(radix);
+    if (!(radix == 0 || (2 <= radix && radix <= 36)))
+      return $NaN;
+  }
+  return %StringParseInt(ToString(string), radix);
+}
+
+
+// ECMA-262 - 15.1.2.3
+function GlobalParseFloat(string) {
+  return %StringParseFloat(ToString(string));
+}
+
+
+function GlobalEval(x) {
+  if (!IS_STRING(x)) return x;
+
+  var global_receiver = %GlobalReceiver(global);
+  var this_is_global_receiver = (this === global_receiver);
+  var global_is_detached = (global === global_receiver);
+
+  if (!this_is_global_receiver || global_is_detached) {
+    throw new $EvalError('The "this" object passed to eval must ' +
+                         'be the global object from which eval originated');
+  }
+
+  var f = %CompileString(x, false);
+  if (!IS_FUNCTION(f)) return f;
+
+  return f.call(this);
+}
+
+
+// execScript for IE compatibility.
+function GlobalExecScript(expr, lang) {
+  // NOTE: We don't care about the character casing.
+  if (!lang || /javascript/i.test(lang)) {
+    var f = %CompileString(ToString(expr), false);
+    f.call(%GlobalReceiver(global));
+  }
+  return null;
+}
+
+
+// ----------------------------------------------------------------------------
+
+
+function SetupGlobal() {
+  // ECMA-262 - 15.1.1.1.
+  %SetProperty(global, "NaN", $NaN, DONT_ENUM | DONT_DELETE);
+
+  // ECMA-262 - 15.1.1.2.
+  %SetProperty(global, "Infinity", 1/0, DONT_ENUM | DONT_DELETE);
+
+  // ECMA-262 - 15.1.1.3.
+  %SetProperty(global, "undefined", void 0, DONT_ENUM | DONT_DELETE);
+
+  // Setup non-enumerable functions on the global object.
+  InstallFunctions(global, DONT_ENUM, $Array(
+    "isNaN", GlobalIsNaN,
+    "isFinite", GlobalIsFinite,
+    "parseInt", GlobalParseInt,
+    "parseFloat", GlobalParseFloat,
+    "eval", GlobalEval,
+    "execScript", GlobalExecScript
+  ));
+}
+
+SetupGlobal();
+
+
+// ----------------------------------------------------------------------------
+// Boolean (first part of definition)
+
+
+%SetCode($Boolean, function(x) {
+  if (%_IsConstructCall()) {
+    %_SetValueOf(this, ToBoolean(x));
+  } else {
+    return ToBoolean(x);
+  }
+});
+
+%FunctionSetPrototype($Boolean, new $Boolean(false));
+
+%SetProperty($Boolean.prototype, "constructor", $Boolean, DONT_ENUM);
+
+// ----------------------------------------------------------------------------
+// Object
+
+$Object.prototype.constructor = $Object;
+
+// ECMA-262 - 15.2.4.2
+function ObjectToString() {
+  var c = %_ClassOf(this);
+  // Hide Arguments from the outside.
+  if (c === 'Arguments') c = 'Object';
+  return "[object " + c + "]";
+}
+
+
+// ECMA-262 - 15.2.4.3
+function ObjectToLocaleString() {
+  return this.toString();
+}
+
+
+// ECMA-262 - 15.2.4.4
+function ObjectValueOf() {
+  return this;
+}
+
+
+// ECMA-262 - 15.2.4.5
+function ObjectHasOwnProperty(V) {
+  return %HasLocalProperty(ToObject(this), ToString(V));
+}
+
+
+// ECMA-262 - 15.2.4.6
+function ObjectIsPrototypeOf(V) {
+  if (!IS_OBJECT(V) && !IS_FUNCTION(V)) return false;
+  return %IsInPrototypeChain(this, V);
+}
+
+
+// ECMA-262 - 15.2.4.7
+function ObjectPropertyIsEnumerable(V) {
+  if (this == null) return false;
+  if (!IS_OBJECT(this) && !IS_FUNCTION(this)) return false;
+  return %IsPropertyEnumerable(this, ToString(V));
+}
+
+
+// Extensions for providing property getters and setters.
+function ObjectDefineGetter(name, fun) {
+  if (this == null) {
+    throw new $TypeError('Object.prototype.__defineGetter__: this is Null');
+  }
+  if (!IS_FUNCTION(fun)) {
+    throw new $TypeError('Object.prototype.__defineGetter__: Expecting function');
+  }
+  return %DefineAccessor(ToObject(this), ToString(name), GETTER, fun);
+}
+
+
+function ObjectLookupGetter(name) {
+  if (this == null) {
+    throw new $TypeError('Object.prototype.__lookupGetter__: this is Null');
+  }
+  return %LookupAccessor(ToObject(this), ToString(name), GETTER);
+}
+
+
+function ObjectDefineSetter(name, fun) {
+  if (this == null) {
+    throw new $TypeError('Object.prototype.__defineSetter__: this is Null');
+  }
+  if (!IS_FUNCTION(fun)) {
+    throw new $TypeError(
+        'Object.prototype.__defineSetter__: Expecting function');
+  }
+  return %DefineAccessor(ToObject(this), ToString(name), SETTER, fun);
+}
+
+
+function ObjectLookupSetter(name) {
+  if (this == null) {
+    throw new $TypeError('Object.prototype.__lookupSetter__: this is Null');
+  }
+  return %LookupAccessor(ToObject(this), ToString(name), SETTER);
+}
+
+
+function ObjectKeys(obj) {
+  if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj))
+    throw MakeTypeError('object_keys_non_object', [obj]);
+  return %LocalKeys(obj);
+}
+
+
+%SetCode($Object, function(x) {
+  if (%_IsConstructCall()) {
+    if (x == null) return this;
+    return ToObject(x);
+  } else {
+    if (x == null) return { };
+    return ToObject(x);
+  }
+});
+
+
+// ----------------------------------------------------------------------------
+
+
+function SetupObject() {
+  // Setup non-enumerable functions on the Object.prototype object.
+  InstallFunctions($Object.prototype, DONT_ENUM, $Array(
+    "toString", ObjectToString,
+    "toLocaleString", ObjectToLocaleString,
+    "valueOf", ObjectValueOf,
+    "hasOwnProperty", ObjectHasOwnProperty,
+    "isPrototypeOf", ObjectIsPrototypeOf,
+    "propertyIsEnumerable", ObjectPropertyIsEnumerable,
+    "__defineGetter__", ObjectDefineGetter,
+    "__lookupGetter__", ObjectLookupGetter,
+    "__defineSetter__", ObjectDefineSetter,
+    "__lookupSetter__", ObjectLookupSetter
+  ));
+  InstallFunctions($Object, DONT_ENUM, $Array(
+    "keys", ObjectKeys
+  ));
+}
+
+SetupObject();
+
+
+// ----------------------------------------------------------------------------
+// Boolean
+
+function BooleanToString() {
+  // NOTE: Both Boolean objects and values can enter here as
+  // 'this'. This is not as dictated by ECMA-262.
+  if (!IS_BOOLEAN(this) && !IS_BOOLEAN_WRAPPER(this))
+    throw new $TypeError('Boolean.prototype.toString is not generic');
+  return ToString(%_ValueOf(this));
+}
+
+
+function BooleanValueOf() {
+  // NOTE: Both Boolean objects and values can enter here as
+  // 'this'. This is not as dictated by ECMA-262.
+  if (!IS_BOOLEAN(this) && !IS_BOOLEAN_WRAPPER(this))
+    throw new $TypeError('Boolean.prototype.valueOf is not generic');
+  return %_ValueOf(this);
+}
+
+
+function BooleanToJSON(key) {
+  return CheckJSONPrimitive(this.valueOf());
+}
+
+
+// ----------------------------------------------------------------------------
+
+
+function SetupBoolean() {
+  InstallFunctions($Boolean.prototype, DONT_ENUM, $Array(
+    "toString", BooleanToString,
+    "valueOf", BooleanValueOf,
+    "toJSON", BooleanToJSON
+  ));
+}
+
+SetupBoolean();
+
+// ----------------------------------------------------------------------------
+// Number
+
+// Set the Number function and constructor.
+%SetCode($Number, function(x) {
+  var value = %_ArgumentsLength() == 0 ? 0 : ToNumber(x);
+  if (%_IsConstructCall()) {
+    %_SetValueOf(this, value);
+  } else {
+    return value;
+  }
+});
+
+%FunctionSetPrototype($Number, new $Number(0));
+
+// ECMA-262 section 15.7.4.2.
+function NumberToString(radix) {
+  // NOTE: Both Number objects and values can enter here as
+  // 'this'. This is not as dictated by ECMA-262.
+  var number = this;
+  if (!IS_NUMBER(this)) {
+    if (!IS_NUMBER_WRAPPER(this))
+      throw new $TypeError('Number.prototype.toString is not generic');
+    // Get the value of this number in case it's an object.
+    number = %_ValueOf(this);
+  }
+  // Fast case: Convert number in radix 10.
+  if (IS_UNDEFINED(radix) || radix === 10) {
+    return ToString(number);
+  }
+
+  // Convert the radix to an integer and check the range.
+  radix = TO_INTEGER(radix);
+  if (radix < 2 || radix > 36) {
+    throw new $RangeError('toString() radix argument must be between 2 and 36');
+  }
+  // Convert the number to a string in the given radix.
+  return %NumberToRadixString(number, radix);
+}
+
+
+// ECMA-262 section 15.7.4.3
+function NumberToLocaleString() {
+  return this.toString();
+}
+
+
+// ECMA-262 section 15.7.4.4
+function NumberValueOf() {
+  // NOTE: Both Number objects and values can enter here as
+  // 'this'. This is not as dictated by ECMA-262.
+  if (!IS_NUMBER(this) && !IS_NUMBER_WRAPPER(this))
+    throw new $TypeError('Number.prototype.valueOf is not generic');
+  return %_ValueOf(this);
+}
+
+
+// ECMA-262 section 15.7.4.5
+function NumberToFixed(fractionDigits) {
+  var f = TO_INTEGER(fractionDigits);
+  if (f < 0 || f > 20) {
+    throw new $RangeError("toFixed() digits argument must be between 0 and 20");
+  }
+  var x = ToNumber(this);
+  return %NumberToFixed(x, f);
+}
+
+
+// ECMA-262 section 15.7.4.6
+function NumberToExponential(fractionDigits) {
+  var f = -1;
+  if (!IS_UNDEFINED(fractionDigits)) {
+    f = TO_INTEGER(fractionDigits);
+    if (f < 0 || f > 20) {
+      throw new $RangeError("toExponential() argument must be between 0 and 20");
+    }
+  }
+  var x = ToNumber(this);
+  return %NumberToExponential(x, f);
+}
+
+
+// ECMA-262 section 15.7.4.7
+function NumberToPrecision(precision) {
+  if (IS_UNDEFINED(precision)) return ToString(%_ValueOf(this));
+  var p = TO_INTEGER(precision);
+  if (p < 1 || p > 21) {
+    throw new $RangeError("toPrecision() argument must be between 1 and 21");
+  }
+  var x = ToNumber(this);
+  return %NumberToPrecision(x, p);
+}
+
+
+function CheckJSONPrimitive(val) {
+  if (!IsPrimitive(val))
+    throw MakeTypeError('result_not_primitive', ['toJSON', val]);
+  return val;
+}
+
+
+function NumberToJSON(key) {
+  return CheckJSONPrimitive(this.valueOf());
+}
+
+
+// ----------------------------------------------------------------------------
+
+function SetupNumber() {
+  %OptimizeObjectForAddingMultipleProperties($Number.prototype, 8);
+  // Setup the constructor property on the Number prototype object.
+  %SetProperty($Number.prototype, "constructor", $Number, DONT_ENUM);
+
+  %OptimizeObjectForAddingMultipleProperties($Number, 5);
+  // ECMA-262 section 15.7.3.1.
+  %SetProperty($Number,
+               "MAX_VALUE",
+               1.7976931348623157e+308,
+               DONT_ENUM | DONT_DELETE | READ_ONLY);
+
+  // ECMA-262 section 15.7.3.2.
+  %SetProperty($Number, "MIN_VALUE", 5e-324, DONT_ENUM | DONT_DELETE | READ_ONLY);
+
+  // ECMA-262 section 15.7.3.3.
+  %SetProperty($Number, "NaN", $NaN, DONT_ENUM | DONT_DELETE | READ_ONLY);
+
+  // ECMA-262 section 15.7.3.4.
+  %SetProperty($Number,
+               "NEGATIVE_INFINITY",
+               -1/0,
+               DONT_ENUM | DONT_DELETE | READ_ONLY);
+
+  // ECMA-262 section 15.7.3.5.
+  %SetProperty($Number,
+               "POSITIVE_INFINITY",
+               1/0,
+               DONT_ENUM | DONT_DELETE | READ_ONLY);
+  %TransformToFastProperties($Number);
+
+  // Setup non-enumerable functions on the Number prototype object.
+  InstallFunctions($Number.prototype, DONT_ENUM, $Array(
+    "toString", NumberToString,
+    "toLocaleString", NumberToLocaleString,
+    "valueOf", NumberValueOf,
+    "toFixed", NumberToFixed,
+    "toExponential", NumberToExponential,
+    "toPrecision", NumberToPrecision,
+    "toJSON", NumberToJSON
+  ));
+}
+
+SetupNumber();
+
+
+
+// ----------------------------------------------------------------------------
+// Function
+
+$Function.prototype.constructor = $Function;
+
+function FunctionSourceString(func) {
+  if (!IS_FUNCTION(func)) {
+    throw new $TypeError('Function.prototype.toString is not generic');
+  }
+
+  var source = %FunctionGetSourceCode(func);
+  if (!IS_STRING(source) || %FunctionIsBuiltin(func)) {
+    var name = %FunctionGetName(func);
+    if (name) {
+      // Mimic what KJS does.
+      return 'function ' + name + '() { [native code] }';
+    } else {
+      return 'function () { [native code] }';
+    }
+  }
+
+  var name = %FunctionGetName(func);
+  return 'function ' + name + source;
+}
+
+
+function FunctionToString() {
+  return FunctionSourceString(this);
+}
+
+
+function NewFunction(arg1) {  // length == 1
+  var n = %_ArgumentsLength();
+  var p = '';
+  if (n > 1) {
+    p = new $Array(n - 1);
+    // Explicitly convert all parameters to strings.
+    // Array.prototype.join replaces null with empty strings which is
+    // not appropriate.
+    for (var i = 0; i < n - 1; i++) p[i] = ToString(%_Arguments(i));
+    p = p.join(',');
+    // If the formal parameters string includes ')' - an illegal
+    // character - it may make the combined function expression
+    // compile. We avoid this problem by checking for it early on.
+    if (p.indexOf(')') != -1) throw MakeSyntaxError('unable_to_parse', []);
+  }
+  var body = (n > 0) ? ToString(%_Arguments(n - 1)) : '';
+  var source = '(function(' + p + ') {\n' + body + '\n})';
+
+  // The call to SetNewFunctionAttributes will ensure the prototype
+  // property of the resulting function is enumerable (ECMA262, 15.3.5.2).
+  var f = %CompileString(source, false)();
+  %FunctionSetName(f, "anonymous");
+  return %SetNewFunctionAttributes(f);
+}
+
+%SetCode($Function, NewFunction);
+
+// ----------------------------------------------------------------------------
+
+function SetupFunction() {
+  InstallFunctions($Function.prototype, DONT_ENUM, $Array(
+    "toString", FunctionToString
+  ));
+}
+
+SetupFunction();
diff --git a/src/v8threads.cc b/src/v8threads.cc
new file mode 100644
index 0000000..80a7cd9
--- /dev/null
+++ b/src/v8threads.cc
@@ -0,0 +1,450 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "api.h"
+#include "bootstrapper.h"
+#include "debug.h"
+#include "execution.h"
+#include "v8threads.h"
+#include "regexp-stack.h"
+
+namespace v8 {
+
+static internal::Thread::LocalStorageKey thread_state_key =
+    internal::Thread::CreateThreadLocalKey();
+static internal::Thread::LocalStorageKey thread_id_key =
+    internal::Thread::CreateThreadLocalKey();
+
+
+// Track whether this V8 instance has ever called v8::Locker. This allows the
+// API code to verify that the lock is always held when V8 is being entered.
+bool Locker::active_ = false;
+
+
+// Constructor for the Locker object.  Once the Locker is constructed the
+// current thread will be guaranteed to have the big V8 lock.
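+//
+// Illustrative embedder usage (a sketch, not part of this file's contract):
+//
+//   {
+//     v8::Locker locker;        // blocks until this thread holds the lock
+//     // ... call into V8 ...
+//     {
+//       v8::Unlocker unlocker;  // temporarily drop the lock around work
+//       // ... blocking work that does not touch V8 ...
+//     }                         // lock re-acquired here
+//   }                           // lock released when 'locker' goes away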
+Locker::Locker() : has_lock_(false), top_level_(true) {
+  // Record that the Locker has been used at least once.
+  active_ = true;
+  // Get the big lock if necessary.
+  if (!internal::ThreadManager::IsLockedByCurrentThread()) {
+    internal::ThreadManager::Lock();
+    has_lock_ = true;
+    // Make sure that V8 is initialized.  Archiving of threads interferes
+    // with deserialization by adding additional root pointers, so we must
+    // initialize here, before anyone can call ~Locker() or Unlocker().
+    if (!internal::V8::IsRunning()) {
+      V8::Initialize();
+    }
+    // This may be a locker within an unlocker in which case we have to
+    // get the saved state for this thread and restore it.
+    if (internal::ThreadManager::RestoreThread()) {
+      top_level_ = false;
+    } else {
+      internal::ExecutionAccess access;
+      internal::StackGuard::ClearThread(access);
+      internal::StackGuard::InitThread(access);
+    }
+  }
+  ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
+
+  // Make sure this thread is assigned a thread id.
+  internal::ThreadManager::AssignId();
+}
+
+
+bool Locker::IsLocked() {
+  return internal::ThreadManager::IsLockedByCurrentThread();
+}
+
+
+Locker::~Locker() {
+  ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
+  if (has_lock_) {
+    if (top_level_) {
+      internal::ThreadManager::FreeThreadResources();
+    } else {
+      internal::ThreadManager::ArchiveThread();
+    }
+    internal::ThreadManager::Unlock();
+  }
+}
+
+
+Unlocker::Unlocker() {
+  ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
+  internal::ThreadManager::ArchiveThread();
+  internal::ThreadManager::Unlock();
+}
+
+
+Unlocker::~Unlocker() {
+  ASSERT(!internal::ThreadManager::IsLockedByCurrentThread());
+  internal::ThreadManager::Lock();
+  internal::ThreadManager::RestoreThread();
+}
+
+
+void Locker::StartPreemption(int every_n_ms) {
+  v8::internal::ContextSwitcher::StartPreemption(every_n_ms);
+}
+
+
+void Locker::StopPreemption() {
+  v8::internal::ContextSwitcher::StopPreemption();
+}
+
+
+namespace internal {
+
+
+bool ThreadManager::RestoreThread() {
+  // First check whether the current thread has been 'lazily archived', i.e.
+  // not archived at all.  If that is the case we put the state storage we
+  // had prepared back in the free list, since we didn't need it after all.
+  if (lazily_archived_thread_.IsSelf()) {
+    lazily_archived_thread_.Initialize(ThreadHandle::INVALID);
+    ASSERT(Thread::GetThreadLocal(thread_state_key) ==
+           lazily_archived_thread_state_);
+    lazily_archived_thread_state_->set_id(kInvalidId);
+    lazily_archived_thread_state_->LinkInto(ThreadState::FREE_LIST);
+    lazily_archived_thread_state_ = NULL;
+    Thread::SetThreadLocal(thread_state_key, NULL);
+    return true;
+  }
+
+  // Make sure that the preemption thread cannot modify the thread state while
+  // it is being archived or restored.
+  ExecutionAccess access;
+
+  // If there is another thread that was lazily archived then we have to really
+  // archive it now.
+  if (lazily_archived_thread_.IsValid()) {
+    EagerlyArchiveThread();
+  }
+  ThreadState* state =
+      reinterpret_cast<ThreadState*>(Thread::GetThreadLocal(thread_state_key));
+  if (state == NULL) {
+    // This is a new thread.
+    StackGuard::InitThread(access);
+    return false;
+  }
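+  // Restore the per-thread components in the same order they were archived
+  // in EagerlyArchiveThread().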
+  char* from = state->data();
+  from = HandleScopeImplementer::RestoreThread(from);
+  from = Top::RestoreThread(from);
+  from = Relocatable::RestoreState(from);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  from = Debug::RestoreDebug(from);
+#endif
+  from = StackGuard::RestoreStackGuard(from);
+  from = RegExpStack::RestoreStack(from);
+  from = Bootstrapper::RestoreState(from);
+  Thread::SetThreadLocal(thread_state_key, NULL);
+  if (state->terminate_on_restore()) {
+    StackGuard::TerminateExecution();
+    state->set_terminate_on_restore(false);
+  }
+  state->set_id(kInvalidId);
+  state->Unlink();
+  state->LinkInto(ThreadState::FREE_LIST);
+  return true;
+}
+
+
+void ThreadManager::Lock() {
+  mutex_->Lock();
+  mutex_owner_.Initialize(ThreadHandle::SELF);
+  ASSERT(IsLockedByCurrentThread());
+}
+
+
+void ThreadManager::Unlock() {
+  mutex_owner_.Initialize(ThreadHandle::INVALID);
+  mutex_->Unlock();
+}
+
+
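+// Total number of bytes needed to archive one thread. The sum must cover
+// every component copied by EagerlyArchiveThread().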
+static int ArchiveSpacePerThread() {
+  return HandleScopeImplementer::ArchiveSpacePerThread() +
+                            Top::ArchiveSpacePerThread() +
+#ifdef ENABLE_DEBUGGER_SUPPORT
+                          Debug::ArchiveSpacePerThread() +
+#endif
+                     StackGuard::ArchiveSpacePerThread() +
+                    RegExpStack::ArchiveSpacePerThread() +
+                   Bootstrapper::ArchiveSpacePerThread() +
+                    Relocatable::ArchiveSpacePerThread();
+}
+
+
+ThreadState* ThreadState::free_anchor_ = new ThreadState();
+ThreadState* ThreadState::in_use_anchor_ = new ThreadState();
+
+
+ThreadState::ThreadState() : id_(ThreadManager::kInvalidId),
+                             terminate_on_restore_(false),
+                             next_(this), previous_(this) {
+}
+
+
+void ThreadState::AllocateSpace() {
+  data_ = NewArray<char>(ArchiveSpacePerThread());
+}
+
+
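+// Remove this state from whichever circular list it is currently linked
+// into. The anchor objects keep both lists non-empty, so no special cases
+// are needed.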
+void ThreadState::Unlink() {
+  next_->previous_ = previous_;
+  previous_->next_ = next_;
+}
+
+
+void ThreadState::LinkInto(List list) {
+  ThreadState* flying_anchor =
+      list == FREE_LIST ? free_anchor_ : in_use_anchor_;
+  next_ = flying_anchor->next_;
+  previous_ = flying_anchor;
+  flying_anchor->next_ = this;
+  next_->previous_ = this;
+}
+
+
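+// Return a state from the free list, or allocate a fresh one (including its
+// archive buffer) if only the anchor is left. The caller is responsible for
+// unlinking the returned state.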
+ThreadState* ThreadState::GetFree() {
+  ThreadState* gotten = free_anchor_->next_;
+  if (gotten == free_anchor_) {
+    ThreadState* new_thread_state = new ThreadState();
+    new_thread_state->AllocateSpace();
+    return new_thread_state;
+  }
+  return gotten;
+}
+
+
+// Gets the first in the list of archived threads.
+ThreadState* ThreadState::FirstInUse() {
+  return in_use_anchor_->Next();
+}
+
+
+ThreadState* ThreadState::Next() {
+  if (next_ == in_use_anchor_) return NULL;
+  return next_;
+}
+
+
+// Thread ids must start with 1, because in TLS having thread id 0 can't
+// be distinguished from not having a thread id at all (since NULL is
+// defined as 0).
+int ThreadManager::last_id_ = 0;
+Mutex* ThreadManager::mutex_ = OS::CreateMutex();
+ThreadHandle ThreadManager::mutex_owner_(ThreadHandle::INVALID);
+ThreadHandle ThreadManager::lazily_archived_thread_(ThreadHandle::INVALID);
+ThreadState* ThreadManager::lazily_archived_thread_state_ = NULL;
+
+
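+// Archiving is lazy: this only reserves a ThreadState and marks the current
+// thread as lazily archived. The per-thread data is copied later by
+// EagerlyArchiveThread(), and the copy is skipped entirely if the same
+// thread re-enters V8 first (see RestoreThread()).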
+void ThreadManager::ArchiveThread() {
+  ASSERT(!lazily_archived_thread_.IsValid());
+  ASSERT(!IsArchived());
+  ThreadState* state = ThreadState::GetFree();
+  state->Unlink();
+  Thread::SetThreadLocal(thread_state_key, reinterpret_cast<void*>(state));
+  lazily_archived_thread_.Initialize(ThreadHandle::SELF);
+  lazily_archived_thread_state_ = state;
+  ASSERT(state->id() == kInvalidId);
+  state->set_id(CurrentId());
+  ASSERT(state->id() != kInvalidId);
+}
+
+
+void ThreadManager::EagerlyArchiveThread() {
+  ThreadState* state = lazily_archived_thread_state_;
+  state->LinkInto(ThreadState::IN_USE_LIST);
+  char* to = state->data();
+  // Ensure that data containing GC roots are archived first, and handle them
+  // in ThreadManager::Iterate(ObjectVisitor*).
+  to = HandleScopeImplementer::ArchiveThread(to);
+  to = Top::ArchiveThread(to);
+  to = Relocatable::ArchiveState(to);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  to = Debug::ArchiveDebug(to);
+#endif
+  to = StackGuard::ArchiveStackGuard(to);
+  to = RegExpStack::ArchiveStack(to);
+  to = Bootstrapper::ArchiveState(to);
+  lazily_archived_thread_.Initialize(ThreadHandle::INVALID);
+  lazily_archived_thread_state_ = NULL;
+}
+
+
+void ThreadManager::FreeThreadResources() {
+  HandleScopeImplementer::FreeThreadResources();
+  Top::FreeThreadResources();
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  Debug::FreeThreadResources();
+#endif
+  StackGuard::FreeThreadResources();
+  RegExpStack::FreeThreadResources();
+  Bootstrapper::FreeThreadResources();
+}
+
+
+bool ThreadManager::IsArchived() {
+  return Thread::HasThreadLocal(thread_state_key);
+}
+
+
+void ThreadManager::Iterate(ObjectVisitor* v) {
+  // Expecting no threads during serialization/deserialization
+  for (ThreadState* state = ThreadState::FirstInUse();
+       state != NULL;
+       state = state->Next()) {
+    char* data = state->data();
+    data = HandleScopeImplementer::Iterate(v, data);
+    data = Top::Iterate(v, data);
+    data = Relocatable::Iterate(v, data);
+  }
+}
+
+
+void ThreadManager::MarkCompactPrologue(bool is_compacting) {
+  for (ThreadState* state = ThreadState::FirstInUse();
+       state != NULL;
+       state = state->Next()) {
+    char* data = state->data();
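+    // Skip the archived HandleScopeImplementer data; Top's data follows it.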
+    data += HandleScopeImplementer::ArchiveSpacePerThread();
+    Top::MarkCompactPrologue(is_compacting, data);
+  }
+}
+
+
+void ThreadManager::MarkCompactEpilogue(bool is_compacting) {
+  for (ThreadState* state = ThreadState::FirstInUse();
+       state != NULL;
+       state = state->Next()) {
+    char* data = state->data();
+    data += HandleScopeImplementer::ArchiveSpacePerThread();
+    Top::MarkCompactEpilogue(is_compacting, data);
+  }
+}
+
+
+int ThreadManager::CurrentId() {
+  return Thread::GetThreadLocalInt(thread_id_key);
+}
+
+
+void ThreadManager::AssignId() {
+  if (!HasId()) {
+    ASSERT(Locker::IsLocked());
+    int thread_id = ++last_id_;
+    ASSERT(thread_id > 0);  // see the comment near last_id_ definition.
+    Thread::SetThreadLocalInt(thread_id_key, thread_id);
+    Top::set_thread_id(thread_id);
+  }
+}
+
+
+bool ThreadManager::HasId() {
+  return Thread::HasThreadLocal(thread_id_key);
+}
+
+
+void ThreadManager::TerminateExecution(int thread_id) {
+  for (ThreadState* state = ThreadState::FirstInUse();
+       state != NULL;
+       state = state->Next()) {
+    if (thread_id == state->id()) {
+      state->set_terminate_on_restore(true);
+    }
+  }
+}
+
+
+// This is the ContextSwitcher singleton. There is at most a single thread
+// running which delivers preemption events to V8 threads.
+ContextSwitcher* ContextSwitcher::singleton_ = NULL;
+
+
+ContextSwitcher::ContextSwitcher(int every_n_ms)
+  : keep_going_(true),
+    sleep_ms_(every_n_ms) {
+}
+
+
+// Set the scheduling interval of V8 threads. This function starts the
+// ContextSwitcher thread if needed.
+void ContextSwitcher::StartPreemption(int every_n_ms) {
+  ASSERT(Locker::IsLocked());
+  if (singleton_ == NULL) {
+    // If the ContextSwitcher thread is not running at the moment start it now.
+    singleton_ = new ContextSwitcher(every_n_ms);
+    singleton_->Start();
+  } else {
+    // ContextSwitcher thread is already running, so we just change the
+    // scheduling interval.
+    singleton_->sleep_ms_ = every_n_ms;
+  }
+}
+
+
+// Disable preemption of V8 threads. If multiple threads want to use V8 they
+// must cooperatively schedule amongst them from this point on.
+void ContextSwitcher::StopPreemption() {
+  ASSERT(Locker::IsLocked());
+  if (singleton_ != NULL) {
+    // The ContextSwitcher thread is running. We need to stop it and release
+    // its resources.
+    singleton_->keep_going_ = false;
+    singleton_->Join();  // Wait for the ContextSwitcher thread to exit.
+    // Thread has exited, now we can delete it.
+    delete(singleton_);
+    singleton_ = NULL;
+  }
+}
+
+
+// Main loop of the ContextSwitcher thread: Preempt the currently running V8
+// thread at regular intervals.
+void ContextSwitcher::Run() {
+  while (keep_going_) {
+    OS::Sleep(sleep_ms_);
+    StackGuard::Preempt();
+  }
+}
+
+
+// Acknowledge the preemption by the receiving thread.
+void ContextSwitcher::PreemptionReceived() {
+  ASSERT(Locker::IsLocked());
+  // There is currently no accounting being done for this. But could be in the
+  // future, which is why we leave this in.
+}
+
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/v8threads.h b/src/v8threads.h
new file mode 100644
index 0000000..0684053
--- /dev/null
+++ b/src/v8threads.h
@@ -0,0 +1,144 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_V8THREADS_H_
+#define V8_V8THREADS_H_
+
+namespace v8 {
+namespace internal {
+
+
+class ThreadState {
+ public:
+  // Iterate over in-use states.
+  static ThreadState* FirstInUse();
+  // Returns NULL after the last one.
+  ThreadState* Next();
+
+  enum List {FREE_LIST, IN_USE_LIST};
+
+  void LinkInto(List list);
+  void Unlink();
+
+  static ThreadState* GetFree();
+
+  // Id of thread.
+  void set_id(int id) { id_ = id; }
+  int id() { return id_; }
+
+  // Should the thread be terminated when it is restored?
+  bool terminate_on_restore() { return terminate_on_restore_; }
+  void set_terminate_on_restore(bool terminate_on_restore) {
+    terminate_on_restore_ = terminate_on_restore;
+  }
+
+  // Get data area for archiving a thread.
+  char* data() { return data_; }
+ private:
+  ThreadState();
+
+  void AllocateSpace();
+
+  int id_;
+  bool terminate_on_restore_;
+  char* data_;
+  ThreadState* next_;
+  ThreadState* previous_;
+
+  // Each of the following two lists always contains at least one object:
+  // the first object is a flying anchor that is only there to simplify
+  // linking and unlinking.
+  // Head of linked list of free states.
+  static ThreadState* free_anchor_;
+  // Head of linked list of states in use.
+  static ThreadState* in_use_anchor_;
+};
+
+
+class ThreadManager : public AllStatic {
+ public:
+  static void Lock();
+  static void Unlock();
+
+  static void ArchiveThread();
+  static bool RestoreThread();
+  static void FreeThreadResources();
+  static bool IsArchived();
+
+  static void Iterate(ObjectVisitor* v);
+  static void MarkCompactPrologue(bool is_compacting);
+  static void MarkCompactEpilogue(bool is_compacting);
+  static bool IsLockedByCurrentThread() { return mutex_owner_.IsSelf(); }
+
+  static int CurrentId();
+  static void AssignId();
+  static bool HasId();
+
+  static void TerminateExecution(int thread_id);
+
+  static const int kInvalidId = -1;
+ private:
+  static void EagerlyArchiveThread();
+
+  static int last_id_;  // V8 threads are identified through an integer.
+  static Mutex* mutex_;
+  static ThreadHandle mutex_owner_;
+  static ThreadHandle lazily_archived_thread_;
+  static ThreadState* lazily_archived_thread_state_;
+};
+
+
+// The ContextSwitcher thread is used to schedule regular preemptions to
+// multiple running V8 threads. Generally it is necessary to call
+// StartPreemption if there is more than one thread running. If not, a single
+// JavaScript thread can take full control of V8 and not allow other threads
+// to run.
+class ContextSwitcher: public Thread {
+ public:
+  // Set the preemption interval for the ContextSwitcher thread.
+  static void StartPreemption(int every_n_ms);
+
+  // Stop sending preemption requests to threads.
+  static void StopPreemption();
+
+  // Preempted thread needs to call back to the ContextSwitcher to acknowledge
+  // the handling of a preemption request.
+  static void PreemptionReceived();
+
+ private:
+  explicit ContextSwitcher(int every_n_ms);
+
+  void Run();
+
+  bool keep_going_;
+  int sleep_ms_;
+
+  static ContextSwitcher* singleton_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_V8THREADS_H_
diff --git a/src/variables.cc b/src/variables.cc
new file mode 100644
index 0000000..d9a78a5
--- /dev/null
+++ b/src/variables.cc
@@ -0,0 +1,163 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "ast.h"
+#include "scopes.h"
+#include "variables.h"
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Implementation of UseCount.
+
+UseCount::UseCount()
+  : nreads_(0),
+    nwrites_(0) {
+}
+
+
+void UseCount::RecordRead(int weight) {
+  ASSERT(weight > 0);
+  nreads_ += weight;
+  // We must have a positive nreads_ here. Handle
+  // any kind of overflow by setting nreads_ to
+  // some large-ish value.
+  if (nreads_ <= 0) nreads_ = 1000000;
+  ASSERT(is_read() && is_used());
+}
+
+
+void UseCount::RecordWrite(int weight) {
+  ASSERT(weight > 0);
+  nwrites_ += weight;
+  // We must have a positive nwrites_ here. Handle
+  // any kind of overflow by setting nwrites_ to
+  // some large-ish value.
+  if (nwrites_ <= 0) nwrites_ = 1000000;
+  ASSERT(is_written() && is_used());
+}
+
+
+void UseCount::RecordAccess(int weight) {
+  RecordRead(weight);
+  RecordWrite(weight);
+}
+
+
+void UseCount::RecordUses(UseCount* uses) {
+  if (uses->nreads() > 0) RecordRead(uses->nreads());
+  if (uses->nwrites() > 0) RecordWrite(uses->nwrites());
+}
+
+
+#ifdef DEBUG
+void UseCount::Print() {
+  // PrintF("r = %d, w = %d", nreads_, nwrites_);
+  PrintF("%du = %dr + %dw", nuses(), nreads(), nwrites());
+}
+#endif
+
+
+// ----------------------------------------------------------------------------
+// Implementation of SmiAnalysis.
+
+
+const char* SmiAnalysis::Type2String(SmiAnalysis* type) {
+  switch (type->kind_) {
+    case UNKNOWN:
+      return "UNKNOWN";
+    case LIKELY_SMI:
+      return "LIKELY_SMI";
+    default:
+      UNREACHABLE();
+  }
+  return "UNREACHABLE";
+}
+
+
+// ----------------------------------------------------------------------------
+// Implementation of Variable.
+
+
+const char* Variable::Mode2String(Mode mode) {
+  switch (mode) {
+    case VAR: return "VAR";
+    case CONST: return "CONST";
+    case DYNAMIC: return "DYNAMIC";
+    case DYNAMIC_GLOBAL: return "DYNAMIC_GLOBAL";
+    case DYNAMIC_LOCAL: return "DYNAMIC_LOCAL";
+    case INTERNAL: return "INTERNAL";
+    case TEMPORARY: return "TEMPORARY";
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+Property* Variable::AsProperty() {
+  return rewrite_ == NULL ? NULL : rewrite_->AsProperty();
+}
+
+
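+// A Variable still acts as a variable if it has not been rewritten, or if it
+// has been rewritten to a simple Slot; any other rewrite (e.g. a Property)
+// means it no longer does.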
+Variable* Variable::AsVariable() {
+  return rewrite_ == NULL || rewrite_->AsSlot() != NULL ? this : NULL;
+}
+
+
+Slot* Variable::slot() const {
+  return rewrite_ != NULL ? rewrite_->AsSlot() : NULL;
+}
+
+
+Variable::Variable(Scope* scope,
+                   Handle<String> name,
+                   Mode mode,
+                   bool is_valid_LHS,
+                   Kind kind)
+  : scope_(scope),
+    name_(name),
+    mode_(mode),
+    is_valid_LHS_(is_valid_LHS),
+    kind_(kind),
+    local_if_not_shadowed_(NULL),
+    is_accessed_from_inner_scope_(false),
+    rewrite_(NULL) {
+  // Names must be canonicalized for fast equality checks.
+  ASSERT(name->IsSymbol());
+}
+
+
+bool Variable::is_global() const {
+  // Temporaries are never global, they must always be allocated in the
+  // activation frame.
+  return mode_ != TEMPORARY && scope_ != NULL && scope_->is_global_scope();
+}
+
+} }  // namespace v8::internal
diff --git a/src/variables.h b/src/variables.h
new file mode 100644
index 0000000..ca78b5f
--- /dev/null
+++ b/src/variables.h
@@ -0,0 +1,235 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_VARIABLES_H_
+#define V8_VARIABLES_H_
+
+#include "zone.h"
+
+namespace v8 {
+namespace internal {
+
+class UseCount BASE_EMBEDDED {
+ public:
+  UseCount();
+
+  // Inform the node of a "use". The weight can be used to indicate
+  // heavier use, for instance if the variable is accessed inside a loop.
+  void RecordRead(int weight);
+  void RecordWrite(int weight);
+  void RecordAccess(int weight);  // records a read & write
+  void RecordUses(UseCount* uses);
+
+  int nreads() const  { return nreads_; }
+  int nwrites() const  { return nwrites_; }
+  int nuses() const  { return nreads_ + nwrites_; }
+
+  bool is_read() const  { return nreads() > 0; }
+  bool is_written() const  { return nwrites() > 0; }
+  bool is_used() const  { return nuses() > 0; }
+
+#ifdef DEBUG
+  void Print();
+#endif
+
+ private:
+  int nreads_;
+  int nwrites_;
+};
+
+
+// Variables and AST expression nodes can track their "type" to enable
+// optimizations and removal of redundant checks when generating code.
+
+class SmiAnalysis {
+ public:
+  enum Kind {
+    UNKNOWN,
+    LIKELY_SMI
+  };
+
+  SmiAnalysis() : kind_(UNKNOWN) {}
+
+  bool Is(Kind kind) const { return kind_ == kind; }
+
+  bool IsKnown() const { return !Is(UNKNOWN); }
+  bool IsUnknown() const { return Is(UNKNOWN); }
+  bool IsLikelySmi() const { return Is(LIKELY_SMI); }
+
+  void CopyFrom(SmiAnalysis* other) {
+    kind_ = other->kind_;
+  }
+
+  static const char* Type2String(SmiAnalysis* type);
+
+  // LIKELY_SMI accessors
+  void SetAsLikelySmi() {
+    kind_ = LIKELY_SMI;
+  }
+
+  void SetAsLikelySmiIfUnknown() {
+    if (IsUnknown()) {
+      SetAsLikelySmi();
+    }
+  }
+
+ private:
+  Kind kind_;
+
+  DISALLOW_COPY_AND_ASSIGN(SmiAnalysis);
+};
+
+
+// The AST refers to variables via VariableProxies - placeholders for the actual
+// variables. Variables themselves are never directly referred to from the AST,
+// they are maintained by scopes, and referred to from VariableProxies and Slots
+// after binding and variable allocation.
+
+class Variable: public ZoneObject {
+ public:
+  enum Mode {
+    // User declared variables:
+    VAR,       // declared via 'var', and 'function' declarations
+
+    CONST,     // declared via 'const' declarations
+
+    // Variables introduced by the compiler:
+    DYNAMIC,         // always require dynamic lookup (we don't know
+                     // the declaration)
+
+    DYNAMIC_GLOBAL,  // requires dynamic lookup, but we know that the
+                     // variable is global unless it has been shadowed
+                     // by an eval-introduced variable
+
+    DYNAMIC_LOCAL,   // requires dynamic lookup, but we know that the
+                     // variable is local and where it is unless it
+                     // has been shadowed by an eval-introduced
+                     // variable
+
+    INTERNAL,        // like VAR, but not user-visible (may or may not
+                     // be in a context)
+
+    TEMPORARY        // temporary variables (not user-visible), never
+                     // in a context
+  };
+
+  enum Kind {
+    NORMAL,
+    THIS,
+    ARGUMENTS
+  };
+
+  Variable(Scope* scope,
+           Handle<String> name,
+           Mode mode,
+           bool is_valid_lhs,
+           Kind kind);
+
+  // Printing support
+  static const char* Mode2String(Mode mode);
+
+  // Type testing & conversion
+  Property* AsProperty();
+  Variable* AsVariable();
+  bool IsValidLeftHandSide() { return is_valid_LHS_; }
+
+  // The source code for an eval() call may refer to a variable that is
+  // in an outer scope about which we don't know anything (it may not
+  // be the global scope). scope() is NULL in that case. Currently the
+  // scope is only used to follow the context chain length.
+  Scope* scope() const  { return scope_; }
+
+  Handle<String> name() const  { return name_; }
+  Mode mode() const  { return mode_; }
+  bool is_accessed_from_inner_scope() const  {
+    return is_accessed_from_inner_scope_;
+  }
+  UseCount* var_uses()  { return &var_uses_; }
+  UseCount* obj_uses()  { return &obj_uses_; }
+
+  bool IsVariable(Handle<String> n) const {
+    return !is_this() && name().is_identical_to(n);
+  }
+
+  bool is_dynamic() const {
+    return (mode_ == DYNAMIC ||
+            mode_ == DYNAMIC_GLOBAL ||
+            mode_ == DYNAMIC_LOCAL);
+  }
+
+  bool is_global() const;
+  bool is_this() const { return kind_ == THIS; }
+  bool is_arguments() const { return kind_ == ARGUMENTS; }
+
+  // True if the variable is named eval and not known to be shadowed.
+  bool is_possibly_eval() const {
+    return IsVariable(Factory::eval_symbol()) &&
+        (mode_ == DYNAMIC || mode_ == DYNAMIC_GLOBAL);
+  }
+
+  Variable* local_if_not_shadowed() const {
+    ASSERT(mode_ == DYNAMIC_LOCAL && local_if_not_shadowed_ != NULL);
+    return local_if_not_shadowed_;
+  }
+
+  void set_local_if_not_shadowed(Variable* local) {
+    local_if_not_shadowed_ = local;
+  }
+
+  Expression* rewrite() const  { return rewrite_; }
+  Slot* slot() const;
+
+  SmiAnalysis* type() { return &type_; }
+
+ private:
+  Scope* scope_;
+  Handle<String> name_;
+  Mode mode_;
+  bool is_valid_LHS_;
+  Kind kind_;
+
+  Variable* local_if_not_shadowed_;
+
+  // Usage info.
+  bool is_accessed_from_inner_scope_;  // set by variable resolver
+  UseCount var_uses_;  // uses of the variable value
+  UseCount obj_uses_;  // uses of the object the variable points to
+
+  // Static type information
+  SmiAnalysis type_;
+
+  // Code generation.
+  // rewrite_ is usually a Slot or a Property, but may be any expression.
+  Expression* rewrite_;
+
+  friend class Scope;  // Has explicit access to rewrite_.
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_VARIABLES_H_
diff --git a/src/version.cc b/src/version.cc
new file mode 100644
index 0000000..a36e17c
--- /dev/null
+++ b/src/version.cc
@@ -0,0 +1,88 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "version.h"
+
+// These macros define the version number for the current version.
+// NOTE these macros are used by the SCons build script so their names
+// cannot be changed without changing the SCons build script.
+#define MAJOR_VERSION     1
+#define MINOR_VERSION     3
+#define BUILD_NUMBER      14
+#define PATCH_LEVEL       0
+#define CANDIDATE_VERSION true
+
+// Define SONAME to have the SCons build put a specific SONAME into the
+// shared library instead of the generic SONAME generated from the V8 version
+// number. This define is mainly used by the SCons build script.
+#define SONAME            ""
+
+namespace v8 {
+namespace internal {
+
+int Version::major_ = MAJOR_VERSION;
+int Version::minor_ = MINOR_VERSION;
+int Version::build_ = BUILD_NUMBER;
+int Version::patch_ = PATCH_LEVEL;
+bool Version::candidate_ = CANDIDATE_VERSION;
+const char* Version::soname_ = SONAME;
+
+
+// Calculate the V8 version string.
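+// With the macros above this yields "1.3.14 (candidate)"; a non-zero patch
+// level would yield e.g. "1.3.14.1 (candidate)" instead.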
+void Version::GetString(Vector<char> str) {
+  const char* candidate = IsCandidate() ? " (candidate)" : "";
+  if (GetPatch() > 0) {
+    OS::SNPrintF(str, "%d.%d.%d.%d%s",
+                 GetMajor(), GetMinor(), GetBuild(), GetPatch(), candidate);
+  } else {
+    OS::SNPrintF(str, "%d.%d.%d%s",
+                 GetMajor(), GetMinor(), GetBuild(), candidate);
+  }
+}
+
+
+// Calculate the SONAME for the V8 shared library.
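+// With no explicit SONAME defined, the macros above produce the generic
+// name "libv8-1.3.14-candidate.so".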
+void Version::GetSONAME(Vector<char> str) {
+  if (soname_ == NULL || *soname_ == '\0') {
+    // Generate generic SONAME if no specific SONAME is defined.
+    const char* candidate = IsCandidate() ? "-candidate" : "";
+    if (GetPatch() > 0) {
+      OS::SNPrintF(str, "libv8-%d.%d.%d.%d%s.so",
+                   GetMajor(), GetMinor(), GetBuild(), GetPatch(), candidate);
+    } else {
+      OS::SNPrintF(str, "libv8-%d.%d.%d%s.so",
+                   GetMajor(), GetMinor(), GetBuild(), candidate);
+    }
+  } else {
+    // Use specific SONAME.
+    OS::SNPrintF(str, "%s", soname_);
+  }
+}
+
+} }  // namespace v8::internal
diff --git a/src/version.h b/src/version.h
new file mode 100644
index 0000000..c322a2f
--- /dev/null
+++ b/src/version.h
@@ -0,0 +1,64 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_VERSION_H_
+#define V8_VERSION_H_
+
+namespace v8 {
+namespace internal {
+
+class Version {
+ public:
+  // Return the various version components.
+  static int GetMajor() { return major_; }
+  static int GetMinor() { return minor_; }
+  static int GetBuild() { return build_; }
+  static int GetPatch() { return patch_; }
+  static bool IsCandidate() { return candidate_; }
+
+  // Calculate the V8 version string.
+  static void GetString(Vector<char> str);
+
+  // Calculate the SONAME for the V8 shared library.
+  static void GetSONAME(Vector<char> str);
+
+ private:
+  static int major_;
+  static int minor_;
+  static int build_;
+  static int patch_;
+  static bool candidate_;
+  static const char* soname_;
+
+  // In test-version.cc.
+  friend void SetVersion(int major, int minor, int build, int patch,
+                         bool candidate, const char* soname);
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_VERSION_H_
diff --git a/src/virtual-frame.cc b/src/virtual-frame.cc
new file mode 100644
index 0000000..44e5fae
--- /dev/null
+++ b/src/virtual-frame.cc
@@ -0,0 +1,381 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "register-allocator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// VirtualFrame implementation.
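+//
+// A virtual frame models the actual runtime stack frame during code
+// generation: each frame element records whether the corresponding value
+// currently lives in memory, in a register, is a known constant, or is a
+// copy of another element lower in the frame.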
+
+// When cloned, a frame is a deep copy of the original.
+VirtualFrame::VirtualFrame(VirtualFrame* original)
+    : elements_(original->element_count()),
+      stack_pointer_(original->stack_pointer_) {
+  elements_.AddAll(original->elements_);
+  // Copy register locations from original.
+  memcpy(&register_locations_,
+         original->register_locations_,
+         sizeof(register_locations_));
+}
+
+
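+// Return an element that acts as a copy of the element at the given index.
+// Constants are duplicated outright; memory and register elements yield a
+// COPY element that refers to the backing element, which is marked as copied.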
+FrameElement VirtualFrame::CopyElementAt(int index) {
+  ASSERT(index >= 0);
+  ASSERT(index < element_count());
+
+  FrameElement target = elements_[index];
+  FrameElement result;
+
+  switch (target.type()) {
+    case FrameElement::CONSTANT:
+      // We do not copy constants and instead return a fresh unsynced
+      // constant.
+      result = FrameElement::ConstantElement(target.handle(),
+                                             FrameElement::NOT_SYNCED);
+      break;
+
+    case FrameElement::COPY:
+      // We do not allow copies of copies, so we follow one link to
+      // the actual backing store of a copy before making a copy.
+      index = target.index();
+      ASSERT(elements_[index].is_memory() || elements_[index].is_register());
+      // Fall through.
+
+    case FrameElement::MEMORY:  // Fall through.
+    case FrameElement::REGISTER:
+      // All copies are backed by memory or register locations.
+      result.set_type(FrameElement::COPY);
+      result.clear_copied();
+      result.clear_sync();
+      result.set_index(index);
+      elements_[index].set_copied();
+      break;
+
+    case FrameElement::INVALID:
+      // We should not try to copy invalid elements.
+      UNREACHABLE();
+      break;
+  }
+  return result;
+}
+
+
+// Modify the state of the virtual frame to match the actual frame by adding
+// extra in-memory elements to the top of the virtual frame.  The extra
+// elements will be externally materialized on the actual frame (e.g., by
+// pushing an exception handler).  No code is emitted.
+void VirtualFrame::Adjust(int count) {
+  ASSERT(count >= 0);
+  ASSERT(stack_pointer_ == element_count() - 1);
+
+  for (int i = 0; i < count; i++) {
+    elements_.Add(FrameElement::MemoryElement());
+  }
+  stack_pointer_ += count;
+}
+
+
+void VirtualFrame::ForgetElements(int count) {
+  ASSERT(count >= 0);
+  ASSERT(element_count() >= count);
+
+  for (int i = 0; i < count; i++) {
+    FrameElement last = elements_.RemoveLast();
+    if (last.is_register()) {
+      // A hack to properly count register references for the code
+      // generator's current frame and also for other frames.  The
+      // same code appears in PrepareMergeTo.
+      if (cgen()->frame() == this) {
+        Unuse(last.reg());
+      } else {
+        set_register_location(last.reg(), kIllegalIndex);
+      }
+    }
+  }
+}
+
+
+// If there are any registers referenced only by the frame, spill one.
+Register VirtualFrame::SpillAnyRegister() {
+  // Find the leftmost (ordered by register number) register whose only
+  // reference is in the frame.
+  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+    if (is_used(i) && cgen()->allocator()->count(i) == 1) {
+      SpillElementAt(register_location(i));
+      ASSERT(!cgen()->allocator()->is_used(i));
+      return RegisterAllocator::ToRegister(i);
+    }
+  }
+  return no_reg;
+}
+
+
+// Make the type of the element at a given index be MEMORY.
+void VirtualFrame::SpillElementAt(int index) {
+  if (!elements_[index].is_valid()) return;
+
+  SyncElementAt(index);
+  // The element is now in memory.  Its copied flag is preserved.
+  FrameElement new_element = FrameElement::MemoryElement();
+  if (elements_[index].is_copied()) {
+    new_element.set_copied();
+  }
+  if (elements_[index].is_register()) {
+    Unuse(elements_[index].reg());
+  }
+  elements_[index] = new_element;
+}
+
+
+// Clear the dirty bit for the element at a given index.
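+// Elements at or below the stack pointer are written in place; the element
+// just above the stack pointer is pushed; elements further above require
+// syncing the whole range from just above the stack pointer up to the
+// requested index.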
+void VirtualFrame::SyncElementAt(int index) {
+  if (index <= stack_pointer_) {
+    if (!elements_[index].is_synced()) SyncElementBelowStackPointer(index);
+  } else if (index == stack_pointer_ + 1) {
+    SyncElementByPushing(index);
+  } else {
+    SyncRange(stack_pointer_ + 1, index);
+  }
+}
+
+
+// Make the type of all elements be MEMORY.
+void VirtualFrame::SpillAll() {
+  for (int i = 0; i < element_count(); i++) {
+    SpillElementAt(i);
+  }
+}
+
+
+void VirtualFrame::PrepareMergeTo(VirtualFrame* expected) {
+  // Perform state changes on this frame that will make merging to the
+  // expected frame simpler, or else increase the likelihood that this
+  // frame will match another.
+  for (int i = 0; i < element_count(); i++) {
+    FrameElement source = elements_[i];
+    FrameElement target = expected->elements_[i];
+
+    if (!target.is_valid() ||
+        (target.is_memory() && !source.is_memory() && source.is_synced())) {
+      // No code needs to be generated to invalidate valid elements.
+      // No code needs to be generated to move values to memory if
+      // they are already synced.  We perform those moves here, before
+      // merging.
+      if (source.is_register()) {
+        // If the frame is the code generator's current frame, we have
+        // to decrement both the frame-internal and global register
+        // counts.
+        if (cgen()->frame() == this) {
+          Unuse(source.reg());
+        } else {
+          set_register_location(source.reg(), kIllegalIndex);
+        }
+      }
+      elements_[i] = target;
+    } else if (target.is_register() && !target.is_synced() &&
+               !source.is_memory()) {
+      // If an element's target is a register that doesn't need to be
+      // synced, and the element is not in memory, then the sync state
+      // of the element is irrelevant.  We clear the sync bit.
+      ASSERT(source.is_valid());
+      elements_[i].clear_sync();
+    }
+  }
+}
+
+
+void VirtualFrame::PrepareForCall(int spilled_args, int dropped_args) {
+  ASSERT(height() >= dropped_args);
+  ASSERT(height() >= spilled_args);
+  ASSERT(dropped_args <= spilled_args);
+
+  SyncRange(0, element_count() - 1);
+  // Spill registers.
+  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+    if (is_used(i)) {
+      SpillElementAt(register_location(i));
+    }
+  }
+
+  // Spill the arguments.
+  for (int i = element_count() - spilled_args; i < element_count(); i++) {
+    if (!elements_[i].is_memory()) {
+      SpillElementAt(i);
+    }
+  }
+
+  // Forget the frame elements that will be popped by the call.
+  Forget(dropped_args);
+}
+
+
+void VirtualFrame::PrepareForReturn() {
+  // Spill all locals. This is necessary to make sure all locals have
+  // the right value when breaking at the return site in the debugger.
+  for (int i = 0; i < expression_base_index(); i++) {
+    SpillElementAt(i);
+  }
+}
+
+
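+// Set the element at a given index, where the index is counted from the top
+// of the frame (index 0 is the top element).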
+void VirtualFrame::SetElementAt(int index, Result* value) {
+  int frame_index = element_count() - index - 1;
+  ASSERT(frame_index >= 0);
+  ASSERT(frame_index < element_count());
+  ASSERT(value->is_valid());
+  FrameElement original = elements_[frame_index];
+
+  // Early exit if the element is the same as the one being set.
+  bool same_register = original.is_register()
+      && value->is_register()
+      && original.reg().is(value->reg());
+  bool same_constant = original.is_constant()
+      && value->is_constant()
+      && original.handle().is_identical_to(value->handle());
+  if (same_register || same_constant) {
+    value->Unuse();
+    return;
+  }
+
+  InvalidateFrameSlotAt(frame_index);
+
+  FrameElement new_element;
+  if (value->is_register()) {
+    if (is_used(value->reg())) {
+      // The register already appears on the frame.  Either the existing
+      // register element, or the new element at frame_index, must be made
+      // a copy.
+      int i = register_location(value->reg());
+
+      if (i < frame_index) {
+        // The register FrameElement is lower in the frame than the new copy.
+        elements_[frame_index] = CopyElementAt(i);
+      } else {
+        // There was an early bailout for the case of setting a
+        // register element to itself.
+        ASSERT(i != frame_index);
+        elements_[frame_index] = elements_[i];
+        elements_[i] = CopyElementAt(frame_index);
+        if (elements_[frame_index].is_synced()) {
+          elements_[i].set_sync();
+        }
+        elements_[frame_index].clear_sync();
+        set_register_location(value->reg(), frame_index);
+        for (int j = i + 1; j < element_count(); j++) {
+          if (elements_[j].is_copy() && elements_[j].index() == i) {
+            elements_[j].set_index(frame_index);
+          }
+        }
+      }
+    } else {
+      // The register value->reg() was not already used on the frame.
+      Use(value->reg(), frame_index);
+      elements_[frame_index] =
+          FrameElement::RegisterElement(value->reg(),
+                                        FrameElement::NOT_SYNCED);
+    }
+  } else {
+    ASSERT(value->is_constant());
+    elements_[frame_index] =
+        FrameElement::ConstantElement(value->handle(),
+                                      FrameElement::NOT_SYNCED);
+  }
+  value->Unuse();
+}
+
+
+void VirtualFrame::PushFrameSlotAt(int index) {
+  elements_.Add(CopyElementAt(index));
+}
+
+
+void VirtualFrame::Push(Register reg) {
+  if (is_used(reg)) {
+    int index = register_location(reg);
+    FrameElement element = CopyElementAt(index);
+    elements_.Add(element);
+  } else {
+    Use(reg, element_count());
+    FrameElement element =
+        FrameElement::RegisterElement(reg,
+                                      FrameElement::NOT_SYNCED);
+    elements_.Add(element);
+  }
+}
+
+
+void VirtualFrame::Push(Handle<Object> value) {
+  FrameElement element =
+      FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED);
+  elements_.Add(element);
+}
+
+
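+// Drop num_dropped elements from just below the top of the frame, keeping
+// the top element in place.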
+void VirtualFrame::Nip(int num_dropped) {
+  ASSERT(num_dropped >= 0);
+  if (num_dropped == 0) return;
+  Result tos = Pop();
+  if (num_dropped > 1) {
+    Drop(num_dropped - 1);
+  }
+  SetElementAt(0, &tos);
+}
+
+
+bool VirtualFrame::Equals(VirtualFrame* other) {
+#ifdef DEBUG
+  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+    if (register_location(i) != other->register_location(i)) {
+      return false;
+    }
+  }
+  if (element_count() != other->element_count()) return false;
+#endif
+  if (stack_pointer_ != other->stack_pointer_) return false;
+  for (int i = 0; i < element_count(); i++) {
+    if (!elements_[i].Equals(other->elements_[i])) return false;
+  }
+
+  return true;
+}
+
+
+// Specialize List::ResizeAdd to a non-inlined version for FrameElements.
+// ResizeAdd becomes a real (out-of-line) function whose implementation is
+// the inlined ResizeAddInternal.
+template <>
+void List<FrameElement,
+          FreeStoreAllocationPolicy>::ResizeAdd(const FrameElement& element) {
+  ResizeAddInternal(element);
+}
+
+} }  // namespace v8::internal
diff --git a/src/virtual-frame.h b/src/virtual-frame.h
new file mode 100644
index 0000000..0bf0ca2
--- /dev/null
+++ b/src/virtual-frame.h
@@ -0,0 +1,44 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_VIRTUAL_FRAME_H_
+#define V8_VIRTUAL_FRAME_H_
+
+#include "frame-element.h"
+#include "macro-assembler.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/virtual-frame-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/virtual-frame-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/virtual-frame-arm.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+#endif  // V8_VIRTUAL_FRAME_H_
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
new file mode 100644
index 0000000..f51a3ea
--- /dev/null
+++ b/src/x64/assembler-x64-inl.h
@@ -0,0 +1,314 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_X64_ASSEMBLER_X64_INL_H_
+#define V8_X64_ASSEMBLER_X64_INL_H_
+
+#include "cpu.h"
+#include "memory.h"
+
+namespace v8 {
+namespace internal {
+
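+// x64 condition codes come in complementary pairs that differ only in the
+// low bit of their encoding, so flipping bit 0 negates a condition.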
+Condition NegateCondition(Condition cc) {
+  return static_cast<Condition>(cc ^ 1);
+}
+
+// -----------------------------------------------------------------------------
+
+Immediate::Immediate(Smi* value) {
+  value_ = static_cast<int32_t>(reinterpret_cast<intptr_t>(value));
+}
+
+// -----------------------------------------------------------------------------
+// Implementation of Assembler
+
+
+
+void Assembler::emitl(uint32_t x) {
+  Memory::uint32_at(pc_) = x;
+  pc_ += sizeof(uint32_t);
+}
+
+
+void Assembler::emitq(uint64_t x, RelocInfo::Mode rmode) {
+  Memory::uint64_at(pc_) = x;
+  if (rmode != RelocInfo::NONE) {
+    RecordRelocInfo(rmode, x);
+  }
+  pc_ += sizeof(uint64_t);
+}
+
+
+void Assembler::emitw(uint16_t x) {
+  Memory::uint16_at(pc_) = x;
+  pc_ += sizeof(uint16_t);
+}
+
+
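+// REX prefixes have the form 0100WRXB.  The emit_rex_64 variants below emit
+// 0x48 (REX.W set, selecting a 64-bit operand size), combining in REX.R from
+// the high bit of the reg operand and REX.B from the high bit of the r/m
+// operand; the emit_rex_32 variants emit 0x40 (REX.W clear), and the
+// emit_optional_rex_32 variants omit the prefix entirely when no extended
+// registers are involved.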
+void Assembler::emit_rex_64(Register reg, Register rm_reg) {
+  emit(0x48 | reg.high_bit() << 2 | rm_reg.high_bit());
+}
+
+
+void Assembler::emit_rex_64(XMMRegister reg, Register rm_reg) {
+  emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
+}
+
+
+void Assembler::emit_rex_64(Register reg, const Operand& op) {
+  emit(0x48 | reg.high_bit() << 2 | op.rex_);
+}
+
+
+void Assembler::emit_rex_64(XMMRegister reg, const Operand& op) {
+  emit(0x48 | (reg.code() & 0x8) >> 1 | op.rex_);
+}
+
+
+void Assembler::emit_rex_64(Register rm_reg) {
+  ASSERT_EQ(rm_reg.code() & 0xf, rm_reg.code());
+  emit(0x48 | rm_reg.high_bit());
+}
+
+
+void Assembler::emit_rex_64(const Operand& op) {
+  emit(0x48 | op.rex_);
+}
+
+
+void Assembler::emit_rex_32(Register reg, Register rm_reg) {
+  emit(0x40 | reg.high_bit() << 2 | rm_reg.high_bit());
+}
+
+
+void Assembler::emit_rex_32(Register reg, const Operand& op) {
+  emit(0x40 | reg.high_bit() << 2  | op.rex_);
+}
+
+
+void Assembler::emit_rex_32(Register rm_reg) {
+  emit(0x40 | rm_reg.high_bit());
+}
+
+
+void Assembler::emit_rex_32(const Operand& op) {
+  emit(0x40 | op.rex_);
+}
+
+
+void Assembler::emit_optional_rex_32(Register reg, Register rm_reg) {
+  byte rex_bits = reg.high_bit() << 2 | rm_reg.high_bit();
+  if (rex_bits != 0) emit(0x40 | rex_bits);
+}
+
+
+void Assembler::emit_optional_rex_32(Register reg, const Operand& op) {
+  byte rex_bits =  reg.high_bit() << 2 | op.rex_;
+  if (rex_bits != 0) emit(0x40 | rex_bits);
+}
+
+
+void Assembler::emit_optional_rex_32(XMMRegister reg, const Operand& op) {
+  byte rex_bits =  (reg.code() & 0x8) >> 1 | op.rex_;
+  if (rex_bits != 0) emit(0x40 | rex_bits);
+}
+
+
+void Assembler::emit_optional_rex_32(XMMRegister reg, XMMRegister base) {
+  byte rex_bits =  (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
+  if (rex_bits != 0) emit(0x40 | rex_bits);
+}
+
+
+void Assembler::emit_optional_rex_32(XMMRegister reg, Register base) {
+  byte rex_bits =  (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
+  if (rex_bits != 0) emit(0x40 | rex_bits);
+}
+
+
+void Assembler::emit_optional_rex_32(Register rm_reg) {
+  if (rm_reg.high_bit()) emit(0x41);
+}
+
+
+void Assembler::emit_optional_rex_32(const Operand& op) {
+  if (op.rex_ != 0) emit(0x40 | op.rex_);
+}
+
+
+Address Assembler::target_address_at(Address pc) {
+  return Memory::Address_at(pc);
+}
+
+
+void Assembler::set_target_address_at(Address pc, Address target) {
+  Memory::Address_at(pc) = target;
+  CPU::FlushICache(pc, sizeof(intptr_t));
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+// The modes possibly affected by apply must be in kApplyMask.
+void RelocInfo::apply(intptr_t delta) {
+  if (IsInternalReference(rmode_)) {
+    // absolute code pointer inside code object moves with the code object.
+    intptr_t* p = reinterpret_cast<intptr_t*>(pc_);
+    *p += delta;  // relocate entry
+  }
+}
+
+
+Address RelocInfo::target_address() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  return Assembler::target_address_at(pc_);
+}
+
+
+Address RelocInfo::target_address_address() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  return reinterpret_cast<Address>(pc_);
+}
+
+
+void RelocInfo::set_target_address(Address target) {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  Assembler::set_target_address_at(pc_, target);
+}
+
+
+Object* RelocInfo::target_object() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return *reinterpret_cast<Object**>(pc_);
+}
+
+
+Object** RelocInfo::target_object_address() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return reinterpret_cast<Object**>(pc_);
+}
+
+
+Address* RelocInfo::target_reference_address() {
+  ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
+  return reinterpret_cast<Address*>(pc_);
+}
+
+
+void RelocInfo::set_target_object(Object* target) {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  *reinterpret_cast<Object**>(pc_) = target;
+}
+
+
+bool RelocInfo::IsCallInstruction() {
+  // The recognized call sequence is:
+  //  movq(kScratchRegister, immediate64); call(kScratchRegister);
+  // It only needs to be distinguished from a return sequence
+  //  movq(rsp, rbp); pop(rbp); ret(n); int3 *6
+  // The 11th byte is int3 (0xCC) in the return sequence and
+  // REX.WB (0x48+register bit) for the call sequence.
+  return pc_[10] != 0xCC;
+}
+
+
+Address RelocInfo::call_address() {
+  ASSERT(IsCallInstruction());
+  return Assembler::target_address_at(
+      pc_ + Assembler::kPatchReturnSequenceAddressOffset);
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+  ASSERT(IsCallInstruction());
+  Assembler::set_target_address_at(
+      pc_ + Assembler::kPatchReturnSequenceAddressOffset,
+      target);
+}
+
+
+Object* RelocInfo::call_object() {
+  ASSERT(IsCallInstruction());
+  return *call_object_address();
+}
+
+
+void RelocInfo::set_call_object(Object* target) {
+  ASSERT(IsCallInstruction());
+  *call_object_address() = target;
+}
+
+
+Object** RelocInfo::call_object_address() {
+  ASSERT(IsCallInstruction());
+  return reinterpret_cast<Object**>(
+      pc_ + Assembler::kPatchReturnSequenceAddressOffset);
+}
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand
+
+void Operand::set_modrm(int mod, Register rm_reg) {
+  ASSERT(is_uint2(mod));
+  buf_[0] = mod << 6 | rm_reg.low_bits();
+  // Set REX.B to the high bit of rm.code().
+  rex_ |= rm_reg.high_bit();
+}
+
+
+void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
+  ASSERT(len_ == 1);
+  ASSERT(is_uint2(scale));
+  // Use SIB with no index register only for base rsp or r12. Otherwise we
+  // would skip the SIB byte entirely.
+  ASSERT(!index.is(rsp) || base.is(rsp) || base.is(r12));
+  buf_[1] = scale << 6 | index.low_bits() << 3 | base.low_bits();
+  rex_ |= index.high_bit() << 1 | base.high_bit();
+  len_ = 2;
+}
+
+void Operand::set_disp8(int disp) {
+  ASSERT(is_int8(disp));
+  ASSERT(len_ == 1 || len_ == 2);
+  int8_t* p = reinterpret_cast<int8_t*>(&buf_[len_]);
+  *p = disp;
+  len_ += sizeof(int8_t);
+}
+
+void Operand::set_disp32(int disp) {
+  ASSERT(len_ == 1 || len_ == 2);
+  int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
+  *p = disp;
+  len_ += sizeof(int32_t);
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_X64_ASSEMBLER_X64_INL_H_
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
new file mode 100644
index 0000000..b4204a9
--- /dev/null
+++ b/src/x64/assembler-x64.cc
@@ -0,0 +1,2393 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "serialize.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Implementation of Register
+
+Register rax = { 0 };
+Register rcx = { 1 };
+Register rdx = { 2 };
+Register rbx = { 3 };
+Register rsp = { 4 };
+Register rbp = { 5 };
+Register rsi = { 6 };
+Register rdi = { 7 };
+Register r8 = { 8 };
+Register r9 = { 9 };
+Register r10 = { 10 };
+Register r11 = { 11 };
+Register r12 = { 12 };
+Register r13 = { 13 };
+Register r14 = { 14 };
+Register r15 = { 15 };
+
+Register no_reg = { -1 };
+
+XMMRegister xmm0 = { 0 };
+XMMRegister xmm1 = { 1 };
+XMMRegister xmm2 = { 2 };
+XMMRegister xmm3 = { 3 };
+XMMRegister xmm4 = { 4 };
+XMMRegister xmm5 = { 5 };
+XMMRegister xmm6 = { 6 };
+XMMRegister xmm7 = { 7 };
+XMMRegister xmm8 = { 8 };
+XMMRegister xmm9 = { 9 };
+XMMRegister xmm10 = { 10 };
+XMMRegister xmm11 = { 11 };
+XMMRegister xmm12 = { 12 };
+XMMRegister xmm13 = { 13 };
+XMMRegister xmm14 = { 14 };
+XMMRegister xmm15 = { 15 };
+
+
+// -----------------------------------------------------------------------------
+// Implementation of CpuFeatures
+
+// The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
+//   fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
+uint64_t CpuFeatures::supported_ = kDefaultCpuFeatures;
+uint64_t CpuFeatures::enabled_ = 0;
+
+void CpuFeatures::Probe()  {
+  ASSERT(Heap::HasBeenSetup());
+  ASSERT(supported_ == kDefaultCpuFeatures);
+  if (Serializer::enabled()) return;  // No features if we might serialize.
+
+  Assembler assm(NULL, 0);
+  Label cpuid, done;
+#define __ assm.
+  // Save old rsp, since we are going to modify the stack.
+  __ push(rbp);
+  __ pushfq();
+  __ push(rcx);
+  __ push(rbx);
+  __ movq(rbp, rsp);
+
+  // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
+  __ pushfq();
+  __ pop(rax);
+  __ movq(rdx, rax);
+  __ xor_(rax, Immediate(0x200000));  // Flip bit 21.
+  __ push(rax);
+  __ popfq();
+  __ pushfq();
+  __ pop(rax);
+  __ xor_(rax, rdx);  // Different if CPUID is supported.
+  __ j(not_zero, &cpuid);
+
+  // CPUID not supported. Clear the supported features in edx:eax.
+  __ xor_(rax, rax);
+  __ jmp(&done);
+
+  // Invoke CPUID with 1 in eax to get feature information in
+  // ecx:edx. Temporarily enable CPUID support because we know it's
+  // safe here.
+  __ bind(&cpuid);
+  __ movq(rax, Immediate(1));
+  supported_ = kDefaultCpuFeatures | (1 << CPUID);
+  { Scope fscope(CPUID);
+    __ cpuid();
+    // Move the result from ecx:edx to rdi.
+    __ movl(rdi, rdx);  // Zero-extended to 64 bits.
+    __ shl(rcx, Immediate(32));
+    __ or_(rdi, rcx);
+
+    // Get the sahf supported flag, from CPUID(0x80000001)
+    __ movq(rax, 0x80000001, RelocInfo::NONE);
+    __ cpuid();
+  }
+  supported_ = kDefaultCpuFeatures;
+
+  // Put the CPU flags in rax.
+  // rax = (rcx & 1) | (rdi & ~1) | (1 << CPUID).
+  __ movl(rax, Immediate(1));
+  __ and_(rcx, rax);  // Bit 0 is set if SAHF instruction supported.
+  __ not_(rax);
+  __ and_(rax, rdi);
+  __ or_(rax, rcx);
+  __ or_(rax, Immediate(1 << CPUID));
+
+  // Done.
+  __ bind(&done);
+  __ movq(rsp, rbp);
+  __ pop(rbx);
+  __ pop(rcx);
+  __ popfq();
+  __ pop(rbp);
+  __ ret(0);
+#undef __
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Object* code =
+      Heap::CreateCode(desc, NULL, Code::ComputeFlags(Code::STUB), NULL);
+  if (!code->IsCode()) return;
+  LOG(CodeCreateEvent(Logger::BUILTIN_TAG,
+                      Code::cast(code), "CpuFeatures::Probe"));
+  typedef uint64_t (*F0)();
+  F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
+  supported_ = probe();
+  // SSE2 and CMOV must be available on an X64 CPU.
+  ASSERT(IsSupported(CPUID));
+  ASSERT(IsSupported(SSE2));
+  ASSERT(IsSupported(CMOV));
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard int3 instructions can be added if required.
+void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
+  // Loading a register with a 64-bit immediate and calling through that
+  // register takes up 13 bytes, and each int3 takes up one byte.
+  static const int kCallCodeSize = 13;
+  int code_size = kCallCodeSize + guard_bytes;
+
+  // Create a code patcher.
+  CodePatcher patcher(pc_, code_size);
+
+  // Add a label for checking the size of the code used for returning.
+#ifdef DEBUG
+  Label check_codesize;
+  patcher.masm()->bind(&check_codesize);
+#endif
+
+  // Patch the code.
+  patcher.masm()->movq(r10, target, RelocInfo::NONE);
+  patcher.masm()->call(r10);
+
+  // Check that the size of the code generated is as expected.
+  ASSERT_EQ(kCallCodeSize,
+            patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
+
+  // Add the requested number of int3 instructions after the call.
+  for (int i = 0; i < guard_bytes; i++) {
+    patcher.masm()->int3();
+  }
+}
+
+
+void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
+  // Patch the code at the current address with the supplied instructions.
+  for (int i = 0; i < instruction_count; i++) {
+    *(pc_ + i) = *(instructions + i);
+  }
+
+  // Indicate that code has changed.
+  CPU::FlushICache(pc_, instruction_count);
+}
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand
+
+Operand::Operand(Register base, int32_t disp): rex_(0) {
+  len_ = 1;
+  if (base.is(rsp) || base.is(r12)) {
+    // SIB byte is needed to encode (rsp + offset) or (r12 + offset).
+    set_sib(times_1, rsp, base);
+  }
+
+  if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
+    set_modrm(0, base);
+  } else if (is_int8(disp)) {
+    set_modrm(1, base);
+    set_disp8(disp);
+  } else {
+    set_modrm(2, base);
+    set_disp32(disp);
+  }
+}
+
+
+Operand::Operand(Register base,
+                 Register index,
+                 ScaleFactor scale,
+                 int32_t disp): rex_(0) {
+  ASSERT(!index.is(rsp));
+  len_ = 1;
+  set_sib(scale, index, base);
+  if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
+    // This call to set_modrm doesn't overwrite the REX.B (or REX.X) bits
+    // possibly set by set_sib.
+    set_modrm(0, rsp);
+  } else if (is_int8(disp)) {
+    set_modrm(1, rsp);
+    set_disp8(disp);
+  } else {
+    set_modrm(2, rsp);
+    set_disp32(disp);
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Assembler
+
+#ifdef GENERATED_CODE_COVERAGE
+static void InitCoverageLog();
+#endif
+
+byte* Assembler::spare_buffer_ = NULL;
+
+Assembler::Assembler(void* buffer, int buffer_size) {
+  if (buffer == NULL) {
+    // do our own buffer management
+    if (buffer_size <= kMinimalBufferSize) {
+      buffer_size = kMinimalBufferSize;
+
+      if (spare_buffer_ != NULL) {
+        buffer = spare_buffer_;
+        spare_buffer_ = NULL;
+      }
+    }
+    if (buffer == NULL) {
+      buffer_ = NewArray<byte>(buffer_size);
+    } else {
+      buffer_ = static_cast<byte*>(buffer);
+    }
+    buffer_size_ = buffer_size;
+    own_buffer_ = true;
+  } else {
+    // use externally provided buffer instead
+    ASSERT(buffer_size > 0);
+    buffer_ = static_cast<byte*>(buffer);
+    buffer_size_ = buffer_size;
+    own_buffer_ = false;
+  }
+
+  // Clear the buffer in debug mode unless it was provided by the
+  // caller, in which case we can't be sure it's okay to overwrite
+  // existing code in it.
+#ifdef DEBUG
+  if (own_buffer_) {
+    memset(buffer_, 0xCC, buffer_size);  // int3
+  }
+#endif
+
+  // setup buffer pointers
+  ASSERT(buffer_ != NULL);
+  pc_ = buffer_;
+  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+
+  last_pc_ = NULL;
+  current_statement_position_ = RelocInfo::kNoPosition;
+  current_position_ = RelocInfo::kNoPosition;
+  written_statement_position_ = current_statement_position_;
+  written_position_ = current_position_;
+#ifdef GENERATED_CODE_COVERAGE
+  InitCoverageLog();
+#endif
+}
+
+
+Assembler::~Assembler() {
+  if (own_buffer_) {
+    if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
+      spare_buffer_ = buffer_;
+    } else {
+      DeleteArray(buffer_);
+    }
+  }
+}
+
+
+void Assembler::GetCode(CodeDesc* desc) {
+  // finalize code
+  // (at this point overflow() may be true, but the gap ensures that
+  // we are still not overlapping instructions and relocation info)
+  ASSERT(pc_ <= reloc_info_writer.pos());  // no overlap
+  // setup desc
+  desc->buffer = buffer_;
+  desc->buffer_size = buffer_size_;
+  desc->instr_size = pc_offset();
+  ASSERT(desc->instr_size > 0);  // Zero-size code objects upset the system.
+  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+  desc->origin = this;
+
+  Counters::reloc_info_size.Increment(desc->reloc_size);
+}
+
+
+void Assembler::Align(int m) {
+  ASSERT(IsPowerOf2(m));
+  while ((pc_offset() & (m - 1)) != 0) {
+    nop();
+  }
+}
+
+
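+// An unbound label with forward references keeps those references on a
+// linked list threaded through the not-yet-patched 32-bit displacement
+// fields; binding walks the chain and patches each displacement to point
+// at the bind position.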
+void Assembler::bind_to(Label* L, int pos) {
+  ASSERT(!L->is_bound());  // Label may only be bound once.
+  last_pc_ = NULL;
+  ASSERT(0 <= pos && pos <= pc_offset());  // Position must be valid.
+  if (L->is_linked()) {
+    int current = L->pos();
+    int next = long_at(current);
+    while (next != current) {
+      // relative address, relative to point after address
+      int imm32 = pos - (current + sizeof(int32_t));
+      long_at_put(current, imm32);
+      current = next;
+      next = long_at(next);
+    }
+    // Fix up last fixup on linked list.
+    int last_imm32 = pos - (current + sizeof(int32_t));
+    long_at_put(current, last_imm32);
+  }
+  L->bind_to(pos);
+}
+
+
+void Assembler::bind(Label* L) {
+  bind_to(L, pc_offset());
+}
+
+
+void Assembler::GrowBuffer() {
+  ASSERT(buffer_overflow());  // should not call this otherwise
+  if (!own_buffer_) FATAL("external code buffer is too small");
+
+  // compute new buffer size
+  CodeDesc desc;  // the new buffer
+  if (buffer_size_ < 4*KB) {
+    desc.buffer_size = 4*KB;
+  } else {
+    desc.buffer_size = 2*buffer_size_;
+  }
+  // Some internal data structures overflow for very large buffers,
+  // so kMaximalBufferSize must be kept small enough to avoid this.
+  if ((desc.buffer_size > kMaximalBufferSize) ||
+      (desc.buffer_size > Heap::OldGenerationSize())) {
+    V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
+  }
+
+  // setup new buffer
+  desc.buffer = NewArray<byte>(desc.buffer_size);
+  desc.instr_size = pc_offset();
+  desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
+
+  // Clear the buffer in debug mode. Use 'int3' instructions to make
+  // sure we trap immediately if we ever run uninitialized code.
+#ifdef DEBUG
+  memset(desc.buffer, 0xCC, desc.buffer_size);
+#endif
+
+  // copy the data
+  intptr_t pc_delta = desc.buffer - buffer_;
+  intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
+      (buffer_ + buffer_size_);
+  memmove(desc.buffer, buffer_, desc.instr_size);
+  memmove(rc_delta + reloc_info_writer.pos(),
+          reloc_info_writer.pos(), desc.reloc_size);
+
+  // switch buffers
+  if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
+    spare_buffer_ = buffer_;
+  } else {
+    DeleteArray(buffer_);
+  }
+  buffer_ = desc.buffer;
+  buffer_size_ = desc.buffer_size;
+  pc_ += pc_delta;
+  if (last_pc_ != NULL) {
+    last_pc_ += pc_delta;
+  }
+  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+                               reloc_info_writer.last_pc() + pc_delta);
+
+  // relocate runtime entries
+  for (RelocIterator it(desc); !it.done(); it.next()) {
+    RelocInfo::Mode rmode = it.rinfo()->rmode();
+    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
+      intptr_t* p = reinterpret_cast<intptr_t*>(it.rinfo()->pc());
+      if (*p != 0) {  // 0 means uninitialized.
+        *p += pc_delta;
+      }
+    }
+  }
+
+  ASSERT(!buffer_overflow());
+}
+
+
+void Assembler::emit_operand(int code, const Operand& adr) {
+  ASSERT(is_uint3(code));
+  const unsigned length = adr.len_;
+  ASSERT(length > 0);
+
+  // Emit updated ModR/M byte containing the given register.
+  ASSERT((adr.buf_[0] & 0x38) == 0);
+  pc_[0] = adr.buf_[0] | code << 3;
+
+  // Emit the rest of the encoded operand.
+  for (unsigned i = 1; i < length; i++) pc_[i] = adr.buf_[i];
+  pc_ += length;
+}
+
+
+// Assembler Instruction implementations
+
+void Assembler::arithmetic_op(byte opcode, Register reg, const Operand& op) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(reg, op);
+  emit(opcode);
+  emit_operand(reg, op);
+}
+
+
+void Assembler::arithmetic_op(byte opcode, Register reg, Register rm_reg) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(reg, rm_reg);
+  emit(opcode);
+  emit_modrm(reg, rm_reg);
+}
+
+
+void Assembler::arithmetic_op_16(byte opcode, Register reg, Register rm_reg) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x66);
+  emit_optional_rex_32(reg, rm_reg);
+  emit(opcode);
+  emit_modrm(reg, rm_reg);
+}
+
+
+void Assembler::arithmetic_op_16(byte opcode,
+                                 Register reg,
+                                 const Operand& rm_reg) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x66);
+  emit_optional_rex_32(reg, rm_reg);
+  emit(opcode);
+  emit_operand(reg, rm_reg);
+}
+
+
+void Assembler::arithmetic_op_32(byte opcode, Register reg, Register rm_reg) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(reg, rm_reg);
+  emit(opcode);
+  emit_modrm(reg, rm_reg);
+}
+
+
+void Assembler::arithmetic_op_32(byte opcode,
+                                 Register reg,
+                                 const Operand& rm_reg) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(reg, rm_reg);
+  emit(opcode);
+  emit_operand(reg, rm_reg);
+}
+
+
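+// Immediate-operand arithmetic: opcode 0x83 /subcode takes a sign-extended
+// 8-bit immediate, 0x81 /subcode takes a full 32-bit immediate, and the
+// short form 0x05 | (subcode << 3) is used when the destination is rax.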
+void Assembler::immediate_arithmetic_op(byte subcode,
+                                        Register dst,
+                                        Immediate src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst);
+  if (is_int8(src.value_)) {
+    emit(0x83);
+    emit_modrm(subcode, dst);
+    emit(src.value_);
+  } else if (dst.is(rax)) {
+    emit(0x05 | (subcode << 3));
+    emitl(src.value_);
+  } else {
+    emit(0x81);
+    emit_modrm(subcode, dst);
+    emitl(src.value_);
+  }
+}
+
+void Assembler::immediate_arithmetic_op(byte subcode,
+                                        const Operand& dst,
+                                        Immediate src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst);
+  if (is_int8(src.value_)) {
+    emit(0x83);
+    emit_operand(subcode, dst);
+    emit(src.value_);
+  } else {
+    emit(0x81);
+    emit_operand(subcode, dst);
+    emitl(src.value_);
+  }
+}
+
+
+void Assembler::immediate_arithmetic_op_16(byte subcode,
+                                           Register dst,
+                                           Immediate src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x66);  // Operand size override prefix.
+  emit_optional_rex_32(dst);
+  if (is_int8(src.value_)) {
+    emit(0x83);
+    emit_modrm(subcode, dst);
+    emit(src.value_);
+  } else if (dst.is(rax)) {
+    emit(0x05 | (subcode << 3));
+    emitl(src.value_);
+  } else {
+    emit(0x81);
+    emit_modrm(subcode, dst);
+    emitl(src.value_);
+  }
+}
+
+
+void Assembler::immediate_arithmetic_op_16(byte subcode,
+                                           const Operand& dst,
+                                           Immediate src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x66);  // Operand size override prefix.
+  emit_optional_rex_32(dst);
+  if (is_int8(src.value_)) {
+    emit(0x83);
+    emit_operand(subcode, dst);
+    emit(src.value_);
+  } else {
+    emit(0x81);
+    emit_operand(subcode, dst);
+    emitl(src.value_);
+  }
+}
+
+
+void Assembler::immediate_arithmetic_op_32(byte subcode,
+                                           Register dst,
+                                           Immediate src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  if (is_int8(src.value_)) {
+    emit(0x83);
+    emit_modrm(subcode, dst);
+    emit(src.value_);
+  } else if (dst.is(rax)) {
+    emit(0x05 | (subcode << 3));
+    emitl(src.value_);
+  } else {
+    emit(0x81);
+    emit_modrm(subcode, dst);
+    emitl(src.value_);
+  }
+}
+
+
+void Assembler::immediate_arithmetic_op_32(byte subcode,
+                                           const Operand& dst,
+                                           Immediate src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  if (is_int8(src.value_)) {
+    emit(0x83);
+    emit_operand(subcode, dst);
+    emit(src.value_);
+  } else {
+    emit(0x81);
+    emit_operand(subcode, dst);
+    emitl(src.value_);
+  }
+}
+
+
+void Assembler::immediate_arithmetic_op_8(byte subcode,
+                                          const Operand& dst,
+                                          Immediate src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  ASSERT(is_int8(src.value_) || is_uint8(src.value_));
+  emit(0x80);
+  emit_operand(subcode, dst);
+  emit(src.value_);
+}
+
+
+void Assembler::immediate_arithmetic_op_8(byte subcode,
+                                          Register dst,
+                                          Immediate src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (dst.code() > 3) {
+    // Use 64-bit mode byte registers.
+    emit_rex_64(dst);
+  }
+  ASSERT(is_int8(src.value_) || is_uint8(src.value_));
+  emit(0x80);
+  emit_modrm(subcode, dst);
+  emit(src.value_);
+}
+
+
+void Assembler::shift(Register dst, Immediate shift_amount, int subcode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(is_uint6(shift_amount.value_));  // illegal shift count
+  if (shift_amount.value_ == 1) {
+    emit_rex_64(dst);
+    emit(0xD1);
+    emit_modrm(subcode, dst);
+  } else {
+    emit_rex_64(dst);
+    emit(0xC1);
+    emit_modrm(subcode, dst);
+    emit(shift_amount.value_);
+  }
+}
+
+
+void Assembler::shift(Register dst, int subcode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst);
+  emit(0xD3);
+  emit_modrm(subcode, dst);
+}
+
+
+void Assembler::shift_32(Register dst, int subcode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  emit(0xD3);
+  emit_modrm(subcode, dst);
+}
+
+
+void Assembler::shift_32(Register dst, Immediate shift_amount, int subcode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(is_uint6(shift_amount.value_));  // illegal shift count
+  if (shift_amount.value_ == 1) {
+    emit_optional_rex_32(dst);
+    emit(0xD1);
+    emit_modrm(subcode, dst);
+  } else {
+    emit_optional_rex_32(dst);
+    emit(0xC1);
+    emit_modrm(subcode, dst);
+    emit(shift_amount.value_);
+  }
+}
+
+
+void Assembler::bt(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(src, dst);
+  emit(0x0F);
+  emit(0xA3);
+  emit_operand(src, dst);
+}
+
+
+void Assembler::bts(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(src, dst);
+  emit(0x0F);
+  emit(0xAB);
+  emit_operand(src, dst);
+}
+
+
+void Assembler::call(Label* L) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // 1110 1000 #32-bit disp
+  emit(0xE8);
+  if (L->is_bound()) {
+    int offset = L->pos() - pc_offset() - sizeof(int32_t);
+    ASSERT(offset <= 0);
+    emitl(offset);
+  } else if (L->is_linked()) {
+    emitl(L->pos());
+    L->link_to(pc_offset() - sizeof(int32_t));
+  } else {
+    ASSERT(L->is_unused());
+    int32_t current = pc_offset();
+    emitl(current);
+    L->link_to(current);
+  }
+}
+
+
+void Assembler::call(Register adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Opcode: FF /2 r64
+  if (adr.high_bit()) {
+    emit_rex_64(adr);
+  }
+  emit(0xFF);
+  emit_modrm(0x2, adr);
+}
+
+
+void Assembler::call(const Operand& op) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Opcode: FF /2 m64
+  emit_rex_64(op);
+  emit(0xFF);
+  emit_operand(2, op);
+}
+
+
+void Assembler::cdq() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x99);
+}
+
+
+void Assembler::cmovq(Condition cc, Register dst, Register src) {
+  // No need to check CpuInfo for CMOV support; it's a required part of the
+  // 64-bit architecture.
+  ASSERT(cc >= 0);  // Use mov for unconditional moves.
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Opcode: REX.W 0f 40 + cc /r
+  emit_rex_64(dst, src);
+  emit(0x0f);
+  emit(0x40 + cc);
+  emit_modrm(dst, src);
+}
+
+
+void Assembler::cmovq(Condition cc, Register dst, const Operand& src) {
+  ASSERT(cc >= 0);
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Opcode: REX.W 0f 40 + cc /r
+  emit_rex_64(dst, src);
+  emit(0x0f);
+  emit(0x40 + cc);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::cmovl(Condition cc, Register dst, Register src) {
+  ASSERT(cc >= 0);
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Opcode: 0f 40 + cc /r
+  emit_optional_rex_32(dst, src);
+  emit(0x0f);
+  emit(0x40 + cc);
+  emit_modrm(dst, src);
+}
+
+
+void Assembler::cmovl(Condition cc, Register dst, const Operand& src) {
+  ASSERT(cc >= 0);
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Opcode: 0f 40 + cc /r
+  emit_optional_rex_32(dst, src);
+  emit(0x0f);
+  emit(0x40 + cc);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::cmpb_al(Immediate imm8) {
+  ASSERT(is_int8(imm8.value_) || is_uint8(imm8.value_));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x3c);
+  emit(imm8.value_);
+}
+
+
+void Assembler::cpuid() {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CPUID));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x0F);
+  emit(0xA2);
+}
+
+
+void Assembler::cqo() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64();
+  emit(0x99);
+}
+
+
+void Assembler::decq(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst);
+  emit(0xFF);
+  emit_modrm(0x1, dst);
+}
+
+
+void Assembler::decq(const Operand& dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst);
+  emit(0xFF);
+  emit_operand(1, dst);
+}
+
+
+void Assembler::decl(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  emit(0xFF);
+  emit_modrm(0x1, dst);
+}
+
+
+void Assembler::decl(const Operand& dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  emit(0xFF);
+  emit_operand(1, dst);
+}
+
+
+void Assembler::enter(Immediate size) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xC8);
+  emitw(size.value_);  // 16 bit operand, always.
+  emit(0);
+}
+
+
+void Assembler::hlt() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF4);
+}
+
+
+void Assembler::idivq(Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(src);
+  emit(0xF7);
+  emit_modrm(0x7, src);
+}
+
+
+void Assembler::idivl(Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(src);
+  emit(0xF7);
+  emit_modrm(0x7, src);
+}
+
+
+void Assembler::imul(Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(src);
+  emit(0xF7);
+  emit_modrm(0x5, src);
+}
+
+
+void Assembler::imul(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst, src);
+  emit(0x0F);
+  emit(0xAF);
+  emit_modrm(dst, src);
+}
+
+
+void Assembler::imul(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst, src);
+  emit(0x0F);
+  emit(0xAF);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::imul(Register dst, Register src, Immediate imm) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst, src);
+  if (is_int8(imm.value_)) {
+    emit(0x6B);
+    emit_modrm(dst, src);
+    emit(imm.value_);
+  } else {
+    emit(0x69);
+    emit_modrm(dst, src);
+    emitl(imm.value_);
+  }
+}
+
+
+void Assembler::imull(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0xAF);
+  emit_modrm(dst, src);
+}
+
+
+void Assembler::incq(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst);
+  emit(0xFF);
+  emit_modrm(0x0, dst);
+}
+
+
+void Assembler::incq(const Operand& dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst);
+  emit(0xFF);
+  emit_operand(0, dst);
+}
+
+
+void Assembler::incl(const Operand& dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  emit(0xFF);
+  emit_operand(0, dst);
+}
+
+
+void Assembler::int3() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xCC);
+}
+
+
+void Assembler::j(Condition cc, Label* L) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(is_uint4(cc));
+  if (L->is_bound()) {
+    const int short_size = 2;
+    const int long_size  = 6;
+    int offs = L->pos() - pc_offset();
+    ASSERT(offs <= 0);
+    if (is_int8(offs - short_size)) {
+      // 0111 tttn #8-bit disp
+      emit(0x70 | cc);
+      emit((offs - short_size) & 0xFF);
+    } else {
+      // 0000 1111 1000 tttn #32-bit disp
+      emit(0x0F);
+      emit(0x80 | cc);
+      emitl(offs - long_size);
+    }
+  } else if (L->is_linked()) {
+    // 0000 1111 1000 tttn #32-bit disp
+    emit(0x0F);
+    emit(0x80 | cc);
+    emitl(L->pos());
+    L->link_to(pc_offset() - sizeof(int32_t));
+  } else {
+    ASSERT(L->is_unused());
+    emit(0x0F);
+    emit(0x80 | cc);
+    int32_t current = pc_offset();
+    emitl(current);
+    L->link_to(current);
+  }
+}
+
+
+void Assembler::jmp(Label* L) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (L->is_bound()) {
+    int offs = L->pos() - pc_offset() - 1;
+    ASSERT(offs <= 0);
+    if (is_int8(offs - sizeof(int8_t))) {
+      // 1110 1011 #8-bit disp
+      emit(0xEB);
+      emit((offs - sizeof(int8_t)) & 0xFF);
+    } else {
+      // 1110 1001 #32-bit disp
+      emit(0xE9);
+      emitl(offs - sizeof(int32_t));
+    }
+  } else  if (L->is_linked()) {
+    // 1110 1001 #32-bit disp
+    emit(0xE9);
+    emitl(L->pos());
+    L->link_to(pc_offset() - sizeof(int32_t));
+  } else {
+    // 1110 1001 #32-bit disp
+    ASSERT(L->is_unused());
+    emit(0xE9);
+    int32_t current = pc_offset();
+    emitl(current);
+    L->link_to(current);
+  }
+}
+
+
+void Assembler::jmp(Register target) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Opcode FF/4 r64
+  if (target.high_bit()) {
+    emit_rex_64(target);
+  }
+  emit(0xFF);
+  emit_modrm(0x4, target);
+}
+
+
+void Assembler::jmp(const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Opcode FF/4 m64
+  emit_optional_rex_32(src);
+  emit(0xFF);
+  emit_operand(0x4, src);
+}
+
+
+void Assembler::lea(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst, src);
+  emit(0x8D);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::load_rax(void* value, RelocInfo::Mode mode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x48);  // REX.W
+  emit(0xA1);
+  emitq(reinterpret_cast<uintptr_t>(value), mode);
+}
+
+
+void Assembler::load_rax(ExternalReference ref) {
+  load_rax(ref.address(), RelocInfo::EXTERNAL_REFERENCE);
+}
+
+
+void Assembler::leave() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xC9);
+}
+
+
+void Assembler::movb(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_32(dst, src);
+  emit(0x8A);
+  emit_operand(dst, src);
+}
+
+void Assembler::movb(Register dst, Immediate imm) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_32(dst);
+  emit(0xC6);
+  emit_modrm(0x0, dst);
+  emit(imm.value_);
+}
+
+void Assembler::movb(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_32(src, dst);
+  emit(0x88);
+  emit_operand(src, dst);
+}
+
+void Assembler::movl(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst, src);
+  emit(0x8B);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::movl(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst, src);
+  emit(0x8B);
+  emit_modrm(dst, src);
+}
+
+
+void Assembler::movl(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(src, dst);
+  emit(0x89);
+  emit_operand(src, dst);
+}
+
+
+void Assembler::movl(const Operand& dst, Immediate value) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  emit(0xC7);
+  emit_operand(0x0, dst);
+  emit(value);  // Only 32-bit immediates are possible, not 8-bit immediates.
+}
+
+
+void Assembler::movl(Register dst, Immediate value) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  emit(0xC7);
+  emit_modrm(0x0, dst);
+  emit(value);  // Only 32-bit immediates are possible, not 8-bit immediates.
+}
+
+
+void Assembler::movq(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst, src);
+  emit(0x8B);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::movq(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst, src);
+  emit(0x8B);
+  emit_modrm(dst, src);
+}
+
+
+void Assembler::movq(Register dst, Immediate value) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst);
+  emit(0xC7);
+  emit_modrm(0x0, dst);
+  emit(value);  // Only 32-bit immediates are possible, not 8-bit immediates.
+}
+
+
+void Assembler::movq(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(src, dst);
+  emit(0x89);
+  emit_operand(src, dst);
+}
+
+
+void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
+  // This method must not be used with heap object references. The stored
+  // address is not GC safe. Use the handle version instead.
+  ASSERT(rmode > RelocInfo::LAST_GCED_ENUM);
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst);
+  emit(0xB8 | dst.low_bits());
+  emitq(reinterpret_cast<uintptr_t>(value), rmode);
+}
+
+
+void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
+  // Non-relocatable values might not need a 64-bit representation.
+  if (rmode == RelocInfo::NONE) {
+    // Sadly, there is no zero or sign extending move for 8-bit immediates.
+    if (is_int32(value)) {
+      movq(dst, Immediate(static_cast<int32_t>(value)));
+      return;
+    } else if (is_uint32(value)) {
+      movl(dst, Immediate(static_cast<int32_t>(value)));
+      return;
+    }
+    // Value cannot be represented by 32 bits, so do a full 64 bit immediate
+    // value.
+  }
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst);
+  emit(0xB8 | dst.low_bits());
+  emitq(value, rmode);
+}
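+
+// For example (illustrative): with rmode == RelocInfo::NONE, a value such as
+// 0x12 is emitted via the 7-byte sign-extended imm32 form (REX.W C7 /0), a
+// value such as 0xF0000000 via the zero-extending 32-bit movl form, and a
+// value that needs all 64 bits falls through to the 10-byte REX.W B8+r form.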
+
+
+void Assembler::movq(Register dst, ExternalReference ref) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst);
+  emit(0xB8 | dst.low_bits());
+  emitq(reinterpret_cast<uintptr_t>(ref.address()),
+        RelocInfo::EXTERNAL_REFERENCE);
+}
+
+
+void Assembler::movq(const Operand& dst, Immediate value) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst);
+  emit(0xC7);
+  emit_operand(0, dst);
+  emit(value);
+}
+
+
+// Loads the ip-relative location of the src label into the target
+// location (as a 32-bit offset sign extended to 64-bit).
+void Assembler::movl(const Operand& dst, Label* src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  emit(0xC7);
+  emit_operand(0, dst);
+  if (src->is_bound()) {
+    int offset = src->pos() - pc_offset() - sizeof(int32_t);
+    ASSERT(offset <= 0);
+    emitl(offset);
+  } else if (src->is_linked()) {
+    emitl(src->pos());
+    src->link_to(pc_offset() - sizeof(int32_t));
+  } else {
+    ASSERT(src->is_unused());
+    int32_t current = pc_offset();
+    emitl(current);
+    src->link_to(current);
+  }
+}
+
+
+void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
+  // If there is no relocation info, emit the value of the handle efficiently
+  // (possibly using less than 8 bytes for the value).
+  if (mode == RelocInfo::NONE) {
+    // There is no possible reason to store a heap pointer without relocation
+    // info, so it must be a smi.
+    ASSERT(value->IsSmi());
+    // Smis never have more than 32 significant bits, but they might
+    // have garbage in the high bits.
+    movq(dst,
+         Immediate(static_cast<int32_t>(reinterpret_cast<intptr_t>(*value))));
+  } else {
+    EnsureSpace ensure_space(this);
+    last_pc_ = pc_;
+    ASSERT(value->IsHeapObject());
+    ASSERT(!Heap::InNewSpace(*value));
+    emit_rex_64(dst);
+    emit(0xB8 | dst.low_bits());
+    emitq(reinterpret_cast<uintptr_t>(value.location()), mode);
+  }
+}
+
+
+void Assembler::movsxlq(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst, src);
+  emit(0x63);
+  emit_modrm(dst, src);
+}
+
+
+void Assembler::movsxlq(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst, src);
+  emit(0x63);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::movzxbq(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst, src);
+  emit(0x0F);
+  emit(0xB6);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::movzxbl(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0xB6);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::movzxwl(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0xB7);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::mul(Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(src);
+  emit(0xF7);
+  emit_modrm(0x4, src);
+}
+
+
+void Assembler::neg(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst);
+  emit(0xF7);
+  emit_modrm(0x3, dst);
+}
+
+
+void Assembler::negl(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  emit(0xF7);
+  emit_modrm(0x3, dst);
+}
+
+
+void Assembler::neg(const Operand& dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst);
+  emit(0xF7);
+  emit_operand(3, dst);
+}
+
+
+void Assembler::nop() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x90);
+}
+
+
+void Assembler::not_(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst);
+  emit(0xF7);
+  emit_modrm(0x2, dst);
+}
+
+
+void Assembler::not_(const Operand& dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst);
+  emit(0xF7);
+  emit_operand(2, dst);
+}
+
+
+void Assembler::nop(int n) {
+  // The recommended multi-byte sequences of NOP instructions from the Intel 64
+  // and IA-32 Architectures Software Developer's Manual.
+  //
+  // Length   Assembly                                Byte Sequence
+  // 2 bytes  66 NOP                                  66 90H
+  // 3 bytes  NOP DWORD ptr [EAX]                     0F 1F 00H
+  // 4 bytes  NOP DWORD ptr [EAX + 00H]               0F 1F 40 00H
+  // 5 bytes  NOP DWORD ptr [EAX + EAX*1 + 00H]       0F 1F 44 00 00H
+  // 6 bytes  66 NOP DWORD ptr [EAX + EAX*1 + 00H]    66 0F 1F 44 00 00H
+  // 7 bytes  NOP DWORD ptr [EAX + 00000000H]         0F 1F 80 00 00 00 00H
+  // 8 bytes  NOP DWORD ptr [EAX + EAX*1 + 00000000H] 0F 1F 84 00 00 00 00 00H
+  // 9 bytes  66 NOP DWORD ptr [EAX + EAX*1 +         66 0F 1F 84 00 00 00 00
+  //          00000000H]                              00H
+
+  ASSERT(1 <= n);
+  ASSERT(n <= 9);
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  switch (n) {
+  case 1:
+    emit(0x90);
+    return;
+  case 2:
+    emit(0x66);
+    emit(0x90);
+    return;
+  case 3:
+    emit(0x0f);
+    emit(0x1f);
+    emit(0x00);
+    return;
+  case 4:
+    emit(0x0f);
+    emit(0x1f);
+    emit(0x40);
+    emit(0x00);
+    return;
+  case 5:
+    emit(0x0f);
+    emit(0x1f);
+    emit(0x44);
+    emit(0x00);
+    emit(0x00);
+    return;
+  case 6:
+    emit(0x66);
+    emit(0x0f);
+    emit(0x1f);
+    emit(0x44);
+    emit(0x00);
+    emit(0x00);
+    return;
+  case 7:
+    emit(0x0f);
+    emit(0x1f);
+    emit(0x80);
+    emit(0x00);
+    emit(0x00);
+    emit(0x00);
+    emit(0x00);
+    return;
+  case 8:
+    emit(0x0f);
+    emit(0x1f);
+    emit(0x84);
+    emit(0x00);
+    emit(0x00);
+    emit(0x00);
+    emit(0x00);
+    emit(0x00);
+    return;
+  case 9:
+    emit(0x66);
+    emit(0x0f);
+    emit(0x1f);
+    emit(0x84);
+    emit(0x00);
+    emit(0x00);
+    emit(0x00);
+    emit(0x00);
+    emit(0x00);
+    return;
+  }
+}
+
+
+void Assembler::pop(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (dst.high_bit()) {
+    emit_rex_64(dst);
+  }
+  emit(0x58 | dst.low_bits());
+}
+
+
+void Assembler::pop(const Operand& dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst);  // Could be omitted in some cases.
+  emit(0x8F);
+  emit_operand(0, dst);
+}
+
+
+void Assembler::popfq() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x9D);
+}
+
+
+void Assembler::push(Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (src.high_bit()) {
+    emit_rex_64(src);
+  }
+  emit(0x50 | src.low_bits());
+}
+
+
+void Assembler::push(const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(src);  // Could be omitted in some cases.
+  emit(0xFF);
+  emit_operand(6, src);
+}
+
+
+void Assembler::push(Immediate value) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (is_int8(value.value_)) {
+    emit(0x6A);
+    emit(value.value_);  // Emit low byte of value.
+  } else {
+    emit(0x68);
+    emitl(value.value_);
+  }
+}
+
+
+void Assembler::pushfq() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x9C);
+}
+
+
+void Assembler::rcl(Register dst, uint8_t imm8) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(is_uint6(imm8));  // illegal shift count
+  if (imm8 == 1) {
+    emit_rex_64(dst);
+    emit(0xD1);
+    emit_modrm(0x2, dst);
+  } else {
+    emit_rex_64(dst);
+    emit(0xC1);
+    emit_modrm(0x2, dst);
+    emit(imm8);
+  }
+}
+
+void Assembler::rdtsc() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x0F);
+  emit(0x31);
+}
+
+
+void Assembler::ret(int imm16) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(is_uint16(imm16));
+  if (imm16 == 0) {
+    emit(0xC3);
+  } else {
+    emit(0xC2);
+    emit(imm16 & 0xFF);
+    emit((imm16 >> 8) & 0xFF);
+  }
+}
+
+
+void Assembler::setcc(Condition cc, Register reg) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(is_uint4(cc));
+  if (reg.code() > 3) {  // Use x64 byte registers, where different.
+    emit_rex_32(reg);
+  }
+  emit(0x0F);
+  emit(0x90 | cc);
+  emit_modrm(0x0, reg);
+}
+
+
+void Assembler::shld(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(src, dst);
+  emit(0x0F);
+  emit(0xA5);
+  emit_modrm(src, dst);
+}
+
+
+void Assembler::shrd(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(src, dst);
+  emit(0x0F);
+  emit(0xAD);
+  emit_modrm(src, dst);
+}
+
+
+void Assembler::xchg(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (src.is(rax) || dst.is(rax)) {  // Single-byte encoding
+    Register other = src.is(rax) ? dst : src;
+    emit_rex_64(other);
+    emit(0x90 | other.low_bits());
+  } else {
+    emit_rex_64(src, dst);
+    emit(0x87);
+    emit_modrm(src, dst);
+  }
+}
+
+
+void Assembler::store_rax(void* dst, RelocInfo::Mode mode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x48);  // REX.W
+  emit(0xA3);
+  emitq(reinterpret_cast<uintptr_t>(dst), mode);
+}
+
+
+void Assembler::store_rax(ExternalReference ref) {
+  store_rax(ref.address(), RelocInfo::EXTERNAL_REFERENCE);
+}
+
+
+void Assembler::testb(Register reg, Immediate mask) {
+  ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (reg.is(rax)) {
+    emit(0xA8);
+    emit(mask.value_);  // Low byte emitted.
+  } else {
+    if (reg.code() > 3) {
+      // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
+      emit_rex_32(reg);
+    }
+    emit(0xF6);
+    emit_modrm(0x0, reg);
+    emit(mask.value_);  // Low byte emitted.
+  }
+}
+
+
+void Assembler::testb(const Operand& op, Immediate mask) {
+  ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(rax, op);
+  emit(0xF6);
+  emit_operand(rax, op);  // Operation code 0
+  emit(mask.value_);  // Low byte emitted.
+}
+
+
+void Assembler::testl(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst, src);
+  emit(0x85);
+  emit_modrm(dst, src);
+}
+
+
+void Assembler::testl(Register reg, Immediate mask) {
+  // testl with a mask that fits in the low byte is exactly testb.
+  if (is_uint8(mask.value_)) {
+    testb(reg, mask);
+    return;
+  }
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (reg.is(rax)) {
+    emit(0xA9);
+    emit(mask);
+  } else {
+    emit_optional_rex_32(rax, reg);
+    emit(0xF7);
+    emit_modrm(0x0, reg);
+    emit(mask);
+  }
+}
+
+
+void Assembler::testl(const Operand& op, Immediate mask) {
+  // testl with a mask that fits in the low byte is exactly testb.
+  if (is_uint8(mask.value_)) {
+    testb(op, mask);
+    return;
+  }
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(rax, op);
+  emit(0xF7);
+  emit_operand(rax, op);  // Operation code 0
+  emit(mask);
+}
+
+
+void Assembler::testq(const Operand& op, Register reg) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(reg, op);
+  emit(0x85);
+  emit_operand(reg, op);
+}
+
+
+void Assembler::testq(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst, src);
+  emit(0x85);
+  emit_modrm(dst, src);
+}
+
+
+void Assembler::testq(Register dst, Immediate mask) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (dst.is(rax)) {
+    emit_rex_64();
+    emit(0xA9);
+    emit(mask);
+  } else {
+    emit_rex_64(dst);
+    emit(0xF7);
+    emit_modrm(0, dst);
+    emit(mask);
+  }
+}
+
+
+// FPU instructions
+
+
+void Assembler::fld(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xD9, 0xC0, i);
+}
+
+
+void Assembler::fld1() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xD9);
+  emit(0xE8);
+}
+
+
+void Assembler::fldz() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xD9);
+  emit(0xEE);
+}
+
+
+void Assembler::fld_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(adr);
+  emit(0xD9);
+  emit_operand(0, adr);
+}
+
+
+void Assembler::fld_d(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(adr);
+  emit(0xDD);
+  emit_operand(0, adr);
+}
+
+
+void Assembler::fstp_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(adr);
+  emit(0xD9);
+  emit_operand(3, adr);
+}
+
+
+void Assembler::fstp_d(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(adr);
+  emit(0xDD);
+  emit_operand(3, adr);
+}
+
+
+void Assembler::fild_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(adr);
+  emit(0xDB);
+  emit_operand(0, adr);
+}
+
+
+void Assembler::fild_d(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(adr);
+  emit(0xDF);
+  emit_operand(5, adr);
+}
+
+
+void Assembler::fistp_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(adr);
+  emit(0xDB);
+  emit_operand(3, adr);
+}
+
+
+void Assembler::fisttp_s(const Operand& adr) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE3));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(adr);
+  emit(0xDB);
+  emit_operand(1, adr);
+}
+
+
+void Assembler::fist_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(adr);
+  emit(0xDB);
+  emit_operand(2, adr);
+}
+
+
+void Assembler::fistp_d(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(adr);
+  emit(0xDF);
+  emit_operand(7, adr);  // fistp m64int is DF /7; the reg field must be 0..7.
+}
+
+
+void Assembler::fabs() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xD9);
+  emit(0xE1);
+}
+
+
+void Assembler::fchs() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xD9);
+  emit(0xE0);
+}
+
+
+void Assembler::fcos() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xD9);
+  emit(0xFF);
+}
+
+
+void Assembler::fsin() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xD9);
+  emit(0xFE);
+}
+
+
+void Assembler::fadd(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDC, 0xC0, i);
+}
+
+
+void Assembler::fsub(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDC, 0xE8, i);
+}
+
+
+void Assembler::fisub_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(adr);
+  emit(0xDA);
+  emit_operand(4, adr);
+}
+
+
+void Assembler::fmul(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDC, 0xC8, i);
+}
+
+
+void Assembler::fdiv(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDC, 0xF8, i);
+}
+
+
+void Assembler::faddp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDE, 0xC0, i);
+}
+
+
+void Assembler::fsubp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDE, 0xE8, i);
+}
+
+
+void Assembler::fsubrp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDE, 0xE0, i);
+}
+
+
+void Assembler::fmulp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDE, 0xC8, i);
+}
+
+
+void Assembler::fdivp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDE, 0xF8, i);
+}
+
+
+void Assembler::fprem() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xD9);
+  emit(0xF8);
+}
+
+
+void Assembler::fprem1() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xD9);
+  emit(0xF5);
+}
+
+
+void Assembler::fxch(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xD9, 0xC8, i);
+}
+
+
+void Assembler::fincstp() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xD9);
+  emit(0xF7);
+}
+
+
+void Assembler::ffree(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDD, 0xC0, i);
+}
+
+
+void Assembler::ftst() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xD9);
+  emit(0xE4);
+}
+
+
+void Assembler::fucomp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDD, 0xE8, i);
+}
+
+
+void Assembler::fucompp() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xDA);
+  emit(0xE9);
+}
+
+
+void Assembler::fcompp() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xDE);
+  emit(0xD9);
+}
+
+
+void Assembler::fnstsw_ax() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xDF);
+  emit(0xE0);
+}
+
+
+void Assembler::fwait() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x9B);
+}
+
+
+void Assembler::frndint() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xD9);
+  emit(0xFC);
+}
+
+
+void Assembler::fnclex() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xDB);
+  emit(0xE2);
+}
+
+
+void Assembler::sahf() {
+  // TODO(X64): Test for presence. Not all 64-bit Intel CPUs have sahf
+  // in 64-bit mode. Test CPUID.
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x9E);
+}
+
+
+void Assembler::emit_farith(int b1, int b2, int i) {
+  ASSERT(is_uint8(b1) && is_uint8(b2));  // wrong opcode
+  ASSERT(is_uint3(i));  // illegal stack offset
+  emit(b1);
+  emit(b2 + i);
+}
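+
+// For example (illustrative): faddp(1) above calls emit_farith(0xDE, 0xC0, 1)
+// and therefore emits the two bytes DE C1, the x87 encoding of faddp st(1).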
+
+// SSE2 operations
+
+void Assembler::movsd(const Operand& dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF2);  // double
+  emit_optional_rex_32(src, dst);
+  emit(0x0F);
+  emit(0x11);  // store
+  emit_sse_operand(src, dst);
+}
+
+
+void Assembler::movsd(Register dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF2);  // double
+  emit_optional_rex_32(src, dst);
+  emit(0x0F);
+  emit(0x11);  // store
+  emit_sse_operand(src, dst);
+}
+
+
+void Assembler::movsd(XMMRegister dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF2);  // double
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x10);  // load
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::movsd(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF2);  // double
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x10);  // load
+  emit_sse_operand(dst, src);
+}
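+
+// Each movsd variant above is encoded as the mandatory F2 prefix, an optional
+// REX prefix (which must immediately precede the opcode), the two-byte opcode
+// 0F 10 (load into an XMM register) or 0F 11 (store from an XMM register),
+// and the ModR/M (plus any SIB/displacement) bytes from emit_sse_operand.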
+
+
+void Assembler::cvttss2si(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF3);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x2C);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::cvttsd2si(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF2);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x2C);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::cvtlsi2sd(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF2);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x2A);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::cvtlsi2sd(XMMRegister dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF2);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x2A);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::cvtqsi2sd(XMMRegister dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF2);
+  emit_rex_64(dst, src);
+  emit(0x0F);
+  emit(0x2A);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::addsd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF2);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x58);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF2);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x59);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::subsd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF2);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x5C);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::divsd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF2);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x5E);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
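+  // Reuse the general-purpose operand encoder: only the register code bits
+  // matter for the ModR/M reg field, so the XMM code is wrapped in a Register.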
+  Register ireg = { reg.code() };
+  emit_operand(ireg, adr);
+}
+
+
+void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
+  emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
+}
+
+void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
+  emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
+}
+
+
+// Relocation information implementations
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+  ASSERT(rmode != RelocInfo::NONE);
+  // Don't record external references unless the heap will be serialized.
+  if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
+      !Serializer::enabled() &&
+      !FLAG_debug_code) {
+    return;
+  }
+  RelocInfo rinfo(pc_, rmode, data);
+  reloc_info_writer.Write(&rinfo);
+}
+
+void Assembler::RecordJSReturn() {
+  WriteRecordedPositions();
+  EnsureSpace ensure_space(this);
+  RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
+void Assembler::RecordComment(const char* msg) {
+  if (FLAG_debug_code) {
+    EnsureSpace ensure_space(this);
+    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+  }
+}
+
+
+void Assembler::RecordPosition(int pos) {
+  ASSERT(pos != RelocInfo::kNoPosition);
+  ASSERT(pos >= 0);
+  current_position_ = pos;
+}
+
+
+void Assembler::RecordStatementPosition(int pos) {
+  ASSERT(pos != RelocInfo::kNoPosition);
+  ASSERT(pos >= 0);
+  current_statement_position_ = pos;
+}
+
+
+void Assembler::WriteRecordedPositions() {
+  // Write the statement position if it is different from what was written last
+  // time.
+  if (current_statement_position_ != written_statement_position_) {
+    EnsureSpace ensure_space(this);
+    RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
+    written_statement_position_ = current_statement_position_;
+  }
+
+  // Write the position if it is different from what was written last time and
+  // also different from the written statement position.
+  if (current_position_ != written_position_ &&
+      current_position_ != written_statement_position_) {
+    EnsureSpace ensure_space(this);
+    RecordRelocInfo(RelocInfo::POSITION, current_position_);
+    written_position_ = current_position_;
+  }
+}
+
+
+const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;
+
+
+} }  // namespace v8::internal
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
new file mode 100644
index 0000000..ff87286
--- /dev/null
+++ b/src/x64/assembler-x64.h
@@ -0,0 +1,1319 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+
+// A lightweight X64 Assembler.
+
+#ifndef V8_X64_ASSEMBLER_X64_H_
+#define V8_X64_ASSEMBLER_X64_H_
+
+namespace v8 {
+namespace internal {
+
+// Utility functions
+
+// Test whether a 64-bit value is in a specific range.
+static inline bool is_uint32(int64_t x) {
+  static const int64_t kUInt32Mask = V8_INT64_C(0xffffffff);
+  return x == (x & kUInt32Mask);
+}
+
+static inline bool is_int32(int64_t x) {
+  static const int64_t kMinIntValue = V8_INT64_C(-0x80000000);
+  return is_uint32(x - kMinIntValue);
+}
+
+static inline bool uint_is_int32(uint64_t x) {
+  static const uint64_t kMaxIntValue = V8_UINT64_C(0x80000000);
+  return x < kMaxIntValue;
+}
+
+static inline bool is_uint32(uint64_t x) {
+  static const uint64_t kMaxUIntValue = V8_UINT64_C(0x100000000);
+  return x < kMaxUIntValue;
+}
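+
+// Examples (illustrative): is_int32(V8_INT64_C(0x7FFFFFFF)) and
+// is_uint32(V8_UINT64_C(0xFFFFFFFF)) are true, while
+// is_int32(V8_INT64_C(0x80000000)) is false because that value does not fit
+// in a sign-extended 32-bit field.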
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+//
+
+struct Register {
+  static Register toRegister(int code) {
+    Register r = { code };
+    return r;
+  }
+  bool is_valid() const  { return 0 <= code_ && code_ < 16; }
+  bool is(Register reg) const  { return code_ == reg.code_; }
+  int code() const  {
+    ASSERT(is_valid());
+    return code_;
+  }
+  int bit() const  {
+    return 1 << code_;
+  }
+
+  // Return the high bit of the register code as a 0 or 1.  Used often
+  // when constructing the REX prefix byte.
+  int high_bit() const {
+    return code_ >> 3;
+  }
+  // Return the 3 low bits of the register code.  Used when encoding registers
+  // in modR/M, SIB, and opcode bytes.
+  int low_bits() const {
+    return code_ & 0x7;
+  }
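+  // For example (illustrative): r9 has code 9, so high_bit() is 1 (it becomes
+  // a REX.B or REX.R bit) and low_bits() is 1 (the value encoded in the
+  // opcode, ModR/M, or SIB byte).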
+
+  // (unfortunately we can't make this private in a struct when initializing
+  // by assignment.)
+  int code_;
+};
+
+extern Register rax;
+extern Register rcx;
+extern Register rdx;
+extern Register rbx;
+extern Register rsp;
+extern Register rbp;
+extern Register rsi;
+extern Register rdi;
+extern Register r8;
+extern Register r9;
+extern Register r10;
+extern Register r11;
+extern Register r12;
+extern Register r13;
+extern Register r14;
+extern Register r15;
+extern Register no_reg;
+
+
+struct MMXRegister {
+  bool is_valid() const  { return 0 <= code_ && code_ < 2; }
+  int code() const  {
+    ASSERT(is_valid());
+    return code_;
+  }
+
+  int code_;
+};
+
+extern MMXRegister mm0;
+extern MMXRegister mm1;
+extern MMXRegister mm2;
+extern MMXRegister mm3;
+extern MMXRegister mm4;
+extern MMXRegister mm5;
+extern MMXRegister mm6;
+extern MMXRegister mm7;
+extern MMXRegister mm8;
+extern MMXRegister mm9;
+extern MMXRegister mm10;
+extern MMXRegister mm11;
+extern MMXRegister mm12;
+extern MMXRegister mm13;
+extern MMXRegister mm14;
+extern MMXRegister mm15;
+
+
+struct XMMRegister {
+  bool is_valid() const  { return 0 <= code_ && code_ < 16; }
+  int code() const  {
+    ASSERT(is_valid());
+    return code_;
+  }
+
+  // Return the high bit of the register code as a 0 or 1.  Used often
+  // when constructing the REX prefix byte.
+  int high_bit() const {
+    return code_ >> 3;
+  }
+  // Return the 3 low bits of the register code.  Used when encoding registers
+  // in modR/M, SIB, and opcode bytes.
+  int low_bits() const {
+    return code_ & 0x7;
+  }
+
+  int code_;
+};
+
+extern XMMRegister xmm0;
+extern XMMRegister xmm1;
+extern XMMRegister xmm2;
+extern XMMRegister xmm3;
+extern XMMRegister xmm4;
+extern XMMRegister xmm5;
+extern XMMRegister xmm6;
+extern XMMRegister xmm7;
+extern XMMRegister xmm8;
+extern XMMRegister xmm9;
+extern XMMRegister xmm10;
+extern XMMRegister xmm11;
+extern XMMRegister xmm12;
+extern XMMRegister xmm13;
+extern XMMRegister xmm14;
+extern XMMRegister xmm15;
+
+enum Condition {
+  // any value < 0 is considered no_condition
+  no_condition  = -1,
+
+  overflow      =  0,
+  no_overflow   =  1,
+  below         =  2,
+  above_equal   =  3,
+  equal         =  4,
+  not_equal     =  5,
+  below_equal   =  6,
+  above         =  7,
+  negative      =  8,
+  positive      =  9,
+  parity_even   = 10,
+  parity_odd    = 11,
+  less          = 12,
+  greater_equal = 13,
+  less_equal    = 14,
+  greater       = 15,
+
+  // aliases
+  carry         = below,
+  not_carry     = above_equal,
+  zero          = equal,
+  not_zero      = not_equal,
+  sign          = negative,
+  not_sign      = positive
+};
+
+
+// Returns the equivalent of !cc.
+// Negation of the default no_condition (-1) results in a non-default
+// no_condition value (-2). As long as tests for no_condition check
+// for condition < 0, this will work as expected.
+inline Condition NegateCondition(Condition cc);
+
+// Corresponds to transposing the operands of a comparison.
+inline Condition ReverseCondition(Condition cc) {
+  switch (cc) {
+    case below:
+      return above;
+    case above:
+      return below;
+    case above_equal:
+      return below_equal;
+    case below_equal:
+      return above_equal;
+    case less:
+      return greater;
+    case greater:
+      return less;
+    case greater_equal:
+      return less_equal;
+    case less_equal:
+      return greater_equal;
+    default:
+      return cc;
+  };
+}
+
+enum Hint {
+  no_hint = 0,
+  not_taken = 0x2e,
+  taken = 0x3e
+};
+
+// The result of negating a hint is as if the corresponding condition
+// were negated by NegateCondition.  That is, no_hint is mapped to
+// itself and not_taken and taken are mapped to each other.
+inline Hint NegateHint(Hint hint) {
+  return (hint == no_hint)
+      ? no_hint
+      : ((hint == not_taken) ? taken : not_taken);
+}
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Immediates
+
+class Immediate BASE_EMBEDDED {
+ public:
+  explicit Immediate(int32_t value) : value_(value) {}
+  inline explicit Immediate(Smi* value);
+
+ private:
+  int32_t value_;
+
+  friend class Assembler;
+};
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands
+
+enum ScaleFactor {
+  times_1 = 0,
+  times_2 = 1,
+  times_4 = 2,
+  times_8 = 3,
+  times_int_size = times_4,
+  times_half_pointer_size = times_4,
+  times_pointer_size = times_8
+};
+
+
+class Operand BASE_EMBEDDED {
+ public:
+  // [base + disp/r]
+  Operand(Register base, int32_t disp);
+
+  // [base + index*scale + disp/r]
+  Operand(Register base,
+          Register index,
+          ScaleFactor scale,
+          int32_t disp);
+
+  // [index*scale + disp/r]
+  Operand(Register index,
+          ScaleFactor scale,
+          int32_t disp);
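+
+  // Examples (illustrative): Operand(rbp, -8) addresses [rbp - 8];
+  // Operand(rbx, rcx, times_8, 16) addresses [rbx + rcx * 8 + 16].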
+
+ private:
+  byte rex_;
+  byte buf_[10];
+  // The number of bytes in buf_.
+  unsigned int len_;
+  RelocInfo::Mode rmode_;
+
+  // Set the ModR/M byte without an encoded 'reg' register. The
+  // register is encoded later as part of the emit_operand operation.
+  // set_modrm can be called before or after set_sib and set_disp*.
+  inline void set_modrm(int mod, Register rm);
+
+  // Set the SIB byte if one is needed. Sets the length to 2 rather than 1.
+  inline void set_sib(ScaleFactor scale, Register index, Register base);
+
+  // Adds operand displacement fields (offsets added to the memory address).
+  // Needs to be called after set_sib, not before it.
+  inline void set_disp8(int disp);
+  inline void set_disp32(int disp);
+
+  friend class Assembler;
+};
+
+
+// CpuFeatures keeps track of which features are supported by the target CPU.
+// Supported features must be enabled by a Scope before use.
+// Example:
+//   if (CpuFeatures::IsSupported(SSE3)) {
+//     CpuFeatures::Scope fscope(SSE3);
+//     // Generate SSE3 floating point code.
+//   } else {
+//     // Generate standard x87 or SSE2 floating point code.
+//   }
+class CpuFeatures : public AllStatic {
+ public:
+  // Feature flags bit positions. They are mostly based on the CPUID spec.
+  // (We assign CPUID itself to one of the currently reserved bits --
+  // feel free to change this if needed.)
+  enum Feature { SSE3 = 32,
+                 SSE2 = 26,
+                 CMOV = 15,
+                 RDTSC = 4,
+                 CPUID = 10,
+                 SAHF = 0};
+  // Detect features of the target CPU. Set safe defaults if the serializer
+  // is enabled (snapshots must be portable).
+  static void Probe();
+  // Check whether a feature is supported by the target CPU.
+  static bool IsSupported(Feature f) {
+    return (supported_ & (V8_UINT64_C(1) << f)) != 0;
+  }
+  // Check whether a feature is currently enabled.
+  static bool IsEnabled(Feature f) {
+    return (enabled_ & (V8_UINT64_C(1) << f)) != 0;
+  }
+  // Enable a specified feature within a scope.
+  class Scope BASE_EMBEDDED {
+#ifdef DEBUG
+   public:
+    explicit Scope(Feature f) {
+      ASSERT(CpuFeatures::IsSupported(f));
+      old_enabled_ = CpuFeatures::enabled_;
+      CpuFeatures::enabled_ |= (V8_UINT64_C(1) << f);
+    }
+    ~Scope() { CpuFeatures::enabled_ = old_enabled_; }
+   private:
+    uint64_t old_enabled_;
+#else
+   public:
+    explicit Scope(Feature f) {}
+#endif
+  };
+ private:
+  // Safe defaults include SSE2 and CMOV for X64. These are always available
+  // on x64 CPUs, so checks for them will succeed but should not be needed.
+  static const uint64_t kDefaultCpuFeatures =
+      (1 << CpuFeatures::SSE2 | 1 << CpuFeatures::CMOV);
+  static uint64_t supported_;
+  static uint64_t enabled_;
+};
+
+
+class Assembler : public Malloced {
+ private:
+  // We check before assembling an instruction that there is sufficient
+  // space to write an instruction and its relocation information.
+  // The relocation writer's position must be kGap bytes above the end of
+  // the generated instructions. This leaves enough space for the
+  // longest possible x64 instruction, 15 bytes, and the longest possible
+  // relocation information encoding, RelocInfoWriter::kMaxLength == 16.
+  // (There is a 15 byte limit on x64 instruction length that rules out some
+  // otherwise valid instructions.)
+  // This allows for a single, fast space check per instruction.
+  static const int kGap = 32;
+
+ public:
+  // Create an assembler. Instructions and relocation information are emitted
+  // into a buffer, with the instructions starting from the beginning and the
+  // relocation information starting from the end of the buffer. See CodeDesc
+  // for a detailed comment on the layout (globals.h).
+  //
+  // If the provided buffer is NULL, the assembler allocates and grows its own
+  // buffer, and buffer_size determines the initial buffer size. The buffer is
+  // owned by the assembler and deallocated upon destruction of the assembler.
+  //
+  // If the provided buffer is not NULL, the assembler uses the provided buffer
+  // for code generation and assumes its size to be buffer_size. If the buffer
+  // is too small, a fatal error occurs. No deallocation of the buffer is done
+  // upon destruction of the assembler.
+  Assembler(void* buffer, int buffer_size);
+  ~Assembler();
+
+  // GetCode emits any pending (non-emitted) code and fills the descriptor
+  // desc. GetCode() is idempotent; it returns the same result if no other
+  // Assembler functions are invoked in between GetCode() calls.
+  void GetCode(CodeDesc* desc);
+
+  // Read/Modify the code target in the branch/call instruction at pc.
+  // On the x64 architecture, the address is absolute, not relative.
+  static inline Address target_address_at(Address pc);
+  static inline void set_target_address_at(Address pc, Address target);
+
+  // Distance between the address of the code target in the call instruction
+  // and the return address.  Checked in the debug build.
+  static const int kCallTargetAddressOffset = 3 + kPointerSize;
+  // Distance between start of patched return sequence and the emitted address
+  // to jump to (movq = REX.W 0xB8+r.).
+  static const int kPatchReturnSequenceAddressOffset = 2;
+
+  // ---------------------------------------------------------------------------
+  // Code generation
+  //
+  // Function names correspond one-to-one to x64 instruction mnemonics.
+  // Unless specified otherwise, instructions operate on 64-bit operands.
+  //
+  // If we need versions of an assembly instruction that operate on different
+  // width arguments, we add a single-letter suffix specifying the width.
+  // This is done for the following instructions: mov, cmp, inc, dec,
+  // add, sub, and test.
+  // There are no versions of these instructions without the suffix.
+  // - Instructions on 8-bit (byte) operands/registers have a trailing 'b'.
+  // - Instructions on 16-bit (word) operands/registers have a trailing 'w'.
+  // - Instructions on 32-bit (doubleword) operands/registers use 'l'.
+  // - Instructions on 64-bit (quadword) operands/registers use 'q'.
+  //
+  // Some mnemonics, such as "and", are the same as C++ keywords.
+  // Naming conflicts with C++ keywords are resolved by adding a trailing '_'.
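+  //
+  // For illustration (hypothetical usage, not part of this interface):
+  //   masm.movq(rax, Immediate(1));   // quadword move of a sign-extended imm32
+  //   masm.addl(rax, Immediate(2));   // doubleword add; zero-extends into rax
+  //   masm.testb(rax, Immediate(4));  // byte test of the low 8 bits of rax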
+
+  // Insert the smallest number of nop instructions
+  // possible to align the pc offset to a multiple
+  // of m. m must be a power of 2.
+  void Align(int m);
+
+  // Stack
+  void pushfq();
+  void popfq();
+
+  void push(Immediate value);
+  void push(Register src);
+  void push(const Operand& src);
+  void push(Label* label, RelocInfo::Mode relocation_mode);
+
+  void pop(Register dst);
+  void pop(const Operand& dst);
+
+  void enter(Immediate size);
+  void leave();
+
+  // Moves
+  void movb(Register dst, const Operand& src);
+  void movb(Register dst, Immediate imm);
+  void movb(const Operand& dst, Register src);
+
+  void movl(Register dst, Register src);
+  void movl(Register dst, const Operand& src);
+  void movl(const Operand& dst, Register src);
+  void movl(const Operand& dst, Immediate imm);
+  // Load a 32-bit immediate value, zero-extended to 64 bits.
+  void movl(Register dst, Immediate imm32);
+
+  // Move 64 bit register value to 64-bit memory location.
+  void movq(const Operand& dst, Register src);
+  // Move 64 bit memory location to 64-bit register value.
+  void movq(Register dst, const Operand& src);
+  void movq(Register dst, Register src);
+  // Sign extends immediate 32-bit value to 64 bits.
+  void movq(Register dst, Immediate x);
+  // Move the offset of the label location relative to the current
+  // position (after the move) to the destination.
+  void movl(const Operand& dst, Label* src);
+
+  // Move sign extended immediate to memory location.
+  void movq(const Operand& dst, Immediate value);
+  // New x64 instructions to load a 64-bit immediate into a register.
+  // All 64-bit immediates must have a relocation mode.
+  void movq(Register dst, void* ptr, RelocInfo::Mode rmode);
+  void movq(Register dst, int64_t value, RelocInfo::Mode rmode);
+  void movq(Register dst, const char* s, RelocInfo::Mode rmode);
+  // Moves the address of the external reference into the register.
+  void movq(Register dst, ExternalReference ext);
+  void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode);
+
+  void movsxlq(Register dst, Register src);
+  void movsxlq(Register dst, const Operand& src);
+  void movzxbq(Register dst, const Operand& src);
+  void movzxbl(Register dst, const Operand& src);
+  void movzxwl(Register dst, const Operand& src);
+
+  // New x64 instruction to load from an immediate 64-bit pointer into RAX.
+  void load_rax(void* ptr, RelocInfo::Mode rmode);
+  void load_rax(ExternalReference ext);
+
+  // Conditional moves.
+  void cmovq(Condition cc, Register dst, Register src);
+  void cmovq(Condition cc, Register dst, const Operand& src);
+  void cmovl(Condition cc, Register dst, Register src);
+  void cmovl(Condition cc, Register dst, const Operand& src);
+
+  // Exchange two registers
+  void xchg(Register dst, Register src);
+
+  // Arithmetics
+  void addl(Register dst, Register src) {
+    if (dst.low_bits() == 4) {  // Forces SIB byte.
+      arithmetic_op_32(0x01, src, dst);
+    } else {
+      arithmetic_op_32(0x03, dst, src);
+    }
+  }
+
+  void addl(Register dst, Immediate src) {
+    immediate_arithmetic_op_32(0x0, dst, src);
+  }
+
+  void addl(Register dst, const Operand& src) {
+    arithmetic_op_32(0x03, dst, src);
+  }
+
+  void addl(const Operand& dst, Immediate src) {
+    immediate_arithmetic_op_32(0x0, dst, src);
+  }
+
+  void addq(Register dst, Register src) {
+    arithmetic_op(0x03, dst, src);
+  }
+
+  void addq(Register dst, const Operand& src) {
+    arithmetic_op(0x03, dst, src);
+  }
+
+  void addq(const Operand& dst, Register src) {
+    arithmetic_op(0x01, src, dst);
+  }
+
+  void addq(Register dst, Immediate src) {
+    immediate_arithmetic_op(0x0, dst, src);
+  }
+
+  void addq(const Operand& dst, Immediate src) {
+    immediate_arithmetic_op(0x0, dst, src);
+  }
+
+  void cmpb(Register dst, Immediate src) {
+    immediate_arithmetic_op_8(0x7, dst, src);
+  }
+
+  void cmpb_al(Immediate src);
+
+  void cmpb(Register dst, Register src) {
+    arithmetic_op(0x3A, dst, src);
+  }
+
+  void cmpb(Register dst, const Operand& src) {
+    arithmetic_op(0x3A, dst, src);
+  }
+
+  void cmpb(const Operand& dst, Register src) {
+    arithmetic_op(0x38, src, dst);
+  }
+
+  void cmpb(const Operand& dst, Immediate src) {
+    immediate_arithmetic_op_8(0x7, dst, src);
+  }
+
+  void cmpw(const Operand& dst, Immediate src) {
+    immediate_arithmetic_op_16(0x7, dst, src);
+  }
+
+  void cmpw(Register dst, Immediate src) {
+    immediate_arithmetic_op_16(0x7, dst, src);
+  }
+
+  void cmpw(Register dst, const Operand& src) {
+    arithmetic_op_16(0x3B, dst, src);
+  }
+
+  void cmpw(Register dst, Register src) {
+    arithmetic_op_16(0x3B, dst, src);
+  }
+
+  void cmpw(const Operand& dst, Register src) {
+    arithmetic_op_16(0x39, src, dst);
+  }
+
+  void cmpl(Register dst, Register src) {
+    arithmetic_op_32(0x3B, dst, src);
+  }
+
+  void cmpl(Register dst, const Operand& src) {
+    arithmetic_op_32(0x3B, dst, src);
+  }
+
+  void cmpl(const Operand& dst, Register src) {
+    arithmetic_op_32(0x39, src, dst);
+  }
+
+  void cmpl(Register dst, Immediate src) {
+    immediate_arithmetic_op_32(0x7, dst, src);
+  }
+
+  void cmpl(const Operand& dst, Immediate src) {
+    immediate_arithmetic_op_32(0x7, dst, src);
+  }
+
+  void cmpq(Register dst, Register src) {
+    arithmetic_op(0x3B, dst, src);
+  }
+
+  void cmpq(Register dst, const Operand& src) {
+    arithmetic_op(0x3B, dst, src);
+  }
+
+  void cmpq(const Operand& dst, Register src) {
+    arithmetic_op(0x39, src, dst);
+  }
+
+  void cmpq(Register dst, Immediate src) {
+    immediate_arithmetic_op(0x7, dst, src);
+  }
+
+  void cmpq(const Operand& dst, Immediate src) {
+    immediate_arithmetic_op(0x7, dst, src);
+  }
+
+  void and_(Register dst, Register src) {
+    arithmetic_op(0x23, dst, src);
+  }
+
+  void and_(Register dst, const Operand& src) {
+    arithmetic_op(0x23, dst, src);
+  }
+
+  void and_(const Operand& dst, Register src) {
+    arithmetic_op(0x21, src, dst);
+  }
+
+  void and_(Register dst, Immediate src) {
+    immediate_arithmetic_op(0x4, dst, src);
+  }
+
+  void and_(const Operand& dst, Immediate src) {
+    immediate_arithmetic_op(0x4, dst, src);
+  }
+
+  void andl(Register dst, Immediate src) {
+    immediate_arithmetic_op_32(0x4, dst, src);
+  }
+
+  void decq(Register dst);
+  void decq(const Operand& dst);
+  void decl(Register dst);
+  void decl(const Operand& dst);
+
+  // Sign-extends rax into rdx:rax.
+  void cqo();
+  // Sign-extends eax into edx:eax.
+  void cdq();
+
+  // Divide rdx:rax by src.  Quotient in rax, remainder in rdx.
+  void idivq(Register src);
+  // Divide edx:eax by lower 32 bits of src.  Quotient in eax, rem. in edx.
+  void idivl(Register src);
+
+  // Signed multiply instructions.
+  void imul(Register src);                               // rdx:rax = rax * src.
+  void imul(Register dst, Register src);                 // dst = dst * src.
+  void imul(Register dst, const Operand& src);           // dst = dst * src.
+  void imul(Register dst, Register src, Immediate imm);  // dst = src * imm.
+  // Multiply 32 bit registers
+  void imull(Register dst, Register src);                // dst = dst * src.
+
+  void incq(Register dst);
+  void incq(const Operand& dst);
+  void incl(const Operand& dst);
+
+  void lea(Register dst, const Operand& src);
+
+  // Multiply rax by src, put the result in rdx:rax.
+  void mul(Register src);
+
+  void neg(Register dst);
+  void neg(const Operand& dst);
+  void negl(Register dst);
+
+  void not_(Register dst);
+  void not_(const Operand& dst);
+
+  void or_(Register dst, Register src) {
+    arithmetic_op(0x0B, dst, src);
+  }
+
+  void orl(Register dst, Register src) {
+    arithmetic_op_32(0x0B, dst, src);
+  }
+
+  void or_(Register dst, const Operand& src) {
+    arithmetic_op(0x0B, dst, src);
+  }
+
+  void or_(const Operand& dst, Register src) {
+    arithmetic_op(0x09, src, dst);
+  }
+
+  void or_(Register dst, Immediate src) {
+    immediate_arithmetic_op(0x1, dst, src);
+  }
+
+  void or_(const Operand& dst, Immediate src) {
+    immediate_arithmetic_op(0x1, dst, src);
+  }
+
+
+  void rcl(Register dst, uint8_t imm8);
+
+  // Shifts dst:src left by cl bits, affecting only dst.
+  void shld(Register dst, Register src);
+
+  // Shifts src:dst right by cl bits, affecting only dst.
+  void shrd(Register dst, Register src);
+
+  // Shifts dst right, duplicating sign bit, by shift_amount bits.
+  // Shifting by 1 is handled efficiently.
+  void sar(Register dst, Immediate shift_amount) {
+    shift(dst, shift_amount, 0x7);
+  }
+
+  // Shifts dst right, duplicating sign bit, by shift_amount bits.
+  // Shifting by 1 is handled efficiently.
+  void sarl(Register dst, Immediate shift_amount) {
+    shift_32(dst, shift_amount, 0x7);
+  }
+
+  // Shifts dst right, duplicating sign bit, by cl % 64 bits.
+  void sar(Register dst) {
+    shift(dst, 0x7);
+  }
+
+  // Shifts dst right, duplicating sign bit, by cl % 64 bits.
+  void sarl(Register dst) {
+    shift_32(dst, 0x7);
+  }
+
+  void shl(Register dst, Immediate shift_amount) {
+    shift(dst, shift_amount, 0x4);
+  }
+
+  void shl(Register dst) {
+    shift(dst, 0x4);
+  }
+
+  void shll(Register dst) {
+    shift_32(dst, 0x4);
+  }
+
+  void shll(Register dst, Immediate shift_amount) {
+    shift_32(dst, shift_amount, 0x4);
+  }
+
+  void shr(Register dst, Immediate shift_amount) {
+    shift(dst, shift_amount, 0x5);
+  }
+
+  void shr(Register dst) {
+    shift(dst, 0x5);
+  }
+
+  void shrl(Register dst) {
+    shift_32(dst, 0x5);
+  }
+
+  void shrl(Register dst, Immediate shift_amount) {
+    shift_32(dst, shift_amount, 0x5);
+  }
+
+  void store_rax(void* dst, RelocInfo::Mode mode);
+  void store_rax(ExternalReference ref);
+
+  void subq(Register dst, Register src) {
+    arithmetic_op(0x2B, dst, src);
+  }
+
+  void subq(Register dst, const Operand& src) {
+    arithmetic_op(0x2B, dst, src);
+  }
+
+  void subq(const Operand& dst, Register src) {
+    arithmetic_op(0x29, src, dst);
+  }
+
+  void subq(Register dst, Immediate src) {
+    immediate_arithmetic_op(0x5, dst, src);
+  }
+
+  void subq(const Operand& dst, Immediate src) {
+    immediate_arithmetic_op(0x5, dst, src);
+  }
+
+  void subl(Register dst, Register src) {
+    arithmetic_op_32(0x2B, dst, src);
+  }
+
+  void subl(const Operand& dst, Immediate src) {
+    immediate_arithmetic_op_32(0x5, dst, src);
+  }
+
+  void subl(Register dst, Immediate src) {
+    immediate_arithmetic_op_32(0x5, dst, src);
+  }
+
+  void subb(Register dst, Immediate src) {
+    immediate_arithmetic_op_8(0x5, dst, src);
+  }
+
+  void testb(Register reg, Immediate mask);
+  void testb(const Operand& op, Immediate mask);
+  void testl(Register dst, Register src);
+  void testl(Register reg, Immediate mask);
+  void testl(const Operand& op, Immediate mask);
+  void testq(const Operand& op, Register reg);
+  void testq(Register dst, Register src);
+  void testq(Register dst, Immediate mask);
+
+  void xor_(Register dst, Register src) {
+    arithmetic_op(0x33, dst, src);
+  }
+
+  void xorl(Register dst, Register src) {
+    arithmetic_op_32(0x33, dst, src);
+  }
+
+  void xor_(Register dst, const Operand& src) {
+    arithmetic_op(0x33, dst, src);
+  }
+
+  void xor_(const Operand& dst, Register src) {
+    arithmetic_op(0x31, src, dst);
+  }
+
+  void xor_(Register dst, Immediate src) {
+    immediate_arithmetic_op(0x6, dst, src);
+  }
+
+  void xor_(const Operand& dst, Immediate src) {
+    immediate_arithmetic_op(0x6, dst, src);
+  }
+
+  // Bit operations.
+  void bt(const Operand& dst, Register src);
+  void bts(const Operand& dst, Register src);
+
+  // Miscellaneous
+  void cpuid();
+  void hlt();
+  void int3();
+  void nop();
+  void nop(int n);
+  void rdtsc();
+  void ret(int imm16);
+  void setcc(Condition cc, Register reg);
+
+  // Label operations & relative jumps (PPUM Appendix D)
+  //
+  // Takes a branch opcode (cc) and a label (L) and generates
+  // either a backward branch or a forward branch and links it
+  // to the label fixup chain. Usage:
+  //
+  // Label L;    // unbound label
+  // j(cc, &L);  // forward branch to unbound label
+  // bind(&L);   // bind label to the current pc
+  // j(cc, &L);  // backward branch to bound label
+  // bind(&L);   // illegal: a label may be bound only once
+  //
+  // Note: The same Label can be used for forward and backward branches
+  // but it may be bound only once.
+
+  void bind(Label* L);  // binds an unbound label L to the current code position
+
+  // Calls
+  // Call near relative 32-bit displacement, relative to next instruction.
+  void call(Label* L);
+
+  // Call near absolute indirect, address in register
+  void call(Register adr);
+
+  // Call near indirect
+  void call(const Operand& operand);
+
+  // Jumps
+  // Jump short or near relative.
+  void jmp(Label* L);  // unconditional jump to L
+
+  // Jump near absolute indirect (r64)
+  void jmp(Register adr);
+
+  // Jump near absolute indirect (m64)
+  void jmp(const Operand& src);
+
+  // Conditional jumps
+  void j(Condition cc, Label* L);
+
+  // Floating-point operations
+  void fld(int i);
+
+  void fld1();
+  void fldz();
+
+  void fld_s(const Operand& adr);
+  void fld_d(const Operand& adr);
+
+  void fstp_s(const Operand& adr);
+  void fstp_d(const Operand& adr);
+
+  void fild_s(const Operand& adr);
+  void fild_d(const Operand& adr);
+
+  void fist_s(const Operand& adr);
+
+  void fistp_s(const Operand& adr);
+  void fistp_d(const Operand& adr);
+
+  void fisttp_s(const Operand& adr);
+
+  void fabs();
+  void fchs();
+
+  void fadd(int i);
+  void fsub(int i);
+  void fmul(int i);
+  void fdiv(int i);
+
+  void fisub_s(const Operand& adr);
+
+  void faddp(int i = 1);
+  void fsubp(int i = 1);
+  void fsubrp(int i = 1);
+  void fmulp(int i = 1);
+  void fdivp(int i = 1);
+  void fprem();
+  void fprem1();
+
+  void fxch(int i = 1);
+  void fincstp();
+  void ffree(int i = 0);
+
+  void ftst();
+  void fucomp(int i);
+  void fucompp();
+  void fcompp();
+  void fnstsw_ax();
+  void fwait();
+  void fnclex();
+
+  void fsin();
+  void fcos();
+
+  void frndint();
+
+  void sahf();
+
+  // SSE2 instructions
+  void movsd(const Operand& dst, XMMRegister src);
+  void movsd(Register src, XMMRegister dst);
+  void movsd(XMMRegister dst, Register src);
+  void movsd(XMMRegister src, const Operand& dst);
+
+  void cvttss2si(Register dst, const Operand& src);
+  void cvttsd2si(Register dst, const Operand& src);
+
+  void cvtlsi2sd(XMMRegister dst, const Operand& src);
+  void cvtlsi2sd(XMMRegister dst, Register src);
+  void cvtqsi2sd(XMMRegister dst, const Operand& src);
+  void cvtqsi2sd(XMMRegister dst, Register src);
+
+  void addsd(XMMRegister dst, XMMRegister src);
+  void subsd(XMMRegister dst, XMMRegister src);
+  void mulsd(XMMRegister dst, XMMRegister src);
+  void divsd(XMMRegister dst, XMMRegister src);
+
+
+  void emit_sse_operand(XMMRegister dst, XMMRegister src);
+  void emit_sse_operand(XMMRegister reg, const Operand& adr);
+  void emit_sse_operand(XMMRegister dst, Register src);
+
+  // Use either movsd or movlpd.
+  // void movdbl(XMMRegister dst, const Operand& src);
+  // void movdbl(const Operand& dst, XMMRegister src);
+
+  // Debugging
+  void Print();
+
+  // Check the code size generated from label to here.
+  int SizeOfCodeGeneratedSince(Label* l) { return pc_offset() - l->pos(); }
+
+  // Mark address of the ExitJSFrame code.
+  void RecordJSReturn();
+
+  // Record a comment relocation entry that can be used by a disassembler.
+  // Use --debug_code to enable.
+  void RecordComment(const char* msg);
+
+  void RecordPosition(int pos);
+  void RecordStatementPosition(int pos);
+  void WriteRecordedPositions();
+
+  // Writes a doubleword of data in the code stream.
+  // Used for inline tables, e.g., jump-tables.
+  // void dd(uint32_t data);
+
+  // Writes a quadword of data in the code stream.
+  // Used for inline tables, e.g., jump-tables.
+  // void dd(uint64_t data, RelocInfo::Mode reloc_info);
+
+  int pc_offset() const  { return pc_ - buffer_; }
+  int current_statement_position() const { return current_statement_position_; }
+  int current_position() const  { return current_position_; }
+
+  // Check if there is less than kGap bytes available in the buffer.
+  // If this is the case, we need to grow the buffer before emitting
+  // an instruction or relocation information.
+  inline bool buffer_overflow() const {
+    return pc_ >= reloc_info_writer.pos() - kGap;
+  }
+
+  // Get the number of bytes available in the buffer.
+  inline int available_space() const { return reloc_info_writer.pos() - pc_; }
+
+  // Avoid overflows for displacements etc.
+  static const int kMaximalBufferSize = 512*MB;
+  static const int kMinimalBufferSize = 4*KB;
+
+ protected:
+  // void movsd(XMMRegister dst, const Operand& src);
+  // void movsd(const Operand& dst, XMMRegister src);
+
+  // void emit_sse_operand(XMMRegister reg, const Operand& adr);
+  // void emit_sse_operand(XMMRegister dst, XMMRegister src);
+
+
+ private:
+  byte* addr_at(int pos)  { return buffer_ + pos; }
+  byte byte_at(int pos)  { return buffer_[pos]; }
+  uint32_t long_at(int pos)  {
+    return *reinterpret_cast<uint32_t*>(addr_at(pos));
+  }
+  void long_at_put(int pos, uint32_t x)  {
+    *reinterpret_cast<uint32_t*>(addr_at(pos)) = x;
+  }
+
+  // code emission
+  void GrowBuffer();
+
+  void emit(byte x) { *pc_++ = x; }
+  inline void emitl(uint32_t x);
+  inline void emit(Handle<Object> handle);
+  inline void emitq(uint64_t x, RelocInfo::Mode rmode);
+  inline void emitw(uint16_t x);
+  void emit(Immediate x) { emitl(x.value_); }
+
+  // Emits a REX prefix that encodes a 64-bit operand size and
+  // the top bit of both register codes.
+  // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
+  // REX.W is set.
+  inline void emit_rex_64(Register reg, Register rm_reg);
+  inline void emit_rex_64(XMMRegister reg, Register rm_reg);
+
+  // Emits a REX prefix that encodes a 64-bit operand size and
+  // the top bit of the destination, index, and base register codes.
+  // The high bit of reg is used for REX.R, the high bit of op's base
+  // register is used for REX.B, and the high bit of op's index register
+  // is used for REX.X.  REX.W is set.
+  inline void emit_rex_64(Register reg, const Operand& op);
+  inline void emit_rex_64(XMMRegister reg, const Operand& op);
+
+  // Emits a REX prefix that encodes a 64-bit operand size and
+  // the top bit of the register code.
+  // The high bit of register is used for REX.B.
+  // REX.W is set and REX.R and REX.X are clear.
+  inline void emit_rex_64(Register rm_reg);
+
+  // Emits a REX prefix that encodes a 64-bit operand size and
+  // the top bit of the index and base register codes.
+  // The high bit of op's base register is used for REX.B, and the high
+  // bit of op's index register is used for REX.X.
+  // REX.W is set and REX.R clear.
+  inline void emit_rex_64(const Operand& op);
+
+  // Emit a REX prefix that only sets REX.W to choose a 64-bit operand size.
+  void emit_rex_64() { emit(0x48); }
+
+  // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
+  // REX.W is clear.
+  inline void emit_rex_32(Register reg, Register rm_reg);
+
+  // The high bit of reg is used for REX.R, the high bit of op's base
+  // register is used for REX.B, and the high bit of op's index register
+  // is used for REX.X.  REX.W is cleared.
+  inline void emit_rex_32(Register reg, const Operand& op);
+
+  // High bit of rm_reg goes to REX.B.
+  // REX.W, REX.R and REX.X are clear.
+  inline void emit_rex_32(Register rm_reg);
+
+  // High bit of base goes to REX.B and high bit of index to REX.X.
+  // REX.W and REX.R are clear.
+  inline void emit_rex_32(const Operand& op);
+
+  // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
+  // REX.W is cleared.  If no REX bits are set, no byte is emitted.
+  inline void emit_optional_rex_32(Register reg, Register rm_reg);
+
+  // The high bit of reg is used for REX.R, the high bit of op's base
+  // register is used for REX.B, and the high bit of op's index register
+  // is used for REX.X.  REX.W is cleared.  If no REX bits are set, nothing
+  // is emitted.
+  inline void emit_optional_rex_32(Register reg, const Operand& op);
+
+  // As for emit_optional_rex_32(Register, Register), except that
+  // the registers are XMM registers.
+  inline void emit_optional_rex_32(XMMRegister reg, XMMRegister base);
+
+  // As for emit_optional_rex_32(Register, Register), except that
+  // the registers are XMM registers.
+  inline void emit_optional_rex_32(XMMRegister reg, Register base);
+
+  // As for emit_optional_rex_32(Register, const Operand&), except that
+  // the register is an XMM register.
+  inline void emit_optional_rex_32(XMMRegister reg, const Operand& op);
+
+  // Optionally do as emit_rex_32(Register) if the register number has
+  // the high bit set.
+  inline void emit_optional_rex_32(Register rm_reg);
+
+  // Optionally do as emit_rex_32(const Operand&) if the operand register
+  // numbers have a high bit set.
+  inline void emit_optional_rex_32(const Operand& op);
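+
+  // Illustrative sketch (not part of the declarations above): the
+  // two-register emit_rex_64 could plausibly be implemented as
+  //
+  //   void Assembler::emit_rex_64(Register reg, Register rm_reg) {
+  //     emit(0x48 | reg.high_bit() << 2 | rm_reg.high_bit());
+  //   }
+  //
+  // where 0x48 is a REX prefix with REX.W set, bit 2 is REX.R and bit 0 is
+  // REX.B; a high_bit() accessor returning the top bit of the register code
+  // is assumed here.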
+
+
+  // Emit the ModR/M byte, and optionally the SIB byte and
+  // 1- or 4-byte offset for a memory operand.  Also encodes
+  // the second operand of the operation, a register or operation
+  // subcode, into the reg field of the ModR/M byte.
+  void emit_operand(Register reg, const Operand& adr) {
+    emit_operand(reg.low_bits(), adr);
+  }
+
+  // Emit the ModR/M byte, and optionally the SIB byte and
+  // 1- or 4-byte offset for a memory operand.  Also used to encode
+  // a three-bit opcode extension into the ModR/M byte.
+  void emit_operand(int rm, const Operand& adr);
+
+  // Emit a ModR/M byte with registers coded in the reg and rm_reg fields.
+  void emit_modrm(Register reg, Register rm_reg) {
+    emit(0xC0 | reg.low_bits() << 3 | rm_reg.low_bits());
+  }
+
+  // Emit a ModR/M byte with an operation subcode in the reg field and
+  // a register in the rm_reg field.
+  void emit_modrm(int code, Register rm_reg) {
+    ASSERT(is_uint3(code));
+    emit(0xC0 | code << 3 | rm_reg.low_bits());
+  }
+
+  // Emit the code-object-relative offset of the label's position
+  inline void emit_code_relative_offset(Label* label);
+
+  // Emit machine code for one of the operations ADD, ADC, SUB, SBB,
+  // AND, OR, XOR, or CMP.  The encodings of these operations are all
+  // similar, differing just in the opcode or in the reg field of the
+  // ModR/M byte.
+  void arithmetic_op_16(byte opcode, Register reg, Register rm_reg);
+  void arithmetic_op_16(byte opcode, Register reg, const Operand& rm_reg);
+  void arithmetic_op_32(byte opcode, Register reg, Register rm_reg);
+  void arithmetic_op_32(byte opcode, Register reg, const Operand& rm_reg);
+  void arithmetic_op(byte opcode, Register reg, Register rm_reg);
+  void arithmetic_op(byte opcode, Register reg, const Operand& rm_reg);
+  void immediate_arithmetic_op(byte subcode, Register dst, Immediate src);
+  void immediate_arithmetic_op(byte subcode, const Operand& dst, Immediate src);
+  // Operate on a byte in memory or register.
+  void immediate_arithmetic_op_8(byte subcode,
+                                 Register dst,
+                                 Immediate src);
+  void immediate_arithmetic_op_8(byte subcode,
+                                 const Operand& dst,
+                                 Immediate src);
+  // Operate on a word in memory or register.
+  void immediate_arithmetic_op_16(byte subcode,
+                                  Register dst,
+                                  Immediate src);
+  void immediate_arithmetic_op_16(byte subcode,
+                                  const Operand& dst,
+                                  Immediate src);
+  // Operate on a 32-bit word in memory or register.
+  void immediate_arithmetic_op_32(byte subcode,
+                                  Register dst,
+                                  Immediate src);
+  void immediate_arithmetic_op_32(byte subcode,
+                                  const Operand& dst,
+                                  Immediate src);
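+
+  // Illustrative sketch of the pattern described above (EnsureSpace and the
+  // emit helpers are the ones declared in this class; the exact body is an
+  // assumption):
+  //
+  //   void Assembler::arithmetic_op(byte opcode, Register reg, Register rm_reg) {
+  //     EnsureSpace ensure_space(this);
+  //     emit_rex_64(reg, rm_reg);
+  //     emit(opcode);
+  //     emit_modrm(reg, rm_reg);
+  //   }
+  //
+  // The 64-bit, 32-bit and immediate variants differ only in the REX prefix,
+  // the opcode byte, and whether the ModR/M reg field holds a register or an
+  // opcode-extension subcode.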
+
+  // Emit machine code for a shift operation.
+  void shift(Register dst, Immediate shift_amount, int subcode);
+  void shift_32(Register dst, Immediate shift_amount, int subcode);
+  // Shift dst by cl modulo 64 bits (modulo 32 for the 32-bit shift_32 variant).
+  void shift(Register dst, int subcode);
+  void shift_32(Register dst, int subcode);
+
+  void emit_farith(int b1, int b2, int i);
+
+  // labels
+  // void print(Label* L);
+  void bind_to(Label* L, int pos);
+  void link_to(Label* L, Label* appendix);
+
+  // record reloc info for current pc_
+  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+  friend class CodePatcher;
+  friend class EnsureSpace;
+  friend class RegExpMacroAssemblerX64;
+
+  // Code buffer:
+  // The buffer into which code and relocation info are generated.
+  byte* buffer_;
+  int buffer_size_;
+  // True if the assembler owns the buffer, false if buffer is external.
+  bool own_buffer_;
+  // A previously allocated buffer of kMinimalBufferSize bytes, or NULL.
+  static byte* spare_buffer_;
+
+  // code generation
+  byte* pc_;  // the program counter; moves forward
+  RelocInfoWriter reloc_info_writer;
+
+  // push-pop elimination
+  byte* last_pc_;
+
+  // source position information
+  int current_statement_position_;
+  int current_position_;
+  int written_statement_position_;
+  int written_position_;
+};
+
+
+// Helper class that ensures that there is enough space for generating
+// instructions and relocation information.  The constructor makes
+// sure that there is enough space and (in debug mode) the destructor
+// checks that we did not generate too much.
+class EnsureSpace BASE_EMBEDDED {
+ public:
+  explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
+    if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
+#ifdef DEBUG
+    space_before_ = assembler_->available_space();
+#endif
+  }
+
+#ifdef DEBUG
+  ~EnsureSpace() {
+    int bytes_generated = space_before_ - assembler_->available_space();
+    ASSERT(bytes_generated < assembler_->kGap);
+  }
+#endif
+
+ private:
+  Assembler* assembler_;
+#ifdef DEBUG
+  int space_before_;
+#endif
+};
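+
+// Illustrative sketch (an assumption, not part of this header): an emitter
+// typically constructs an EnsureSpace guard before writing any bytes, e.g. a
+// one-byte instruction could be emitted roughly as
+//
+//   void Assembler::nop() {
+//     EnsureSpace ensure_space(this);
+//     emit(0x90);
+//   }
+//
+// so the buffer is grown up front and the DEBUG-mode destructor can verify
+// that no more than kGap bytes were generated.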
+
+} }  // namespace v8::internal
+
+#endif  // V8_X64_ASSEMBLER_X64_H_
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
new file mode 100644
index 0000000..35eddc4
--- /dev/null
+++ b/src/x64/builtins-x64.cc
@@ -0,0 +1,1280 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+#include "codegen-inl.h"
+#include "macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
+  // TODO(428): Don't pass the function in a static variable.
+  ExternalReference passed = ExternalReference::builtin_passed_function();
+  __ movq(kScratchRegister, passed.address(), RelocInfo::EXTERNAL_REFERENCE);
+  __ movq(Operand(kScratchRegister, 0), rdi);
+
+  // The actual argument count has already been loaded into register
+  // rax, but JumpToRuntime expects rax to contain the number of
+  // arguments including the receiver.
+  __ incq(rax);
+  __ JumpToRuntime(ExternalReference(id), 1);
+}
+
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+  __ push(rbp);
+  __ movq(rbp, rsp);
+
+  // Store the arguments adaptor context sentinel.
+  __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Push the function on the stack.
+  __ push(rdi);
+
+  // Preserve the number of arguments on the stack. Must preserve both
+  // rax and rbx because these registers are used when copying the
+  // arguments and the receiver.
+  __ Integer32ToSmi(rcx, rax);
+  __ push(rcx);
+}
+
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+  // Retrieve the number of arguments from the stack. Number is a Smi.
+  __ movq(rbx, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+  // Leave the frame.
+  __ movq(rsp, rbp);
+  __ pop(rbp);
+
+  // Remove caller arguments from the stack.
+  // rbx holds a smi (the argument count shifted left once), so scaling it by
+  // times_half_pointer_size (4) yields a byte offset of count * kPointerSize.
+  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  ASSERT_EQ(kPointerSize, (1 << kSmiTagSize) * 4);
+  // TODO(smi): Find a way to abstract indexing by a smi.
+  __ pop(rcx);
+  // 1 * kPointerSize is offset of receiver.
+  __ lea(rsp, Operand(rsp, rbx, times_half_pointer_size, 1 * kPointerSize));
+  __ push(rcx);
+}
+
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax : actual number of arguments
+  //  -- rbx : expected number of arguments
+  //  -- rdx : code entry to call
+  // -----------------------------------
+
+  Label invoke, dont_adapt_arguments;
+  __ IncrementCounter(&Counters::arguments_adaptors, 1);
+
+  Label enough, too_few;
+  __ cmpq(rax, rbx);
+  __ j(less, &too_few);
+  __ cmpq(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+  __ j(equal, &dont_adapt_arguments);
+
+  {  // Enough parameters: Actual >= expected.
+    __ bind(&enough);
+    EnterArgumentsAdaptorFrame(masm);
+
+    // Copy receiver and all expected arguments.
+    const int offset = StandardFrameConstants::kCallerSPOffset;
+    __ lea(rax, Operand(rbp, rax, times_pointer_size, offset));
+    __ movq(rcx, Immediate(-1));  // account for receiver
+
+    Label copy;
+    __ bind(&copy);
+    __ incq(rcx);
+    __ push(Operand(rax, 0));
+    __ subq(rax, Immediate(kPointerSize));
+    __ cmpq(rcx, rbx);
+    __ j(less, &copy);
+    __ jmp(&invoke);
+  }
+
+  {  // Too few parameters: Actual < expected.
+    __ bind(&too_few);
+    EnterArgumentsAdaptorFrame(masm);
+
+    // Copy receiver and all actual arguments.
+    const int offset = StandardFrameConstants::kCallerSPOffset;
+    __ lea(rdi, Operand(rbp, rax, times_pointer_size, offset));
+    __ movq(rcx, Immediate(-1));  // account for receiver
+
+    Label copy;
+    __ bind(&copy);
+    __ incq(rcx);
+    __ push(Operand(rdi, 0));
+    __ subq(rdi, Immediate(kPointerSize));
+    __ cmpq(rcx, rax);
+    __ j(less, &copy);
+
+    // Fill remaining expected arguments with undefined values.
+    Label fill;
+    __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+    __ bind(&fill);
+    __ incq(rcx);
+    __ push(kScratchRegister);
+    __ cmpq(rcx, rbx);
+    __ j(less, &fill);
+
+    // Restore function pointer.
+    __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+  }
+
+  // Call the entry point.
+  __ bind(&invoke);
+  __ call(rdx);
+
+  // Leave frame and return.
+  LeaveArgumentsAdaptorFrame(masm);
+  __ ret(0);
+
+  // -------------------------------------------
+  // Dont adapt arguments.
+  // -------------------------------------------
+  __ bind(&dont_adapt_arguments);
+  __ jmp(rdx);
+}
+
+
+void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+  // Stack Layout:
+  // rsp: return address
+  //  +1: Argument n
+  //  +2: Argument n-1
+  //  ...
+  //  +n: Argument 1 = receiver
+  //  +n+1: Argument 0 = function to call
+  //
+  // rax contains the number of arguments, n, not counting the function.
+  //
+  // 1. Make sure we have at least one argument.
+  { Label done;
+    __ testq(rax, rax);
+    __ j(not_zero, &done);
+    __ pop(rbx);
+    __ Push(Factory::undefined_value());
+    __ push(rbx);
+    __ incq(rax);
+    __ bind(&done);
+  }
+
+  // 2. Get the function to call from the stack.
+  { Label done, non_function, function;
+    // The function to call is at position n+1 on the stack.
+    __ movq(rdi, Operand(rsp, rax, times_pointer_size, +1 * kPointerSize));
+    __ JumpIfSmi(rdi, &non_function);
+    __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+    __ j(equal, &function);
+
+    // Non-function called: Clear the function to force exception.
+    __ bind(&non_function);
+    __ xor_(rdi, rdi);
+    __ jmp(&done);
+
+    // Function called: Change context eagerly to get the right global object.
+    __ bind(&function);
+    __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+    __ bind(&done);
+  }
+
+  // 3. Make sure first argument is an object; convert if necessary.
+  { Label call_to_object, use_global_receiver, patch_receiver, done;
+    __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
+
+    __ JumpIfSmi(rbx, &call_to_object);
+
+    __ CompareRoot(rbx, Heap::kNullValueRootIndex);
+    __ j(equal, &use_global_receiver);
+    __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
+    __ j(equal, &use_global_receiver);
+
+    __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
+    __ j(below, &call_to_object);
+    __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
+    __ j(below_equal, &done);
+
+    __ bind(&call_to_object);
+    __ EnterInternalFrame();  // preserves rax, rbx, rdi
+
+    // Store the arguments count on the stack (smi tagged).
+    __ Integer32ToSmi(rax, rax);
+    __ push(rax);
+
+    __ push(rdi);  // save rdi across the call
+    __ push(rbx);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ movq(rbx, rax);
+    __ pop(rdi);  // restore rdi after the call
+
+    // Get the arguments count and untag it.
+    __ pop(rax);
+    __ SmiToInteger32(rax, rax);
+
+    __ LeaveInternalFrame();
+    __ jmp(&patch_receiver);
+
+    // Use the global receiver object from the called function as the receiver.
+    __ bind(&use_global_receiver);
+    const int kGlobalIndex =
+        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+    __ movq(rbx, FieldOperand(rsi, kGlobalIndex));
+    __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+
+    __ bind(&patch_receiver);
+    __ movq(Operand(rsp, rax, times_pointer_size, 0), rbx);
+
+    __ bind(&done);
+  }
+
+  // 4. Shift stuff one slot down the stack.
+  { Label loop;
+    __ lea(rcx, Operand(rax, +1));  // +1 ~ copy receiver too
+    __ bind(&loop);
+    __ movq(rbx, Operand(rsp, rcx, times_pointer_size, 0));
+    __ movq(Operand(rsp, rcx, times_pointer_size, 1 * kPointerSize), rbx);
+    __ decq(rcx);
+    __ j(not_zero, &loop);
+  }
+
+  // 5. Remove TOS (copy of the last argument), but keep the return address.
+  __ pop(rbx);
+  __ pop(rcx);
+  __ push(rbx);
+  __ decq(rax);
+
+  // 6. Check that function really was a function and get the code to
+  //    call from the function and check that the number of expected
+  //    arguments matches what we're providing.
+  { Label invoke, trampoline;
+    __ testq(rdi, rdi);
+    __ j(not_zero, &invoke);
+    __ xor_(rbx, rbx);
+    __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
+    __ bind(&trampoline);
+    __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+            RelocInfo::CODE_TARGET);
+
+    __ bind(&invoke);
+    __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+    __ movsxlq(rbx,
+           FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
+    __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+    __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
+    __ cmpq(rax, rbx);
+    __ j(not_equal, &trampoline);
+  }
+
+  // 7. Jump (tail-call) to the code in register rdx without checking arguments.
+  ParameterCount expected(0);
+  __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION);
+}
+
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+  // Stack at entry:
+  //    rsp: return address
+  //  rsp+8: arguments
+  // rsp+16: receiver ("this")
+  // rsp+24: function
+  __ EnterInternalFrame();
+  // Stack frame:
+  //    rbp: Old base pointer
+  // rbp[1]: return address
+  // rbp[2]: function arguments
+  // rbp[3]: receiver
+  // rbp[4]: function
+  static const int kArgumentsOffset = 2 * kPointerSize;
+  static const int kReceiverOffset = 3 * kPointerSize;
+  static const int kFunctionOffset = 4 * kPointerSize;
+  __ push(Operand(rbp, kFunctionOffset));
+  __ push(Operand(rbp, kArgumentsOffset));
+  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+  if (FLAG_check_stack) {
+    // We need to catch preemptions right here, otherwise an unlucky preemption
+    // could show up as a failed apply.
+    Label retry_preemption;
+    Label no_preemption;
+    __ bind(&retry_preemption);
+    ExternalReference stack_guard_limit =
+        ExternalReference::address_of_stack_guard_limit();
+    __ movq(kScratchRegister, stack_guard_limit);
+    __ movq(rcx, rsp);
+    __ subq(rcx, Operand(kScratchRegister, 0));
+    // rcx contains the difference between the stack limit and the stack top.
+    // We use it below to check that there is enough room for the arguments.
+    __ j(above, &no_preemption);
+
+    // Preemption!
+    // Because runtime functions always remove the receiver from the stack, we
+    // have to fake one to avoid underflowing the stack.
+    __ push(rax);
+    __ push(Immediate(Smi::FromInt(0)));
+
+    // Do call to runtime routine.
+    __ CallRuntime(Runtime::kStackGuard, 1);
+    __ pop(rax);
+    __ jmp(&retry_preemption);
+
+    __ bind(&no_preemption);
+
+    Label okay;
+    // Make rdx the space we need for the array when it is unrolled onto the
+    // stack.
+    __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
+    __ cmpq(rcx, rdx);
+    __ j(greater, &okay);
+
+    // Too bad: Out of stack space.
+    __ push(Operand(rbp, kFunctionOffset));
+    __ push(rax);
+    __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+    __ bind(&okay);
+  }
+
+  // Push current index and limit.
+  const int kLimitOffset =
+      StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+  const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
+  __ push(rax);  // limit
+  __ push(Immediate(0));  // index
+
+  // Change context eagerly to get the right global object if
+  // necessary.
+  __ movq(rdi, Operand(rbp, kFunctionOffset));
+  __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+  // Compute the receiver.
+  Label call_to_object, use_global_receiver, push_receiver;
+  __ movq(rbx, Operand(rbp, kReceiverOffset));
+  __ JumpIfSmi(rbx, &call_to_object);
+  __ CompareRoot(rbx, Heap::kNullValueRootIndex);
+  __ j(equal, &use_global_receiver);
+  __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
+  __ j(equal, &use_global_receiver);
+
+  // If given receiver is already a JavaScript object then there's no
+  // reason for converting it.
+  __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
+  __ j(below, &call_to_object);
+  __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
+  __ j(below_equal, &push_receiver);
+
+  // Convert the receiver to an object.
+  __ bind(&call_to_object);
+  __ push(rbx);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+  __ movq(rbx, rax);
+  __ jmp(&push_receiver);
+
+  // Use the current global receiver object as the receiver.
+  __ bind(&use_global_receiver);
+  const int kGlobalOffset =
+      Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+  __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
+  __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+
+  // Push the receiver.
+  __ bind(&push_receiver);
+  __ push(rbx);
+
+  // Copy all arguments from the array to the stack.
+  Label entry, loop;
+  __ movq(rax, Operand(rbp, kIndexOffset));
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ movq(rcx, Operand(rbp, kArgumentsOffset));  // load arguments
+  __ push(rcx);
+  __ push(rax);
+
+  // Use inline caching to speed up access to arguments.
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+  __ Call(ic, RelocInfo::CODE_TARGET);
+  // It is important that we do not have a test instruction after the
+  // call.  A test instruction after the call is used to indicate that
+  // we have generated an inline version of the keyed load.  In this
+  // case, we know that we are not generating a test instruction next.
+
+  // Remove IC arguments from the stack and push the nth argument.
+  __ addq(rsp, Immediate(2 * kPointerSize));
+  __ push(rax);
+
+  // Update the index on the stack and in register rax.
+  __ movq(rax, Operand(rbp, kIndexOffset));
+  __ addq(rax, Immediate(Smi::FromInt(1)));
+  __ movq(Operand(rbp, kIndexOffset), rax);
+
+  __ bind(&entry);
+  __ cmpq(rax, Operand(rbp, kLimitOffset));
+  __ j(not_equal, &loop);
+
+  // Invoke the function.
+  ParameterCount actual(rax);
+  __ SmiToInteger32(rax, rax);
+  __ movq(rdi, Operand(rbp, kFunctionOffset));
+  __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+
+  __ LeaveInternalFrame();
+  __ ret(3 * kPointerSize);  // remove function, receiver, and arguments
+}
+
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+  // Load the global context.
+  __ movq(result, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ movq(result, FieldOperand(result, GlobalObject::kGlobalContextOffset));
+  // Load the Array function from the global context.
+  __ movq(result,
+          Operand(result, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+// Number of empty elements to allocate for an empty array.
+static const int kPreallocatedArrayElements = 4;
+
+
+// Allocate an empty JSArray. The allocated array is put into the result
+// register. If the parameter initial_capacity is larger than zero an elements
+// backing store is allocated with this size and filled with the hole values.
+// Otherwise the elements backing store is set to the empty FixedArray.
+static void AllocateEmptyJSArray(MacroAssembler* masm,
+                                 Register array_function,
+                                 Register result,
+                                 Register scratch1,
+                                 Register scratch2,
+                                 Register scratch3,
+                                 int initial_capacity,
+                                 Label* gc_required) {
+  ASSERT(initial_capacity >= 0);
+
+  // Load the initial map from the array function.
+  __ movq(scratch1, FieldOperand(array_function,
+                                 JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Allocate the JSArray object together with space for a fixed array with the
+  // requested elements.
+  int size = JSArray::kSize;
+  if (initial_capacity > 0) {
+    size += FixedArray::SizeFor(initial_capacity);
+  }
+  __ AllocateInNewSpace(size,
+                        result,
+                        scratch2,
+                        scratch3,
+                        gc_required,
+                        TAG_OBJECT);
+
+  // Allocated the JSArray. Now initialize the fields except for the elements
+  // array.
+  // result: JSObject
+  // scratch1: initial map
+  // scratch2: start of next object
+  __ movq(FieldOperand(result, JSObject::kMapOffset), scratch1);
+  __ Move(FieldOperand(result, JSArray::kPropertiesOffset),
+          Factory::empty_fixed_array());
+  // Field JSArray::kElementsOffset is initialized later.
+  __ movq(FieldOperand(result, JSArray::kLengthOffset), Immediate(0));
+
+  // If no storage is requested for the elements array just set the empty
+  // fixed array.
+  if (initial_capacity == 0) {
+    __ Move(FieldOperand(result, JSArray::kElementsOffset),
+            Factory::empty_fixed_array());
+    return;
+  }
+
+  // Calculate the location of the elements array and set elements array member
+  // of the JSArray.
+  // result: JSObject
+  // scratch2: start of next object
+  __ lea(scratch1, Operand(result, JSArray::kSize));
+  __ movq(FieldOperand(result, JSArray::kElementsOffset), scratch1);
+
+  // Initialize the FixedArray and fill it with holes. FixedArray length is not
+  // stored as a smi.
+  // result: JSObject
+  // scratch1: elements array
+  // scratch2: start of next object
+  __ Move(FieldOperand(scratch1, JSObject::kMapOffset),
+          Factory::fixed_array_map());
+  __ movq(FieldOperand(scratch1, Array::kLengthOffset),
+          Immediate(initial_capacity));
+
+  // Fill the FixedArray with the hole value. Inline the code if short.
+  // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
+  static const int kLoopUnfoldLimit = 4;
+  ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
+  __ Move(scratch3, Factory::the_hole_value());
+  if (initial_capacity <= kLoopUnfoldLimit) {
+    // Use a scratch register here to have only one reloc info when unfolding
+    // the loop.
+    for (int i = 0; i < initial_capacity; i++) {
+      __ movq(FieldOperand(scratch1,
+                           FixedArray::kHeaderSize + i * kPointerSize),
+              scratch3);
+    }
+  } else {
+    Label loop, entry;
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ movq(Operand(scratch1, 0), scratch3);
+    __ addq(scratch1, Immediate(kPointerSize));
+    __ bind(&entry);
+    __ cmpq(scratch1, scratch2);
+    __ j(below, &loop);
+  }
+}
+
+
+// Allocate a JSArray with the number of elements stored in a register. The
+// register array_function holds the built-in Array function and the register
+// array_size holds the size of the array as a smi. The allocated array is put
+// into the result register, and the beginning and end of the FixedArray
+// elements storage are put into registers elements_array and
+// elements_array_end (see below for when that is not the case). If the
+// parameter fill_with_hole is true the allocated elements backing store is
+// filled with the hole values, otherwise it is left uninitialized. When the
+// backing store is filled the register elements_array is scratched.
+static void AllocateJSArray(MacroAssembler* masm,
+                            Register array_function,  // Array function.
+                            Register array_size,  // As a smi.
+                            Register result,
+                            Register elements_array,
+                            Register elements_array_end,
+                            Register scratch,
+                            bool fill_with_hole,
+                            Label* gc_required) {
+  Label not_empty, allocated;
+
+  // Load the initial map from the array function.
+  __ movq(elements_array,
+          FieldOperand(array_function,
+                       JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check whether an empty sized array is requested.
+  __ testq(array_size, array_size);
+  __ j(not_zero, &not_empty);
+
+  // If an empty array is requested allocate a small elements array anyway. This
+  // keeps the code below free of special casing for the empty array.
+  int size = JSArray::kSize + FixedArray::SizeFor(kPreallocatedArrayElements);
+  __ AllocateInNewSpace(size,
+                        result,
+                        elements_array_end,
+                        scratch,
+                        gc_required,
+                        TAG_OBJECT);
+  __ jmp(&allocated);
+
+  // Allocate the JSArray object together with space for a FixedArray with the
+  // requested elements.
+  __ bind(&not_empty);
+  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
+                        times_half_pointer_size,  // array_size is a smi.
+                        array_size,
+                        result,
+                        elements_array_end,
+                        scratch,
+                        gc_required,
+                        TAG_OBJECT);
+
+  // Allocated the JSArray. Now initialize the fields except for the elements
+  // array.
+  // result: JSObject
+  // elements_array: initial map
+  // elements_array_end: start of next object
+  // array_size: size of array (smi)
+  __ bind(&allocated);
+  __ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
+  __ Move(elements_array, Factory::empty_fixed_array());
+  __ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
+  // Field JSArray::kElementsOffset is initialized later.
+  __ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
+
+  // Calculate the location of the elements array and set elements array member
+  // of the JSArray.
+  // result: JSObject
+  // elements_array_end: start of next object
+  // array_size: size of array (smi)
+  __ lea(elements_array, Operand(result, JSArray::kSize));
+  __ movq(FieldOperand(result, JSArray::kElementsOffset), elements_array);
+
+  // Initialize the fixed array. FixedArray length is not stored as a smi.
+  // result: JSObject
+  // elements_array: elements array
+  // elements_array_end: start of next object
+  // array_size: size of array (smi)
+  ASSERT(kSmiTag == 0);
+  __ SmiToInteger64(array_size, array_size);
+  __ Move(FieldOperand(elements_array, JSObject::kMapOffset),
+          Factory::fixed_array_map());
+  Label not_empty_2, fill_array;
+  __ testq(array_size, array_size);
+  __ j(not_zero, &not_empty_2);
+  // Length of the FixedArray is the number of pre-allocated elements even
+  // though the actual JSArray has length 0.
+  __ movq(FieldOperand(elements_array, Array::kLengthOffset),
+          Immediate(kPreallocatedArrayElements));
+  __ jmp(&fill_array);
+  __ bind(&not_empty_2);
+  // For non-empty JSArrays the length of the FixedArray and the JSArray is the
+  // same.
+  __ movq(FieldOperand(elements_array, Array::kLengthOffset), array_size);
+
+  // Fill the allocated FixedArray with the hole value if requested.
+  // result: JSObject
+  // elements_array: elements array
+  // elements_array_end: start of next object
+  __ bind(&fill_array);
+  if (fill_with_hole) {
+    Label loop, entry;
+    __ Move(scratch, Factory::the_hole_value());
+    __ lea(elements_array, Operand(elements_array,
+                                   FixedArray::kHeaderSize - kHeapObjectTag));
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ movq(Operand(elements_array, 0), scratch);
+    __ addq(elements_array, Immediate(kPointerSize));
+    __ bind(&entry);
+    __ cmpq(elements_array, elements_array_end);
+    __ j(below, &loop);
+  }
+}
+
+
+// Create a new array for the built-in Array function. This function allocates
+// the JSArray object and the FixedArray elements array and initializes these.
+// If the Array cannot be constructed in native code the runtime is called. This
+// function assumes the following state:
+//   rdi: constructor (built-in Array function)
+//   rax: argc
+//   rsp[0]: return address
+//   rsp[8]: last argument
+// This function is used for both construct and normal calls of Array. The only
+// difference between handling a construct call and a normal call is that for a
+// construct call the constructor function in rdi needs to be preserved for
+// entering the generic code. In both cases argc in rax needs to be preserved.
+// Both registers are preserved by this code so no need to differentiate between
+// a construct call and a normal call.
+static void ArrayNativeCode(MacroAssembler* masm,
+                            Label *call_generic_code) {
+  Label argc_one_or_more, argc_two_or_more;
+
+  // Check for array construction with zero arguments.
+  __ testq(rax, rax);
+  __ j(not_zero, &argc_one_or_more);
+
+  // Handle construction of an empty array.
+  AllocateEmptyJSArray(masm,
+                       rdi,
+                       rbx,
+                       rcx,
+                       rdx,
+                       r8,
+                       kPreallocatedArrayElements,
+                       call_generic_code);
+  __ IncrementCounter(&Counters::array_function_native, 1);
+  __ movq(rax, rbx);
+  __ ret(kPointerSize);
+
+  // Check for one argument. Bail out if the argument is not a smi or if it
+  // is negative.
+  __ bind(&argc_one_or_more);
+  __ cmpq(rax, Immediate(1));
+  __ j(not_equal, &argc_two_or_more);
+  __ movq(rdx, Operand(rsp, kPointerSize));  // Get the argument from the stack.
+  Condition not_positive_smi = __ CheckNotPositiveSmi(rdx);
+  __ j(not_positive_smi, call_generic_code);
+
+  // Handle construction of an empty array of a certain size. Bail out if the
+  // size is too large to actually allocate an elements array.
+  __ JumpIfSmiGreaterEqualsConstant(rdx,
+                                    JSObject::kInitialMaxFastElementArray,
+                                    call_generic_code);
+
+  // rax: argc
+  // rdx: array_size (smi)
+  // rdi: constructor
+  // rsp[0]: return address
+  // rsp[8]: argument
+  AllocateJSArray(masm,
+                  rdi,
+                  rdx,
+                  rbx,
+                  rcx,
+                  r8,
+                  r9,
+                  true,
+                  call_generic_code);
+  __ IncrementCounter(&Counters::array_function_native, 1);
+  __ movq(rax, rbx);
+  __ ret(2 * kPointerSize);
+
+  // Handle construction of an array from a list of arguments.
+  __ bind(&argc_two_or_more);
+  __ movq(rdx, rax);
+  __ Integer32ToSmi(rdx, rdx);  // Convert argc to a smi.
+  // rax: argc
+  // rdx: array_size (smi)
+  // rdi: constructor
+  // rsp[0] : return address
+  // rsp[8] : last argument
+  AllocateJSArray(masm,
+                  rdi,
+                  rdx,
+                  rbx,
+                  rcx,
+                  r8,
+                  r9,
+                  false,
+                  call_generic_code);
+  __ IncrementCounter(&Counters::array_function_native, 1);
+
+  // rax: argc
+  // rbx: JSArray
+  // rcx: elements_array
+  // r8: elements_array_end (untagged)
+  // rsp[0]: return address
+  // rsp[8]: last argument
+
+  // Location of the last argument
+  __ lea(r9, Operand(rsp, kPointerSize));
+
+  // Location of the first array element (parameter fill_with_hole to
+  // AllocateJSArray is false, so the FixedArray is returned in rcx).
+  __ lea(rdx, Operand(rcx, FixedArray::kHeaderSize - kHeapObjectTag));
+
+  // rax: argc
+  // rbx: JSArray
+  // rdx: location of the first array element
+  // r9: location of the last argument
+  // rsp[0]: return address
+  // rsp[8]: last argument
+  Label loop, entry;
+  __ movq(rcx, rax);
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ movq(kScratchRegister, Operand(r9, rcx, times_pointer_size, 0));
+  __ movq(Operand(rdx, 0), kScratchRegister);
+  __ addq(rdx, Immediate(kPointerSize));
+  __ bind(&entry);
+  __ decq(rcx);
+  __ j(greater_equal, &loop);
+
+  // Remove caller arguments from the stack and return.
+  // rax: argc
+  // rbx: JSArray
+  // rsp[0]: return address
+  // rsp[8]: last argument
+  __ pop(rcx);
+  __ lea(rsp, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
+  __ push(rcx);
+  __ movq(rax, rbx);
+  __ ret(0);
+}
+
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax : argc
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : last argument
+  // -----------------------------------
+  Label generic_array_code;
+
+  // Get the Array function.
+  GenerateLoadArrayFunction(masm, rdi);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin Array function should be a map.
+    __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+    // The following smi check also catches a NULL pointer since kSmiTag == 0.
+    ASSERT(kSmiTag == 0);
+    Condition not_smi = __ CheckNotSmi(rbx);
+    __ Assert(not_smi, "Unexpected initial map for Array function");
+    __ CmpObjectType(rbx, MAP_TYPE, rcx);
+    __ Assert(equal, "Unexpected initial map for Array function");
+  }
+
+  // Run the native code for the Array function called as a normal function.
+  ArrayNativeCode(masm, &generic_array_code);
+
+  // Jump to the generic array code in case the specialized code cannot handle
+  // the construction.
+  __ bind(&generic_array_code);
+  Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
+  Handle<Code> array_code(code);
+  __ Jump(array_code, RelocInfo::CODE_TARGET);
+}
+
+
+void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax : argc
+  //  -- rdi : constructor
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : last argument
+  // -----------------------------------
+  Label generic_constructor;
+
+  if (FLAG_debug_code) {
+    // The array construct code is only set for the builtin Array function,
+    // which always has a map.
+    GenerateLoadArrayFunction(masm, rbx);
+    __ cmpq(rdi, rbx);
+    __ Assert(equal, "Unexpected Array function");
+    // Initial map for the builtin Array function should be a map.
+    __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+    // The following smi check also catches a NULL pointer since kSmiTag == 0.
+    ASSERT(kSmiTag == 0);
+    Condition not_smi = __ CheckNotSmi(rbx);
+    __ Assert(not_smi, "Unexpected initial map for Array function");
+    __ CmpObjectType(rbx, MAP_TYPE, rcx);
+    __ Assert(equal, "Unexpected initial map for Array function");
+  }
+
+  // Run the native code for the Array function called as constructor.
+  ArrayNativeCode(masm, &generic_constructor);
+
+  // Jump to the generic construct code in case the specialized code cannot
+  // handle the construction.
+  __ bind(&generic_constructor);
+  Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+  Handle<Code> generic_construct_stub(code);
+  __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+}
+
+
+void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax: number of arguments
+  //  -- rdi: constructor function
+  // -----------------------------------
+
+  Label non_function_call;
+  // Check that function is not a smi.
+  __ JumpIfSmi(rdi, &non_function_call);
+  // Check that function is a JSFunction.
+  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+  __ j(not_equal, &non_function_call);
+
+  // Jump to the function-specific construct stub.
+  __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ movq(rbx, FieldOperand(rbx, SharedFunctionInfo::kConstructStubOffset));
+  __ lea(rbx, FieldOperand(rbx, Code::kHeaderSize));
+  __ jmp(rbx);
+
+  // rdi: called object
+  // rax: number of arguments
+  __ bind(&non_function_call);
+
+  // Set expected number of arguments to zero (not changing rax).
+  __ movq(rbx, Immediate(0));
+  __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+          RelocInfo::CODE_TARGET);
+}
+
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+  // Enter a construct frame.
+  __ EnterConstructFrame();
+
+  // Store a smi-tagged arguments count on the stack.
+  __ Integer32ToSmi(rax, rax);
+  __ push(rax);
+
+  // Push the function to invoke on the stack.
+  __ push(rdi);
+
+  // Try to allocate the object without transitioning into C code. If any of the
+  // preconditions is not met, the code bails out to the runtime call.
+  Label rt_call, allocated;
+  if (FLAG_inline_new) {
+    Label undo_allocation;
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+    ExternalReference debug_step_in_fp =
+        ExternalReference::debug_step_in_fp_address();
+    __ movq(kScratchRegister, debug_step_in_fp);
+    __ cmpq(Operand(kScratchRegister, 0), Immediate(0));
+    __ j(not_equal, &rt_call);
+#endif
+
+    // Verified that the constructor is a JSFunction.
+    // Load the initial map and verify that it is in fact a map.
+    // rdi: constructor
+    __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+    // The following smi check also catches a NULL pointer since kSmiTag == 0.
+    ASSERT(kSmiTag == 0);
+    __ JumpIfSmi(rax, &rt_call);
+    // rdi: constructor
+    // rax: initial map (if proven valid below)
+    __ CmpObjectType(rax, MAP_TYPE, rbx);
+    __ j(not_equal, &rt_call);
+
+    // Check that the constructor is not constructing a JSFunction (see comments
+    // in Runtime_NewObject in runtime.cc); in that case the initial map's
+    // instance type would be JS_FUNCTION_TYPE.
+    // rdi: constructor
+    // rax: initial map
+    __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
+    __ j(equal, &rt_call);
+
+    // Now allocate the JSObject on the heap.
+    __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
+    __ shl(rdi, Immediate(kPointerSizeLog2));
+    // rdi: size of new object
+    __ AllocateInNewSpace(rdi,
+                          rbx,
+                          rdi,
+                          no_reg,
+                          &rt_call,
+                          NO_ALLOCATION_FLAGS);
+    // Allocated the JSObject, now initialize the fields.
+    // rax: initial map
+    // rbx: JSObject (not HeapObject tagged - the actual address).
+    // rdi: start of next object
+    __ movq(Operand(rbx, JSObject::kMapOffset), rax);
+    __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
+    __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
+    __ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
+    // Set extra fields in the newly allocated object.
+    // rax: initial map
+    // rbx: JSObject
+    // rdi: start of next object
+    { Label loop, entry;
+      __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+      __ lea(rcx, Operand(rbx, JSObject::kHeaderSize));
+      __ jmp(&entry);
+      __ bind(&loop);
+      __ movq(Operand(rcx, 0), rdx);
+      __ addq(rcx, Immediate(kPointerSize));
+      __ bind(&entry);
+      __ cmpq(rcx, rdi);
+      __ j(less, &loop);
+    }
+
+    // Add the object tag to make the JSObject real, so that we can continue and
+    // jump into the continuation code at any time from now on. Any failures
+    // need to undo the allocation, so that the heap is in a consistent state
+    // and verifiable.
+    // rax: initial map
+    // rbx: JSObject
+    // rdi: start of next object
+    __ or_(rbx, Immediate(kHeapObjectTag));
+
+    // Check if a non-empty properties array is needed.
+    // Allocate and initialize a FixedArray if it is.
+    // rax: initial map
+    // rbx: JSObject
+    // rdi: start of next object
+    // Calculate the total number of properties described by the map.
+    __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
+    __ movzxbq(rcx, FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
+    __ addq(rdx, rcx);
+    // Calculate unused properties past the end of the in-object properties.
+    __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
+    __ subq(rdx, rcx);
+    // Done if no extra properties are to be allocated.
+    __ j(zero, &allocated);
+    __ Assert(positive, "Property allocation count failed.");
+
+    // Scale the number of elements by pointer size and add the header for
+    // FixedArrays to the start of the next object calculation from above.
+    // rbx: JSObject
+    // rdi: start of next object (will be start of FixedArray)
+    // rdx: number of elements in properties array
+    __ AllocateInNewSpace(FixedArray::kHeaderSize,
+                          times_pointer_size,
+                          rdx,
+                          rdi,
+                          rax,
+                          no_reg,
+                          &undo_allocation,
+                          RESULT_CONTAINS_TOP);
+
+    // Initialize the FixedArray.
+    // rbx: JSObject
+    // rdi: FixedArray
+    // rdx: number of elements
+    // rax: start of next object
+    __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
+    __ movq(Operand(rdi, JSObject::kMapOffset), rcx);  // setup the map
+    __ movl(Operand(rdi, FixedArray::kLengthOffset), rdx);  // and length
+
+    // Initialize the fields to undefined.
+    // rbx: JSObject
+    // rdi: FixedArray
+    // rax: start of next object
+    // rdx: number of elements
+    { Label loop, entry;
+      __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+      __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
+      __ jmp(&entry);
+      __ bind(&loop);
+      __ movq(Operand(rcx, 0), rdx);
+      __ addq(rcx, Immediate(kPointerSize));
+      __ bind(&entry);
+      __ cmpq(rcx, rax);
+      __ j(below, &loop);
+    }
+
+    // Store the initialized FixedArray into the properties field of
+    // the JSObject
+    // rbx: JSObject
+    // rdi: FixedArray
+    __ or_(rdi, Immediate(kHeapObjectTag));  // add the heap tag
+    __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
+
+
+    // Continue with JSObject being successfully allocated
+    // rbx: JSObject
+    __ jmp(&allocated);
+
+    // Undo the setting of the new top so that the heap is verifiable. For
+    // example, the map's unused properties potentially do not match the
+    // allocated object's unused properties.
+    // rbx: JSObject (previous new top)
+    __ bind(&undo_allocation);
+    __ UndoAllocationInNewSpace(rbx);
+  }
+
+  // Allocate the new receiver object using the runtime call.
+  // rdi: function (constructor)
+  __ bind(&rt_call);
+  // Must restore rdi (constructor) before calling runtime.
+  __ movq(rdi, Operand(rsp, 0));
+  __ push(rdi);
+  __ CallRuntime(Runtime::kNewObject, 1);
+  __ movq(rbx, rax);  // store result in rbx
+
+  // New object allocated.
+  // rbx: newly allocated object
+  __ bind(&allocated);
+  // Retrieve the function from the stack.
+  __ pop(rdi);
+
+  // Retrieve smi-tagged arguments count from the stack.
+  __ movq(rax, Operand(rsp, 0));
+  __ SmiToInteger32(rax, rax);
+
+  // Push the allocated receiver to the stack. We need two copies
+  // because we may have to return the original one and the calling
+  // conventions dictate that the called function pops the receiver.
+  __ push(rbx);
+  __ push(rbx);
+
+  // Setup pointer to last argument.
+  __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
+
+  // Copy arguments and receiver to the expression stack.
+  Label loop, entry;
+  __ movq(rcx, rax);
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ push(Operand(rbx, rcx, times_pointer_size, 0));
+  __ bind(&entry);
+  __ decq(rcx);
+  __ j(greater_equal, &loop);
+
+  // Call the function.
+  ParameterCount actual(rax);
+  __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+
+  // Restore context from the frame.
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+
+  // If the result is an object (in the ECMA sense), we should get rid
+  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+  // on page 74.
+  Label use_receiver, exit;
+  // If the result is a smi, it is *not* an object in the ECMA sense.
+  __ JumpIfSmi(rax, &use_receiver);
+
+  // If the type of the result (stored in its map) is less than
+  // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
+  __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+  __ j(above_equal, &exit);
+
+  // Throw away the result of the constructor invocation and use the
+  // on-stack receiver as the result.
+  __ bind(&use_receiver);
+  __ movq(rax, Operand(rsp, 0));
+
+  // Restore the arguments count and leave the construct frame.
+  __ bind(&exit);
+  __ movq(rbx, Operand(rsp, kPointerSize));  // get arguments count
+  __ LeaveConstructFrame();
+
+  // Remove caller arguments from the stack and return.
+  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  // TODO(smi): Find a way to abstract indexing by a smi.
+  __ pop(rcx);
+  // 1 * kPointerSize is offset of receiver.
+  __ lea(rsp, Operand(rsp, rbx, times_half_pointer_size, 1 * kPointerSize));
+  __ push(rcx);
+  __ IncrementCounter(&Counters::constructed_objects, 1);
+  __ ret(0);
+}
+
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+                                             bool is_construct) {
+  // Expects five C++ function parameters.
+  // - Address entry (ignored)
+  // - JSFunction* function
+  // - Object* receiver
+  // - int argc
+  // - Object*** argv
+  // (see Handle::Invoke in execution.cc).
+
+  // Platform-specific argument handling. After this, the stack contains
+  // an internal frame and the pushed function and receiver; registers
+  // rax and rbx hold the argument count and the argument array, while
+  // rdi holds the function pointer and rsi the context.
+#ifdef _WIN64
+  // MSVC parameters in:
+  // rcx : entry (ignored)
+  // rdx : function
+  // r8 : receiver
+  // r9 : argc
+  // [rsp+0x20] : argv
+
+  // Clear the context before we push it when entering the JS frame.
+  __ xor_(rsi, rsi);
+  __ EnterInternalFrame();
+
+  // Load the function context into rsi.
+  __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
+
+  // Push the function and the receiver onto the stack.
+  __ push(rdx);
+  __ push(r8);
+
+  // Load the number of arguments and set up a pointer to the arguments.
+  __ movq(rax, r9);
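+  // Win64 passes only the first four arguments in registers; argv, the
+  // fifth, is passed on the stack and is reached through the previous frame
+  // pointer loaded below.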
+  // Load the previous frame pointer to access the C arguments on the stack.
+  __ movq(kScratchRegister, Operand(rbp, 0));
+  __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
+  // Load the function pointer into rdi.
+  __ movq(rdi, rdx);
+#else  // !defined(_WIN64)
+  // GCC parameters in:
+  // rdi : entry (ignored)
+  // rsi : function
+  // rdx : receiver
+  // rcx : argc
+  // r8  : argv
+
+  __ movq(rdi, rsi);
+  // rdi : function
+
+  // Clear the context before we push it when entering the JS frame.
+  __ xor_(rsi, rsi);
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Push the function and receiver and setup the context.
+  __ push(rdi);
+  __ push(rdx);
+  __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+  // Load the number of arguments and set up a pointer to the arguments.
+  __ movq(rax, rcx);
+  __ movq(rbx, r8);
+#endif  // _WIN64
+
+  // Set up the roots register.
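+  // The root array pointer is kept in r13 for the duration of generated JS
+  // code so that root values can be loaded relative to a register instead of
+  // through an external reference each time.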
+  ExternalReference roots_address = ExternalReference::roots_address();
+  __ movq(r13, roots_address);
+
+  // Current stack contents:
+  // [rsp + 2 * kPointerSize ... ]: Internal frame
+  // [rsp + kPointerSize]         : function
+  // [rsp]                        : receiver
+  // Current register contents:
+  // rax : argc
+  // rbx : argv
+  // rsi : context
+  // rdi : function
+
+  // Copy arguments to the stack in a loop.
+  // Register rbx points to array of pointers to handle locations.
+  // Push the values of these handles.
+  Label loop, entry;
+  __ xor_(rcx, rcx);  // Set loop variable to 0.
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
+  __ push(Operand(kScratchRegister, 0));  // dereference handle
+  __ addq(rcx, Immediate(1));
+  __ bind(&entry);
+  __ cmpq(rcx, rax);
+  __ j(not_equal, &loop);
+
+  // Invoke the code.
+  if (is_construct) {
+    // Expects rdi to hold function pointer.
+    __ Call(Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)),
+            RelocInfo::CODE_TARGET);
+  } else {
+    ParameterCount actual(rax);
+    // Function must be in rdi.
+    __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+  }
+
+  // Exit the JS frame. Notice that this also removes the empty
+  // context and the function left on the stack by the code
+  // invocation.
+  __ LeaveInternalFrame();
+  // TODO(X64): Is argument correct? Is there a receiver to remove?
+  __ ret(1 * kPointerSize);  // remove receiver
+}
+
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+} }  // namespace v8::internal
diff --git a/src/x64/codegen-x64-inl.h b/src/x64/codegen-x64-inl.h
new file mode 100644
index 0000000..6869fc9
--- /dev/null
+++ b/src/x64/codegen-x64-inl.h
@@ -0,0 +1,56 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_X64_CODEGEN_X64_INL_H_
+#define V8_X64_CODEGEN_X64_INL_H_
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// Platform-specific inline functions.
+
+void DeferredCode::Jump() { __ jmp(&entry_label_); }
+void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); }
+
+
+void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
+  GenerateFastMathOp(SIN, args);
+}
+
+
+void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
+  GenerateFastMathOp(COS, args);
+}
+
+#undef __
+
+} }  // namespace v8::internal
+
+#endif  // V8_X64_CODEGEN_X64_INL_H_
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
new file mode 100644
index 0000000..8e6dbef
--- /dev/null
+++ b/src/x64/codegen-x64.cc
@@ -0,0 +1,7684 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "debug.h"
+#include "ic-inl.h"
+#include "parser.h"
+#include "register-allocator-inl.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// -------------------------------------------------------------------------
+// Platform-specific DeferredCode functions.
+
+void DeferredCode::SaveRegisters() {
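+  // registers_[i] holds one of kIgnore, kPush, or a frame byte offset
+  // (possibly tagged with kSyncedFlag when the value is already in memory),
+  // describing how each register should be saved before deferred code runs.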
+  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+    int action = registers_[i];
+    if (action == kPush) {
+      __ push(RegisterAllocator::ToRegister(i));
+    } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
+      __ movq(Operand(rbp, action), RegisterAllocator::ToRegister(i));
+    }
+  }
+}
+
+void DeferredCode::RestoreRegisters() {
+  // Restore registers in reverse order due to the stack.
+  for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
+    int action = registers_[i];
+    if (action == kPush) {
+      __ pop(RegisterAllocator::ToRegister(i));
+    } else if (action != kIgnore) {
+      action &= ~kSyncedFlag;
+      __ movq(RegisterAllocator::ToRegister(i), Operand(rbp, action));
+    }
+  }
+}
+
+
+// -------------------------------------------------------------------------
+// CodeGenState implementation.
+
+CodeGenState::CodeGenState(CodeGenerator* owner)
+    : owner_(owner),
+      typeof_state_(NOT_INSIDE_TYPEOF),
+      destination_(NULL),
+      previous_(NULL) {
+  owner_->set_state(this);
+}
+
+
+CodeGenState::CodeGenState(CodeGenerator* owner,
+                           TypeofState typeof_state,
+                           ControlDestination* destination)
+    : owner_(owner),
+      typeof_state_(typeof_state),
+      destination_(destination),
+      previous_(owner->state()) {
+  owner_->set_state(this);
+}
+
+
+CodeGenState::~CodeGenState() {
+  ASSERT(owner_->state() == this);
+  owner_->set_state(previous_);
+}
+
+
+// -------------------------------------------------------------------------
+// Deferred code objects
+//
+// These subclasses of DeferredCode add pieces of code to the end of the
+// generated code.  They are branched to from the main body and keep the
+// slower paths out of it.  Many of them call a code stub or a runtime
+// function.
+
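+// The result of dst + value is in dst.  It either overflowed or was not smi
+// tagged.  Undo the speculative addition and call the appropriate specialized
+// stub for add.  The result is left in dst.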
+class DeferredInlineSmiAdd: public DeferredCode {
+ public:
+  DeferredInlineSmiAdd(Register dst,
+                       Smi* value,
+                       OverwriteMode overwrite_mode)
+      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+    set_comment("[ DeferredInlineSmiAdd");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register dst_;
+  Smi* value_;
+  OverwriteMode overwrite_mode_;
+};
+
+
+// The result of value + src is in dst.  It either overflowed or was not
+// smi tagged.  Undo the speculative addition and call the appropriate
+// specialized stub for add.  The result is left in dst.
+class DeferredInlineSmiAddReversed: public DeferredCode {
+ public:
+  DeferredInlineSmiAddReversed(Register dst,
+                               Smi* value,
+                               OverwriteMode overwrite_mode)
+      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+    set_comment("[ DeferredInlineSmiAddReversed");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register dst_;
+  Smi* value_;
+  OverwriteMode overwrite_mode_;
+};
+
+
+class DeferredInlineSmiSub: public DeferredCode {
+ public:
+  DeferredInlineSmiSub(Register dst,
+                       Smi* value,
+                       OverwriteMode overwrite_mode)
+      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+    set_comment("[ DeferredInlineSmiSub");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register dst_;
+  Smi* value_;
+  OverwriteMode overwrite_mode_;
+};
+
+
+// Call the appropriate binary operation stub to compute src op value
+// and leave the result in dst.
+class DeferredInlineSmiOperation: public DeferredCode {
+ public:
+  DeferredInlineSmiOperation(Token::Value op,
+                             Register dst,
+                             Register src,
+                             Smi* value,
+                             OverwriteMode overwrite_mode)
+      : op_(op),
+        dst_(dst),
+        src_(src),
+        value_(value),
+        overwrite_mode_(overwrite_mode) {
+    set_comment("[ DeferredInlineSmiOperation");
+  }
+
+  virtual void Generate();
+
+ private:
+  Token::Value op_;
+  Register dst_;
+  Register src_;
+  Smi* value_;
+  OverwriteMode overwrite_mode_;
+};
+
+
+class FloatingPointHelper : public AllStatic {
+ public:
+  // Code pattern for loading a floating point value. Input value must
+  // be either a smi or a heap number object (fp value). Requirements:
+  // operand on TOS+1. Returns operand as floating point number on FPU
+  // stack.
+  static void LoadFloatOperand(MacroAssembler* masm, Register scratch);
+
+  // Code pattern for loading a floating point value. Input value must
+  // be either a smi or a heap number object (fp value). Requirements:
+  // operand in src register. Returns operand as floating point number
+  // in XMM register
+  static void LoadFloatOperand(MacroAssembler* masm,
+                               Register src,
+                               XMMRegister dst);
+
+  // Code pattern for loading floating point values. Input values must
+  // be either smi or heap number objects (fp values). Requirements:
+  // operand_1 on TOS+1, operand_2 on TOS+2.  Returns operands as
+  // floating point numbers in XMM registers.
+  static void LoadFloatOperands(MacroAssembler* masm,
+                                XMMRegister dst1,
+                                XMMRegister dst2);
+
+  // Code pattern for loading floating point values onto the fp stack.
+  // Input values must be either smi or heap number objects (fp values).
+  // Requirements:
+  // Register version: operands in registers lhs and rhs.
+  // Stack version: operands on TOS+1 and TOS+2.
+  // Returns operands as floating point numbers on fp stack.
+  static void LoadFloatOperands(MacroAssembler* masm);
+  static void LoadFloatOperands(MacroAssembler* masm,
+                                Register lhs,
+                                Register rhs);
+
+  // Code pattern for loading a floating point value and converting it
+  // to a 32 bit integer. Input value must be either a smi or a heap number
+  // object.
+  // Returns the operand as a 32-bit sign-extended integer in a general
+  // purpose register.
+  static void LoadInt32Operand(MacroAssembler* masm,
+                               const Operand& src,
+                               Register dst);
+
+  // Test if operands are smi or number objects (fp). Requirements:
+  // operand_1 in rax, operand_2 in rdx; falls through on float or smi
+  // operands, jumps to the non_float label otherwise.
+  static void CheckFloatOperands(MacroAssembler* masm,
+                                 Label* non_float);
+
+  // Allocate a heap number in new space with undefined value.
+  // Returns tagged pointer in result, or jumps to need_gc if new space is full.
+  static void AllocateHeapNumber(MacroAssembler* masm,
+                                 Label* need_gc,
+                                 Register scratch,
+                                 Register result);
+};
+
+
+// -------------------------------------------------------------------------
+// CodeGenerator implementation.
+
+CodeGenerator::CodeGenerator(int buffer_size,
+                             Handle<Script> script,
+                             bool is_eval)
+    : is_eval_(is_eval),
+      script_(script),
+      deferred_(8),
+      masm_(new MacroAssembler(NULL, buffer_size)),
+      scope_(NULL),
+      frame_(NULL),
+      allocator_(NULL),
+      state_(NULL),
+      loop_nesting_(0),
+      function_return_is_shadowed_(false),
+      in_spilled_code_(false) {
+}
+
+
+void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+  // Call the runtime to declare the globals.  The inevitable call
+  // will sync frame elements to memory anyway, so we do it eagerly to
+  // allow us to push the arguments directly into place.
+  frame_->SyncRange(0, frame_->element_count() - 1);
+
+  __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
+  frame_->EmitPush(kScratchRegister);
+  frame_->EmitPush(rsi);  // The context is the second argument.
+  frame_->EmitPush(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
+  Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
+  // Return value is ignored.
+}
+
+
+void CodeGenerator::GenCode(FunctionLiteral* function) {
+  // Record the position for debugging purposes.
+  CodeForFunctionPosition(function);
+  ZoneList<Statement*>* body = function->body();
+
+  // Initialize state.
+  ASSERT(scope_ == NULL);
+  scope_ = function->scope();
+  ASSERT(allocator_ == NULL);
+  RegisterAllocator register_allocator(this);
+  allocator_ = &register_allocator;
+  ASSERT(frame_ == NULL);
+  frame_ = new VirtualFrame();
+  set_in_spilled_code(false);
+
+  // Adjust for function-level loop nesting.
+  loop_nesting_ += function->loop_nesting();
+
+  JumpTarget::set_compiling_deferred_code(false);
+
+#ifdef DEBUG
+  if (strlen(FLAG_stop_at) > 0 &&
+      function->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+    frame_->SpillAll();
+    __ int3();
+  }
+#endif
+
+  // New scope to get automatic timing calculation.
+  {  // NOLINT
+    HistogramTimerScope codegen_timer(&Counters::code_generation);
+    CodeGenState state(this);
+
+    // Entry:
+    // Stack: receiver, arguments, return address.
+    // rbp: caller's frame pointer
+    // rsp: stack pointer
+    // rdi: called JS function
+    // rsi: callee's context
+    allocator_->Initialize();
+    frame_->Enter();
+
+    // Allocate space for locals and initialize them.
+    frame_->AllocateStackSlots();
+    // Initialize the function return target after the locals are set
+    // up, because it needs the expected frame height from the frame.
+    function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
+    function_return_is_shadowed_ = false;
+
+    // Allocate the local context if needed.
+    if (scope_->num_heap_slots() > 0) {
+      Comment cmnt(masm_, "[ allocate local context");
+      // Allocate local context.
+      // Get outer context and create a new context based on it.
+      frame_->PushFunction();
+      Result context = frame_->CallRuntime(Runtime::kNewContext, 1);
+
+      // Update context local.
+      frame_->SaveContextRegister();
+
+      // Verify that the runtime call result and rsi agree.
+      if (FLAG_debug_code) {
+        __ cmpq(context.reg(), rsi);
+        __ Assert(equal, "Runtime::NewContext should end up in rsi");
+      }
+    }
+
+    // TODO(1241774): Improve this code:
+    // 1) only needed if we have a context
+    // 2) no need to recompute context ptr every single time
+    // 3) don't copy parameter operand code from SlotOperand!
+    {
+      Comment cmnt2(masm_, "[ copy context parameters into .context");
+
+      // Note that iteration order is relevant here! If the same parameter
+      // occurs twice (e.g., function (x, y, x)) and that parameter needs to
+      // be copied into the context, the value seen inside the function must
+      // be the one passed for its last occurrence. This is a rare case, so
+      // we don't check for it explicitly; instead we rely on the copying
+      // order: such a parameter is copied repeatedly into the same context
+      // location, so the last value written is the one seen inside the
+      // function.
+      for (int i = 0; i < scope_->num_parameters(); i++) {
+        Variable* par = scope_->parameter(i);
+        Slot* slot = par->slot();
+        if (slot != NULL && slot->type() == Slot::CONTEXT) {
+          // The use of SlotOperand below is safe in unspilled code
+          // because the slot is guaranteed to be a context slot.
+          //
+          // There are no parameters in the global scope.
+          ASSERT(!scope_->is_global_scope());
+          frame_->PushParameterAt(i);
+          Result value = frame_->Pop();
+          value.ToRegister();
+
+          // SlotOperand loads context.reg() with the context object
+          // stored to, used below in RecordWrite.
+          Result context = allocator_->Allocate();
+          ASSERT(context.is_valid());
+          __ movq(SlotOperand(slot, context.reg()), value.reg());
+          int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+          Result scratch = allocator_->Allocate();
+          ASSERT(scratch.is_valid());
+          frame_->Spill(context.reg());
+          frame_->Spill(value.reg());
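+          // RecordWrite emits the write barrier needed because a value that
+          // may be a new-space object was just stored into the heap-allocated
+          // context.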
+          __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
+        }
+      }
+    }
+
+    // Store the arguments object.  This must happen after context
+    // initialization because the arguments object may be stored in
+    // the context.
+    if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
+      StoreArgumentsObject(true);
+    }
+
+    // Generate code to 'execute' declarations and initialize functions
+    // (source elements). In case of an illegal redeclaration we need to
+    // handle that instead of processing the declarations.
+    if (scope_->HasIllegalRedeclaration()) {
+      Comment cmnt(masm_, "[ illegal redeclarations");
+      scope_->VisitIllegalRedeclaration(this);
+    } else {
+      Comment cmnt(masm_, "[ declarations");
+      ProcessDeclarations(scope_->declarations());
+      // Bail out if a stack-overflow exception occurred when processing
+      // declarations.
+      if (HasStackOverflow()) return;
+    }
+
+    if (FLAG_trace) {
+      frame_->CallRuntime(Runtime::kTraceEnter, 0);
+      // Ignore the return value.
+    }
+    CheckStack();
+
+    // Compile the body of the function in a vanilla state. Don't
+    // bother compiling all the code if the scope has an illegal
+    // redeclaration.
+    if (!scope_->HasIllegalRedeclaration()) {
+      Comment cmnt(masm_, "[ function body");
+#ifdef DEBUG
+      bool is_builtin = Bootstrapper::IsActive();
+      bool should_trace =
+          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
+      if (should_trace) {
+        frame_->CallRuntime(Runtime::kDebugTrace, 0);
+        // Ignore the return value.
+      }
+#endif
+      VisitStatements(body);
+
+      // Handle the return from the function.
+      if (has_valid_frame()) {
+        // If there is a valid frame, control flow can fall off the end of
+        // the body.  In that case there is an implicit return statement.
+        ASSERT(!function_return_is_shadowed_);
+        CodeForReturnPosition(function);
+        frame_->PrepareForReturn();
+        Result undefined(Factory::undefined_value());
+        if (function_return_.is_bound()) {
+          function_return_.Jump(&undefined);
+        } else {
+          function_return_.Bind(&undefined);
+          GenerateReturnSequence(&undefined);
+        }
+      } else if (function_return_.is_linked()) {
+        // If the return target has dangling jumps to it, then we have not
+        // yet generated the return sequence.  This can happen when (a)
+        // control does not flow off the end of the body so we did not
+        // compile an artificial return statement just above, and (b) there
+        // are return statements in the body but (c) they are all shadowed.
+        Result return_value;
+        function_return_.Bind(&return_value);
+        GenerateReturnSequence(&return_value);
+      }
+    }
+  }
+
+  // Adjust for function-level loop nesting.
+  loop_nesting_ -= function->loop_nesting();
+
+  // Code generation state must be reset.
+  ASSERT(state_ == NULL);
+  ASSERT(loop_nesting() == 0);
+  ASSERT(!function_return_is_shadowed_);
+  function_return_.Unuse();
+  DeleteFrame();
+
+  // Process any deferred code using the register allocator.
+  if (!HasStackOverflow()) {
+    HistogramTimerScope deferred_timer(&Counters::deferred_code_generation);
+    JumpTarget::set_compiling_deferred_code(true);
+    ProcessDeferred();
+    JumpTarget::set_compiling_deferred_code(false);
+  }
+
+  // There is no need to delete the register allocator, it is a
+  // stack-allocated local.
+  allocator_ = NULL;
+  scope_ = NULL;
+}
+
+void CodeGenerator::GenerateReturnSequence(Result* return_value) {
+  // The return value is a live (but not currently reference counted)
+  // reference to rax.  This is safe because the current frame does not
+  // contain a reference to rax (it is prepared for the return by spilling
+  // all registers).
+  if (FLAG_trace) {
+    frame_->Push(return_value);
+    *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
+  }
+  return_value->ToRegister(rax);
+
+  // Add a label for checking the size of the code used for returning.
+#ifdef DEBUG
+  Label check_exit_codesize;
+  masm_->bind(&check_exit_codesize);
+#endif
+
+  // Leave the frame and return popping the arguments and the
+  // receiver.
+  frame_->Exit();
+  masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Add padding that will be overwritten by a debugger breakpoint.
+  // frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k"
+  // with length 7 (3 + 1 + 3).
+  const int kPadding = Debug::kX64JSReturnSequenceLength - 7;
+  for (int i = 0; i < kPadding; ++i) {
+    masm_->int3();
+  }
+  // Check that the size of the code used for returning matches what is
+  // expected by the debugger.
+  ASSERT_EQ(Debug::kX64JSReturnSequenceLength,
+            masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+#endif
+  DeleteFrame();
+}
+
+
+#ifdef DEBUG
+bool CodeGenerator::HasValidEntryRegisters() {
+  return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0))
+      && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0))
+      && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0))
+      && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0))
+      && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0))
+      && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0))
+      && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
+      && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
+      && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
+      && (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0))
+      && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
+}
+#endif
+
+
+class DeferredReferenceGetKeyedValue: public DeferredCode {
+ public:
+  explicit DeferredReferenceGetKeyedValue(Register dst,
+                                          Register receiver,
+                                          Register key,
+                                          bool is_global)
+      : dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) {
+    set_comment("[ DeferredReferenceGetKeyedValue");
+  }
+
+  virtual void Generate();
+
+  Label* patch_site() { return &patch_site_; }
+
+ private:
+  Label patch_site_;
+  Register dst_;
+  Register receiver_;
+  Register key_;
+  bool is_global_;
+};
+
+
+void DeferredReferenceGetKeyedValue::Generate() {
+  __ push(receiver_);  // First IC argument.
+  __ push(key_);       // Second IC argument.
+
+  // Calculate the delta from the IC call instruction to the map check
+  // movq instruction in the inlined version.  This delta is stored in
+  // a test(rax, delta) instruction after the call so that we can find
+  // it in the IC initialization code and patch the movq instruction.
+  // This means that we cannot allow test instructions after calls to
+  // KeyedLoadIC stubs in other places.
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+  RelocInfo::Mode mode = is_global_
+                         ? RelocInfo::CODE_TARGET_CONTEXT
+                         : RelocInfo::CODE_TARGET;
+  __ Call(ic, mode);
+  // The delta from the start of the map-compare instruction to the
+  // test instruction.  We use masm_-> directly here instead of the __
+  // macro because the macro sometimes uses macro expansion to turn
+  // into something that can't return a value.  This is encountered
+  // when doing generated code coverage tests.
+  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+  // Here we use masm_-> instead of the __ macro because this is the
+  // instruction that gets patched and coverage code gets in the way.
+  // TODO(X64): Consider whether it's worth switching the test to a
+  // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
+  // be generated normally.
+  masm_->testl(rax, Immediate(-delta_to_patch_site));
+  __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
+
+  if (!dst_.is(rax)) __ movq(dst_, rax);
+  __ pop(key_);
+  __ pop(receiver_);
+}
+
+
+class DeferredReferenceSetKeyedValue: public DeferredCode {
+ public:
+  DeferredReferenceSetKeyedValue(Register value,
+                                 Register key,
+                                 Register receiver)
+      : value_(value), key_(key), receiver_(receiver) {
+    set_comment("[ DeferredReferenceSetKeyedValue");
+  }
+
+  virtual void Generate();
+
+  Label* patch_site() { return &patch_site_; }
+
+ private:
+  Register value_;
+  Register key_;
+  Register receiver_;
+  Label patch_site_;
+};
+
+
+void DeferredReferenceSetKeyedValue::Generate() {
+  __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
+  // Push receiver and key arguments on the stack.
+  __ push(receiver_);
+  __ push(key_);
+  // Move the value argument to rax as expected by the IC stub.
+  if (!value_.is(rax)) __ movq(rax, value_);
+  // Call the IC stub.
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  __ Call(ic, RelocInfo::CODE_TARGET);
+  // The delta from the start of the map-compare instructions (initial movq)
+  // to the test instruction.  We use masm_-> directly here instead of the
+  // __ macro because the macro sometimes uses macro expansion to turn
+  // into something that can't return a value.  This is encountered
+  // when doing generated code coverage tests.
+  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+  // Here we use masm_-> instead of the __ macro because this is the
+  // instruction that gets patched and coverage code gets in the way.
+  masm_->testl(rax, Immediate(-delta_to_patch_site));
+  // Restore value (returned from store IC), key and receiver
+  // registers.
+  if (!value_.is(rax)) __ movq(value_, rax);
+  __ pop(key_);
+  __ pop(receiver_);
+}
+
+
+class CallFunctionStub: public CodeStub {
+ public:
+  CallFunctionStub(int argc, InLoopFlag in_loop)
+      : argc_(argc), in_loop_(in_loop) { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  int argc_;
+  InLoopFlag in_loop_;
+
+#ifdef DEBUG
+  void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
+#endif
+
+  Major MajorKey() { return CallFunction; }
+  int MinorKey() { return argc_; }
+  InLoopFlag InLoop() { return in_loop_; }
+};
+
+
+void CodeGenerator::CallApplyLazy(Property* apply,
+                                  Expression* receiver,
+                                  VariableProxy* arguments,
+                                  int position) {
+  ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
+  ASSERT(arguments->IsArguments());
+
+  JumpTarget slow, done;
+
+  // Load the apply function onto the stack. This will usually
+  // give us a megamorphic load site. Not super, but it works.
+  Reference ref(this, apply);
+  ref.GetValue(NOT_INSIDE_TYPEOF);
+  ASSERT(ref.type() == Reference::NAMED);
+
+  // Load the receiver and the existing arguments object onto the
+  // expression stack. Avoid allocating the arguments object here.
+  Load(receiver);
+  LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+
+  // Emit the source position information after having loaded the
+  // receiver and the arguments.
+  CodeForSourcePosition(position);
+
+  // Check if the arguments object has been lazily allocated
+  // already. If so, just use that instead of copying the arguments
+  // from the stack. This also deals with cases where a local variable
+  // named 'arguments' has been introduced.
+  frame_->Dup();
+  Result probe = frame_->Pop();
+  bool try_lazy = true;
+  if (probe.is_constant()) {
+    try_lazy = probe.handle()->IsTheHole();
+  } else {
+    __ Cmp(probe.reg(), Factory::the_hole_value());
+    probe.Unuse();
+    slow.Branch(not_equal);
+  }
+
+  if (try_lazy) {
+    JumpTarget build_args;
+
+    // Get rid of the arguments object probe.
+    frame_->Drop();
+
+    // Before messing with the execution stack, we sync all
+    // elements. This is bound to happen anyway because we're
+    // about to call a function.
+    frame_->SyncRange(0, frame_->element_count() - 1);
+
+    // Check that the receiver really is a JavaScript object.
+    {
+      frame_->PushElementAt(0);
+      Result receiver = frame_->Pop();
+      receiver.ToRegister();
+      Condition is_smi = masm_->CheckSmi(receiver.reg());
+      build_args.Branch(is_smi);
+      // We allow all JSObjects including JSFunctions.  As long as
+      // JS_FUNCTION_TYPE is the last instance type and it is right
+      // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
+      // bound.
+      ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+      ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+      __ CmpObjectType(receiver.reg(), FIRST_JS_OBJECT_TYPE, kScratchRegister);
+      build_args.Branch(below);
+    }
+
+    // Verify that we're invoking Function.prototype.apply.
+    {
+      frame_->PushElementAt(1);
+      Result apply = frame_->Pop();
+      apply.ToRegister();
+      Condition is_smi = masm_->CheckSmi(apply.reg());
+      build_args.Branch(is_smi);
+      Result tmp = allocator_->Allocate();
+      __ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
+      build_args.Branch(not_equal);
+      __ movq(tmp.reg(),
+              FieldOperand(apply.reg(), JSFunction::kSharedFunctionInfoOffset));
+      Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
+      __ Cmp(FieldOperand(tmp.reg(), SharedFunctionInfo::kCodeOffset),
+             apply_code);
+      build_args.Branch(not_equal);
+    }
+
+    // Get the function receiver from the stack. Check that it
+    // really is a function.
+    __ movq(rdi, Operand(rsp, 2 * kPointerSize));
+    Condition is_smi = masm_->CheckSmi(rdi);
+    build_args.Branch(is_smi);
+    __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+    build_args.Branch(not_equal);
+
+    // Copy the arguments to this function possibly from the
+    // adaptor frame below it.
+    Label invoke, adapted;
+    __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+    __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+    __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+    __ j(equal, &adapted);
+
+    // No arguments adaptor frame. Copy fixed number of arguments.
+    __ movq(rax, Immediate(scope_->num_parameters()));
+    for (int i = 0; i < scope_->num_parameters(); i++) {
+      __ push(frame_->ParameterAt(i));
+    }
+    __ jmp(&invoke);
+
+    // Arguments adaptor frame present. Copy arguments from there, but
+    // avoid copying too many arguments to avoid stack overflows.
+    __ bind(&adapted);
+    static const uint32_t kArgumentsLimit = 1 * KB;
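+    // kArgumentsLimit is a count of arguments (1024), not bytes; if the
+    // adaptor frame holds more than that we fall back to allocating a real
+    // arguments object below.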
+    __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+    __ SmiToInteger32(rax, rax);
+    __ movq(rcx, rax);
+    __ cmpq(rax, Immediate(kArgumentsLimit));
+    build_args.Branch(above);
+
+    // Loop through the arguments pushing them onto the execution
+    // stack. We don't inform the virtual frame of the push, so we don't
+    // have to worry about getting rid of the elements from the virtual
+    // frame.
+    Label loop;
+    __ bind(&loop);
+    __ testl(rcx, rcx);
+    __ j(zero, &invoke);
+    __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
+    __ decl(rcx);
+    __ jmp(&loop);
+
+    // Invoke the function. The virtual frame knows about the receiver
+    // so make sure to forget that explicitly.
+    __ bind(&invoke);
+    ParameterCount actual(rax);
+    __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+    frame_->Forget(1);
+    Result result = allocator()->Allocate(rax);
+    frame_->SetElementAt(0, &result);
+    done.Jump();
+
+    // Slow-case: Allocate the arguments object since we know it isn't
+    // there, and fall-through to the slow-case where we call
+    // Function.prototype.apply.
+    build_args.Bind();
+    Result arguments_object = StoreArgumentsObject(false);
+    frame_->Push(&arguments_object);
+    slow.Bind();
+  }
+
+  // Flip the apply function and the function to call on the stack, so
+  // the function looks like the receiver of the apply call. This way,
+  // the generic Function.prototype.apply implementation can deal with
+  // the call like it usually does.
+  Result a2 = frame_->Pop();
+  Result a1 = frame_->Pop();
+  Result ap = frame_->Pop();
+  Result fn = frame_->Pop();
+  frame_->Push(&ap);
+  frame_->Push(&fn);
+  frame_->Push(&a1);
+  frame_->Push(&a2);
+  CallFunctionStub call_function(2, NOT_IN_LOOP);
+  Result res = frame_->CallStub(&call_function, 3);
+  frame_->Push(&res);
+
+  // All done. Restore context register after call.
+  if (try_lazy) done.Bind();
+  frame_->RestoreContextRegister();
+}
+
+
+class DeferredStackCheck: public DeferredCode {
+ public:
+  DeferredStackCheck() {
+    set_comment("[ DeferredStackCheck");
+  }
+
+  virtual void Generate();
+};
+
+
+void DeferredStackCheck::Generate() {
+  StackCheckStub stub;
+  __ CallStub(&stub);
+}
+
+
+void CodeGenerator::CheckStack() {
+  if (FLAG_check_stack) {
+    DeferredStackCheck* deferred = new DeferredStackCheck;
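+    // The stack limit is kept in the root array; if rsp has grown below it,
+    // the deferred code calls the stack check stub, which ends up in the
+    // runtime's stack guard.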
+    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+    deferred->Branch(below);
+    deferred->BindExit();
+  }
+}
+
+
+void CodeGenerator::VisitAndSpill(Statement* statement) {
+  // TODO(X64): No architecture specific code. Move to shared location.
+  ASSERT(in_spilled_code());
+  set_in_spilled_code(false);
+  Visit(statement);
+  if (frame_ != NULL) {
+    frame_->SpillAll();
+  }
+  set_in_spilled_code(true);
+}
+
+
+void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
+  ASSERT(in_spilled_code());
+  set_in_spilled_code(false);
+  VisitStatements(statements);
+  if (frame_ != NULL) {
+    frame_->SpillAll();
+  }
+  set_in_spilled_code(true);
+}
+
+
+void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
+  ASSERT(!in_spilled_code());
+  for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
+    Visit(statements->at(i));
+  }
+}
+
+
+void CodeGenerator::VisitBlock(Block* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ Block");
+  CodeForStatementPosition(node);
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  VisitStatements(node->statements());
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
+  node->break_target()->Unuse();
+}
+
+
+void CodeGenerator::VisitDeclaration(Declaration* node) {
+  Comment cmnt(masm_, "[ Declaration");
+  Variable* var = node->proxy()->var();
+  ASSERT(var != NULL);  // must have been resolved
+  Slot* slot = var->slot();
+
+  // If it was not possible to allocate the variable at compile time,
+  // we need to "declare" it at runtime to make sure it actually
+  // exists in the local context.
+  if (slot != NULL && slot->type() == Slot::LOOKUP) {
+    // Variables with a "LOOKUP" slot were introduced as non-locals
+    // during variable resolution and must have mode DYNAMIC.
+    ASSERT(var->is_dynamic());
+    // For now, just do a runtime call.  Sync the virtual frame eagerly
+    // so we can simply push the arguments into place.
+    frame_->SyncRange(0, frame_->element_count() - 1);
+    frame_->EmitPush(rsi);
+    __ movq(kScratchRegister, var->name(), RelocInfo::EMBEDDED_OBJECT);
+    frame_->EmitPush(kScratchRegister);
+    // Declaration nodes are always introduced in one of two modes.
+    ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
+    PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
+    frame_->EmitPush(Immediate(Smi::FromInt(attr)));
+    // Push initial value, if any.
+    // Note: For variables we must not push an initial value (such as
+    // 'undefined') because we may have a (legal) redeclaration and we
+    // must not destroy the current value.
+    if (node->mode() == Variable::CONST) {
+      frame_->EmitPush(Heap::kTheHoleValueRootIndex);
+    } else if (node->fun() != NULL) {
+      Load(node->fun());
+    } else {
+      frame_->EmitPush(Immediate(Smi::FromInt(0)));  // no initial value!
+    }
+    Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
+    // Ignore the return value (declarations are statements).
+    return;
+  }
+
+  ASSERT(!var->is_global());
+
+  // If we have a function or a constant, we need to initialize the variable.
+  Expression* val = NULL;
+  if (node->mode() == Variable::CONST) {
+    val = new Literal(Factory::the_hole_value());
+  } else {
+    val = node->fun();  // NULL if we don't have a function
+  }
+
+  if (val != NULL) {
+    {
+      // Set the initial value.
+      Reference target(this, node->proxy());
+      Load(val);
+      target.SetValue(NOT_CONST_INIT);
+      // The reference is removed from the stack (preserving TOS) when
+      // it goes out of scope.
+    }
+    // Get rid of the assigned value (declarations are statements).
+    frame_->Drop();
+  }
+}
+
+
+void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ ExpressionStatement");
+  CodeForStatementPosition(node);
+  Expression* expression = node->expression();
+  expression->MarkAsStatement();
+  Load(expression);
+  // Remove the lingering expression result from the top of stack.
+  frame_->Drop();
+}
+
+
+void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "// EmptyStatement");
+  CodeForStatementPosition(node);
+  // nothing to do
+}
+
+
+void CodeGenerator::VisitIfStatement(IfStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ IfStatement");
+  // Generate different code depending on which parts of the if statement
+  // are present or not.
+  bool has_then_stm = node->HasThenStatement();
+  bool has_else_stm = node->HasElseStatement();
+
+  CodeForStatementPosition(node);
+  JumpTarget exit;
+  if (has_then_stm && has_else_stm) {
+    JumpTarget then;
+    JumpTarget else_;
+    ControlDestination dest(&then, &else_, true);
+    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+
+    if (dest.false_was_fall_through()) {
+      // The else target was bound, so we compile the else part first.
+      Visit(node->else_statement());
+
+      // We may have dangling jumps to the then part.
+      if (then.is_linked()) {
+        if (has_valid_frame()) exit.Jump();
+        then.Bind();
+        Visit(node->then_statement());
+      }
+    } else {
+      // The then target was bound, so we compile the then part first.
+      Visit(node->then_statement());
+
+      if (else_.is_linked()) {
+        if (has_valid_frame()) exit.Jump();
+        else_.Bind();
+        Visit(node->else_statement());
+      }
+    }
+
+  } else if (has_then_stm) {
+    ASSERT(!has_else_stm);
+    JumpTarget then;
+    ControlDestination dest(&then, &exit, true);
+    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+
+    if (dest.false_was_fall_through()) {
+      // The exit label was bound.  We may have dangling jumps to the
+      // then part.
+      if (then.is_linked()) {
+        exit.Unuse();
+        exit.Jump();
+        then.Bind();
+        Visit(node->then_statement());
+      }
+    } else {
+      // The then label was bound.
+      Visit(node->then_statement());
+    }
+
+  } else if (has_else_stm) {
+    ASSERT(!has_then_stm);
+    JumpTarget else_;
+    ControlDestination dest(&exit, &else_, false);
+    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+
+    if (dest.true_was_fall_through()) {
+      // The exit label was bound.  We may have dangling jumps to the
+      // else part.
+      if (else_.is_linked()) {
+        exit.Unuse();
+        exit.Jump();
+        else_.Bind();
+        Visit(node->else_statement());
+      }
+    } else {
+      // The else label was bound.
+      Visit(node->else_statement());
+    }
+
+  } else {
+    ASSERT(!has_then_stm && !has_else_stm);
+    // We only care about the condition's side effects (not its value
+    // or control flow effect).  LoadCondition is called without
+    // forcing control flow.
+    ControlDestination dest(&exit, &exit, true);
+    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, false);
+    if (!dest.is_used()) {
+      // We got a value on the frame rather than (or in addition to)
+      // control flow.
+      frame_->Drop();
+    }
+  }
+
+  if (exit.is_linked()) {
+    exit.Bind();
+  }
+}
+
+
+void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ ContinueStatement");
+  CodeForStatementPosition(node);
+  node->target()->continue_target()->Jump();
+}
+
+
+void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ BreakStatement");
+  CodeForStatementPosition(node);
+  node->target()->break_target()->Jump();
+}
+
+
+void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ ReturnStatement");
+
+  CodeForStatementPosition(node);
+  Load(node->expression());
+  Result return_value = frame_->Pop();
+  if (function_return_is_shadowed_) {
+    function_return_.Jump(&return_value);
+  } else {
+    frame_->PrepareForReturn();
+    if (function_return_.is_bound()) {
+      // If the function return label is already bound we reuse the
+      // code by jumping to the return site.
+      function_return_.Jump(&return_value);
+    } else {
+      function_return_.Bind(&return_value);
+      GenerateReturnSequence(&return_value);
+    }
+  }
+}
+
+
+void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ WithEnterStatement");
+  CodeForStatementPosition(node);
+  Load(node->expression());
+  Result context;
+  if (node->is_catch_block()) {
+    context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
+  } else {
+    context = frame_->CallRuntime(Runtime::kPushContext, 1);
+  }
+
+  // Update context local.
+  frame_->SaveContextRegister();
+
+  // Verify that the runtime call result and rsi agree.
+  if (FLAG_debug_code) {
+    __ cmpq(context.reg(), rsi);
+    __ Assert(equal, "Runtime::NewContext should end up in rsi");
+  }
+}
+
+
+void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ WithExitStatement");
+  CodeForStatementPosition(node);
+  // Pop context.
+  __ movq(rsi, ContextOperand(rsi, Context::PREVIOUS_INDEX));
+  // Update context local.
+  frame_->SaveContextRegister();
+}
+
+
+void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
+  // TODO(X64): This code is completely generic and should be moved somewhere
+  // where it can be shared between architectures.
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ SwitchStatement");
+  CodeForStatementPosition(node);
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+  // Compile the switch value.
+  Load(node->tag());
+
+  ZoneList<CaseClause*>* cases = node->cases();
+  int length = cases->length();
+  CaseClause* default_clause = NULL;
+
+  JumpTarget next_test;
+  // Compile the case label expressions and comparisons.  Exit early
+  // if a comparison is unconditionally true.  The target next_test is
+  // bound before the loop in order to indicate control flow to the
+  // first comparison.
+  next_test.Bind();
+  for (int i = 0; i < length && !next_test.is_unused(); i++) {
+    CaseClause* clause = cases->at(i);
+    // The default is not a test, but remember it for later.
+    if (clause->is_default()) {
+      default_clause = clause;
+      continue;
+    }
+
+    Comment cmnt(masm_, "[ Case comparison");
+    // We recycle the same target next_test for each test.  Bind it if
+    // the previous test has not done so and then unuse it for the
+    // loop.
+    if (next_test.is_linked()) {
+      next_test.Bind();
+    }
+    next_test.Unuse();
+
+    // Duplicate the switch value.
+    frame_->Dup();
+
+    // Compile the label expression.
+    Load(clause->label());
+
+    // Compare and branch to the body if true or the next test if
+    // false.  Prefer the next test as a fall through.
+    ControlDestination dest(clause->body_target(), &next_test, false);
+    Comparison(equal, true, &dest);
+
+    // If the comparison fell through to the true target, jump to the
+    // actual body.
+    if (dest.true_was_fall_through()) {
+      clause->body_target()->Unuse();
+      clause->body_target()->Jump();
+    }
+  }
+
+  // If there was control flow to a next test from the last one
+  // compiled, compile a jump to the default or break target.
+  if (!next_test.is_unused()) {
+    if (next_test.is_linked()) {
+      next_test.Bind();
+    }
+    // Drop the switch value.
+    frame_->Drop();
+    if (default_clause != NULL) {
+      default_clause->body_target()->Jump();
+    } else {
+      node->break_target()->Jump();
+    }
+  }
+
+  // The last instruction emitted was a jump, either to the default
+  // clause or the break target, or else to a case body from the loop
+  // that compiles the tests.
+  ASSERT(!has_valid_frame());
+  // Compile case bodies as needed.
+  for (int i = 0; i < length; i++) {
+    CaseClause* clause = cases->at(i);
+
+    // There are two ways to reach the body: from the corresponding
+    // test or as the fall through of the previous body.
+    if (clause->body_target()->is_linked() || has_valid_frame()) {
+      if (clause->body_target()->is_linked()) {
+        if (has_valid_frame()) {
+          // If we have both a jump to the test and a fall through, put
+          // a jump on the fall through path to avoid the dropping of
+          // the switch value on the test path.  The exception is the
+          // default which has already had the switch value dropped.
+          if (clause->is_default()) {
+            clause->body_target()->Bind();
+          } else {
+            JumpTarget body;
+            body.Jump();
+            clause->body_target()->Bind();
+            frame_->Drop();
+            body.Bind();
+          }
+        } else {
+          // No fall through to worry about.
+          clause->body_target()->Bind();
+          if (!clause->is_default()) {
+            frame_->Drop();
+          }
+        }
+      } else {
+        // Otherwise, we have only fall through.
+        ASSERT(has_valid_frame());
+      }
+
+      // We are now prepared to compile the body.
+      Comment cmnt(masm_, "[ Case body");
+      VisitStatements(clause->statements());
+    }
+    clause->body_target()->Unuse();
+  }
+
+  // We may not have a valid frame here so bind the break target only
+  // if needed.
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
+  node->break_target()->Unuse();
+}
+
+
+void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ LoopStatement");
+  CodeForStatementPosition(node);
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+  // Simple condition analysis.  ALWAYS_TRUE and ALWAYS_FALSE represent a
+  // known result for the test expression, with no side effects.
+  enum { ALWAYS_TRUE, ALWAYS_FALSE, DONT_KNOW } info = DONT_KNOW;
+  if (node->cond() == NULL) {
+    ASSERT(node->type() == LoopStatement::FOR_LOOP);
+    info = ALWAYS_TRUE;
+  } else {
+    Literal* lit = node->cond()->AsLiteral();
+    if (lit != NULL) {
+      if (lit->IsTrue()) {
+        info = ALWAYS_TRUE;
+      } else if (lit->IsFalse()) {
+        info = ALWAYS_FALSE;
+      }
+    }
+  }
+
+  switch (node->type()) {
+    case LoopStatement::DO_LOOP: {
+      JumpTarget body(JumpTarget::BIDIRECTIONAL);
+      IncrementLoopNesting();
+
+      // Label the top of the loop for the backward jump if necessary.
+      if (info == ALWAYS_TRUE) {
+        // Use the continue target.
+        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+        node->continue_target()->Bind();
+      } else if (info == ALWAYS_FALSE) {
+        // No need to label it.
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+      } else {
+        // Continue is the test, so use the backward body target.
+        ASSERT(info == DONT_KNOW);
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+        body.Bind();
+      }
+
+      CheckStack();  // TODO(1222600): ignore if body contains calls.
+      Visit(node->body());
+
+      // Compile the test.
+      if (info == ALWAYS_TRUE) {
+        // If control flow can fall off the end of the body, jump back
+        // to the top and bind the break target at the exit.
+        if (has_valid_frame()) {
+          node->continue_target()->Jump();
+        }
+        if (node->break_target()->is_linked()) {
+          node->break_target()->Bind();
+        }
+
+      } else if (info == ALWAYS_FALSE) {
+        // We may have had continues or breaks in the body.
+        if (node->continue_target()->is_linked()) {
+          node->continue_target()->Bind();
+        }
+        if (node->break_target()->is_linked()) {
+          node->break_target()->Bind();
+        }
+
+      } else {
+        ASSERT(info == DONT_KNOW);
+        // We have to compile the test expression if it can be reached by
+        // control flow falling out of the body or via continue.
+        if (node->continue_target()->is_linked()) {
+          node->continue_target()->Bind();
+        }
+        if (has_valid_frame()) {
+          ControlDestination dest(&body, node->break_target(), false);
+          LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+        }
+        if (node->break_target()->is_linked()) {
+          node->break_target()->Bind();
+        }
+      }
+      break;
+    }
+
+    case LoopStatement::WHILE_LOOP: {
+      // Do not duplicate conditions that may have function literal
+      // subexpressions.  This can cause us to compile the function
+      // literal twice.
+      bool test_at_bottom = !node->may_have_function_literal();
+
+      IncrementLoopNesting();
+
+      // If the condition is always false and has no side effects, we
+      // do not need to compile anything.
+      if (info == ALWAYS_FALSE) break;
+
+      JumpTarget body;
+      if (test_at_bottom) {
+        body.set_direction(JumpTarget::BIDIRECTIONAL);
+      }
+
+      // Based on the condition analysis, compile the test as necessary.
+      if (info == ALWAYS_TRUE) {
+        // We will not compile the test expression.  Label the top of
+        // the loop with the continue target.
+        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+        node->continue_target()->Bind();
+      } else {
+        ASSERT(info == DONT_KNOW);  // ALWAYS_FALSE cannot reach here.
+        if (test_at_bottom) {
+          // Continue is the test at the bottom, no need to label the
+          // test at the top.  The body is a backward target.
+          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+        } else {
+          // Label the test at the top as the continue target.  The
+          // body is a forward-only target.
+          node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+          node->continue_target()->Bind();
+        }
+        // Compile the test with the body as the true target and
+        // preferred fall-through and with the break target as the
+        // false target.
+        ControlDestination dest(&body, node->break_target(), true);
+        LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+
+        if (dest.false_was_fall_through()) {
+          // If we got the break target as fall-through, the test may
+          // have been unconditionally false (if there are no jumps to
+          // the body).
+          if (!body.is_linked()) break;
+
+          // Otherwise, jump around the body on the fall through and
+          // then bind the body target.
+          node->break_target()->Unuse();
+          node->break_target()->Jump();
+          body.Bind();
+        }
+      }
+
+      CheckStack();  // TODO(1222600): ignore if body contains calls.
+      Visit(node->body());
+
+      // Based on the condition analysis, compile the backward jump as
+      // necessary.
+      if (info == ALWAYS_TRUE) {
+        // The loop body has been labeled with the continue target.
+        if (has_valid_frame()) {
+          node->continue_target()->Jump();
+        }
+      } else {
+        ASSERT(info == DONT_KNOW);  // ALWAYS_FALSE cannot reach here.
+        if (test_at_bottom) {
+          // If we have chosen to recompile the test at the bottom,
+          // then it is the continue target.
+          if (node->continue_target()->is_linked()) {
+            node->continue_target()->Bind();
+          }
+          if (has_valid_frame()) {
+            // The break target is the fall-through (body is a backward
+            // jump from here and thus an invalid fall-through).
+            ControlDestination dest(&body, node->break_target(), false);
+            LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+          }
+        } else {
+          // If we have chosen not to recompile the test at the
+          // bottom, jump back to the one at the top.
+          if (has_valid_frame()) {
+            node->continue_target()->Jump();
+          }
+        }
+      }
+
+      // The break target may be already bound (by the condition), or
+      // there may not be a valid frame.  Bind it only if needed.
+      if (node->break_target()->is_linked()) {
+        node->break_target()->Bind();
+      }
+      break;
+    }
+
+    case LoopStatement::FOR_LOOP: {
+      // Do not duplicate conditions that may have function literal
+      // subexpressions.  This can cause us to compile the function
+      // literal twice.
+      bool test_at_bottom = !node->may_have_function_literal();
+
+      // Compile the init expression if present.
+      if (node->init() != NULL) {
+        Visit(node->init());
+      }
+
+      IncrementLoopNesting();
+
+      // If the condition is always false and has no side effects, we
+      // do not need to compile anything else.
+      if (info == ALWAYS_FALSE) break;
+
+      // Target for backward edge if no test at the bottom, otherwise
+      // unused.
+      JumpTarget loop(JumpTarget::BIDIRECTIONAL);
+
+      // Target for backward edge if there is a test at the bottom,
+      // otherwise used as target for test at the top.
+      JumpTarget body;
+      if (test_at_bottom) {
+        body.set_direction(JumpTarget::BIDIRECTIONAL);
+      }
+
+      // Based on the condition analysis, compile the test as necessary.
+      if (info == ALWAYS_TRUE) {
+        // We will not compile the test expression.  Label the top of
+        // the loop.
+        if (node->next() == NULL) {
+          // Use the continue target if there is no update expression.
+          node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+          node->continue_target()->Bind();
+        } else {
+          // Otherwise use the backward loop target.
+          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+          loop.Bind();
+        }
+      } else {
+        ASSERT(info == DONT_KNOW);
+        if (test_at_bottom) {
+          // Continue is either the update expression or the test at
+          // the bottom, no need to label the test at the top.
+          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+        } else if (node->next() == NULL) {
+          // We are not recompiling the test at the bottom and there
+          // is no update expression.
+          node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+          node->continue_target()->Bind();
+        } else {
+          // We are not recompiling the test at the bottom and there
+          // is an update expression.
+          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+          loop.Bind();
+        }
+
+        // Compile the test with the body as the true target and
+        // preferred fall-through and with the break target as the
+        // false target.
+        ControlDestination dest(&body, node->break_target(), true);
+        LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+
+        if (dest.false_was_fall_through()) {
+          // If we got the break target as fall-through, the test may
+          // have been unconditionally false (if there are no jumps to
+          // the body).
+          if (!body.is_linked()) break;
+
+          // Otherwise, jump around the body on the fall through and
+          // then bind the body target.
+          node->break_target()->Unuse();
+          node->break_target()->Jump();
+          body.Bind();
+        }
+      }
+
+      CheckStack();  // TODO(1222600): ignore if body contains calls.
+      Visit(node->body());
+
+      // If there is an update expression, compile it if necessary.
+      if (node->next() != NULL) {
+        if (node->continue_target()->is_linked()) {
+          node->continue_target()->Bind();
+        }
+
+        // Control can reach the update by falling out of the body or
+        // by a continue.
+        if (has_valid_frame()) {
+          // Record the source position of the statement; this code,
+          // which comes after the code for the body, belongs to the
+          // loop statement rather than to the body.
+          CodeForStatementPosition(node);
+          Visit(node->next());
+        }
+      }
+
+      // Based on the condition analysis, compile the backward jump as
+      // necessary.
+      if (info == ALWAYS_TRUE) {
+        if (has_valid_frame()) {
+          if (node->next() == NULL) {
+            node->continue_target()->Jump();
+          } else {
+            loop.Jump();
+          }
+        }
+      } else {
+        ASSERT(info == DONT_KNOW);  // ALWAYS_FALSE cannot reach here.
+        if (test_at_bottom) {
+          if (node->continue_target()->is_linked()) {
+            // We can have dangling jumps to the continue target if
+            // there was no update expression.
+            node->continue_target()->Bind();
+          }
+          // Control can reach the test at the bottom by falling out
+          // of the body, by a continue in the body, or from the
+          // update expression.
+          if (has_valid_frame()) {
+            // The break target is the fall-through (body is a
+            // backward jump from here).
+            ControlDestination dest(&body, node->break_target(), false);
+            LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+          }
+        } else {
+          // Otherwise, jump back to the test at the top.
+          if (has_valid_frame()) {
+            if (node->next() == NULL) {
+              node->continue_target()->Jump();
+            } else {
+              loop.Jump();
+            }
+          }
+        }
+      }
+
+      // The break target may be already bound (by the condition), or
+      // there may not be a valid frame.  Bind it only if needed.
+      if (node->break_target()->is_linked()) {
+        node->break_target()->Bind();
+      }
+      break;
+    }
+  }
+
+  DecrementLoopNesting();
+  node->continue_target()->Unuse();
+  node->break_target()->Unuse();
+}
+
+
+void CodeGenerator::VisitForInStatement(ForInStatement* node) {
+  ASSERT(!in_spilled_code());
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ ForInStatement");
+  CodeForStatementPosition(node);
+
+  JumpTarget primitive;
+  JumpTarget jsobject;
+  JumpTarget fixed_array;
+  JumpTarget entry(JumpTarget::BIDIRECTIONAL);
+  JumpTarget end_del_check;
+  JumpTarget exit;
+
+  // Get the object to enumerate over (converted to JSObject).
+  LoadAndSpill(node->enumerable());
+
+  // Both SpiderMonkey and kjs ignore null and undefined in contrast
+  // to the specification.  12.6.4 mandates a call to ToObject.
+  frame_->EmitPop(rax);
+
+  // rax: value to be iterated over
+  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+  exit.Branch(equal);
+  __ CompareRoot(rax, Heap::kNullValueRootIndex);
+  exit.Branch(equal);
+
+  // Stack layout in body:
+  // [iteration counter (smi)] <- slot 0
+  // [length of array]         <- slot 1
+  // [FixedArray]              <- slot 2
+  // [Map or 0]                <- slot 3
+  // [Object]                  <- slot 4
+
+  // Check if enumerable is already a JSObject
+  // rax: value to be iterated over
+  Condition is_smi = masm_->CheckSmi(rax);
+  primitive.Branch(is_smi);
+  __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+  jsobject.Branch(above_equal);
+
+  primitive.Bind();
+  frame_->EmitPush(rax);
+  frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
+  // The function call returns the value in rax, which is where we want it
+  // below.
+
+  jsobject.Bind();
+  // Get the set of properties (as a FixedArray or Map).
+  // rax: value to be iterated over
+  frame_->EmitPush(rax);  // push the object being iterated over (slot 4)
+
+  frame_->EmitPush(rax);  // push the Object (slot 4) for the runtime call
+  frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+  // If we got a Map, we can do a fast modification check.
+  // Otherwise, we got a FixedArray, and we have to do a slow check.
+  // rax: map or fixed array (result from call to
+  // Runtime::kGetPropertyNamesFast)
+  __ movq(rdx, rax);
+  __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+  __ CompareRoot(rcx, Heap::kMetaMapRootIndex);
+  fixed_array.Branch(not_equal);
+
+  // Get enum cache
+  // rax: map (result from call to Runtime::kGetPropertyNamesFast)
+  __ movq(rcx, rax);
+  __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset));
+  // Get the bridge array held in the enumeration index field.
+  __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
+  // Get the cache from the bridge array.
+  __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+  frame_->EmitPush(rax);  // <- slot 3
+  frame_->EmitPush(rdx);  // <- slot 2
+  __ movl(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
+  __ Integer32ToSmi(rax, rax);
+  frame_->EmitPush(rax);  // <- slot 1
+  frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 0
+  entry.Jump();
+
+  fixed_array.Bind();
+  // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
+  frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 3
+  frame_->EmitPush(rax);  // <- slot 2
+
+  // Push the length of the array and the initial index onto the stack.
+  __ movl(rax, FieldOperand(rax, FixedArray::kLengthOffset));
+  __ Integer32ToSmi(rax, rax);
+  frame_->EmitPush(rax);  // <- slot 1
+  frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 0
+
+  // Condition.
+  entry.Bind();
+  // Grab the current frame's height for the break and continue
+  // targets only after all the state is pushed on the frame.
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
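+  // Exit via the break target once the counter reaches the array length.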
+  __ movq(rax, frame_->ElementAt(0));  // load the current count
+  __ cmpl(rax, frame_->ElementAt(1));  // compare to the array length
+  node->break_target()->Branch(above_equal);
+
+  // Get the i'th entry of the array.
+  __ movq(rdx, frame_->ElementAt(2));
+  SmiIndex index = masm_->SmiToIndex(rbx, rax, kPointerSizeLog2);
+  __ movq(rbx,
+          FieldOperand(rdx, index.reg, index.scale, FixedArray::kHeaderSize));
+
+  // Get the expected map from the stack or a zero map in the
+  // permanent slow case.
+  // rax: current iteration count
+  // rbx: i'th entry of the enum cache
+  __ movq(rdx, frame_->ElementAt(3));
+  // Check if the expected map still matches that of the enumerable.
+  // If not, we have to filter the key.
+  // rax: current iteration count
+  // rbx: i'th entry of the enum cache
+  // rdx: expected map value
+  __ movq(rcx, frame_->ElementAt(4));
+  __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+  __ cmpq(rcx, rdx);
+  end_del_check.Branch(equal);
+
+  // Convert the entry to a string (or null if it isn't a property anymore).
+  frame_->EmitPush(frame_->ElementAt(4));  // push enumerable
+  frame_->EmitPush(rbx);  // push entry
+  frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
+  __ movq(rbx, rax);
+
+  // If the property has been removed while iterating, we just skip it.
+  __ CompareRoot(rbx, Heap::kNullValueRootIndex);
+  node->continue_target()->Branch(equal);
+
+  end_del_check.Bind();
+  // Store the entry in the 'each' expression and take another spin in the
+  // loop.  rbx: i'th entry of the enum cache (or string thereof)
+  frame_->EmitPush(rbx);
+  { Reference each(this, node->each());
+    // Loading a reference may leave the frame in an unspilled state.
+    frame_->SpillAll();
+    if (!each.is_illegal()) {
+      if (each.size() > 0) {
+        frame_->EmitPush(frame_->ElementAt(each.size()));
+      }
+      // If the reference was to a slot we rely on the convenient property
+      // that it doesn't matter whether a value (e.g., rbx pushed above) is
+      // right on top of or right underneath a zero-sized reference.
+      each.SetValue(NOT_CONST_INIT);
+      if (each.size() > 0) {
+        // It's safe to pop the value lying on top of the reference before
+        // unloading the reference itself (which preserves the top of stack,
+        // i.e., now the topmost value of the non-zero sized reference), since
+        // we will discard the top of stack after unloading the reference
+        // anyway.
+        frame_->Drop();
+      }
+    }
+  }
+  // Unloading a reference may leave the frame in an unspilled state.
+  frame_->SpillAll();
+
+  // Discard the i'th entry pushed above or else the remainder of the
+  // reference, whichever is currently on top of the stack.
+  frame_->Drop();
+
+  // Body.
+  CheckStack();  // TODO(1222600): ignore if body contains calls.
+  VisitAndSpill(node->body());
+
+  // Next.  Reestablish a spilled frame in case we are coming here via
+  // a continue in the body.
+  node->continue_target()->Bind();
+  frame_->SpillAll();
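+  // Increment the iteration counter (a smi) and jump back to the condition.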
+  frame_->EmitPop(rax);
+  __ addq(rax, Immediate(Smi::FromInt(1)));
+  frame_->EmitPush(rax);
+  entry.Jump();
+
+  // Cleanup.  No need to spill because VirtualFrame::Drop is safe for
+  // any frame.
+  node->break_target()->Bind();
+  frame_->Drop(5);
+
+  // Exit.
+  exit.Bind();
+
+  node->continue_target()->Unuse();
+  node->break_target()->Unuse();
+}
+
+
+void CodeGenerator::VisitTryCatch(TryCatch* node) {
+  ASSERT(!in_spilled_code());
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ TryCatch");
+  CodeForStatementPosition(node);
+
+  JumpTarget try_block;
+  JumpTarget exit;
+
+  try_block.Call();
+  // --- Catch block ---
+  frame_->EmitPush(rax);
+
+  // Store the caught exception in the catch variable.
+  { Reference ref(this, node->catch_var());
+    ASSERT(ref.is_slot());
+    // Load the exception to the top of the stack.  Here we make use of the
+    // convenient property that it doesn't matter whether a value is
+    // immediately on top of or underneath a zero-sized reference.
+    ref.SetValue(NOT_CONST_INIT);
+  }
+
+  // Remove the exception from the stack.
+  frame_->Drop();
+
+  VisitStatementsAndSpill(node->catch_block()->statements());
+  if (has_valid_frame()) {
+    exit.Jump();
+  }
+
+
+  // --- Try block ---
+  try_block.Bind();
+
+  frame_->PushTryHandler(TRY_CATCH_HANDLER);
+  int handler_height = frame_->height();
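+  // Remember the frame height with the handler on it; it is used below
+  // when unlinking to forget elements left above the handler.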
+
+  // Shadow the jump targets for all escapes from the try block, including
+  // returns.  During shadowing, the original target is hidden as the
+  // ShadowTarget and operations on the original actually affect the
+  // shadowing target.
+  //
+  // We should probably try to unify the escaping targets and the return
+  // target.
+  int nof_escapes = node->escaping_targets()->length();
+  List<ShadowTarget*> shadows(1 + nof_escapes);
+
+  // Add the shadow target for the function return.
+  static const int kReturnShadowIndex = 0;
+  shadows.Add(new ShadowTarget(&function_return_));
+  bool function_return_was_shadowed = function_return_is_shadowed_;
+  function_return_is_shadowed_ = true;
+  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
+
+  // Add the remaining shadow targets.
+  for (int i = 0; i < nof_escapes; i++) {
+    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
+  }
+
+  // Generate code for the statements in the try block.
+  VisitStatementsAndSpill(node->try_block()->statements());
+
+  // Stop the introduced shadowing and count the number of required unlinks.
+  // After shadowing stops, the original targets are unshadowed and the
+  // ShadowTargets represent the formerly shadowing targets.
+  bool has_unlinks = false;
+  for (int i = 0; i < shadows.length(); i++) {
+    shadows[i]->StopShadowing();
+    has_unlinks = has_unlinks || shadows[i]->is_linked();
+  }
+  function_return_is_shadowed_ = function_return_was_shadowed;
+
+  // Get an external reference to the handler address.
+  ExternalReference handler_address(Top::k_handler_address);
+
+  // Make sure that there's nothing left on the stack above the
+  // handler structure.
+  if (FLAG_debug_code) {
+    __ movq(kScratchRegister, handler_address);
+    __ cmpq(rsp, Operand(kScratchRegister, 0));
+    __ Assert(equal, "stack pointer should point to top handler");
+  }
+
+  // If we can fall off the end of the try block, unlink from try chain.
+  if (has_valid_frame()) {
+    // The next handler address is on top of the frame.  Unlink from
+    // the handler list and drop the rest of this handler from the
+    // frame.
+    ASSERT(StackHandlerConstants::kNextOffset == 0);
+    __ movq(kScratchRegister, handler_address);
+    frame_->EmitPop(Operand(kScratchRegister, 0));
+    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+    if (has_unlinks) {
+      exit.Jump();
+    }
+  }
+
+  // Generate unlink code for the (formerly) shadowing targets that
+  // have been jumped to.  Deallocate each shadow target.
+  Result return_value;
+  for (int i = 0; i < shadows.length(); i++) {
+    if (shadows[i]->is_linked()) {
+      // Unlink from try chain; be careful not to destroy the TOS if
+      // there is one.
+      if (i == kReturnShadowIndex) {
+        shadows[i]->Bind(&return_value);
+        return_value.ToRegister(rax);
+      } else {
+        shadows[i]->Bind();
+      }
+      // Because we can be jumping here (to spilled code) from
+      // unspilled code, we need to reestablish a spilled frame at
+      // this block.
+      frame_->SpillAll();
+
+      // Reload sp from the top handler, because some statements that we
+      // break from (e.g., for...in) may have left stuff on the stack.
+      __ movq(kScratchRegister, handler_address);
+      __ movq(rsp, Operand(kScratchRegister, 0));
+      frame_->Forget(frame_->height() - handler_height);
+
+      ASSERT(StackHandlerConstants::kNextOffset == 0);
+      __ movq(kScratchRegister, handler_address);
+      frame_->EmitPop(Operand(kScratchRegister, 0));
+      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+
+      if (i == kReturnShadowIndex) {
+        if (!function_return_is_shadowed_) frame_->PrepareForReturn();
+        shadows[i]->other_target()->Jump(&return_value);
+      } else {
+        shadows[i]->other_target()->Jump();
+      }
+    }
+  }
+
+  exit.Bind();
+}
+
+
+void CodeGenerator::VisitTryFinally(TryFinally* node) {
+  ASSERT(!in_spilled_code());
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ TryFinally");
+  CodeForStatementPosition(node);
+
+  // State: Used to keep track of reason for entering the finally
+  // block. Should probably be extended to hold information for
+  // break/continue from within the try block.
+  enum { FALLING, THROWING, JUMPING };
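+  // JUMPING + i is used below to record a jump through the i'th shadowed
+  // target.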
+
+  JumpTarget try_block;
+  JumpTarget finally_block;
+
+  try_block.Call();
+
+  frame_->EmitPush(rax);
+  // In case of thrown exceptions, this is where we continue.
+  __ movq(rcx, Immediate(Smi::FromInt(THROWING)));
+  finally_block.Jump();
+
+  // --- Try block ---
+  try_block.Bind();
+
+  frame_->PushTryHandler(TRY_FINALLY_HANDLER);
+  int handler_height = frame_->height();
+
+  // Shadow the jump targets for all escapes from the try block, including
+  // returns.  During shadowing, the original target is hidden as the
+  // ShadowTarget and operations on the original actually affect the
+  // shadowing target.
+  //
+  // We should probably try to unify the escaping targets and the return
+  // target.
+  int nof_escapes = node->escaping_targets()->length();
+  List<ShadowTarget*> shadows(1 + nof_escapes);
+
+  // Add the shadow target for the function return.
+  static const int kReturnShadowIndex = 0;
+  shadows.Add(new ShadowTarget(&function_return_));
+  bool function_return_was_shadowed = function_return_is_shadowed_;
+  function_return_is_shadowed_ = true;
+  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
+
+  // Add the remaining shadow targets.
+  for (int i = 0; i < nof_escapes; i++) {
+    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
+  }
+
+  // Generate code for the statements in the try block.
+  VisitStatementsAndSpill(node->try_block()->statements());
+
+  // Stop the introduced shadowing and count the number of required unlinks.
+  // After shadowing stops, the original targets are unshadowed and the
+  // ShadowTargets represent the formerly shadowing targets.
+  int nof_unlinks = 0;
+  for (int i = 0; i < shadows.length(); i++) {
+    shadows[i]->StopShadowing();
+    if (shadows[i]->is_linked()) nof_unlinks++;
+  }
+  function_return_is_shadowed_ = function_return_was_shadowed;
+
+  // Get an external reference to the handler address.
+  ExternalReference handler_address(Top::k_handler_address);
+
+  // If we can fall off the end of the try block, unlink from the try
+  // chain and set the state on the frame to FALLING.
+  if (has_valid_frame()) {
+    // The next handler address is on top of the frame.
+    ASSERT(StackHandlerConstants::kNextOffset == 0);
+    __ movq(kScratchRegister, handler_address);
+    frame_->EmitPop(Operand(kScratchRegister, 0));
+    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+
+    // Fake a top of stack value (unneeded when FALLING) and set the
+    // state in rcx, then jump around the unlink blocks if any.
+    frame_->EmitPush(Heap::kUndefinedValueRootIndex);
+    __ movq(rcx, Immediate(Smi::FromInt(FALLING)));
+    if (nof_unlinks > 0) {
+      finally_block.Jump();
+    }
+  }
+
+  // Generate code to unlink and set the state for the (formerly)
+  // shadowing targets that have been jumped to.
+  for (int i = 0; i < shadows.length(); i++) {
+    if (shadows[i]->is_linked()) {
+      // If we have come from the shadowed return, the return value is
+      // on the virtual frame.  We must preserve it until it is
+      // pushed.
+      if (i == kReturnShadowIndex) {
+        Result return_value;
+        shadows[i]->Bind(&return_value);
+        return_value.ToRegister(rax);
+      } else {
+        shadows[i]->Bind();
+      }
+      // Because we can be jumping here (to spilled code) from
+      // unspilled code, we need to reestablish a spilled frame at
+      // this block.
+      frame_->SpillAll();
+
+      // Reload sp from the top handler, because some statements that
+      // we break from (e.g., for...in) may have left stuff on the
+      // stack.
+      __ movq(kScratchRegister, handler_address);
+      __ movq(rsp, Operand(kScratchRegister, 0));
+      frame_->Forget(frame_->height() - handler_height);
+
+      // Unlink this handler and drop it from the frame.
+      ASSERT(StackHandlerConstants::kNextOffset == 0);
+      __ movq(kScratchRegister, handler_address);
+      frame_->EmitPop(Operand(kScratchRegister, 0));
+      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+
+      if (i == kReturnShadowIndex) {
+        // If this target shadowed the function return, materialize
+        // the return value on the stack.
+        frame_->EmitPush(rax);
+      } else {
+        // Fake TOS for targets that shadowed breaks and continues.
+        frame_->EmitPush(Heap::kUndefinedValueRootIndex);
+      }
+      __ movq(rcx, Immediate(Smi::FromInt(JUMPING + i)));
+      if (--nof_unlinks > 0) {
+        // If this is not the last unlink block, jump around the next.
+        finally_block.Jump();
+      }
+    }
+  }
+
+  // --- Finally block ---
+  finally_block.Bind();
+
+  // Push the state on the stack.
+  frame_->EmitPush(rcx);
+
+  // We keep two elements on the stack - the (possibly faked) result
+  // and the state - while evaluating the finally block.
+  //
+  // Generate code for the statements in the finally block.
+  VisitStatementsAndSpill(node->finally_block()->statements());
+
+  if (has_valid_frame()) {
+    // Restore state and return value or faked TOS.
+    frame_->EmitPop(rcx);
+    frame_->EmitPop(rax);
+  }
+
+  // Generate code to jump to the right destination for all used
+  // formerly shadowing targets.  Deallocate each shadow target.
+  for (int i = 0; i < shadows.length(); i++) {
+    if (has_valid_frame() && shadows[i]->is_bound()) {
+      BreakTarget* original = shadows[i]->other_target();
+      __ cmpq(rcx, Immediate(Smi::FromInt(JUMPING + i)));
+      if (i == kReturnShadowIndex) {
+        // The return value is (already) in rax.
+        Result return_value = allocator_->Allocate(rax);
+        ASSERT(return_value.is_valid());
+        if (function_return_is_shadowed_) {
+          original->Branch(equal, &return_value);
+        } else {
+          // Branch around the preparation for return which may emit
+          // code.
+          JumpTarget skip;
+          skip.Branch(not_equal);
+          frame_->PrepareForReturn();
+          original->Jump(&return_value);
+          skip.Bind();
+        }
+      } else {
+        original->Branch(equal);
+      }
+    }
+  }
+
+  if (has_valid_frame()) {
+    // Check if we need to rethrow the exception.
+    JumpTarget exit;
+    __ cmpq(rcx, Immediate(Smi::FromInt(THROWING)));
+    exit.Branch(not_equal);
+
+    // Rethrow exception.
+    frame_->EmitPush(rax);  // undo pop from above
+    frame_->CallRuntime(Runtime::kReThrow, 1);
+
+    // Done.
+    exit.Bind();
+  }
+}
+
+
+void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ DebuggerStatement");
+  CodeForStatementPosition(node);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Spill everything, even constants, to the frame.
+  frame_->SpillAll();
+  frame_->CallRuntime(Runtime::kDebugBreak, 0);
+  // Ignore the return value.
+#endif
+}
+
+
+void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
+  // Call the runtime to instantiate the function boilerplate object.
+  // The inevitable call will sync frame elements to memory anyway, so
+  // we do it eagerly to allow us to push the arguments directly into
+  // place.
+  ASSERT(boilerplate->IsBoilerplate());
+  frame_->SyncRange(0, frame_->element_count() - 1);
+
+  // Push the boilerplate on the stack.
+  __ movq(kScratchRegister, boilerplate, RelocInfo::EMBEDDED_OBJECT);
+  frame_->EmitPush(kScratchRegister);
+
+  // Create a new closure.
+  frame_->EmitPush(rsi);
+  Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
+  frame_->Push(&result);
+}
+
+
+void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
+  Comment cmnt(masm_, "[ FunctionLiteral");
+
+  // Build the function boilerplate and instantiate it.
+  Handle<JSFunction> boilerplate = BuildBoilerplate(node);
+  // Check for stack-overflow exception.
+  if (HasStackOverflow()) return;
+  InstantiateBoilerplate(boilerplate);
+}
+
+
+void CodeGenerator::VisitFunctionBoilerplateLiteral(
+    FunctionBoilerplateLiteral* node) {
+  Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
+  InstantiateBoilerplate(node->boilerplate());
+}
+
+
+void CodeGenerator::VisitConditional(Conditional* node) {
+  Comment cmnt(masm_, "[ Conditional");
+  JumpTarget then;
+  JumpTarget else_;
+  JumpTarget exit;
+  ControlDestination dest(&then, &else_, true);
+  LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+
+  if (dest.false_was_fall_through()) {
+    // The else target was bound, so we compile the else part first.
+    Load(node->else_expression(), typeof_state());
+
+    if (then.is_linked()) {
+      exit.Jump();
+      then.Bind();
+      Load(node->then_expression(), typeof_state());
+    }
+  } else {
+    // The then target was bound, so we compile the then part first.
+    Load(node->then_expression(), typeof_state());
+
+    if (else_.is_linked()) {
+      exit.Jump();
+      else_.Bind();
+      Load(node->else_expression(), typeof_state());
+    }
+  }
+
+  exit.Bind();
+}
+
+
+void CodeGenerator::VisitSlot(Slot* node) {
+  Comment cmnt(masm_, "[ Slot");
+  LoadFromSlotCheckForArguments(node, typeof_state());
+}
+
+
+void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
+  Comment cmnt(masm_, "[ VariableProxy");
+  Variable* var = node->var();
+  Expression* expr = var->rewrite();
+  if (expr != NULL) {
+    Visit(expr);
+  } else {
+    ASSERT(var->is_global());
+    Reference ref(this, node);
+    ref.GetValue(typeof_state());
+  }
+}
+
+
+void CodeGenerator::VisitLiteral(Literal* node) {
+  Comment cmnt(masm_, "[ Literal");
+  frame_->Push(node->handle());
+}
+
+
+// Materialize the regexp literal 'node' in the literals array
+// 'literals' of the function.  Leave the regexp boilerplate in
+// 'boilerplate'.
+class DeferredRegExpLiteral: public DeferredCode {
+ public:
+  DeferredRegExpLiteral(Register boilerplate,
+                        Register literals,
+                        RegExpLiteral* node)
+      : boilerplate_(boilerplate), literals_(literals), node_(node) {
+    set_comment("[ DeferredRegExpLiteral");
+  }
+
+  void Generate();
+
+ private:
+  Register boilerplate_;
+  Register literals_;
+  RegExpLiteral* node_;
+};
+
+
+void DeferredRegExpLiteral::Generate() {
+  // Since the entry is undefined we call the runtime system to
+  // compute the literal.
+  // Literal array (0).
+  __ push(literals_);
+  // Literal index (1).
+  __ push(Immediate(Smi::FromInt(node_->literal_index())));
+  // RegExp pattern (2).
+  __ Push(node_->pattern());
+  // RegExp flags (3).
+  __ Push(node_->flags());
+  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
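+  // The runtime call leaves the boilerplate in rax; copy it to the
+  // requested register if necessary.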
+  if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
+}
+
+
+void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
+  Comment cmnt(masm_, "[ RegExp Literal");
+
+  // Retrieve the literals array and check the allocated entry.  Begin
+  // with a writable copy of the function of this activation in a
+  // register.
+  frame_->PushFunction();
+  Result literals = frame_->Pop();
+  literals.ToRegister();
+  frame_->Spill(literals.reg());
+
+  // Load the literals array of the function.
+  __ movq(literals.reg(),
+          FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
+
+  // Load the literal at the ast saved index.
+  Result boilerplate = allocator_->Allocate();
+  ASSERT(boilerplate.is_valid());
+  int literal_offset =
+      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
+  __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
+
+  // Check whether we need to materialize the RegExp object.  If so,
+  // jump to the deferred code passing the literals array.
+  DeferredRegExpLiteral* deferred =
+      new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
+  __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
+  deferred->Branch(equal);
+  deferred->BindExit();
+  literals.Unuse();
+
+  // Push the boilerplate object.
+  frame_->Push(&boilerplate);
+}
+
+
+// Materialize the object literal 'node' in the literals array
+// 'literals' of the function.  Leave the object boilerplate in
+// 'boilerplate'.
+class DeferredObjectLiteral: public DeferredCode {
+ public:
+  DeferredObjectLiteral(Register boilerplate,
+                        Register literals,
+                        ObjectLiteral* node)
+      : boilerplate_(boilerplate), literals_(literals), node_(node) {
+    set_comment("[ DeferredObjectLiteral");
+  }
+
+  void Generate();
+
+ private:
+  Register boilerplate_;
+  Register literals_;
+  ObjectLiteral* node_;
+};
+
+
+void DeferredObjectLiteral::Generate() {
+  // Since the entry is undefined we call the runtime system to
+  // compute the literal.
+  // Literal array (0).
+  __ push(literals_);
+  // Literal index (1).
+  __ push(Immediate(Smi::FromInt(node_->literal_index())));
+  // Constant properties (2).
+  __ Push(node_->constant_properties());
+  __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+  if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
+}
+
+
+void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
+  Comment cmnt(masm_, "[ ObjectLiteral");
+
+  // Retrieve the literals array and check the allocated entry.  Begin
+  // with a writable copy of the function of this activation in a
+  // register.
+  frame_->PushFunction();
+  Result literals = frame_->Pop();
+  literals.ToRegister();
+  frame_->Spill(literals.reg());
+
+  // Load the literals array of the function.
+  __ movq(literals.reg(),
+          FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
+
+  // Load the literal at the ast saved index.
+  Result boilerplate = allocator_->Allocate();
+  ASSERT(boilerplate.is_valid());
+  int literal_offset =
+      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
+  __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
+
+  // Check whether we need to materialize the object literal boilerplate.
+  // If so, jump to the deferred code passing the literals array.
+  DeferredObjectLiteral* deferred =
+      new DeferredObjectLiteral(boilerplate.reg(), literals.reg(), node);
+  __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
+  deferred->Branch(equal);
+  deferred->BindExit();
+  literals.Unuse();
+
+  // Push the boilerplate object.
+  frame_->Push(&boilerplate);
+  // Clone the boilerplate object.
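+  // A literal of depth 1 has no nested object or array literals, so the
+  // cheaper shallow clone can be used.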
+  Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
+  if (node->depth() == 1) {
+    clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
+  }
+  Result clone = frame_->CallRuntime(clone_function_id, 1);
+  // Push the newly cloned literal object as the result.
+  frame_->Push(&clone);
+
+  for (int i = 0; i < node->properties()->length(); i++) {
+    ObjectLiteral::Property* property = node->properties()->at(i);
+    switch (property->kind()) {
+      case ObjectLiteral::Property::CONSTANT:
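+        // Constant properties are already set on the boilerplate.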
+        break;
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+        if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
+        // else fall through.
+      case ObjectLiteral::Property::COMPUTED: {
+        Handle<Object> key(property->key()->handle());
+        if (key->IsSymbol()) {
+          // Duplicate the object as the IC receiver.
+          frame_->Dup();
+          Load(property->value());
+          frame_->Push(key);
+          Result ignored = frame_->CallStoreIC();
+          // Drop the duplicated receiver and ignore the result.
+          frame_->Drop();
+          break;
+        }
+        // Fall through
+      }
+      case ObjectLiteral::Property::PROTOTYPE: {
+        // Duplicate the object as an argument to the runtime call.
+        frame_->Dup();
+        Load(property->key());
+        Load(property->value());
+        Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 3);
+        // Ignore the result.
+        break;
+      }
+      case ObjectLiteral::Property::SETTER: {
+        // Duplicate the object as an argument to the runtime call.
+        frame_->Dup();
+        Load(property->key());
+        frame_->Push(Smi::FromInt(1));
+        Load(property->value());
+        Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
+        // Ignore the result.
+        break;
+      }
+      case ObjectLiteral::Property::GETTER: {
+        // Duplicate the object as an argument to the runtime call.
+        frame_->Dup();
+        Load(property->key());
+        frame_->Push(Smi::FromInt(0));
+        Load(property->value());
+        Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
+        // Ignore the result.
+        break;
+      }
+      default: UNREACHABLE();
+    }
+  }
+}
+
+
+// Materialize the array literal 'node' in the literals array 'literals'
+// of the function.  Leave the array boilerplate in 'boilerplate'.
+class DeferredArrayLiteral: public DeferredCode {
+ public:
+  DeferredArrayLiteral(Register boilerplate,
+                       Register literals,
+                       ArrayLiteral* node)
+      : boilerplate_(boilerplate), literals_(literals), node_(node) {
+    set_comment("[ DeferredArrayLiteral");
+  }
+
+  void Generate();
+
+ private:
+  Register boilerplate_;
+  Register literals_;
+  ArrayLiteral* node_;
+};
+
+
+void DeferredArrayLiteral::Generate() {
+  // Since the entry is undefined we call the runtime system to
+  // compute the literal.
+  // Literal array (0).
+  __ push(literals_);
+  // Literal index (1).
+  __ push(Immediate(Smi::FromInt(node_->literal_index())));
+  // Constant elements (2).
+  __ Push(node_->literals());
+  __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
+  if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
+}
+
+
+void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
+  Comment cmnt(masm_, "[ ArrayLiteral");
+
+  // Retrieve the literals array and check the allocated entry.  Begin
+  // with a writable copy of the function of this activation in a
+  // register.
+  frame_->PushFunction();
+  Result literals = frame_->Pop();
+  literals.ToRegister();
+  frame_->Spill(literals.reg());
+
+  // Load the literals array of the function.
+  __ movq(literals.reg(),
+          FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
+
+  // Load the literal at the ast saved index.
+  Result boilerplate = allocator_->Allocate();
+  ASSERT(boilerplate.is_valid());
+  int literal_offset =
+      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
+  __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
+
+  // Check whether we need to materialize the array literal boilerplate.
+  // If so, jump to the deferred code passing the literals array.
+  DeferredArrayLiteral* deferred =
+      new DeferredArrayLiteral(boilerplate.reg(), literals.reg(), node);
+  __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
+  deferred->Branch(equal);
+  deferred->BindExit();
+  literals.Unuse();
+
+  // Push the resulting array literal boilerplate on the stack.
+  frame_->Push(&boilerplate);
+  // Clone the boilerplate object.
+  Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
+  if (node->depth() == 1) {
+    clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
+  }
+  Result clone = frame_->CallRuntime(clone_function_id, 1);
+  // Push the newly cloned literal object as the result.
+  frame_->Push(&clone);
+
+  // Generate code to set the elements in the array that are not
+  // literals.
+  for (int i = 0; i < node->values()->length(); i++) {
+    Expression* value = node->values()->at(i);
+
+    // If value is a literal the property value is already set in the
+    // boilerplate object.
+    if (value->AsLiteral() != NULL) continue;
+    // If value is a materialized literal the property value is already set
+    // in the boilerplate object if it is simple.
+    if (CompileTimeValue::IsCompileTimeValue(value)) continue;
+
+    // The property must be set by generated code.
+    Load(value);
+
+    // Get the property value off the stack.
+    Result prop_value = frame_->Pop();
+    prop_value.ToRegister();
+
+    // Fetch the array literal while leaving a copy on the stack and
+    // use it to get the elements array.
+    frame_->Dup();
+    Result elements = frame_->Pop();
+    elements.ToRegister();
+    frame_->Spill(elements.reg());
+    // Get the elements FixedArray.
+    __ movq(elements.reg(),
+            FieldOperand(elements.reg(), JSObject::kElementsOffset));
+
+    // Write to the indexed properties array.
+    int offset = i * kPointerSize + FixedArray::kHeaderSize;
+    __ movq(FieldOperand(elements.reg(), offset), prop_value.reg());
+
+    // Update the write barrier for the array address.
+    frame_->Spill(prop_value.reg());  // Overwritten by the write barrier.
+    Result scratch = allocator_->Allocate();
+    ASSERT(scratch.is_valid());
+    __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
+  }
+}
+
+
+void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
+  ASSERT(!in_spilled_code());
+  // Call runtime routine to allocate the catch extension object and
+  // assign the exception value to the catch variable.
+  Comment cmnt(masm_, "[ CatchExtensionObject");
+  Load(node->key());
+  Load(node->value());
+  Result result =
+      frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
+  frame_->Push(&result);
+}
+
+
+void CodeGenerator::VisitAssignment(Assignment* node) {
+  Comment cmnt(masm_, "[ Assignment");
+
+  { Reference target(this, node->target());
+    if (target.is_illegal()) {
+      // Fool the virtual frame into thinking that we left the assignment's
+      // value on the frame.
+      frame_->Push(Smi::FromInt(0));
+      return;
+    }
+    Variable* var = node->target()->AsVariableProxy()->AsVariable();
+
+    if (node->starts_initialization_block()) {
+      ASSERT(target.type() == Reference::NAMED ||
+             target.type() == Reference::KEYED);
+      // Change to slow case in the beginning of an initialization
+      // block to avoid the quadratic behavior of repeatedly adding
+      // fast properties.
+
+      // The receiver is the argument to the runtime call.  It is the
+      // first value pushed when the reference was loaded to the
+      // frame.
+      frame_->PushElementAt(target.size() - 1);
+      Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
+    }
+    if (node->op() == Token::ASSIGN ||
+        node->op() == Token::INIT_VAR ||
+        node->op() == Token::INIT_CONST) {
+      Load(node->value());
+
+    } else {
+      Literal* literal = node->value()->AsLiteral();
+      bool overwrite_value =
+          (node->value()->AsBinaryOperation() != NULL &&
+           node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+      Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
+      // There are two cases, easy to test for, where the target is not read
+      // in the right hand side: the right hand side is a literal, or the
+      // right hand side is a different variable.  TakeValue invalidates the
+      // target, with an implicit promise that it will be written to again
+      // before it is read.
+      if (literal != NULL || (right_var != NULL && right_var != var)) {
+        target.TakeValue(NOT_INSIDE_TYPEOF);
+      } else {
+        target.GetValue(NOT_INSIDE_TYPEOF);
+      }
+      Load(node->value());
+      GenericBinaryOperation(node->binary_op(),
+                             node->type(),
+                             overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+    }
+
+    if (var != NULL &&
+        var->mode() == Variable::CONST &&
+        node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
+      // Assignment ignored - leave the value on the stack.
+    } else {
+      CodeForSourcePosition(node->position());
+      if (node->op() == Token::INIT_CONST) {
+        // Dynamic constant initializations must use the function context
+        // and initialize the actual constant declared. Dynamic variable
+        // initializations are simply assignments and use SetValue.
+        target.SetValue(CONST_INIT);
+      } else {
+        target.SetValue(NOT_CONST_INIT);
+      }
+      if (node->ends_initialization_block()) {
+        ASSERT(target.type() == Reference::NAMED ||
+               target.type() == Reference::KEYED);
+        // End of initialization block. Revert to fast case.  The
+        // argument to the runtime call is the receiver, which is the
+        // first value pushed as part of the reference, which is below
+        // the lhs value.
+        frame_->PushElementAt(target.size());
+        Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
+      }
+    }
+  }
+}
+
+
+void CodeGenerator::VisitThrow(Throw* node) {
+  Comment cmnt(masm_, "[ Throw");
+  Load(node->exception());
+  Result result = frame_->CallRuntime(Runtime::kThrow, 1);
+  frame_->Push(&result);
+}
+
+
+void CodeGenerator::VisitProperty(Property* node) {
+  Comment cmnt(masm_, "[ Property");
+  Reference property(this, node);
+  property.GetValue(typeof_state());
+}
+
+
+void CodeGenerator::VisitCall(Call* node) {
+  Comment cmnt(masm_, "[ Call");
+
+  ZoneList<Expression*>* args = node->arguments();
+
+  // Check if the function is a variable or a property.
+  Expression* function = node->expression();
+  Variable* var = function->AsVariableProxy()->AsVariable();
+  Property* property = function->AsProperty();
+
+  // ------------------------------------------------------------------------
+  // Fast-case: Use inline caching.
+  // ---
+  // According to ECMA-262, section 11.2.3, page 44, the function to call
+  // must be resolved after the arguments have been evaluated. The IC code
+  // automatically handles this by loading the arguments before the function
+  // is resolved in cache misses (this also holds for megamorphic calls).
+  // ------------------------------------------------------------------------
+
+  if (var != NULL && var->is_possibly_eval()) {
+    // ----------------------------------
+    // JavaScript example: 'eval(arg)'  // eval is not known to be shadowed
+    // ----------------------------------
+
+    // In a call to eval, we first call %ResolvePossiblyDirectEval to
+    // resolve the function we need to call and the receiver of the
+    // call.  Then we call the resolved function using the given
+    // arguments.
+
+    // Prepare the stack for the call to the resolved function.
+    Load(function);
+
+    // Allocate a frame slot for the receiver.
+    frame_->Push(Factory::undefined_value());
+    int arg_count = args->length();
+    for (int i = 0; i < arg_count; i++) {
+      Load(args->at(i));
+    }
+
+    // Prepare the stack for the call to ResolvePossiblyDirectEval.
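+    // Push the function (found below the receiver and arguments) and the
+    // first argument, or undefined if there are no arguments.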
+    frame_->PushElementAt(arg_count + 1);
+    if (arg_count > 0) {
+      frame_->PushElementAt(arg_count);
+    } else {
+      frame_->Push(Factory::undefined_value());
+    }
+
+    // Resolve the call.
+    Result result =
+        frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
+
+    // Touch up the stack with the right values for the function and the
+    // receiver.  Use a scratch register to avoid destroying the result.
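+    // The result is a fixed array holding the resolved function at index 0
+    // and the receiver at index 1.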
+    Result scratch = allocator_->Allocate();
+    ASSERT(scratch.is_valid());
+    __ movq(scratch.reg(),
+            FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(0)));
+    frame_->SetElementAt(arg_count + 1, &scratch);
+
+    // We can reuse the result register now.
+    frame_->Spill(result.reg());
+    __ movq(result.reg(),
+            FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(1)));
+    frame_->SetElementAt(arg_count, &result);
+
+    // Call the function.
+    CodeForSourcePosition(node->position());
+    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+    CallFunctionStub call_function(arg_count, in_loop);
+    result = frame_->CallStub(&call_function, arg_count + 1);
+
+    // Restore the context and overwrite the function on the stack with
+    // the result.
+    frame_->RestoreContextRegister();
+    frame_->SetElementAt(0, &result);
+
+  } else if (var != NULL && !var->is_this() && var->is_global()) {
+    // ----------------------------------
+    // JavaScript example: 'foo(1, 2, 3)'  // foo is global
+    // ----------------------------------
+
+    // Push the name of the function and the receiver onto the stack.
+    frame_->Push(var->name());
+
+    // Pass the global object as the receiver and let the IC stub
+    // patch the stack to use the global proxy as 'this' in the
+    // invoked function.
+    LoadGlobal();
+
+    // Load the arguments.
+    int arg_count = args->length();
+    for (int i = 0; i < arg_count; i++) {
+      Load(args->at(i));
+    }
+
+    // Call the IC initialization code.
+    CodeForSourcePosition(node->position());
+    Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
+                                       arg_count,
+                                       loop_nesting());
+    frame_->RestoreContextRegister();
+    // Replace the function on the stack with the result.
+    frame_->SetElementAt(0, &result);
+
+  } else if (var != NULL && var->slot() != NULL &&
+             var->slot()->type() == Slot::LOOKUP) {
+    // ----------------------------------
+    // JavaScript example: 'with (obj) foo(1, 2, 3)'  // foo is in obj
+    // ----------------------------------
+
+    // Load the function from the context.  Sync the frame so we can
+    // push the arguments directly into place.
+    frame_->SyncRange(0, frame_->element_count() - 1);
+    frame_->EmitPush(rsi);
+    frame_->EmitPush(var->name());
+    frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
+    // The runtime call returns a pair of values in rax and rdx.  The
+    // looked-up function is in rax and the receiver is in rdx.  These
+    // register references are not ref counted here.  We spill them
+    // eagerly since they are arguments to an inevitable call (and are
+    // not sharable by the arguments).
+    ASSERT(!allocator()->is_used(rax));
+    frame_->EmitPush(rax);
+
+    // Load the receiver.
+    ASSERT(!allocator()->is_used(rdx));
+    frame_->EmitPush(rdx);
+
+    // Call the function.
+    CallWithArguments(args, node->position());
+
+  } else if (property != NULL) {
+    // Check if the key is a literal string.
+    Literal* literal = property->key()->AsLiteral();
+
+    if (literal != NULL && literal->handle()->IsSymbol()) {
+      // ------------------------------------------------------------------
+      // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
+      // ------------------------------------------------------------------
+
+      Handle<String> name = Handle<String>::cast(literal->handle());
+
+      if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
+          name->IsEqualTo(CStrVector("apply")) &&
+          args->length() == 2 &&
+          args->at(1)->AsVariableProxy() != NULL &&
+          args->at(1)->AsVariableProxy()->IsArguments()) {
+        // Use the optimized Function.prototype.apply that avoids
+        // allocating lazily allocated arguments objects.
+        CallApplyLazy(property,
+                      args->at(0),
+                      args->at(1)->AsVariableProxy(),
+                      node->position());
+
+      } else {
+        // Push the name of the function and the receiver onto the stack.
+        frame_->Push(name);
+        Load(property->obj());
+
+        // Load the arguments.
+        int arg_count = args->length();
+        for (int i = 0; i < arg_count; i++) {
+          Load(args->at(i));
+        }
+
+        // Call the IC initialization code.
+        CodeForSourcePosition(node->position());
+        Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET,
+                                           arg_count,
+                                           loop_nesting());
+        frame_->RestoreContextRegister();
+        // Replace the function on the stack with the result.
+        frame_->SetElementAt(0, &result);
+      }
+
+    } else {
+      // -------------------------------------------
+      // JavaScript example: 'array[index](1, 2, 3)'
+      // -------------------------------------------
+
+      // Load the function to call from the property through a reference.
+      Reference ref(this, property);
+      ref.GetValue(NOT_INSIDE_TYPEOF);
+
+      // Pass receiver to called function.
+      if (property->is_synthetic()) {
+        // Use global object as receiver.
+        LoadGlobalReceiver();
+      } else {
+        // The reference's size is non-negative.
+        frame_->PushElementAt(ref.size());
+      }
+
+      // Call the function.
+      CallWithArguments(args, node->position());
+    }
+
+  } else {
+    // ----------------------------------
+    // JavaScript example: 'foo(1, 2, 3)'  // foo is not global
+    // ----------------------------------
+
+    // Load the function.
+    Load(function);
+
+    // Pass the global proxy as the receiver.
+    LoadGlobalReceiver();
+
+    // Call the function.
+    CallWithArguments(args, node->position());
+  }
+}
+
+
+void CodeGenerator::VisitCallNew(CallNew* node) {
+  Comment cmnt(masm_, "[ CallNew");
+
+  // According to ECMA-262, section 11.2.2, page 44, the function
+  // expression in new calls must be evaluated before the
+  // arguments. This is different from ordinary calls, where the
+  // actual function to call is resolved after the arguments have been
+  // evaluated.
+
+  // Compute function to call and use the global object as the
+  // receiver. There is no need to use the global proxy here because
+  // it will always be replaced with a newly allocated object.
+  Load(node->expression());
+  LoadGlobal();
+
+  // Push the arguments ("left-to-right") on the stack.
+  ZoneList<Expression*>* args = node->arguments();
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Load(args->at(i));
+  }
+
+  // Call the construct call builtin that handles allocation and
+  // constructor invocation.
+  CodeForSourcePosition(node->position());
+  Result result = frame_->CallConstructor(arg_count);
+  // Replace the function on the stack with the result.
+  frame_->SetElementAt(0, &result);
+}
+
+
+void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
+  if (CheckForInlineRuntimeCall(node)) {
+    return;
+  }
+
+  ZoneList<Expression*>* args = node->arguments();
+  Comment cmnt(masm_, "[ CallRuntime");
+  Runtime::Function* function = node->function();
+
+  if (function == NULL) {
+    // Prepare stack for calling JS runtime function.
+    frame_->Push(node->name());
+    // Push the builtins object found in the current global object.
+    Result temp = allocator()->Allocate();
+    ASSERT(temp.is_valid());
+    __ movq(temp.reg(), GlobalObject());
+    __ movq(temp.reg(),
+            FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
+    frame_->Push(&temp);
+  }
+
+  // Push the arguments ("left-to-right").
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Load(args->at(i));
+  }
+
+  if (function == NULL) {
+    // Call the JS runtime function.
+    Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
+                                       arg_count,
+                                       loop_nesting_);
+    frame_->RestoreContextRegister();
+    frame_->SetElementAt(0, &answer);
+  } else {
+    // Call the C runtime function.
+    Result answer = frame_->CallRuntime(function, arg_count);
+    frame_->Push(&answer);
+  }
+}
+
+
+void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
+  // Note that because of NOT and an optimization in comparison of a typeof
+  // expression to a literal string, this function can fail to leave a value
+  // on top of the frame or in the cc register.
+  Comment cmnt(masm_, "[ UnaryOperation");
+
+  Token::Value op = node->op();
+
+  if (op == Token::NOT) {
+    // Swap the true and false targets but keep the same actual label
+    // as the fall through.
+    destination()->Invert();
+    LoadCondition(node->expression(), NOT_INSIDE_TYPEOF, destination(), true);
+    // Swap the labels back.
+    destination()->Invert();
+
+  } else if (op == Token::DELETE) {
+    Property* property = node->expression()->AsProperty();
+    if (property != NULL) {
+      Load(property->obj());
+      Load(property->key());
+      Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2);
+      frame_->Push(&answer);
+      return;
+    }
+
+    Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
+    if (variable != NULL) {
+      Slot* slot = variable->slot();
+      if (variable->is_global()) {
+        LoadGlobal();
+        frame_->Push(variable->name());
+        Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
+                                              CALL_FUNCTION, 2);
+        frame_->Push(&answer);
+        return;
+
+      } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+        // Call the runtime to look up the context holding the named
+        // variable.  Sync the virtual frame eagerly so we can push the
+        // arguments directly into place.
+        frame_->SyncRange(0, frame_->element_count() - 1);
+        frame_->EmitPush(rsi);
+        frame_->EmitPush(variable->name());
+        Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
+        ASSERT(context.is_register());
+        frame_->EmitPush(context.reg());
+        context.Unuse();
+        frame_->EmitPush(variable->name());
+        Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
+                                              CALL_FUNCTION, 2);
+        frame_->Push(&answer);
+        return;
+      }
+
+      // Default: The result of deleting a non-global variable that was
+      // not dynamically introduced is false.
+      frame_->Push(Factory::false_value());
+
+    } else {
+      // Default: The result of deleting any other expression is true.
+      Load(node->expression());  // may have side-effects
+      frame_->SetElementAt(0, Factory::true_value());
+    }
+
+  } else if (op == Token::TYPEOF) {
+    // Special case for loading the typeof expression; see comment on
+    // LoadTypeofExpression().
+    LoadTypeofExpression(node->expression());
+    Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
+    frame_->Push(&answer);
+
+  } else if (op == Token::VOID) {
+    Expression* expression = node->expression();
+    if (expression && expression->AsLiteral() && (
+        expression->AsLiteral()->IsTrue() ||
+        expression->AsLiteral()->IsFalse() ||
+        expression->AsLiteral()->handle()->IsNumber() ||
+        expression->AsLiteral()->handle()->IsString() ||
+        expression->AsLiteral()->handle()->IsJSRegExp() ||
+        expression->AsLiteral()->IsNull())) {
+      // Omit evaluating the value of the primitive literal.
+      // It will be discarded anyway, and can have no side effect.
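+      // For example, for 'void 0' only the undefined value is pushed.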
+      frame_->Push(Factory::undefined_value());
+    } else {
+      Load(node->expression());
+      frame_->SetElementAt(0, Factory::undefined_value());
+    }
+
+  } else {
+    Load(node->expression());
+    switch (op) {
+      case Token::NOT:
+      case Token::DELETE:
+      case Token::TYPEOF:
+        UNREACHABLE();  // handled above
+        break;
+
+      case Token::SUB: {
+        bool overwrite =
+            (node->AsBinaryOperation() != NULL &&
+             node->AsBinaryOperation()->ResultOverwriteAllowed());
+        UnarySubStub stub(overwrite);
+        // TODO(1222589): remove dependency on TOS being cached inside stub
+        Result operand = frame_->Pop();
+        Result answer = frame_->CallStub(&stub, &operand);
+        frame_->Push(&answer);
+        break;
+      }
+
+      case Token::BIT_NOT: {
+        // Smi check.
+        JumpTarget smi_label;
+        JumpTarget continue_label;
+        Result operand = frame_->Pop();
+        operand.ToRegister();
+
+        Condition is_smi = masm_->CheckSmi(operand.reg());
+        smi_label.Branch(is_smi, &operand);
+
+        frame_->Push(&operand);  // undo popping of TOS
+        Result answer = frame_->InvokeBuiltin(Builtins::BIT_NOT,
+                                              CALL_FUNCTION, 1);
+        continue_label.Jump(&answer);
+        smi_label.Bind(&answer);
+        answer.ToRegister();
+        frame_->Spill(answer.reg());
+        __ SmiNot(answer.reg(), answer.reg());
+        continue_label.Bind(&answer);
+        frame_->Push(&answer);
+        break;
+      }
+
+      case Token::ADD: {
+        // Smi check.
+        JumpTarget continue_label;
+        Result operand = frame_->Pop();
+        operand.ToRegister();
+        Condition is_smi = masm_->CheckSmi(operand.reg());
+        continue_label.Branch(is_smi, &operand);
+        frame_->Push(&operand);
+        Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
+                                              CALL_FUNCTION, 1);
+
+        continue_label.Bind(&answer);
+        frame_->Push(&answer);
+        break;
+      }
+
+      default:
+        UNREACHABLE();
+    }
+  }
+}
+
+
+// Incrementing or decrementing the value in dst would overflow, or the
+// value is not a smi; dst still holds the original value.  Call into
+// the runtime to convert the argument to a number and to add or
+// subtract one.  The result is left in dst.
+class DeferredPrefixCountOperation: public DeferredCode {
+ public:
+  DeferredPrefixCountOperation(Register dst, bool is_increment)
+      : dst_(dst), is_increment_(is_increment) {
+    set_comment("[ DeferredCountOperation");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register dst_;
+  bool is_increment_;
+};
+
+
+void DeferredPrefixCountOperation::Generate() {
+  __ push(dst_);
+  __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+  __ push(rax);
+  __ push(Immediate(Smi::FromInt(1)));
+  if (is_increment_) {
+    __ CallRuntime(Runtime::kNumberAdd, 2);
+  } else {
+    __ CallRuntime(Runtime::kNumberSub, 2);
+  }
+  if (!dst_.is(rax)) __ movq(dst_, rax);
+}
+
+
+// Incrementing or decrementing the value in dst would overflow, or the
+// value is not a smi; dst still holds the original value.  Call into
+// the runtime to convert the argument to a number and to add or
+// subtract one.  The converted argument (the old value) is left in old
+// and the result is left in dst.
+class DeferredPostfixCountOperation: public DeferredCode {
+ public:
+  DeferredPostfixCountOperation(Register dst, Register old, bool is_increment)
+      : dst_(dst), old_(old), is_increment_(is_increment) {
+    set_comment("[ DeferredCountOperation");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register dst_;
+  Register old_;
+  bool is_increment_;
+};
+
+
+void DeferredPostfixCountOperation::Generate() {
+  __ push(dst_);
+  __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+
+  // Save the result of ToNumber to use as the old value.
+  __ push(rax);
+
+  // Call the runtime for the addition or subtraction.
+  __ push(rax);
+  __ push(Immediate(Smi::FromInt(1)));
+  if (is_increment_) {
+    __ CallRuntime(Runtime::kNumberAdd, 2);
+  } else {
+    __ CallRuntime(Runtime::kNumberSub, 2);
+  }
+  if (!dst_.is(rax)) __ movq(dst_, rax);
+  __ pop(old_);
+}
+
+
+void CodeGenerator::VisitCountOperation(CountOperation* node) {
+  Comment cmnt(masm_, "[ CountOperation");
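+  // Count operations are the prefix and postfix increment and decrement
+  // forms: '++x', '--x', 'x++' and 'x--'.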
+
+  bool is_postfix = node->is_postfix();
+  bool is_increment = node->op() == Token::INC;
+
+  Variable* var = node->expression()->AsVariableProxy()->AsVariable();
+  bool is_const = (var != NULL && var->mode() == Variable::CONST);
+
+  // Postfix operations need a stack slot under the reference to hold
+  // the old value while the new value is being stored.  This is so that
+  // in the case that storing the new value requires a call, the old
+  // value will be in the frame to be spilled.
+  if (is_postfix) frame_->Push(Smi::FromInt(0));
+
+  { Reference target(this, node->expression());
+    if (target.is_illegal()) {
+      // Spoof the virtual frame to have the expected height (one higher
+      // than on entry).
+      if (!is_postfix) frame_->Push(Smi::FromInt(0));
+      return;
+    }
+    target.TakeValue(NOT_INSIDE_TYPEOF);
+
+    Result new_value = frame_->Pop();
+    new_value.ToRegister();
+
+    Result old_value;  // Only allocated in the postfix case.
+    if (is_postfix) {
+      // Allocate a temporary to preserve the old value.
+      old_value = allocator_->Allocate();
+      ASSERT(old_value.is_valid());
+      __ movq(old_value.reg(), new_value.reg());
+    }
+    // Ensure the new value is writable.
+    frame_->Spill(new_value.reg());
+
+    DeferredCode* deferred = NULL;
+    if (is_postfix) {
+      deferred = new DeferredPostfixCountOperation(new_value.reg(),
+                                                   old_value.reg(),
+                                                   is_increment);
+    } else {
+      deferred = new DeferredPrefixCountOperation(new_value.reg(),
+                                                  is_increment);
+    }
+
+    __ movq(kScratchRegister, new_value.reg());
+    if (is_increment) {
+      __ addl(kScratchRegister, Immediate(Smi::FromInt(1)));
+    } else {
+      __ subl(kScratchRegister, Immediate(Smi::FromInt(1)));
+    }
+    // Branch to deferred code on overflow or if the result is not a smi.
+    deferred->Branch(overflow);
+    __ JumpIfNotSmi(kScratchRegister, deferred->entry_label());
+    __ movq(new_value.reg(), kScratchRegister);
+    deferred->BindExit();
+
+    // Postfix: store the old value in the allocated slot under the
+    // reference.
+    if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
+
+    frame_->Push(&new_value);
+    // Non-constant: update the reference.
+    if (!is_const) target.SetValue(NOT_CONST_INIT);
+  }
+
+  // Postfix: drop the new value and use the old.
+  if (is_postfix) frame_->Drop();
+}
+
+
+void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+  // TODO(X64): This code was copied verbatim from codegen-ia32.
+  //     Either find a reason to change it or move it to a shared location.
+
+  // Note that due to an optimization in comparison operations (typeof
+  // compared to a string literal), we can evaluate a binary expression such
+  // as AND or OR and not leave a value on the frame or in the cc register.
+  Comment cmnt(masm_, "[ BinaryOperation");
+  Token::Value op = node->op();
+
+  // According to ECMA-262 section 11.11, page 58, the binary logical
+  // operators must yield the result of one of the two expressions
+  // before any ToBoolean() conversions. This means that the value
+  // produced by a && or || operator is not necessarily a boolean.
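+  // For example, the value of ('a' && 0) is 0 and the value of
+  // (0 || 'a') is 'a'.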
+
+  // NOTE: If the left hand side produces a materialized value (not
+  // control flow), we force the right hand side to do the same. This
+  // is necessary because we assume that if we get control flow on the
+  // last path out of an expression we got it on all paths.
+  if (op == Token::AND) {
+    JumpTarget is_true;
+    ControlDestination dest(&is_true, destination()->false_target(), true);
+    LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
+
+    if (dest.false_was_fall_through()) {
+      // The current false target was used as the fall-through.  If
+      // there are no dangling jumps to is_true then the left
+      // subexpression was unconditionally false.  Otherwise we have
+      // paths where we do have to evaluate the right subexpression.
+      if (is_true.is_linked()) {
+        // We need to compile the right subexpression.  If the jump to
+        // the current false target was a forward jump then we have a
+        // valid frame, we have just bound the false target, and we
+        // have to jump around the code for the right subexpression.
+        if (has_valid_frame()) {
+          destination()->false_target()->Unuse();
+          destination()->false_target()->Jump();
+        }
+        is_true.Bind();
+        // The left subexpression compiled to control flow, so the
+        // right one is free to do so as well.
+        LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+      } else {
+        // We have actually just jumped to or bound the current false
+        // target but the current control destination is not marked as
+        // used.
+        destination()->Use(false);
+      }
+
+    } else if (dest.is_used()) {
+      // The left subexpression compiled to control flow (and is_true
+      // was just bound), so the right is free to do so as well.
+      LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+
+    } else {
+      // We have a materialized value on the frame, so we exit with
+      // one on all paths.  There are possibly also jumps to is_true
+      // from nested subexpressions.
+      JumpTarget pop_and_continue;
+      JumpTarget exit;
+
+      // Avoid popping the result if it converts to 'false' using the
+      // standard ToBoolean() conversion as described in ECMA-262,
+      // section 9.2, page 30.
+      //
+      // Duplicate the TOS value. The duplicate will be popped by
+      // ToBoolean.
+      frame_->Dup();
+      ControlDestination dest(&pop_and_continue, &exit, true);
+      ToBoolean(&dest);
+
+      // Pop the result of evaluating the first part.
+      frame_->Drop();
+
+      // Compile right side expression.
+      is_true.Bind();
+      Load(node->right());
+
+      // Exit (always with a materialized value).
+      exit.Bind();
+    }
+
+  } else if (op == Token::OR) {
+    JumpTarget is_false;
+    ControlDestination dest(destination()->true_target(), &is_false, false);
+    LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
+
+    if (dest.true_was_fall_through()) {
+      // The current true target was used as the fall-through.  If
+      // there are no dangling jumps to is_false then the left
+      // subexpression was unconditionally true.  Otherwise we have
+      // paths where we do have to evaluate the right subexpression.
+      if (is_false.is_linked()) {
+        // We need to compile the right subexpression.  If the jump to
+        // the current true target was a forward jump then we have a
+        // valid frame, we have just bound the true target, and we
+        // have to jump around the code for the right subexpression.
+        if (has_valid_frame()) {
+          destination()->true_target()->Unuse();
+          destination()->true_target()->Jump();
+        }
+        is_false.Bind();
+        // The left subexpression compiled to control flow, so the
+        // right one is free to do so as well.
+        LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+      } else {
+        // We have just jumped to or bound the current true target but
+        // the current control destination is not marked as used.
+        destination()->Use(true);
+      }
+
+    } else if (dest.is_used()) {
+      // The left subexpression compiled to control flow (and is_false
+      // was just bound), so the right is free to do so as well.
+      LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+
+    } else {
+      // We have a materialized value on the frame, so we exit with
+      // one on all paths.  There are possibly also jumps to is_false
+      // from nested subexpressions.
+      JumpTarget pop_and_continue;
+      JumpTarget exit;
+
+      // Avoid popping the result if it converts to 'true' using the
+      // standard ToBoolean() conversion as described in ECMA-262,
+      // section 9.2, page 30.
+      //
+      // Duplicate the TOS value. The duplicate will be popped by
+      // ToBoolean.
+      frame_->Dup();
+      ControlDestination dest(&exit, &pop_and_continue, false);
+      ToBoolean(&dest);
+
+      // Pop the result of evaluating the first part.
+      frame_->Drop();
+
+      // Compile right side expression.
+      is_false.Bind();
+      Load(node->right());
+
+      // Exit (always with a materialized value).
+      exit.Bind();
+    }
+
+  } else {
+    // NOTE: The code below assumes that the slow cases (calls to runtime)
+    // never return a constant/immutable object.
+    OverwriteMode overwrite_mode = NO_OVERWRITE;
+    if (node->left()->AsBinaryOperation() != NULL &&
+        node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) {
+      overwrite_mode = OVERWRITE_LEFT;
+    } else if (node->right()->AsBinaryOperation() != NULL &&
+               node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) {
+      overwrite_mode = OVERWRITE_RIGHT;
+    }
+
+    Load(node->left());
+    Load(node->right());
+    GenericBinaryOperation(node->op(), node->type(), overwrite_mode);
+  }
+}
+
+
+void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
+  Comment cmnt(masm_, "[ CompareOperation");
+
+  // Get the expressions from the node.
+  Expression* left = node->left();
+  Expression* right = node->right();
+  Token::Value op = node->op();
+  // To make typeof testing for natives implemented in JavaScript really
+  // efficient, we generate special code for expressions of the form:
+  // 'typeof <expression> == <string>'.
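+  // For example, 'typeof x == "number"' is compiled into type checks
+  // on x without ever materializing the typeof string.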
+  UnaryOperation* operation = left->AsUnaryOperation();
+  if ((op == Token::EQ || op == Token::EQ_STRICT) &&
+      (operation != NULL && operation->op() == Token::TYPEOF) &&
+      (right->AsLiteral() != NULL &&
+       right->AsLiteral()->handle()->IsString())) {
+    Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle()));
+
+    // Load the operand and move it to a register.
+    LoadTypeofExpression(operation->expression());
+    Result answer = frame_->Pop();
+    answer.ToRegister();
+
+    if (check->Equals(Heap::number_symbol())) {
+      Condition is_smi = masm_->CheckSmi(answer.reg());
+      destination()->true_target()->Branch(is_smi);
+      frame_->Spill(answer.reg());
+      __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+      __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex);
+      answer.Unuse();
+      destination()->Split(equal);
+
+    } else if (check->Equals(Heap::string_symbol())) {
+      Condition is_smi = masm_->CheckSmi(answer.reg());
+      destination()->false_target()->Branch(is_smi);
+
+      // It can be an undetectable string object.
+      __ movq(kScratchRegister,
+              FieldOperand(answer.reg(), HeapObject::kMapOffset));
+      __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
+               Immediate(1 << Map::kIsUndetectable));
+      destination()->false_target()->Branch(not_zero);
+      __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE);
+      answer.Unuse();
+      destination()->Split(below);  // Unsigned byte comparison needed.
+
+    } else if (check->Equals(Heap::boolean_symbol())) {
+      __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex);
+      destination()->true_target()->Branch(equal);
+      __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex);
+      answer.Unuse();
+      destination()->Split(equal);
+
+    } else if (check->Equals(Heap::undefined_symbol())) {
+      __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex);
+      destination()->true_target()->Branch(equal);
+
+      Condition is_smi = masm_->CheckSmi(answer.reg());
+      destination()->false_target()->Branch(is_smi);
+
+      // It can be an undetectable object.
+      __ movq(kScratchRegister,
+              FieldOperand(answer.reg(), HeapObject::kMapOffset));
+      __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
+               Immediate(1 << Map::kIsUndetectable));
+      answer.Unuse();
+      destination()->Split(not_zero);
+
+    } else if (check->Equals(Heap::function_symbol())) {
+      Condition is_smi = masm_->CheckSmi(answer.reg());
+      destination()->false_target()->Branch(is_smi);
+      frame_->Spill(answer.reg());
+      __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
+      answer.Unuse();
+      destination()->Split(equal);
+
+    } else if (check->Equals(Heap::object_symbol())) {
+      Condition is_smi = masm_->CheckSmi(answer.reg());
+      destination()->false_target()->Branch(is_smi);
+      __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
+      destination()->true_target()->Branch(equal);
+
+      // It can be an undetectable object.
+      __ movq(kScratchRegister,
+              FieldOperand(answer.reg(), HeapObject::kMapOffset));
+      __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
+               Immediate(1 << Map::kIsUndetectable));
+      destination()->false_target()->Branch(not_zero);
+      __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
+      destination()->false_target()->Branch(below);
+      __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
+      answer.Unuse();
+      destination()->Split(below_equal);
+    } else {
+      // Uncommon case: typeof testing against a string literal that is
+      // never returned from the typeof operator.
+      answer.Unuse();
+      destination()->Goto(false);
+    }
+    return;
+  }
+
+  Condition cc = no_condition;
+  bool strict = false;
+  switch (op) {
+    case Token::EQ_STRICT:
+      strict = true;
+      // Fall through
+    case Token::EQ:
+      cc = equal;
+      break;
+    case Token::LT:
+      cc = less;
+      break;
+    case Token::GT:
+      cc = greater;
+      break;
+    case Token::LTE:
+      cc = less_equal;
+      break;
+    case Token::GTE:
+      cc = greater_equal;
+      break;
+    case Token::IN: {
+      Load(left);
+      Load(right);
+      Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
+      frame_->Push(&answer);  // push the result
+      return;
+    }
+    case Token::INSTANCEOF: {
+      Load(left);
+      Load(right);
+      InstanceofStub stub;
+      Result answer = frame_->CallStub(&stub, 2);
+      answer.ToRegister();
+      __ testq(answer.reg(), answer.reg());
+      answer.Unuse();
+      destination()->Split(zero);
+      return;
+    }
+    default:
+      UNREACHABLE();
+  }
+  Load(left);
+  Load(right);
+  Comparison(cc, strict, destination());
+}
+
+
+void CodeGenerator::VisitThisFunction(ThisFunction* node) {
+  frame_->PushFunction();
+}
+
+
+void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  // ArgumentsAccessStub expects the key in rdx and the formal
+  // parameter count in rax.
+  Load(args->at(0));
+  Result key = frame_->Pop();
+  // Explicitly create a constant result.
+  Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
+  // Call the shared stub to get to arguments[key].
+  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+  Result result = frame_->CallStub(&stub, &key, &count);
+  frame_->Push(&result);
+}
+
+
+void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  Load(args->at(0));
+  Result value = frame_->Pop();
+  value.ToRegister();
+  ASSERT(value.is_valid());
+  Condition is_smi = masm_->CheckSmi(value.reg());
+  destination()->false_target()->Branch(is_smi);
+  // It is a heap object - get map.
+  // Check if the object is a JS array or not.
+  __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
+  value.Unuse();
+  destination()->Split(equal);
+}
+
+
+void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+
+  // Get the frame pointer for the calling frame.
+  Result fp = allocator()->Allocate();
+  __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+
+  // Skip the arguments adaptor frame if it exists.
+  Label check_frame_marker;
+  __ cmpq(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
+          Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(not_equal, &check_frame_marker);
+  __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
+
+  // Check the marker in the calling frame.
+  __ bind(&check_frame_marker);
+  __ cmpq(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
+          Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
+  fp.Unuse();
+  destination()->Split(equal);
+}
+
+
+void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+  // ArgumentsAccessStub takes the parameter count as an input argument
+  // in register rax.  Create a constant result for it.
+  Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
+  // Call the shared stub to get to the arguments.length.
+  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
+  Result result = frame_->CallStub(&stub, &count);
+  frame_->Push(&result);
+}
+
+
+void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
+  Comment cmnt(masm_, "[ GenerateFastCharCodeAt");
+  ASSERT(args->length() == 2);
+
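+  // Inline fast path for reading a character code out of a string:
+  // leaves the character code as a smi on the frame, or undefined to
+  // make the calling code take its general slow case.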
+  Label slow_case;
+  Label end;
+  Label not_a_flat_string;
+  Label a_cons_string;
+  Label try_again_with_new_string;
+  Label ascii_string;
+  Label got_char_code;
+
+  Load(args->at(0));
+  Load(args->at(1));
+  Result index = frame_->Pop();
+  Result object = frame_->Pop();
+
+  // Get register rcx to use as shift amount later.
+  Result shift_amount;
+  if (object.is_register() && object.reg().is(rcx)) {
+    Result fresh = allocator_->Allocate();
+    shift_amount = object;
+    object = fresh;
+    __ movq(object.reg(), rcx);
+  }
+  if (index.is_register() && index.reg().is(rcx)) {
+    Result fresh = allocator_->Allocate();
+    shift_amount = index;
+    index = fresh;
+    __ movq(index.reg(), rcx);
+  }
+  // There could be references to rcx in the frame.  Allocating rcx will
+  // spill them; otherwise spill it explicitly.
+  if (shift_amount.is_valid()) {
+    frame_->Spill(rcx);
+  } else {
+    shift_amount = allocator()->Allocate(rcx);
+  }
+  ASSERT(shift_amount.is_register());
+  ASSERT(shift_amount.reg().is(rcx));
+  ASSERT(allocator_->count(rcx) == 1);
+
+  // We will mutate the index register and possibly the object register.
+  // The case where they are somehow the same register is handled
+  // because we only mutate them in the case where the receiver is a
+  // heap object and the index is not.
+  object.ToRegister();
+  index.ToRegister();
+  frame_->Spill(object.reg());
+  frame_->Spill(index.reg());
+
+  // We need a single extra temporary register.
+  Result temp = allocator()->Allocate();
+  ASSERT(temp.is_valid());
+
+  // There is no virtual frame effect from here up to the final result
+  // push.
+
+  // If the receiver is a smi trigger the slow case.
+  __ JumpIfSmi(object.reg(), &slow_case);
+
+  // If the index is negative or non-smi trigger the slow case.
+  __ JumpIfNotPositiveSmi(index.reg(), &slow_case);
+
+  // Untag the index.
+  __ SmiToInteger32(index.reg(), index.reg());
+
+  __ bind(&try_again_with_new_string);
+  // Fetch the instance type of the receiver into rcx.
+  __ movq(rcx, FieldOperand(object.reg(), HeapObject::kMapOffset));
+  __ movzxbl(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
+  // If the receiver is not a string trigger the slow case.
+  __ testb(rcx, Immediate(kIsNotStringMask));
+  __ j(not_zero, &slow_case);
+
+  // Here we make assumptions about the tag values and the shifts needed.
+  // See the comment in objects.h.
+  ASSERT(kLongStringTag == 0);
+  ASSERT(kMediumStringTag + String::kLongLengthShift ==
+         String::kMediumLengthShift);
+  ASSERT(kShortStringTag + String::kLongLengthShift ==
+         String::kShortLengthShift);
+  __ and_(rcx, Immediate(kStringSizeMask));
+  __ addq(rcx, Immediate(String::kLongLengthShift));
+  // Fetch the length field into the temporary register.
+  __ movl(temp.reg(), FieldOperand(object.reg(), String::kLengthOffset));
+  __ shrl(temp.reg());  // The shift amount in rcx is an implicit operand.
+  // Check for index out of range.
+  __ cmpl(index.reg(), temp.reg());
+  __ j(greater_equal, &slow_case);
+  // Reload the instance type (into the temp register this time).
+  __ movq(temp.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
+  __ movzxbl(temp.reg(), FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
+
+  // We need special handling for non-flat strings.
+  ASSERT(kSeqStringTag == 0);
+  __ testb(temp.reg(), Immediate(kStringRepresentationMask));
+  __ j(not_zero, &not_a_flat_string);
+  // Check for 1-byte or 2-byte string.
+  __ testb(temp.reg(), Immediate(kStringEncodingMask));
+  __ j(not_zero, &ascii_string);
+
+  // 2-byte string.
+  // Load the 2-byte character code into the temp register.
+  __ movzxwl(temp.reg(), FieldOperand(object.reg(),
+                                      index.reg(),
+                                      times_2,
+                                      SeqTwoByteString::kHeaderSize));
+  __ jmp(&got_char_code);
+
+  // ASCII string.
+  __ bind(&ascii_string);
+  // Load the byte into the temp register.
+  __ movzxbl(temp.reg(), FieldOperand(object.reg(),
+                                      index.reg(),
+                                      times_1,
+                                      SeqAsciiString::kHeaderSize));
+  __ bind(&got_char_code);
+  __ Integer32ToSmi(temp.reg(), temp.reg());
+  __ jmp(&end);
+
+  // Handle non-flat strings.
+  __ bind(&not_a_flat_string);
+  __ and_(temp.reg(), Immediate(kStringRepresentationMask));
+  __ cmpb(temp.reg(), Immediate(kConsStringTag));
+  __ j(equal, &a_cons_string);
+  __ cmpb(temp.reg(), Immediate(kSlicedStringTag));
+  __ j(not_equal, &slow_case);
+
+  // SlicedString.
+  // Add the offset to the index and trigger the slow case on overflow.
+  __ addl(index.reg(), FieldOperand(object.reg(), SlicedString::kStartOffset));
+  __ j(overflow, &slow_case);
+  // Getting the underlying string is done by running the cons string code.
+
+  // ConsString.
+  __ bind(&a_cons_string);
+  // Get the first of the two strings.  Both sliced and cons strings
+  // store their source string at the same offset.
+  ASSERT(SlicedString::kBufferOffset == ConsString::kFirstOffset);
+  __ movq(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
+  __ jmp(&try_again_with_new_string);
+
+  __ bind(&slow_case);
+  // Move the undefined value into the result register, which will
+  // trigger the slow case.
+  __ LoadRoot(temp.reg(), Heap::kUndefinedValueRootIndex);
+
+  __ bind(&end);
+  frame_->Push(&temp);
+}
+
+
+void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  Load(args->at(0));
+  Result value = frame_->Pop();
+  value.ToRegister();
+  ASSERT(value.is_valid());
+  Condition positive_smi = masm_->CheckPositiveSmi(value.reg());
+  value.Unuse();
+  destination()->Split(positive_smi);
+}
+
+
+void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  Load(args->at(0));
+  Result value = frame_->Pop();
+  value.ToRegister();
+  ASSERT(value.is_valid());
+  Condition is_smi = masm_->CheckSmi(value.reg());
+  value.Unuse();
+  destination()->Split(is_smi);
+}
+
+
+void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
+  // Conditionally generate a log call.
+  // Args:
+  //   0 (literal string): The type of logging (corresponds to the flags).
+  //     This is used to determine whether or not to generate the log call.
+  //   1 (string): Format string.  Access the string at argument index 2
+  //     with '%2s' (see Logger::LogRuntime for all the formats).
+  //   2 (array): Arguments to the format string.
+  ASSERT_EQ(args->length(), 3);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (ShouldGenerateLog(args->at(0))) {
+    Load(args->at(1));
+    Load(args->at(2));
+    frame_->CallRuntime(Runtime::kLog, 2);
+  }
+#endif
+  // Finally, we're expected to leave a value on the top of the stack.
+  frame_->Push(Factory::undefined_value());
+}
+
+
+void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 2);
+
+  // Load the two objects into registers and perform the comparison.
+  Load(args->at(0));
+  Load(args->at(1));
+  Result right = frame_->Pop();
+  Result left = frame_->Pop();
+  right.ToRegister();
+  left.ToRegister();
+  __ cmpq(right.reg(), left.reg());
+  right.Unuse();
+  left.Unuse();
+  destination()->Split(equal);
+}
+
+
+void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+  // The RBP value is aligned, so it should be tagged as a smi (without
+  // necessarily being padded as a smi).
+  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  Result rbp_as_smi = allocator_->Allocate();
+  ASSERT(rbp_as_smi.is_valid());
+  __ movq(rbp_as_smi.reg(), rbp);
+  frame_->Push(&rbp_as_smi);
+}
+
+
+void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+  frame_->SpillAll();
+  __ push(rsi);
+
+  // Make sure the frame is aligned like the OS expects.
+  static const int kFrameAlignment = OS::ActivationFrameAlignment();
+  if (kFrameAlignment > 0) {
+    ASSERT(IsPowerOf2(kFrameAlignment));
+    __ movq(rbx, rsp);  // Save in an AMD-64 ABI callee-saved register.
+    __ and_(rsp, Immediate(-kFrameAlignment));
+  }
+
+  // Call V8::RandomPositiveSmi().
+  __ Call(FUNCTION_ADDR(V8::RandomPositiveSmi), RelocInfo::RUNTIME_ENTRY);
+
+  // Restore stack pointer from callee-saved register.
+  if (kFrameAlignment > 0) {
+    __ movq(rsp, rbx);
+  }
+
+  __ pop(rsi);
+  Result result = allocator_->Allocate(rax);
+  frame_->Push(&result);
+}
+
+
+void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
+  JumpTarget done;
+  JumpTarget call_runtime;
+  ASSERT(args->length() == 1);
+
+  // Load number and duplicate it.
+  Load(args->at(0));
+  frame_->Dup();
+
+  // Get the number into an unaliased register and load it onto the
+  // floating point stack, still leaving one copy on the frame.
+  Result number = frame_->Pop();
+  number.ToRegister();
+  frame_->Spill(number.reg());
+  FloatingPointHelper::LoadFloatOperand(masm_, number.reg());
+  number.Unuse();
+
+  // Perform the operation on the number.
+  switch (op) {
+    case SIN:
+      __ fsin();
+      break;
+    case COS:
+      __ fcos();
+      break;
+  }
+
+  // Go to the slow case if the argument to the operation is out of range.
+  Result eax_reg = allocator()->Allocate(rax);
+  ASSERT(eax_reg.is_valid());
+  __ fnstsw_ax();
+  __ testl(rax, Immediate(0x0400));  // Bit 10 is condition flag C2.
+  eax_reg.Unuse();
+  call_runtime.Branch(not_zero);
+
+  // Allocate heap number for result if possible.
+  Result scratch = allocator()->Allocate();
+  Result heap_number = allocator()->Allocate();
+  FloatingPointHelper::AllocateHeapNumber(masm_,
+                                          call_runtime.entry_label(),
+                                          scratch.reg(),
+                                          heap_number.reg());
+  scratch.Unuse();
+
+  // Store the result in the allocated heap number.
+  __ fstp_d(FieldOperand(heap_number.reg(), HeapNumber::kValueOffset));
+  // Replace the extra copy of the argument with the result.
+  frame_->SetElementAt(0, &heap_number);
+  done.Jump();
+
+  call_runtime.Bind();
+  // Free ST(0) which was not popped before calling into the runtime.
+  __ ffree(0);
+  Result answer;
+  switch (op) {
+    case SIN:
+      answer = frame_->CallRuntime(Runtime::kMath_sin, 1);
+      break;
+    case COS:
+      answer = frame_->CallRuntime(Runtime::kMath_cos, 1);
+      break;
+  }
+  frame_->Push(&answer);
+  done.Bind();
+}
+
+
+void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  JumpTarget leave, null, function, non_function_constructor;
+  Load(args->at(0));  // Load the object.
+  Result obj = frame_->Pop();
+  obj.ToRegister();
+  frame_->Spill(obj.reg());
+
+  // If the object is a smi, we return null.
+  Condition is_smi = masm_->CheckSmi(obj.reg());
+  null.Branch(is_smi);
+
+  // Check that the object is a JS object but take special care of JS
+  // functions to make sure they have 'Function' as their class.
+
+  __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
+  null.Branch(below);
+
+  // As long as JS_FUNCTION_TYPE is the last instance type and it is
+  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+  // LAST_JS_OBJECT_TYPE.
+  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+  __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
+  function.Branch(equal);
+
+  // Check if the constructor in the map is a function.
+  __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
+  __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
+  non_function_constructor.Branch(not_equal);
+
+  // The obj register now contains the constructor function. Grab the
+  // instance class name from there.
+  __ movq(obj.reg(),
+          FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
+  __ movq(obj.reg(),
+          FieldOperand(obj.reg(),
+                       SharedFunctionInfo::kInstanceClassNameOffset));
+  frame_->Push(&obj);
+  leave.Jump();
+
+  // Functions have class 'Function'.
+  function.Bind();
+  frame_->Push(Factory::function_class_symbol());
+  leave.Jump();
+
+  // Objects with a non-function constructor have class 'Object'.
+  non_function_constructor.Bind();
+  frame_->Push(Factory::Object_symbol());
+  leave.Jump();
+
+  // Non-JS objects have class null.
+  null.Bind();
+  frame_->Push(Factory::null_value());
+
+  // All done.
+  leave.Bind();
+}
+
+
+void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 2);
+  JumpTarget leave;
+  Load(args->at(0));  // Load the object.
+  Load(args->at(1));  // Load the value.
+  Result value = frame_->Pop();
+  Result object = frame_->Pop();
+  value.ToRegister();
+  object.ToRegister();
+
+  // if (object->IsSmi()) return value.
+  Condition is_smi = masm_->CheckSmi(object.reg());
+  leave.Branch(is_smi, &value);
+
+  // It is a heap object - get its map.
+  Result scratch = allocator_->Allocate();
+  ASSERT(scratch.is_valid());
+  // if (!object->IsJSValue()) return value.
+  __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
+  leave.Branch(not_equal, &value);
+
+  // Store the value.
+  __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
+  // Update the write barrier.  Save the value as it will be
+  // overwritten by the write barrier code and is needed afterward.
+  Result duplicate_value = allocator_->Allocate();
+  ASSERT(duplicate_value.is_valid());
+  __ movq(duplicate_value.reg(), value.reg());
+  // The object register is also overwritten by the write barrier and
+  // possibly aliased in the frame.
+  frame_->Spill(object.reg());
+  __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
+                 scratch.reg());
+  object.Unuse();
+  scratch.Unuse();
+  duplicate_value.Unuse();
+
+  // Leave.
+  leave.Bind(&value);
+  frame_->Push(&value);
+}
+
+
+void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  JumpTarget leave;
+  Load(args->at(0));  // Load the object.
+  frame_->Dup();
+  Result object = frame_->Pop();
+  object.ToRegister();
+  ASSERT(object.is_valid());
+  // if (object->IsSmi()) return object.
+  Condition is_smi = masm_->CheckSmi(object.reg());
+  leave.Branch(is_smi);
+  // It is a heap object - get map.
+  Result temp = allocator()->Allocate();
+  ASSERT(temp.is_valid());
+  // if (!object->IsJSValue()) return object.
+  __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
+  leave.Branch(not_equal);
+  __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
+  object.Unuse();
+  frame_->SetElementAt(0, &temp);
+  leave.Bind();
+}
+
+
+// -----------------------------------------------------------------------------
+// CodeGenerator implementation of Expressions
+
+void CodeGenerator::LoadAndSpill(Expression* expression,
+                                 TypeofState typeof_state) {
+  // TODO(x64): No architecture-specific code.  Move to a shared location.
+  ASSERT(in_spilled_code());
+  set_in_spilled_code(false);
+  Load(expression, typeof_state);
+  frame_->SpillAll();
+  set_in_spilled_code(true);
+}
+
+
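+// Emit code to load the value of an expression to the top of the
+// frame.  If evaluating the expression produced control flow (see
+// LoadCondition below), it is converted back into a value here.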
+void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  ASSERT(!in_spilled_code());
+  JumpTarget true_target;
+  JumpTarget false_target;
+  ControlDestination dest(&true_target, &false_target, true);
+  LoadCondition(x, typeof_state, &dest, false);
+
+  if (dest.false_was_fall_through()) {
+    // The false target was just bound.
+    JumpTarget loaded;
+    frame_->Push(Factory::false_value());
+    // There may be dangling jumps to the true target.
+    if (true_target.is_linked()) {
+      loaded.Jump();
+      true_target.Bind();
+      frame_->Push(Factory::true_value());
+      loaded.Bind();
+    }
+
+  } else if (dest.is_used()) {
+    // There is true, and possibly false, control flow (with true as
+    // the fall through).
+    JumpTarget loaded;
+    frame_->Push(Factory::true_value());
+    if (false_target.is_linked()) {
+      loaded.Jump();
+      false_target.Bind();
+      frame_->Push(Factory::false_value());
+      loaded.Bind();
+    }
+
+  } else {
+    // We have a valid value on top of the frame, but we still may
+    // have dangling jumps to the true and false targets from nested
+    // subexpressions (e.g., the left subexpressions of the
+    // short-circuited boolean operators).
+    ASSERT(has_valid_frame());
+    if (true_target.is_linked() || false_target.is_linked()) {
+      JumpTarget loaded;
+      loaded.Jump();  // Don't lose the current TOS.
+      if (true_target.is_linked()) {
+        true_target.Bind();
+        frame_->Push(Factory::true_value());
+        if (false_target.is_linked()) {
+          loaded.Jump();
+        }
+      }
+      if (false_target.is_linked()) {
+        false_target.Bind();
+        frame_->Push(Factory::false_value());
+      }
+      loaded.Bind();
+    }
+  }
+
+  ASSERT(has_valid_frame());
+  ASSERT(frame_->height() == original_height + 1);
+}
+
+
+// Emit code to load the value of an expression to the top of the
+// frame. If the expression is boolean-valued it may be compiled (or
+// partially compiled) into control flow to the control destination.
+// If force_control is true, control flow is forced.
+void CodeGenerator::LoadCondition(Expression* x,
+                                  TypeofState typeof_state,
+                                  ControlDestination* dest,
+                                  bool force_control) {
+  ASSERT(!in_spilled_code());
+  int original_height = frame_->height();
+
+  { CodeGenState new_state(this, typeof_state, dest);
+    Visit(x);
+
+    // If we hit a stack overflow, we may not have actually visited
+    // the expression.  In that case, we ensure that we have a
+    // valid-looking frame state because we will continue to generate
+    // code as we unwind the C++ stack.
+    //
+    // It's possible to have both a stack overflow and a valid frame
+    // state (e.g., a subexpression overflowed, visiting it returned
+    // with a dummied frame state, and visiting this expression
+    // returned with a normal-looking state).
+    if (HasStackOverflow() &&
+        !dest->is_used() &&
+        frame_->height() == original_height) {
+      dest->Goto(true);
+    }
+  }
+
+  if (force_control && !dest->is_used()) {
+    // Convert the TOS value into flow to the control destination.
+    // TODO(X64): Make control flow to control destinations work.
+    ToBoolean(dest);
+  }
+
+  ASSERT(!(force_control && !dest->is_used()));
+  ASSERT(dest->is_used() || frame_->height() == original_height + 1);
+}
+
+
+class ToBooleanStub: public CodeStub {
+ public:
+  ToBooleanStub() { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Major MajorKey() { return ToBoolean; }
+  int MinorKey() { return 0; }
+};
+
+
+// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
+// convert it to a boolean in the condition code register or jump to
+// 'false_target'/'true_target' as appropriate.
+void CodeGenerator::ToBoolean(ControlDestination* dest) {
+  Comment cmnt(masm_, "[ ToBoolean");
+
+  // The value to convert should be popped from the frame.
+  Result value = frame_->Pop();
+  value.ToRegister();
+  // Fast case checks.
+
+  // 'false' => false.
+  __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
+  dest->false_target()->Branch(equal);
+
+  // 'true' => true.
+  __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
+  dest->true_target()->Branch(equal);
+
+  // 'undefined' => false.
+  __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
+  dest->false_target()->Branch(equal);
+
+  // Smi => false iff zero.
+  Condition equals = masm_->CheckSmiEqualsConstant(value.reg(), 0);
+  dest->false_target()->Branch(equals);
+  Condition is_smi = masm_->CheckSmi(value.reg());
+  dest->true_target()->Branch(is_smi);
+
+  // Call the stub for all other cases.
+  frame_->Push(&value);  // Undo the Pop() from above.
+  ToBooleanStub stub;
+  Result temp = frame_->CallStub(&stub, 1);
+  // Convert the result to a condition code.
+  __ testq(temp.reg(), temp.reg());
+  temp.Unuse();
+  dest->Split(not_equal);
+}
+
+
+void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
+  UNIMPLEMENTED();
+  // TODO(X64): Implement security policy for loads of smis.
+}
+
+
+bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
+  return false;
+}
+
+// ----------------------------------------------------------------------------
+// CodeGenerator implementation of variables, lookups, and stores.
+
+Reference::Reference(CodeGenerator* cgen, Expression* expression)
+    : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
+  cgen->LoadReference(this);
+}
+
+
+Reference::~Reference() {
+  cgen_->UnloadReference(this);
+}
+
+
+void CodeGenerator::LoadReference(Reference* ref) {
+  // References are loaded from both spilled and unspilled code.  Set the
+  // state to unspilled to allow that (and explicitly spill after
+  // construction at the construction sites).
+  bool was_in_spilled_code = in_spilled_code_;
+  in_spilled_code_ = false;
+
+  Comment cmnt(masm_, "[ LoadReference");
+  Expression* e = ref->expression();
+  Property* property = e->AsProperty();
+  Variable* var = e->AsVariableProxy()->AsVariable();
+
+  if (property != NULL) {
+    // The expression is either a property or a variable proxy that rewrites
+    // to a property.
+    Load(property->obj());
+    // We use a named reference if the key is a literal symbol, unless it is
+    // a string that can be legally parsed as an integer.  This is because
+    // otherwise we will not get into the slow case code that handles [] on
+    // String objects.
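+    // For example, 'o.name' can use a named reference, while 'o[i]' and
+    // 'o["0"]' take the keyed path.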
+    Literal* literal = property->key()->AsLiteral();
+    uint32_t dummy;
+    if (literal != NULL &&
+        literal->handle()->IsSymbol() &&
+        !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
+      ref->set_type(Reference::NAMED);
+    } else {
+      Load(property->key());
+      ref->set_type(Reference::KEYED);
+    }
+  } else if (var != NULL) {
+    // The expression is a variable proxy that does not rewrite to a
+    // property.  Global variables are treated as named property references.
+    if (var->is_global()) {
+      LoadGlobal();
+      ref->set_type(Reference::NAMED);
+    } else {
+      ASSERT(var->slot() != NULL);
+      ref->set_type(Reference::SLOT);
+    }
+  } else {
+    // Anything else is a runtime error.
+    Load(e);
+    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
+  }
+
+  in_spilled_code_ = was_in_spilled_code;
+}
+
+
+void CodeGenerator::UnloadReference(Reference* ref) {
+  // Pop a reference from the stack while preserving TOS.
+  Comment cmnt(masm_, "[ UnloadReference");
+  frame_->Nip(ref->size());
+}
+
+
+Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
+  // Currently, this assertion will fail if we try to assign to
+  // a constant variable that is constant because it is read-only
+  // (such as the variable referring to a named function expression).
+  // We need to implement assignments to read-only variables.
+  // Ideally, we should do this during AST generation (by converting
+  // such assignments into expression statements); however, in general
+  // we may not be able to make the decision until past AST generation,
+  // that is when the entire program is known.
+  ASSERT(slot != NULL);
+  int index = slot->index();
+  switch (slot->type()) {
+    case Slot::PARAMETER:
+      return frame_->ParameterAt(index);
+
+    case Slot::LOCAL:
+      return frame_->LocalAt(index);
+
+    case Slot::CONTEXT: {
+      // Follow the context chain if necessary.
+      ASSERT(!tmp.is(rsi));  // do not overwrite context register
+      Register context = rsi;
+      int chain_length = scope()->ContextChainLength(slot->var()->scope());
+      for (int i = 0; i < chain_length; i++) {
+        // Load the closure.
+        // (All contexts, even 'with' contexts, have a closure,
+        // and it is the same for all contexts inside a function.
+        // There is no need to go to the function context first.)
+        __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
+        // Load the function context (which is the incoming, outer context).
+        __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
+        context = tmp;
+      }
+      // We may have a 'with' context now. Get the function context.
+      // (In fact this mov may never be needed, since the scope analysis
+      // may not permit a direct context access in this case and thus we
+      // are always at a function context.  However, it is safe to
+      // dereference because the function context of a function context
+      // is itself.  Before deleting this mov we should try to create a
+      // counter-example first, though...)
+      __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
+      return ContextOperand(tmp, index);
+    }
+
+    default:
+      UNREACHABLE();
+      return Operand(rsp, 0);
+  }
+}
+
+
+Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
+                                                         Result tmp,
+                                                         JumpTarget* slow) {
+  ASSERT(slot->type() == Slot::CONTEXT);
+  ASSERT(tmp.is_register());
+  Register context = rsi;
+
+  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
+    if (s->num_heap_slots() > 0) {
+      if (s->calls_eval()) {
+        // Check that extension is NULL.
+        __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
+                Immediate(0));
+        slow->Branch(not_equal, not_taken);
+      }
+      __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
+      __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
+      context = tmp.reg();
+    }
+  }
+  // Check that last extension is NULL.
+  __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
+  slow->Branch(not_equal, not_taken);
+  __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
+  return ContextOperand(tmp.reg(), slot->index());
+}
+
+
+void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
+  if (slot->type() == Slot::LOOKUP) {
+    ASSERT(slot->var()->is_dynamic());
+
+    JumpTarget slow;
+    JumpTarget done;
+    Result value;
+
+    // Generate fast-case code for variables that might be shadowed by
+    // eval-introduced variables.  Eval is used a lot without
+    // introducing variables.  In those cases, we do not want to
+    // perform a runtime call for all variables in the scope
+    // containing the eval.
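+    // DYNAMIC_GLOBAL and DYNAMIC_LOCAL mark variables that are global or
+    // rewrite to a local slot unless shadowed by an eval-introduced
+    // binding; the fast cases below check the context chain for eval
+    // extensions and fall back to a runtime call when one is found.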
+    if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
+      value = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
+      // If there was no control flow to slow, we can exit early.
+      if (!slow.is_linked()) {
+        frame_->Push(&value);
+        return;
+      }
+
+      done.Jump(&value);
+
+    } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
+      Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+      // Only generate the fast case for locals that rewrite to slots.
+      // This rules out argument loads.
+      if (potential_slot != NULL) {
+        // Allocate a fresh register to use as a temp in
+        // ContextSlotOperandCheckExtensions and to hold the result
+        // value.
+        value = allocator_->Allocate();
+        ASSERT(value.is_valid());
+        __ movq(value.reg(),
+               ContextSlotOperandCheckExtensions(potential_slot,
+                                                 value,
+                                                 &slow));
+        if (potential_slot->var()->mode() == Variable::CONST) {
+          __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
+          done.Branch(not_equal, &value);
+          __ LoadRoot(value.reg(), Heap::kUndefinedValueRootIndex);
+        }
+        // There is always control flow to slow from
+        // ContextSlotOperandCheckExtensions so we have to jump around
+        // it.
+        done.Jump(&value);
+      }
+    }
+
+    slow.Bind();
+    // A runtime call is inevitable.  We eagerly sync frame elements
+    // to memory so that we can push the arguments directly into place
+    // on top of the frame.
+    frame_->SyncRange(0, frame_->element_count() - 1);
+    frame_->EmitPush(rsi);
+    __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT);
+    frame_->EmitPush(kScratchRegister);
+    if (typeof_state == INSIDE_TYPEOF) {
+       value =
+         frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+    } else {
+       value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
+    }
+
+    done.Bind(&value);
+    frame_->Push(&value);
+
+  } else if (slot->var()->mode() == Variable::CONST) {
+    // Const slots may contain 'the hole' value (the constant hasn't been
+    // initialized yet) which needs to be converted into the 'undefined'
+    // value.
+    //
+    // We currently spill the virtual frame because constants use the
+    // potentially unsafe direct-frame access of SlotOperand.
+    VirtualFrame::SpilledScope spilled_scope;
+    Comment cmnt(masm_, "[ Load const");
+    JumpTarget exit;
+    __ movq(rcx, SlotOperand(slot, rcx));
+    __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
+    exit.Branch(not_equal);
+    __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex);
+    exit.Bind();
+    frame_->EmitPush(rcx);
+
+  } else if (slot->type() == Slot::PARAMETER) {
+    frame_->PushParameterAt(slot->index());
+
+  } else if (slot->type() == Slot::LOCAL) {
+    frame_->PushLocalAt(slot->index());
+
+  } else {
+    // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
+    // here.
+    //
+    // The use of SlotOperand below is safe for an unspilled frame
+    // because it will always be a context slot.
+    ASSERT(slot->type() == Slot::CONTEXT);
+    Result temp = allocator_->Allocate();
+    ASSERT(temp.is_valid());
+    __ movq(temp.reg(), SlotOperand(slot, temp.reg()));
+    frame_->Push(&temp);
+  }
+}
+
+
+void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
+                                                  TypeofState state) {
+  LoadFromSlot(slot, state);
+
+  // Bail out quickly if we're not using lazy arguments allocation.
+  if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
+
+  // ... or if the slot isn't a non-parameter arguments slot.
+  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
+
+  // Pop the loaded value from the stack.
+  Result value = frame_->Pop();
+
+  // If the loaded value is a constant, we know whether the arguments
+  // object has been lazily loaded yet.
+  if (value.is_constant()) {
+    if (value.handle()->IsTheHole()) {
+      Result arguments = StoreArgumentsObject(false);
+      frame_->Push(&arguments);
+    } else {
+      frame_->Push(&value);
+    }
+    return;
+  }
+
+  // The loaded value is in a register. If it is the sentinel that
+  // indicates that we haven't loaded the arguments object yet, we
+  // need to do it now.
+  JumpTarget exit;
+  __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
+  frame_->Push(&value);
+  exit.Branch(not_equal);
+  Result arguments = StoreArgumentsObject(false);
+  frame_->SetElementAt(0, &arguments);
+  exit.Bind();
+}
+
+
+void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
+  if (slot->type() == Slot::LOOKUP) {
+    ASSERT(slot->var()->is_dynamic());
+
+    // For now, just do a runtime call.  Since the call is inevitable,
+    // we eagerly sync the virtual frame so we can directly push the
+    // arguments into place.
+    frame_->SyncRange(0, frame_->element_count() - 1);
+
+    frame_->EmitPush(rsi);
+    frame_->EmitPush(slot->var()->name());
+
+    Result value;
+    if (init_state == CONST_INIT) {
+      // Same as a normal store, but this ignores the attributes (e.g.
+      // READ_ONLY) of the context slot so that we can initialize const
+      // properties (introduced via eval("const foo = (some expr);")). It
+      // also uses the current function context instead of the top context.
+      //
+      // Note that we must declare foo upon entry of eval(), via a context
+      // slot declaration, but we cannot initialize it at the same time,
+      // because the const declaration may be at the end of the eval code
+      // (sigh...) and the const variable may have been used before that
+      // point (where its value is 'undefined'). Thus, we can only do the
+      // initialization when we actually encounter the expression, when its
+      // operands are defined and valid, and thus we need to split it into
+      // two operations: declaration of the context slot followed by
+      // initialization.
+      value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+    } else {
+      value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
+    }
+    // Storing a variable must keep the (new) value on the expression
+    // stack. This is necessary for compiling chained assignment
+    // expressions.
+    frame_->Push(&value);
+  } else {
+    ASSERT(!slot->var()->is_dynamic());
+
+    JumpTarget exit;
+    if (init_state == CONST_INIT) {
+      ASSERT(slot->var()->mode() == Variable::CONST);
+      // Only the first const initialization must be executed (the slot
+      // still contains 'the hole' value). When the assignment is executed,
+      // the code is identical to a normal store (see below).
+      //
+      // We spill the frame in the code below because the direct-frame
+      // access of SlotOperand is potentially unsafe with an unspilled
+      // frame.
+      VirtualFrame::SpilledScope spilled_scope;
+      Comment cmnt(masm_, "[ Init const");
+      __ movq(rcx, SlotOperand(slot, rcx));
+      __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
+      exit.Branch(not_equal);
+    }
+
+    // We must execute the store.  Storing a variable must keep the (new)
+    // value on the stack. This is necessary for compiling assignment
+    // expressions.
+    //
+    // Note: We will reach here even with slot->var()->mode() ==
+    // Variable::CONST because of const declarations which will initialize
+    // consts to 'the hole' value and by doing so, end up calling this code.
+    if (slot->type() == Slot::PARAMETER) {
+      frame_->StoreToParameterAt(slot->index());
+    } else if (slot->type() == Slot::LOCAL) {
+      frame_->StoreToLocalAt(slot->index());
+    } else {
+      // The other slot types (LOOKUP and GLOBAL) cannot reach here.
+      //
+      // The use of SlotOperand below is safe for an unspilled frame
+      // because the slot is a context slot.
+      ASSERT(slot->type() == Slot::CONTEXT);
+      frame_->Dup();
+      Result value = frame_->Pop();
+      value.ToRegister();
+      Result start = allocator_->Allocate();
+      ASSERT(start.is_valid());
+      __ movq(SlotOperand(slot, start.reg()), value.reg());
+      // RecordWrite may destroy the value registers.
+      //
+      // TODO(204): Avoid actually spilling when the value is not
+      // needed (probably the common case).
+      frame_->Spill(value.reg());
+      int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+      Result temp = allocator_->Allocate();
+      ASSERT(temp.is_valid());
+      __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
+      // The results start, value, and temp are unused by going out of
+      // scope.
+    }
+
+    exit.Bind();
+  }
+}
+
+
+Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
+    Slot* slot,
+    TypeofState typeof_state,
+    JumpTarget* slow) {
+  // Check that no extension objects have been created by calls to
+  // eval from the current scope to the global scope.
+  Register context = rsi;
+  Result tmp = allocator_->Allocate();
+  ASSERT(tmp.is_valid());  // All non-reserved registers were available.
+
+  Scope* s = scope();
+  while (s != NULL) {
+    if (s->num_heap_slots() > 0) {
+      if (s->calls_eval()) {
+        // Check that extension is NULL.
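+        // (A non-NULL extension object would mean that eval may have
+        //  introduced bindings that could shadow the global variable, so
+        //  the slow path has to be taken in that case.)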
+        __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
+                Immediate(0));
+        slow->Branch(not_equal, not_taken);
+      }
+      // Load next context in chain.
+      __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
+      __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
+      context = tmp.reg();
+    }
+    // If no outer scope calls eval, we do not need to check more
+    // context extensions.  If we have reached an eval scope, we check
+    // all extensions from this point.
+    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+    s = s->outer_scope();
+  }
+
+  if (s->is_eval_scope()) {
+    // Loop up the context chain.  There is no frame effect so it is
+    // safe to use raw labels here.
+    Label next, fast;
+    if (!context.is(tmp.reg())) {
+      __ movq(tmp.reg(), context);
+    }
+    // Load map for comparison into register, outside loop.
+    __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
+    __ bind(&next);
+    // Terminate at global context.
+    __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset));
+    __ j(equal, &fast);
+    // Check that extension is NULL.
+    __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
+    slow->Branch(not_equal);
+    // Load next context in chain.
+    __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
+    __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
+    __ jmp(&next);
+    __ bind(&fast);
+  }
+  tmp.Unuse();
+
+  // All extension objects were empty and it is safe to use a global
+  // load IC call.
+  LoadGlobal();
+  frame_->Push(slot->var()->name());
+  RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
+                         ? RelocInfo::CODE_TARGET
+                         : RelocInfo::CODE_TARGET_CONTEXT;
+  Result answer = frame_->CallLoadIC(mode);
+  // A test rax instruction following the call signals that the inobject
+  // property case was inlined.  Ensure that there is not a test rax
+  // instruction here.
+  masm_->nop();
+  // Discard the global object. The result is in answer.
+  frame_->Drop();
+  return answer;
+}
+
+
+void CodeGenerator::LoadGlobal() {
+  if (in_spilled_code()) {
+    frame_->EmitPush(GlobalObject());
+  } else {
+    Result temp = allocator_->Allocate();
+    __ movq(temp.reg(), GlobalObject());
+    frame_->Push(&temp);
+  }
+}
+
+
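+// Load the global receiver object (used as the implicit receiver for calls
+// to global functions) and push it on the virtual frame.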
+void CodeGenerator::LoadGlobalReceiver() {
+  Result temp = allocator_->Allocate();
+  Register reg = temp.reg();
+  __ movq(reg, GlobalObject());
+  __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
+  frame_->Push(&temp);
+}
+
+
+ArgumentsAllocationMode CodeGenerator::ArgumentsMode() const {
+  if (scope_->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
+  ASSERT(scope_->arguments_shadow() != NULL);
+  // We don't want to do lazy arguments allocation for functions that
+  // have heap-allocated contexts, because it interferes with the
+  // uninitialized const tracking in the context objects.
+  return (scope_->num_heap_slots() > 0)
+      ? EAGER_ARGUMENTS_ALLOCATION
+      : LAZY_ARGUMENTS_ALLOCATION;
+}
+
+
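+// Store the arguments object into the arguments and arguments-shadow slots.
+// With lazy allocation on function entry this stores the hole value as a
+// sentinel instead of an actual arguments object.  Returns the stored value.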
+Result CodeGenerator::StoreArgumentsObject(bool initial) {
+  ArgumentsAllocationMode mode = ArgumentsMode();
+  ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
+
+  Comment cmnt(masm_, "[ store arguments object");
+  if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
+    // When using lazy arguments allocation, we store the hole value
+    // as a sentinel indicating that the arguments object hasn't been
+    // allocated yet.
+    frame_->Push(Factory::the_hole_value());
+  } else {
+    ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+    frame_->PushFunction();
+    frame_->PushReceiverSlotAddress();
+    frame_->Push(Smi::FromInt(scope_->num_parameters()));
+    Result result = frame_->CallStub(&stub, 3);
+    frame_->Push(&result);
+  }
+
+  { Reference shadow_ref(this, scope_->arguments_shadow());
+    Reference arguments_ref(this, scope_->arguments());
+    ASSERT(shadow_ref.is_slot() && arguments_ref.is_slot());
+    // Here we rely on the convenient property that references to slots
+    // take up zero space in the frame (i.e., it doesn't matter that the
+    // stored value is actually below the reference on the frame).
+    JumpTarget done;
+    bool skip_arguments = false;
+    if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
+      // We have to skip storing into the arguments slot if it has
+      // already been written to. This can happen if a function
+      // has a local variable named 'arguments'.
+      LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+      Result arguments = frame_->Pop();
+      if (arguments.is_constant()) {
+        // We have to skip updating the arguments object if it has
+        // been assigned a proper value.
+        skip_arguments = !arguments.handle()->IsTheHole();
+      } else {
+        __ CompareRoot(arguments.reg(), Heap::kTheHoleValueRootIndex);
+        arguments.Unuse();
+        done.Branch(not_equal);
+      }
+    }
+    if (!skip_arguments) {
+      arguments_ref.SetValue(NOT_CONST_INIT);
+      if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
+    }
+    shadow_ref.SetValue(NOT_CONST_INIT);
+  }
+  return frame_->Pop();
+}
+
+
+// TODO(1241834): Get rid of this function in favor of just using Load, now
+// that we have the INSIDE_TYPEOF typeof state. => Need to handle global
+// variables w/o reference errors elsewhere.
+void CodeGenerator::LoadTypeofExpression(Expression* x) {
+  Variable* variable = x->AsVariableProxy()->AsVariable();
+  if (variable != NULL && !variable->is_this() && variable->is_global()) {
+    // NOTE: This is somewhat nasty. We force the compiler to load
+    // the variable as if through '<global>.<variable>' to make sure we
+    // do not get reference errors.
+    Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
+    Literal key(variable->name());
+    // TODO(1241834): Fetch the position from the variable instead of using
+    // no position.
+    Property property(&global, &key, RelocInfo::kNoPosition);
+    Load(&property);
+  } else {
+    Load(x, INSIDE_TYPEOF);
+  }
+}
+
+
+void CodeGenerator::Comparison(Condition cc,
+                               bool strict,
+                               ControlDestination* dest) {
+  // Strict only makes sense for equality comparisons.
+  ASSERT(!strict || cc == equal);
+
+  Result left_side;
+  Result right_side;
+  // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
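+  // (For example, 'a > b' is compiled as 'b < a': the operands are popped
+  //  in swapped order and the condition is reversed.)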
+  if (cc == greater || cc == less_equal) {
+    cc = ReverseCondition(cc);
+    left_side = frame_->Pop();
+    right_side = frame_->Pop();
+  } else {
+    right_side = frame_->Pop();
+    left_side = frame_->Pop();
+  }
+  ASSERT(cc == less || cc == equal || cc == greater_equal);
+
+  // If either side is a constant smi, optimize the comparison.
+  bool left_side_constant_smi =
+      left_side.is_constant() && left_side.handle()->IsSmi();
+  bool right_side_constant_smi =
+      right_side.is_constant() && right_side.handle()->IsSmi();
+  bool left_side_constant_null =
+      left_side.is_constant() && left_side.handle()->IsNull();
+  bool right_side_constant_null =
+      right_side.is_constant() && right_side.handle()->IsNull();
+
+  if (left_side_constant_smi || right_side_constant_smi) {
+    if (left_side_constant_smi && right_side_constant_smi) {
+      // Trivial case, comparing two constants.
+      int left_value = Smi::cast(*left_side.handle())->value();
+      int right_value = Smi::cast(*right_side.handle())->value();
+      switch (cc) {
+        case less:
+          dest->Goto(left_value < right_value);
+          break;
+        case equal:
+          dest->Goto(left_value == right_value);
+          break;
+        case greater_equal:
+          dest->Goto(left_value >= right_value);
+          break;
+        default:
+          UNREACHABLE();
+      }
+    } else {  // Only one side is a constant Smi.
+      // If left side is a constant Smi, reverse the operands.
+      // Since one side is a constant Smi, conversion order does not matter.
+      if (left_side_constant_smi) {
+        Result temp = left_side;
+        left_side = right_side;
+        right_side = temp;
+        cc = ReverseCondition(cc);
+        // This may reintroduce greater or less_equal as the value of cc.
+        // CompareStub and the inline code both support all values of cc.
+      }
+      // Implement comparison against a constant Smi, inlining the case
+      // where both sides are Smis.
+      left_side.ToRegister();
+
+      // Here we split control flow to the stub call and inlined cases
+      // before finally splitting it to the control destination.  We use
+      // a jump target and branching to duplicate the virtual frame at
+      // the first split.  We manually handle the off-frame references
+      // by reconstituting them on the non-fall-through path.
+      JumpTarget is_smi;
+      Register left_reg = left_side.reg();
+      Handle<Object> right_val = right_side.handle();
+
+      Condition left_is_smi = masm_->CheckSmi(left_side.reg());
+      is_smi.Branch(left_is_smi);
+
+      // Set up and call the compare stub.
+      CompareStub stub(cc, strict);
+      Result result = frame_->CallStub(&stub, &left_side, &right_side);
+      result.ToRegister();
+      __ testq(result.reg(), result.reg());
+      result.Unuse();
+      dest->true_target()->Branch(cc);
+      dest->false_target()->Jump();
+
+      is_smi.Bind();
+      left_side = Result(left_reg);
+      right_side = Result(right_val);
+      // Test smi equality and comparison by signed int comparison.
+      // Both sides are smis, so we can use an Immediate.
+      __ cmpl(left_side.reg(), Immediate(Smi::cast(*right_side.handle())));
+      left_side.Unuse();
+      right_side.Unuse();
+      dest->Split(cc);
+    }
+  } else if (cc == equal &&
+             (left_side_constant_null || right_side_constant_null)) {
+    // To make null checks efficient, we check if either the left side or
+    // the right side is the constant 'null'.
+    // If so, we optimize the code by inlining a null check instead of
+    // calling the (very) general runtime routine for checking equality.
+    Result operand = left_side_constant_null ? right_side : left_side;
+    right_side.Unuse();
+    left_side.Unuse();
+    operand.ToRegister();
+    __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
+    if (strict) {
+      operand.Unuse();
+      dest->Split(equal);
+    } else {
+      // The 'null' value is only equal to 'undefined' if using non-strict
+      // comparisons.
+      dest->true_target()->Branch(equal);
+      __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
+      dest->true_target()->Branch(equal);
+      Condition is_smi = masm_->CheckSmi(operand.reg());
+      dest->false_target()->Branch(is_smi);
+
+      // It can be an undetectable object.
+      // Use a scratch register in preference to spilling operand.reg().
+      Result temp = allocator()->Allocate();
+      ASSERT(temp.is_valid());
+      __ movq(temp.reg(),
+              FieldOperand(operand.reg(), HeapObject::kMapOffset));
+      __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
+               Immediate(1 << Map::kIsUndetectable));
+      temp.Unuse();
+      operand.Unuse();
+      dest->Split(not_zero);
+    }
+  } else {  // Neither side is a constant Smi or null.
+    // If either side is a non-smi constant, skip the smi check.
+    bool known_non_smi =
+        (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
+        (right_side.is_constant() && !right_side.handle()->IsSmi());
+    left_side.ToRegister();
+    right_side.ToRegister();
+
+    if (known_non_smi) {
+      // When non-smi, call out to the compare stub.
+      CompareStub stub(cc, strict);
+      Result answer = frame_->CallStub(&stub, &left_side, &right_side);
+      // The result is a Smi, which is negative, zero, or positive.
+      __ testl(answer.reg(), answer.reg());  // Sets both zero and sign flags.
+      answer.Unuse();
+      dest->Split(cc);
+    } else {
+      // Here we split control flow to the stub call and inlined cases
+      // before finally splitting it to the control destination.  We use
+      // a jump target and branching to duplicate the virtual frame at
+      // the first split.  We manually handle the off-frame references
+      // by reconstituting them on the non-fall-through path.
+      JumpTarget is_smi;
+      Register left_reg = left_side.reg();
+      Register right_reg = right_side.reg();
+
+      Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg);
+      is_smi.Branch(both_smi);
+      // When non-smi, call out to the compare stub.
+      CompareStub stub(cc, strict);
+      Result answer = frame_->CallStub(&stub, &left_side, &right_side);
+      __ testl(answer.reg(), answer.reg());  // Sets both zero and sign flags.
+      answer.Unuse();
+      dest->true_target()->Branch(cc);
+      dest->false_target()->Jump();
+
+      is_smi.Bind();
+      left_side = Result(left_reg);
+      right_side = Result(right_reg);
+      __ cmpl(left_side.reg(), right_side.reg());
+      right_side.Unuse();
+      left_side.Unuse();
+      dest->Split(cc);
+    }
+  }
+}
+
+
+class DeferredInlineBinaryOperation: public DeferredCode {
+ public:
+  DeferredInlineBinaryOperation(Token::Value op,
+                                Register dst,
+                                Register left,
+                                Register right,
+                                OverwriteMode mode)
+      : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
+    set_comment("[ DeferredInlineBinaryOperation");
+  }
+
+  virtual void Generate();
+
+ private:
+  Token::Value op_;
+  Register dst_;
+  Register left_;
+  Register right_;
+  OverwriteMode mode_;
+};
+
+
+void DeferredInlineBinaryOperation::Generate() {
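+  // Slow case: push both operands and call the generic binary operation
+  // stub, which leaves its result in rax.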
+  __ push(left_);
+  __ push(right_);
+  GenericBinaryOpStub stub(op_, mode_, SMI_CODE_INLINED);
+  __ CallStub(&stub);
+  if (!dst_.is(rax)) __ movq(dst_, rax);
+}
+
+
+void CodeGenerator::GenericBinaryOperation(Token::Value op,
+                                           SmiAnalysis* type,
+                                           OverwriteMode overwrite_mode) {
+  Comment cmnt(masm_, "[ BinaryOperation");
+  Comment cmnt_token(masm_, Token::String(op));
+
+  if (op == Token::COMMA) {
+    // Simply discard left value.
+    frame_->Nip(1);
+    return;
+  }
+
+  // Set the flags based on the operation, type and loop nesting level.
+  GenericBinaryFlags flags;
+  switch (op) {
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SHL:
+    case Token::SHR:
+    case Token::SAR:
+      // Bit operations always assume they likely operate on Smis.  Still, we
+      // only generate the inline Smi check code if this operation is part of
+      // a loop.
+      flags = (loop_nesting() > 0)
+              ? SMI_CODE_INLINED
+              : SMI_CODE_IN_STUB;
+      break;
+
+    default:
+      // By default only inline the Smi check code for likely smis if this
+      // operation is part of a loop.
+      flags = ((loop_nesting() > 0) && type->IsLikelySmi())
+              ? SMI_CODE_INLINED
+              : SMI_CODE_IN_STUB;
+      break;
+  }
+
+  Result right = frame_->Pop();
+  Result left = frame_->Pop();
+
+  if (op == Token::ADD) {
+    bool left_is_string = left.is_constant() && left.handle()->IsString();
+    bool right_is_string = right.is_constant() && right.handle()->IsString();
+    if (left_is_string || right_is_string) {
+      frame_->Push(&left);
+      frame_->Push(&right);
+      Result answer;
+      if (left_is_string) {
+        if (right_is_string) {
+          // TODO(lrn): if both are constant strings
+          // -- do a compile time cons, if allocation during codegen is allowed.
+          answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
+        } else {
+          answer =
+            frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
+        }
+      } else if (right_is_string) {
+        answer =
+          frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
+      }
+      frame_->Push(&answer);
+      return;
+    }
+    // Neither operand is known to be a string.
+  }
+
+  bool left_is_smi = left.is_constant() && left.handle()->IsSmi();
+  bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi();
+  bool right_is_smi = right.is_constant() && right.handle()->IsSmi();
+  bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi();
+  bool generate_no_smi_code = false;  // No smi code at all, inline or in stub.
+
+  if (left_is_smi && right_is_smi) {
+    // Compute the constant result at compile time, and leave it on the frame.
+    int left_int = Smi::cast(*left.handle())->value();
+    int right_int = Smi::cast(*right.handle())->value();
+    if (FoldConstantSmis(op, left_int, right_int)) return;
+  }
+
+  if (left_is_non_smi || right_is_non_smi) {
+    // Set flag so that we go straight to the slow case, with no smi code.
+    generate_no_smi_code = true;
+  } else if (right_is_smi) {
+    ConstantSmiBinaryOperation(op, &left, right.handle(),
+                               type, false, overwrite_mode);
+    return;
+  } else if (left_is_smi) {
+    ConstantSmiBinaryOperation(op, &right, left.handle(),
+                               type, true, overwrite_mode);
+    return;
+  }
+
+  if (flags == SMI_CODE_INLINED && !generate_no_smi_code) {
+    LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
+  } else {
+    frame_->Push(&left);
+    frame_->Push(&right);
+    // If we know the arguments aren't smis, use the binary operation stub
+    // that does not check for the fast smi case.
+    // The same stub is used for NO_SMI_CODE and SMI_CODE_INLINED.
+    if (generate_no_smi_code) {
+      flags = SMI_CODE_INLINED;
+    }
+    GenericBinaryOpStub stub(op, overwrite_mode, flags);
+    Result answer = frame_->CallStub(&stub, 2);
+    frame_->Push(&answer);
+  }
+}
+
+
+// Emit a LoadIC call to get the value from receiver and leave it in
+// dst.  The receiver register is restored after the call.
+class DeferredReferenceGetNamedValue: public DeferredCode {
+ public:
+  DeferredReferenceGetNamedValue(Register dst,
+                                 Register receiver,
+                                 Handle<String> name)
+      : dst_(dst), receiver_(receiver), name_(name) {
+    set_comment("[ DeferredReferenceGetNamedValue");
+  }
+
+  virtual void Generate();
+
+  Label* patch_site() { return &patch_site_; }
+
+ private:
+  Label patch_site_;
+  Register dst_;
+  Register receiver_;
+  Handle<String> name_;
+};
+
+
+void DeferredReferenceGetNamedValue::Generate() {
+  __ push(receiver_);
+  __ Move(rcx, name_);
+  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+  __ Call(ic, RelocInfo::CODE_TARGET);
+  // The call must be followed by a test rax instruction to indicate
+  // that the inobject property case was inlined.
+  //
+  // Store the delta to the map check instruction here in the test
+  // instruction.  Use masm_-> instead of the __ macro since the
+  // latter can't return a value.
+  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+  // Here we use masm_-> instead of the __ macro because this is the
+  // instruction that gets patched and coverage code gets in the way.
+  masm_->testl(rax, Immediate(-delta_to_patch_site));
+  __ IncrementCounter(&Counters::named_load_inline_miss, 1);
+
+  if (!dst_.is(rax)) __ movq(dst_, rax);
+  __ pop(receiver_);
+}
+
+
+void DeferredInlineSmiAdd::Generate() {
+  __ push(dst_);
+  __ push(Immediate(value_));
+  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
+  __ CallStub(&igostub);
+  if (!dst_.is(rax)) __ movq(dst_, rax);
+}
+
+
+void DeferredInlineSmiAddReversed::Generate() {
+  __ push(Immediate(value_));  // Note: sign extended.
+  __ push(dst_);
+  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
+  __ CallStub(&igostub);
+  if (!dst_.is(rax)) __ movq(dst_, rax);
+}
+
+
+void DeferredInlineSmiSub::Generate() {
+  __ push(dst_);
+  __ push(Immediate(value_));  // Note: sign extended.
+  GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
+  __ CallStub(&igostub);
+  if (!dst_.is(rax)) __ movq(dst_, rax);
+}
+
+
+void DeferredInlineSmiOperation::Generate() {
+  __ push(src_);
+  __ push(Immediate(value_));  // Note: sign extended.
+  // For mod we don't generate all the Smi code inline.
+  GenericBinaryOpStub stub(
+      op_,
+      overwrite_mode_,
+      (op_ == Token::MOD) ? SMI_CODE_IN_STUB : SMI_CODE_INLINED);
+  __ CallStub(&stub);
+  if (!dst_.is(rax)) __ movq(dst_, rax);
+}
+
+
+void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
+                                               Result* operand,
+                                               Handle<Object> value,
+                                               SmiAnalysis* type,
+                                               bool reversed,
+                                               OverwriteMode overwrite_mode) {
+  // NOTE: This is an attempt to inline (a bit) more of the code for
+  // some possible smi operations (like + and -) when (at least) one
+  // of the operands is a constant smi.
+  // Consumes the argument "operand".
+
+  // TODO(199): Optimize some special cases of operations involving a
+  // smi literal (multiply by 2, shift by 0, etc.).
+  if (IsUnsafeSmi(value)) {
+    Result unsafe_operand(value);
+    if (reversed) {
+      LikelySmiBinaryOperation(op, &unsafe_operand, operand,
+                               overwrite_mode);
+    } else {
+      LikelySmiBinaryOperation(op, operand, &unsafe_operand,
+                               overwrite_mode);
+    }
+    ASSERT(!operand->is_valid());
+    return;
+  }
+
+  // Get the literal value.
+  Smi* smi_value = Smi::cast(*value);
+  int int_value = smi_value->value();
+
+  switch (op) {
+    case Token::ADD: {
+      operand->ToRegister();
+      frame_->Spill(operand->reg());
+      DeferredCode* deferred = NULL;
+      if (reversed) {
+        deferred = new DeferredInlineSmiAddReversed(operand->reg(),
+                                                    smi_value,
+                                                    overwrite_mode);
+      } else {
+        deferred = new DeferredInlineSmiAdd(operand->reg(),
+                                            smi_value,
+                                            overwrite_mode);
+      }
+      __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
+      __ SmiAddConstant(operand->reg(),
+                        operand->reg(),
+                        int_value,
+                        deferred->entry_label());
+      deferred->BindExit();
+      frame_->Push(operand);
+      break;
+    }
+
+    case Token::SUB: {
+      if (reversed) {
+        Result constant_operand(value);
+        LikelySmiBinaryOperation(op, &constant_operand, operand,
+                                 overwrite_mode);
+      } else {
+        operand->ToRegister();
+        frame_->Spill(operand->reg());
+        DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
+                                                          smi_value,
+                                                          overwrite_mode);
+        __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
+        // A smi currently fits in a 32-bit Immediate.
+        __ SmiSubConstant(operand->reg(),
+                          operand->reg(),
+                          int_value,
+                          deferred->entry_label());
+        deferred->BindExit();
+        frame_->Push(operand);
+      }
+      break;
+    }
+
+    case Token::SAR:
+      if (reversed) {
+        Result constant_operand(value);
+        LikelySmiBinaryOperation(op, &constant_operand, operand,
+                                 overwrite_mode);
+      } else {
+        // Only the least significant 5 bits of the shift value are used.
+        // In the slow case, this masking is done inside the runtime call.
+        int shift_value = int_value & 0x1f;
+        operand->ToRegister();
+        frame_->Spill(operand->reg());
+        DeferredInlineSmiOperation* deferred =
+            new DeferredInlineSmiOperation(op,
+                                           operand->reg(),
+                                           operand->reg(),
+                                           smi_value,
+                                           overwrite_mode);
+        __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
+        __ SmiShiftArithmeticRightConstant(operand->reg(),
+                                           operand->reg(),
+                                           shift_value);
+        deferred->BindExit();
+        frame_->Push(operand);
+      }
+      break;
+
+    case Token::SHR:
+      if (reversed) {
+        Result constant_operand(value);
+        LikelySmiBinaryOperation(op, &constant_operand, operand,
+                                 overwrite_mode);
+      } else {
+        // Only the least significant 5 bits of the shift value are used.
+        // In the slow case, this masking is done inside the runtime call.
+        int shift_value = int_value & 0x1f;
+        operand->ToRegister();
+        Result answer = allocator()->Allocate();
+        ASSERT(answer.is_valid());
+        DeferredInlineSmiOperation* deferred =
+            new DeferredInlineSmiOperation(op,
+                                           answer.reg(),
+                                           operand->reg(),
+                                           smi_value,
+                                           overwrite_mode);
+        __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
+        __ SmiShiftLogicalRightConstant(answer.reg(),
+                                        operand->reg(),
+                                        shift_value,
+                                        deferred->entry_label());
+        deferred->BindExit();
+        operand->Unuse();
+        frame_->Push(&answer);
+      }
+      break;
+
+    case Token::SHL:
+      if (reversed) {
+        Result constant_operand(value);
+        LikelySmiBinaryOperation(op, &constant_operand, operand,
+                                 overwrite_mode);
+      } else {
+        // Only the least significant 5 bits of the shift value are used.
+        // In the slow case, this masking is done inside the runtime call.
+        int shift_value = int_value & 0x1f;
+        operand->ToRegister();
+        if (shift_value == 0) {
+          // Spill operand so it can be overwritten in the slow case.
+          frame_->Spill(operand->reg());
+          DeferredInlineSmiOperation* deferred =
+              new DeferredInlineSmiOperation(op,
+                                             operand->reg(),
+                                             operand->reg(),
+                                             smi_value,
+                                             overwrite_mode);
+          __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
+          deferred->BindExit();
+          frame_->Push(operand);
+        } else {
+          // Use a fresh temporary for nonzero shift values.
+          Result answer = allocator()->Allocate();
+          ASSERT(answer.is_valid());
+          DeferredInlineSmiOperation* deferred =
+              new DeferredInlineSmiOperation(op,
+                                             answer.reg(),
+                                             operand->reg(),
+                                             smi_value,
+                                             overwrite_mode);
+          __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
+          __ SmiShiftLeftConstant(answer.reg(),
+                                  operand->reg(),
+                                  shift_value,
+                                  deferred->entry_label());
+          deferred->BindExit();
+          operand->Unuse();
+          frame_->Push(&answer);
+        }
+      }
+      break;
+
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND: {
+      operand->ToRegister();
+      frame_->Spill(operand->reg());
+      if (reversed) {
+        // Bit operations with a constant smi are commutative.
+        // We can swap left and right operands with no problem.
+        // Swap left and right overwrite modes.  0->0, 1->2, 2->1.
+        overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3);
+      }
+      DeferredCode* deferred =  new DeferredInlineSmiOperation(op,
+                                                               operand->reg(),
+                                                               operand->reg(),
+                                                               smi_value,
+                                                               overwrite_mode);
+      __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
+      if (op == Token::BIT_AND) {
+        __ SmiAndConstant(operand->reg(), operand->reg(), int_value);
+      } else if (op == Token::BIT_XOR) {
+        if (int_value != 0) {
+          __ SmiXorConstant(operand->reg(), operand->reg(), int_value);
+        }
+      } else {
+        ASSERT(op == Token::BIT_OR);
+        if (int_value != 0) {
+          __ SmiOrConstant(operand->reg(), operand->reg(), int_value);
+        }
+      }
+      deferred->BindExit();
+      frame_->Push(operand);
+      break;
+    }
+
+    // Generate inline code for mod of powers of 2 and negative powers of 2.
+    case Token::MOD:
+      if (!reversed &&
+          int_value != 0 &&
+          (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
+        operand->ToRegister();
+        frame_->Spill(operand->reg());
+        DeferredCode* deferred = new DeferredInlineSmiOperation(op,
+                                                                operand->reg(),
+                                                                operand->reg(),
+                                                                smi_value,
+                                                                overwrite_mode);
+        // Check for negative or non-Smi left hand side.
+        __ JumpIfNotPositiveSmi(operand->reg(), deferred->entry_label());
+        if (int_value < 0) int_value = -int_value;
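+        // For a non-negative dividend x and a power-of-two divisor m,
+        // x % m == x & (m - 1), so the mod reduces to a bitwise AND.  The
+        // result takes the sign of the dividend, which is why a negative
+        // power-of-two divisor can simply be negated above.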
+        if (int_value == 1) {
+          __ movl(operand->reg(), Immediate(Smi::FromInt(0)));
+        } else {
+          __ SmiAndConstant(operand->reg(), operand->reg(), int_value - 1);
+        }
+        deferred->BindExit();
+        frame_->Push(operand);
+        break;  // This break only applies if we generated code for MOD.
+      }
+      // Fall through if we did not find a power of 2 on the right hand side!
+      // The next case must be the default.
+
+    default: {
+      Result constant_operand(value);
+      if (reversed) {
+        LikelySmiBinaryOperation(op, &constant_operand, operand,
+                                 overwrite_mode);
+      } else {
+        LikelySmiBinaryOperation(op, operand, &constant_operand,
+                                 overwrite_mode);
+      }
+      break;
+    }
+  }
+  ASSERT(!operand->is_valid());
+}
+
+
+void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
+                                             Result* left,
+                                             Result* right,
+                                             OverwriteMode overwrite_mode) {
+  // Special handling of div and mod because they use fixed registers.
+  if (op == Token::DIV || op == Token::MOD) {
+    // We need rax as the quotient register, rdx as the remainder
+    // register, neither left nor right in rax or rdx, and left copied
+    // to rax.
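+    // (On x64 the idiv instruction fixes its quotient in rax and its
+    //  remainder in rdx, which is why rax and rdx are treated specially
+    //  here.)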
+    Result quotient;
+    Result remainder;
+    bool left_is_in_rax = false;
+    // Step 1: get rax for quotient.
+    if ((left->is_register() && left->reg().is(rax)) ||
+        (right->is_register() && right->reg().is(rax))) {
+      // One or both is in rax.  Use a fresh non-rdx register for
+      // them.
+      Result fresh = allocator_->Allocate();
+      ASSERT(fresh.is_valid());
+      if (fresh.reg().is(rdx)) {
+        remainder = fresh;
+        fresh = allocator_->Allocate();
+        ASSERT(fresh.is_valid());
+      }
+      if (left->is_register() && left->reg().is(rax)) {
+        quotient = *left;
+        *left = fresh;
+        left_is_in_rax = true;
+      }
+      if (right->is_register() && right->reg().is(rax)) {
+        quotient = *right;
+        *right = fresh;
+      }
+      __ movq(fresh.reg(), rax);
+    } else {
+      // Neither left nor right is in rax.
+      quotient = allocator_->Allocate(rax);
+    }
+    ASSERT(quotient.is_register() && quotient.reg().is(rax));
+    ASSERT(!(left->is_register() && left->reg().is(rax)));
+    ASSERT(!(right->is_register() && right->reg().is(rax)));
+
+    // Step 2: get rdx for remainder if necessary.
+    if (!remainder.is_valid()) {
+      if ((left->is_register() && left->reg().is(rdx)) ||
+          (right->is_register() && right->reg().is(rdx))) {
+        Result fresh = allocator_->Allocate();
+        ASSERT(fresh.is_valid());
+        if (left->is_register() && left->reg().is(rdx)) {
+          remainder = *left;
+          *left = fresh;
+        }
+        if (right->is_register() && right->reg().is(rdx)) {
+          remainder = *right;
+          *right = fresh;
+        }
+        __ movq(fresh.reg(), rdx);
+      } else {
+        // Neither left nor right is in rdx.
+        remainder = allocator_->Allocate(rdx);
+      }
+    }
+    ASSERT(remainder.is_register() && remainder.reg().is(rdx));
+    ASSERT(!(left->is_register() && left->reg().is(rdx)));
+    ASSERT(!(right->is_register() && right->reg().is(rdx)));
+
+    left->ToRegister();
+    right->ToRegister();
+    frame_->Spill(rax);
+    frame_->Spill(rdx);
+
+    // Check that left and right are smi tagged.
+    DeferredInlineBinaryOperation* deferred =
+        new DeferredInlineBinaryOperation(op,
+                                          (op == Token::DIV) ? rax : rdx,
+                                          left->reg(),
+                                          right->reg(),
+                                          overwrite_mode);
+    __ JumpIfNotBothSmi(left->reg(), right->reg(), deferred->entry_label());
+
+    if (op == Token::DIV) {
+      __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
+      deferred->BindExit();
+      left->Unuse();
+      right->Unuse();
+      frame_->Push(&quotient);
+    } else {
+      ASSERT(op == Token::MOD);
+      __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
+      deferred->BindExit();
+      left->Unuse();
+      right->Unuse();
+      frame_->Push(&remainder);
+    }
+    return;
+  }
+
+  // Special handling of shift operations because they use fixed
+  // registers.
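+  // (Variable-count shift instructions on x64 take their shift count in cl,
+  //  which is why the right operand has to end up in rcx.)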
+  if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
+    // Move left out of rcx if necessary.
+    if (left->is_register() && left->reg().is(rcx)) {
+      *left = allocator_->Allocate();
+      ASSERT(left->is_valid());
+      __ movq(left->reg(), rcx);
+    }
+    right->ToRegister(rcx);
+    left->ToRegister();
+    ASSERT(left->is_register() && !left->reg().is(rcx));
+    ASSERT(right->is_register() && right->reg().is(rcx));
+
+    // We will modify right, it must be spilled.
+    frame_->Spill(rcx);
+
+    // Use a fresh answer register to avoid spilling the left operand.
+    Result answer = allocator_->Allocate();
+    ASSERT(answer.is_valid());
+    // Check that both operands are smis using the answer register as a
+    // temporary.
+    DeferredInlineBinaryOperation* deferred =
+        new DeferredInlineBinaryOperation(op,
+                                          answer.reg(),
+                                          left->reg(),
+                                          rcx,
+                                          overwrite_mode);
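+    // Or-ing the two operands leaves the smi tag bit set iff at least one
+    // of them is not a smi, so a single smi check covers both operands.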
+    __ movq(answer.reg(), left->reg());
+    __ or_(answer.reg(), rcx);
+    __ JumpIfNotSmi(answer.reg(), deferred->entry_label());
+
+    // Perform the operation.
+    switch (op) {
+      case Token::SAR:
+        __ SmiShiftArithmeticRight(answer.reg(), left->reg(), rcx);
+        break;
+      case Token::SHR: {
+        __ SmiShiftLogicalRight(answer.reg(),
+                                left->reg(),
+                                rcx,
+                                deferred->entry_label());
+        break;
+      }
+      case Token::SHL: {
+        __ SmiShiftLeft(answer.reg(),
+                        left->reg(),
+                        rcx,
+                        deferred->entry_label());
+        break;
+      }
+      default:
+        UNREACHABLE();
+    }
+    deferred->BindExit();
+    left->Unuse();
+    right->Unuse();
+    frame_->Push(&answer);
+    return;
+  }
+
+  // Handle the other binary operations.
+  left->ToRegister();
+  right->ToRegister();
+  // A newly allocated register answer is used to hold the answer.  The
+  // registers containing left and right are not modified so they don't
+  // need to be spilled in the fast case.
+  Result answer = allocator_->Allocate();
+  ASSERT(answer.is_valid());
+
+  // Perform the smi tag check.
+  DeferredInlineBinaryOperation* deferred =
+      new DeferredInlineBinaryOperation(op,
+                                        answer.reg(),
+                                        left->reg(),
+                                        right->reg(),
+                                        overwrite_mode);
+  __ JumpIfNotBothSmi(left->reg(), right->reg(), deferred->entry_label());
+
+  switch (op) {
+    case Token::ADD:
+      __ SmiAdd(answer.reg(),
+                left->reg(),
+                right->reg(),
+                deferred->entry_label());
+      break;
+
+    case Token::SUB:
+      __ SmiSub(answer.reg(),
+                left->reg(),
+                right->reg(),
+                deferred->entry_label());
+      break;
+
+    case Token::MUL: {
+      __ SmiMul(answer.reg(),
+                left->reg(),
+                right->reg(),
+                deferred->entry_label());
+      break;
+    }
+
+    case Token::BIT_OR:
+      __ SmiOr(answer.reg(), left->reg(), right->reg());
+      break;
+
+    case Token::BIT_AND:
+      __ SmiAnd(answer.reg(), left->reg(), right->reg());
+      break;
+
+    case Token::BIT_XOR:
+      __ SmiXor(answer.reg(), left->reg(), right->reg());
+      break;
+
+    default:
+      UNREACHABLE();
+      break;
+  }
+  deferred->BindExit();
+  left->Unuse();
+  right->Unuse();
+  frame_->Push(&answer);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
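+// From here on the __ macro expands against the local 'masm' variable used
+// by the Reference methods and the code stubs below, rather than the code
+// generator's masm_ field.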
+
+
+Handle<String> Reference::GetName() {
+  ASSERT(type_ == NAMED);
+  Property* property = expression_->AsProperty();
+  if (property == NULL) {
+    // Global variable reference treated as a named property reference.
+    VariableProxy* proxy = expression_->AsVariableProxy();
+    ASSERT(proxy->AsVariable() != NULL);
+    ASSERT(proxy->AsVariable()->is_global());
+    return proxy->name();
+  } else {
+    Literal* raw_name = property->key()->AsLiteral();
+    ASSERT(raw_name != NULL);
+    return Handle<String>(String::cast(*raw_name->handle()));
+  }
+}
+
+
+void Reference::GetValue(TypeofState typeof_state) {
+  ASSERT(!cgen_->in_spilled_code());
+  ASSERT(cgen_->HasValidEntryRegisters());
+  ASSERT(!is_illegal());
+  MacroAssembler* masm = cgen_->masm();
+
+  // Record the source position for the property load.
+  Property* property = expression_->AsProperty();
+  if (property != NULL) {
+    cgen_->CodeForSourcePosition(property->position());
+  }
+
+  switch (type_) {
+    case SLOT: {
+      Comment cmnt(masm, "[ Load from Slot");
+      Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+      ASSERT(slot != NULL);
+      cgen_->LoadFromSlotCheckForArguments(slot, typeof_state);
+      break;
+    }
+
+    case NAMED: {
+      // TODO(1241834): Make sure that it is safe to ignore the
+      // distinction between expressions in a typeof and not in a
+      // typeof. If there is a chance that reference errors can be
+      // thrown below, we must distinguish between the two kinds of
+      // loads (typeof expression loads must not throw a reference
+      // error).
+      Variable* var = expression_->AsVariableProxy()->AsVariable();
+      bool is_global = var != NULL;
+      ASSERT(!is_global || var->is_global());
+
+      // Do not inline the inobject property case for loads from the global
+      // object.  Also do not inline for unoptimized code.  This saves time
+      // in the code generator.  Unoptimized code is toplevel code or code
+      // that is not in a loop.
+      if (is_global ||
+          cgen_->scope()->is_global_scope() ||
+          cgen_->loop_nesting() == 0) {
+        Comment cmnt(masm, "[ Load from named Property");
+        cgen_->frame()->Push(GetName());
+
+        RelocInfo::Mode mode = is_global
+                               ? RelocInfo::CODE_TARGET_CONTEXT
+                               : RelocInfo::CODE_TARGET;
+        Result answer = cgen_->frame()->CallLoadIC(mode);
+        // A test rax instruction following the call signals that the
+        // inobject property case was inlined.  Ensure that there is not
+        // a test rax instruction here.
+        __ nop();
+        cgen_->frame()->Push(&answer);
+      } else {
+        // Inline the inobject property case.
+        Comment cmnt(masm, "[ Inlined named property load");
+        Result receiver = cgen_->frame()->Pop();
+        receiver.ToRegister();
+        Result value = cgen_->allocator()->Allocate();
+        ASSERT(value.is_valid());
+        // Cannot use r12 for receiver, because that changes
+        // the distance between a call and a fixup location,
+        // due to a special encoding of r12 as r/m in a ModR/M byte.
+        if (receiver.reg().is(r12)) {
+          // Swap receiver and value.
+          __ movq(value.reg(), receiver.reg());
+          Result temp = receiver;
+          receiver = value;
+          value = temp;
+          cgen_->frame()->Spill(value.reg());  // r12 may have been shared.
+        }
+
+        DeferredReferenceGetNamedValue* deferred =
+            new DeferredReferenceGetNamedValue(value.reg(),
+                                               receiver.reg(),
+                                               GetName());
+
+        // Check that the receiver is a heap object.
+        __ JumpIfSmi(receiver.reg(), deferred->entry_label());
+
+        __ bind(deferred->patch_site());
+        // This is the map check instruction that will be patched (so we can't
+        // use the double underscore macro that may insert instructions).
+        // Initially use an invalid map to force a failure.
+        masm->Move(kScratchRegister, Factory::null_value());
+        masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+                   kScratchRegister);
+        // This branch is always a forwards branch so it's always a fixed
+        // size which allows the assert below to succeed and patching to work.
+        // Don't use deferred->Branch(...), since that might add coverage code.
+        masm->j(not_equal, deferred->entry_label());
+
+        // The delta from the patch label to the load offset must be
+        // statically known.
+        ASSERT(masm->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
+               LoadIC::kOffsetToLoadInstruction);
+        // The initial (invalid) offset has to be large enough to force
+        // a 32-bit instruction encoding to allow patching with an
+        // arbitrary offset.  Use kMaxInt (minus kHeapObjectTag).
+        int offset = kMaxInt;
+        masm->movq(value.reg(), FieldOperand(receiver.reg(), offset));
+
+        __ IncrementCounter(&Counters::named_load_inline, 1);
+        deferred->BindExit();
+        cgen_->frame()->Push(&receiver);
+        cgen_->frame()->Push(&value);
+      }
+      break;
+    }
+
+    case KEYED: {
+      // TODO(1241834): Make sure that it is safe to ignore the
+      // distinction between expressions in a typeof and not in a typeof.
+      Comment cmnt(masm, "[ Load from keyed Property");
+      Variable* var = expression_->AsVariableProxy()->AsVariable();
+      bool is_global = var != NULL;
+      ASSERT(!is_global || var->is_global());
+
+      // Inline array load code if inside of a loop.  We do not know
+      // the receiver map yet, so we initially generate the code with
+      // a check against an invalid map.  In the inline cache code, we
+      // patch the map check if appropriate.
+      if (cgen_->loop_nesting() > 0) {
+        Comment cmnt(masm, "[ Inlined load from keyed Property");
+
+        Result key = cgen_->frame()->Pop();
+        Result receiver = cgen_->frame()->Pop();
+        key.ToRegister();
+        receiver.ToRegister();
+
+        // Use a fresh temporary to load the elements without destroying
+        // the receiver which is needed for the deferred slow case.
+        Result elements = cgen_->allocator()->Allocate();
+        ASSERT(elements.is_valid());
+
+        // Use a fresh temporary for the index and later the loaded
+        // value.
+        Result index = cgen_->allocator()->Allocate();
+        ASSERT(index.is_valid());
+
+        DeferredReferenceGetKeyedValue* deferred =
+            new DeferredReferenceGetKeyedValue(index.reg(),
+                                               receiver.reg(),
+                                               key.reg(),
+                                               is_global);
+
+        // Check that the receiver is not a smi (only needed if this
+        // is not a load from the global context) and that it has the
+        // expected map.
+        if (!is_global) {
+          __ JumpIfSmi(receiver.reg(), deferred->entry_label());
+        }
+
+        // Initially, use an invalid map. The map is patched in the IC
+        // initialization code.
+        __ bind(deferred->patch_site());
+        // Use masm-> here instead of the double underscore macro since extra
+        // coverage code can interfere with the patching.
+        masm->movq(kScratchRegister, Factory::null_value(),
+                   RelocInfo::EMBEDDED_OBJECT);
+        masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+                   kScratchRegister);
+        deferred->Branch(not_equal);
+
+        // Check that the key is a non-negative smi.
+        __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
+
+        // Get the elements array from the receiver and check that it
+        // is not a dictionary.
+        __ movq(elements.reg(),
+                FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+        __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
+               Factory::fixed_array_map());
+        deferred->Branch(not_equal);
+
+        // Shift the key to get the actual index value and check that
+        // it is within bounds.
+        __ SmiToInteger32(index.reg(), key.reg());
+        __ cmpl(index.reg(),
+                FieldOperand(elements.reg(), FixedArray::kLengthOffset));
+        deferred->Branch(above_equal);
+
+        // The index register holds the un-smi-tagged key. It has been
+        // zero-extended to 64 bits, so it can be used directly as an index
+        // in the operand below.
+        // Load and check that the result is not the hole.  We could
+        // reuse the index or elements register for the value.
+        //
+        // TODO(206): Consider whether it makes sense to try some
+        // heuristic about which register to reuse.  For example, if
+        // one is rax, then we can reuse that one because the value
+        // coming from the deferred code will be in rax.
+        Result value = index;
+        __ movq(value.reg(),
+                Operand(elements.reg(),
+                        index.reg(),
+                        times_pointer_size,
+                        FixedArray::kHeaderSize - kHeapObjectTag));
+        elements.Unuse();
+        index.Unuse();
+        __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
+        deferred->Branch(equal);
+        __ IncrementCounter(&Counters::keyed_load_inline, 1);
+
+        deferred->BindExit();
+        // Restore the receiver and key to the frame and push the
+        // result on top of it.
+        cgen_->frame()->Push(&receiver);
+        cgen_->frame()->Push(&key);
+        cgen_->frame()->Push(&value);
+
+      } else {
+        Comment cmnt(masm, "[ Load from keyed Property");
+        RelocInfo::Mode mode = is_global
+                               ? RelocInfo::CODE_TARGET_CONTEXT
+                               : RelocInfo::CODE_TARGET;
+        Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
+        // Make sure that we do not have a test instruction after the
+        // call.  A test instruction after the call is used to
+        // indicate that we have generated an inline version of the
+        // keyed load.  The explicit nop instruction is here because
+        // the push that follows might be peep-hole optimized away.
+        __ nop();
+        cgen_->frame()->Push(&answer);
+      }
+      break;
+    }
+
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void Reference::TakeValue(TypeofState typeof_state) {
+  // TODO(X64): This function is completely architecture independent. Move
+  // it somewhere shared.
+
+  // For non-constant frame-allocated slots, we invalidate the value in the
+  // slot.  For all others, we fall back on GetValue.
+  ASSERT(!cgen_->in_spilled_code());
+  ASSERT(!is_illegal());
+  if (type_ != SLOT) {
+    GetValue(typeof_state);
+    return;
+  }
+
+  Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+  ASSERT(slot != NULL);
+  if (slot->type() == Slot::LOOKUP ||
+      slot->type() == Slot::CONTEXT ||
+      slot->var()->mode() == Variable::CONST ||
+      slot->is_arguments()) {
+    GetValue(typeof_state);
+    return;
+  }
+
+  // Only non-constant, frame-allocated parameters and locals can reach
+  // here.  Be careful not to use the optimizations for arguments
+  // object access since it may not have been initialized yet.
+  ASSERT(!slot->is_arguments());
+  if (slot->type() == Slot::PARAMETER) {
+    cgen_->frame()->TakeParameterAt(slot->index());
+  } else {
+    ASSERT(slot->type() == Slot::LOCAL);
+    cgen_->frame()->TakeLocalAt(slot->index());
+  }
+}
+
+
+void Reference::SetValue(InitState init_state) {
+  ASSERT(cgen_->HasValidEntryRegisters());
+  ASSERT(!is_illegal());
+  MacroAssembler* masm = cgen_->masm();
+  switch (type_) {
+    case SLOT: {
+      Comment cmnt(masm, "[ Store to Slot");
+      Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+      ASSERT(slot != NULL);
+      cgen_->StoreToSlot(slot, init_state);
+      break;
+    }
+
+    case NAMED: {
+      Comment cmnt(masm, "[ Store to named Property");
+      cgen_->frame()->Push(GetName());
+      Result answer = cgen_->frame()->CallStoreIC();
+      cgen_->frame()->Push(&answer);
+      break;
+    }
+
+    case KEYED: {
+      Comment cmnt(masm, "[ Store to keyed Property");
+
+      // Generate inlined version of the keyed store if the code is in
+      // a loop and the key is likely to be a smi.
+      Property* property = expression()->AsProperty();
+      ASSERT(property != NULL);
+      SmiAnalysis* key_smi_analysis = property->key()->type();
+
+      if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
+        Comment cmnt(masm, "[ Inlined store to keyed Property");
+
+        // Get the receiver, key and value into registers.
+        Result value = cgen_->frame()->Pop();
+        Result key = cgen_->frame()->Pop();
+        Result receiver = cgen_->frame()->Pop();
+
+        Result tmp = cgen_->allocator_->Allocate();
+        ASSERT(tmp.is_valid());
+
+        // Determine whether the value is a constant before putting it
+        // in a register.
+        bool value_is_constant = value.is_constant();
+
+        // Make sure that value, key and receiver are in registers.
+        value.ToRegister();
+        key.ToRegister();
+        receiver.ToRegister();
+
+        DeferredReferenceSetKeyedValue* deferred =
+            new DeferredReferenceSetKeyedValue(value.reg(),
+                                               key.reg(),
+                                               receiver.reg());
+
+        // Check that the value is a smi if it is not a constant.
+        // We can skip the write barrier for smis and constants.
+        if (!value_is_constant) {
+          __ JumpIfNotSmi(value.reg(), deferred->entry_label());
+        }
+
+        // Check that the key is a non-negative smi.
+        __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
+        // Ensure that the smi is zero-extended.  This is not guaranteed.
+        __ movl(key.reg(), key.reg());
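+        // (A 32-bit move clears the upper half of the 64-bit register.)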
+
+        // Check that the receiver is not a smi.
+        __ JumpIfSmi(receiver.reg(), deferred->entry_label());
+
+        // Check that the receiver is a JSArray.
+        __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
+        deferred->Branch(not_equal);
+
+        // Check that the key is within bounds.  Both the key and the
+        // length of the JSArray are smis, so compare only low 32 bits.
+        __ cmpl(key.reg(),
+                FieldOperand(receiver.reg(), JSArray::kLengthOffset));
+        deferred->Branch(greater_equal);
+
+        // Get the elements array from the receiver and check that it
+        // is a flat array (not a dictionary).
+        __ movq(tmp.reg(),
+                FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+        // Bind the deferred code patch site to be able to locate the
+        // fixed array map comparison.  When debugging, we patch this
+        // comparison to always fail so that we will hit the IC call
+        // in the deferred code which will allow the debugger to
+        // break for fast case stores.
+        __ bind(deferred->patch_site());
+        // Avoid using __ to ensure the distance from patch_site
+        // to the map address is always the same.
+        masm->movq(kScratchRegister, Factory::fixed_array_map(),
+                   RelocInfo::EMBEDDED_OBJECT);
+        __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
+                kScratchRegister);
+        deferred->Branch(not_equal);
+
+        // Store the value.
+        SmiIndex index =
+            masm->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
+        __ movq(Operand(tmp.reg(),
+                        index.reg,
+                        index.scale,
+                        FixedArray::kHeaderSize - kHeapObjectTag),
+                value.reg());
+        __ IncrementCounter(&Counters::keyed_store_inline, 1);
+
+        deferred->BindExit();
+
+        cgen_->frame()->Push(&receiver);
+        cgen_->frame()->Push(&key);
+        cgen_->frame()->Push(&value);
+      } else {
+        Result answer = cgen_->frame()->CallKeyedStoreIC();
+        // Make sure that we do not have a test instruction after the
+        // call.  A test instruction after the call is used to
+        // indicate that we have generated an inline version of the
+        // keyed store.
+        masm->nop();
+        cgen_->frame()->Push(&answer);
+      }
+      break;
+    }
+
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void ToBooleanStub::Generate(MacroAssembler* masm) {
+  Label false_result, true_result, not_string;
+  __ movq(rax, Operand(rsp, 1 * kPointerSize));
+
+  // 'null' => false.
+  __ CompareRoot(rax, Heap::kNullValueRootIndex);
+  __ j(equal, &false_result);
+
+  // Get the map and type of the heap object.
+  // We don't use CmpObjectType because we manipulate the type field.
+  __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+  __ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset));
+
+  // Undetectable => false.
+  __ movzxbq(rbx, FieldOperand(rdx, Map::kBitFieldOffset));
+  __ and_(rbx, Immediate(1 << Map::kIsUndetectable));
+  __ j(not_zero, &false_result);
+
+  // JavaScript object => true.
+  __ cmpq(rcx, Immediate(FIRST_JS_OBJECT_TYPE));
+  __ j(above_equal, &true_result);
+
+  // String value => false iff empty.
+  __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
+  __ j(above_equal, &not_string);
+  __ and_(rcx, Immediate(kStringSizeMask));
+  __ cmpq(rcx, Immediate(kShortStringTag));
+  __ j(not_equal, &true_result);  // Empty string is always short.
+  __ movl(rdx, FieldOperand(rax, String::kLengthOffset));
+  __ shr(rdx, Immediate(String::kShortLengthShift));
+  __ j(zero, &false_result);
+  __ jmp(&true_result);
+
+  __ bind(&not_string);
+  // HeapNumber => false iff +0, -0, or NaN.
+  // These three cases set C3 when compared to zero in the FPU.
+  __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
+  __ j(not_equal, &true_result);
+  // TODO(x64): Don't use fp stack, use MMX registers?
+  __ fldz();  // Load zero onto fp stack
+  // Load heap-number double value onto fp stack
+  __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
+  __ fucompp();  // Compare and pop both values.
+  __ movq(kScratchRegister, rax);
+  __ fnstsw_ax();  // Store fp status word in ax, no checking for exceptions.
+  __ testl(rax, Immediate(0x4000));  // Test FP condition flag C3, bit 14.
+  __ movq(rax, kScratchRegister);
+  __ j(not_zero, &false_result);
+  // Fall through to |true_result|.
+
+  // Return 1/0 for true/false in rax.
+  __ bind(&true_result);
+  __ movq(rax, Immediate(1));
+  __ ret(1 * kPointerSize);
+  __ bind(&false_result);
+  __ xor_(rax, rax);
+  __ ret(1 * kPointerSize);
+}
+
+
+bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
+  // TODO(X64): This method is identical to the ia32 version.
+  // Either find a reason to change it, or move it somewhere where it can be
+  // shared. (Notice: It assumes that a Smi can fit in an int).
+
+  Object* answer_object = Heap::undefined_value();
+  switch (op) {
+    case Token::ADD:
+      if (Smi::IsValid(left + right)) {
+        answer_object = Smi::FromInt(left + right);
+      }
+      break;
+    case Token::SUB:
+      if (Smi::IsValid(left - right)) {
+        answer_object = Smi::FromInt(left - right);
+      }
+      break;
+    case Token::MUL: {
+        double answer = static_cast<double>(left) * right;
+        if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
+          // If the product is zero and the non-zero factor is negative,
+          // the spec requires us to return floating point negative zero.
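+          // E.g. folding 0 * -5 is skipped here (answer == 0 and
+          // left + right == -5 < 0), so the generic code computes -0.0.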
+          if (answer != 0 || (left + right) >= 0) {
+            answer_object = Smi::FromInt(static_cast<int>(answer));
+          }
+        }
+      }
+      break;
+    case Token::DIV:
+    case Token::MOD:
+      break;
+    case Token::BIT_OR:
+      answer_object = Smi::FromInt(left | right);
+      break;
+    case Token::BIT_AND:
+      answer_object = Smi::FromInt(left & right);
+      break;
+    case Token::BIT_XOR:
+      answer_object = Smi::FromInt(left ^ right);
+      break;
+
+    case Token::SHL: {
+        int shift_amount = right & 0x1F;
+        if (Smi::IsValid(left << shift_amount)) {
+          answer_object = Smi::FromInt(left << shift_amount);
+        }
+        break;
+      }
+    case Token::SHR: {
+        int shift_amount = right & 0x1F;
+        unsigned int unsigned_left = left;
+        unsigned_left >>= shift_amount;
+        if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
+          answer_object = Smi::FromInt(unsigned_left);
+        }
+        break;
+      }
+    case Token::SAR: {
+        int shift_amount = right & 0x1F;
+        unsigned int unsigned_left = left;
+        if (left < 0) {
+          // Perform arithmetic shift of a negative number by
+          // complementing number, logical shifting, complementing again.
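+          // E.g. left == -5, shift_amount == 1: ~(-5) == 4, 4 >> 1 == 2,
+          // and ~2 == -3, which equals -5 >> 1 with sign extension.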
+          unsigned_left = ~unsigned_left;
+          unsigned_left >>= shift_amount;
+          unsigned_left = ~unsigned_left;
+        } else {
+          unsigned_left >>= shift_amount;
+        }
+        ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
+        answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
+        break;
+      }
+    default:
+      UNREACHABLE();
+      break;
+  }
+  if (answer_object == Heap::undefined_value()) {
+    return false;
+  }
+  frame_->Push(Handle<Object>(answer_object));
+  return true;
+}
+
+
+// End of CodeGenerator implementation.
+
+void UnarySubStub::Generate(MacroAssembler* masm) {
+  Label slow;
+  Label done;
+  Label try_float;
+  Label special;
+  // Check whether the value is a smi.
+  __ JumpIfNotSmi(rax, &try_float);
+
+  // Enter runtime system if the value of the smi is zero
+  // to make sure that we switch between 0 and -0.
+  // Also enter it if the value of the smi is Smi::kMinValue
+  __ testl(rax, Immediate(0x7FFFFFFE));
+  __ j(zero, &special);
+  __ negl(rax);
+  __ jmp(&done);
+
+  __ bind(&special);
+  // Either zero or Smi::kMinValue (-0x40000000), neither of which
+  // becomes a smi when negated.
+  __ testl(rax, rax);
+  __ j(not_zero, &slow);
+  __ Move(rax, Factory::minus_zero_value());
+  __ jmp(&done);
+
+  // Enter runtime system.
+  __ bind(&slow);
+  __ pop(rcx);  // pop return address
+  __ push(rax);
+  __ push(rcx);  // push return address
+  __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+  __ jmp(&done);
+
+  // Try floating point case.
+  __ bind(&try_float);
+  __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+  __ Cmp(rdx, Factory::heap_number_map());
+  __ j(not_equal, &slow);
+  // Operand is a float, negate its value by flipping sign bit.
+  __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
+  __ movq(kScratchRegister, Immediate(0x01));
+  __ shl(kScratchRegister, Immediate(63));
+  __ xor_(rdx, kScratchRegister);  // Flip sign.
+  // rdx is value to store.
+  if (overwrite_) {
+    __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
+  } else {
+    FloatingPointHelper::AllocateHeapNumber(masm, &slow, rbx, rcx);
+    // rcx: allocated 'empty' number
+    __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
+    __ movq(rax, rcx);
+  }
+
+  __ bind(&done);
+  __ StubReturn(1);
+}
+
+
+void CompareStub::Generate(MacroAssembler* masm) {
+  Label call_builtin, done;
+
+  // NOTICE! This code is only reached after a smi-fast-case check, so
+  // it is certain that at least one operand isn't a smi.
+
+  if (cc_ == equal) {  // Both strict and non-strict.
+    Label slow;  // Fallthrough label.
+    // Equality is almost reflexive (everything but NaN), so start by testing
+    // for "identity and not NaN".
+    {
+      Label not_identical;
+      __ cmpq(rax, rdx);
+      __ j(not_equal, &not_identical);
+      // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+      // so we do the second best thing - test it ourselves.
+
+      Label return_equal;
+      Label heap_number;
+      // If it's not a heap number, then return equal.
+      __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
+             Factory::heap_number_map());
+      __ j(equal, &heap_number);
+      __ bind(&return_equal);
+      __ xor_(rax, rax);
+      __ ret(0);
+
+      __ bind(&heap_number);
+      // It is a heap number, so return non-equal if it's NaN and equal if it's
+      // not NaN.
+      // The representation of NaN values has all exponent bits (52..62) set,
+      // and not all mantissa bits (0..51) clear.
+      // Read double representation into rax.
+      __ movq(rbx, V8_UINT64_C(0x7ff0000000000000), RelocInfo::NONE);
+      __ movq(rax, FieldOperand(rdx, HeapNumber::kValueOffset));
+      // Test that exponent bits are all set.
+      __ or_(rbx, rax);
+      __ cmpq(rbx, rax);
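+      // rbx equals rax only if rax already had every exponent bit set.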
+      __ j(not_equal, &return_equal);
+      // Shift out the sign bit and all 11 exponent bits, retaining
+      // only the mantissa.
+      __ shl(rax, Immediate(12));
+      // If all bits in the mantissa are zero the number is Infinity, and
+      // we return zero.  Otherwise it is a NaN, and we return non-zero.
+      // We cannot just return rax because only eax is tested on return.
+      __ setcc(not_zero, rax);
+      __ ret(0);
+
+      __ bind(&not_identical);
+    }
+
+    // If we're doing a strict equality comparison, we don't have to do
+    // type conversion, so we generate code to do fast comparison for objects
+    // and oddballs. Non-smi numbers and strings still go through the usual
+    // slow-case code.
+    if (strict_) {
+      // If either is a Smi (we know that not both are), then they can only
+      // be equal if the other is a HeapNumber. If so, use the slow case.
+      {
+        Label not_smis;
+        __ SelectNonSmi(rbx, rax, rdx, &not_smis);
+
+        // Check if the non-smi operand is a heap number.
+        __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+               Factory::heap_number_map());
+        // If heap number, handle it in the slow case.
+        __ j(equal, &slow);
+        // Return non-equal.  ebx (the lower half of rbx) is not zero.
+        __ movq(rax, rbx);
+        __ ret(0);
+
+        __ bind(&not_smis);
+      }
+
+      // If either operand is a JSObject or an oddball value, then they are not
+      // equal since their pointers are different.
+      // There is no test for undetectability in strict equality.
+
+      // If the first object is a JS object, we have done pointer comparison.
+      ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+      Label first_non_object;
+      __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+      __ j(below, &first_non_object);
+      // Return non-zero (eax (not rax) is not zero)
+      Label return_not_equal;
+      ASSERT(kHeapObjectTag != 0);
+      __ bind(&return_not_equal);
+      __ ret(0);
+
+      __ bind(&first_non_object);
+      // Check for oddballs: true, false, null, undefined.
+      __ CmpInstanceType(rcx, ODDBALL_TYPE);
+      __ j(equal, &return_not_equal);
+
+      __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
+      __ j(above_equal, &return_not_equal);
+
+      // Check for oddballs: true, false, null, undefined.
+      __ CmpInstanceType(rcx, ODDBALL_TYPE);
+      __ j(equal, &return_not_equal);
+
+      // Fall through to the general case.
+    }
+    __ bind(&slow);
+  }
+
+  // Push arguments below the return address to prepare jump to builtin.
+  __ pop(rcx);
+  __ push(rax);
+  __ push(rdx);
+  __ push(rcx);
+
+  // Inlined floating point compare.
+  // Call builtin if operands are not floating point or smi.
+  Label check_for_symbols;
+  // Push arguments on stack, for helper functions.
+  FloatingPointHelper::CheckFloatOperands(masm, &check_for_symbols);
+  FloatingPointHelper::LoadFloatOperands(masm, rax, rdx);
+  __ FCmp();
+
+  // Jump to builtin for NaN.
+  __ j(parity_even, &call_builtin);
+
+  // TODO(1243847): Use cmov below once CpuFeatures are properly hooked up.
+  Label below_lbl, above_lbl;
+  // use rdx, rax to convert unsigned to signed comparison
+  __ j(below, &below_lbl);
+  __ j(above, &above_lbl);
+
+  __ xor_(rax, rax);  // equal
+  __ ret(2 * kPointerSize);
+
+  __ bind(&below_lbl);
+  __ movq(rax, Immediate(-1));
+  __ ret(2 * kPointerSize);
+
+  __ bind(&above_lbl);
+  __ movq(rax, Immediate(1));
+  __ ret(2 * kPointerSize);  // rax, rdx were pushed
+
+  // Fast negative check for symbol-to-symbol equality.
+  __ bind(&check_for_symbols);
+  if (cc_ == equal) {
+    BranchIfNonSymbol(masm, &call_builtin, rax, kScratchRegister);
+    BranchIfNonSymbol(masm, &call_builtin, rdx, kScratchRegister);
+
+    // We've already checked for object identity, so if both operands
+    // are symbols they aren't equal. Register eax (not rax) already holds a
+    // non-zero value, which indicates not equal, so just return.
+    __ ret(2 * kPointerSize);
+  }
+
+  __ bind(&call_builtin);
+  // must swap argument order
+  __ pop(rcx);
+  __ pop(rdx);
+  __ pop(rax);
+  __ push(rdx);
+  __ push(rax);
+
+  // Figure out which native to call and setup the arguments.
+  Builtins::JavaScript builtin;
+  if (cc_ == equal) {
+    builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+  } else {
+    builtin = Builtins::COMPARE;
+    int ncr;  // NaN compare result
+    if (cc_ == less || cc_ == less_equal) {
+      ncr = GREATER;
+    } else {
+      ASSERT(cc_ == greater || cc_ == greater_equal);  // remaining cases
+      ncr = LESS;
+    }
+    __ push(Immediate(Smi::FromInt(ncr)));
+  }
+
+  // Restore return address on the stack.
+  __ push(rcx);
+
+  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+  // tagged as a small integer.
+  __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+}
+
+
+void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
+                                    Label* label,
+                                    Register object,
+                                    Register scratch) {
+  __ JumpIfSmi(object, label);
+  __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
+  __ movzxbq(scratch,
+             FieldOperand(scratch, Map::kInstanceTypeOffset));
+  __ and_(scratch, Immediate(kIsSymbolMask | kIsNotStringMask));
+  __ cmpb(scratch, Immediate(kSymbolTag | kStringTag));
+  __ j(not_equal, label);
+}
+
+
+// Call the function just below TOS on the stack with the given
+// arguments. The receiver is the TOS.
+void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
+                                      int position) {
+  // Push the arguments ("left-to-right") on the stack.
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Load(args->at(i));
+  }
+
+  // Record the position for debugging purposes.
+  CodeForSourcePosition(position);
+
+  // Use the shared code stub to call the function.
+  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+  CallFunctionStub call_function(arg_count, in_loop);
+  Result answer = frame_->CallStub(&call_function, arg_count + 1);
+  // Restore context and replace function on the stack with the
+  // result of the stub invocation.
+  frame_->RestoreContextRegister();
+  frame_->SetElementAt(0, &answer);
+}
+
+
+void InstanceofStub::Generate(MacroAssembler* masm) {
+  // Implements "value instanceof function" operator.
+  // Expected input state:
+  //   rsp[0] : return address
+  //   rsp[1] : function pointer
+  //   rsp[2] : value
+
+  // Get the object - go slow case if it's a smi.
+  Label slow;
+  __ movq(rax, Operand(rsp, 2 * kPointerSize));
+  __ JumpIfSmi(rax, &slow);
+
+  // Check that the left hand is a JS object. Leave its map in rax.
+  __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
+  __ j(below, &slow);
+  __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
+  __ j(above, &slow);
+
+  // Get the prototype of the function.
+  __ movq(rdx, Operand(rsp, 1 * kPointerSize));
+  __ TryGetFunctionPrototype(rdx, rbx, &slow);
+
+  // Check that the function prototype is a JS object.
+  __ JumpIfSmi(rbx, &slow);
+  __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
+  __ j(below, &slow);
+  __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
+  __ j(above, &slow);
+
+  // Register mapping: rax is object map and rbx is function prototype.
+  __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
+
+  // Loop through the prototype chain looking for the function prototype.
+  Label loop, is_instance, is_not_instance;
+  __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
+  __ bind(&loop);
+  __ cmpq(rcx, rbx);
+  __ j(equal, &is_instance);
+  __ cmpq(rcx, kScratchRegister);
+  __ j(equal, &is_not_instance);
+  __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+  __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
+  __ jmp(&loop);
+
+  __ bind(&is_instance);
+  __ xor_(rax, rax);
+  __ ret(2 * kPointerSize);
+
+  __ bind(&is_not_instance);
+  __ movq(rax, Immediate(Smi::FromInt(1)));
+  __ ret(2 * kPointerSize);
+
+  // Slow-case: Go through the JavaScript implementation.
+  __ bind(&slow);
+  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
+}
+
+
+void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+  // The displacement is used for skipping the return address and the
+  // frame pointer on the stack. It is the offset of the last
+  // parameter (if any) relative to the frame pointer.
+  static const int kDisplacement = 2 * kPointerSize;
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label runtime;
+  __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+  __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+  __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(not_equal, &runtime);
+  // Value in rcx is Smi encoded.
+
+  // Patch the arguments.length and the parameters pointer.
+  __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+  SmiIndex index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
+  __ lea(rdx, Operand(rdx, index.reg, index.scale, kDisplacement));
+  __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+
+  // Do the runtime call to allocate the arguments object.
+  __ bind(&runtime);
+  Runtime::Function* f = Runtime::FunctionForId(Runtime::kNewArgumentsFast);
+  __ TailCallRuntime(ExternalReference(f), 3, f->result_size);
+}
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+  // The key is in rdx and the parameter count is in rax.
+
+  // The displacement is used for skipping the frame pointer on the
+  // stack. It is the offset of the last parameter (if any) relative
+  // to the frame pointer.
+  static const int kDisplacement = 1 * kPointerSize;
+
+  // Check that the key is a smi.
+  Label slow;
+  __ JumpIfNotSmi(rdx, &slow);
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label adaptor;
+  __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+  __ movq(rcx, Operand(rbx, StandardFrameConstants::kContextOffset));
+  __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(equal, &adaptor);
+
+  // Check index against formal parameters count limit passed in
+  // through register rax. Use unsigned comparison to get negative
+  // check for free.
+  __ cmpq(rdx, rax);
+  __ j(above_equal, &slow);
+
+  // Read the argument from the stack and return it.
+  SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
+  __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
+  index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
+  __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
+  __ Ret();
+
+  // Arguments adaptor case: Check index against actual arguments
+  // limit found in the arguments adaptor frame. Use unsigned
+  // comparison to get negative check for free.
+  __ bind(&adaptor);
+  __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ cmpq(rdx, rcx);
+  __ j(above_equal, &slow);
+
+  // Read the argument from the stack and return it.
+  index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
+  __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
+  index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
+  __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
+  __ Ret();
+
+  // Slow-case: Handle non-smi or out-of-bounds access to arguments
+  // by calling the runtime system.
+  __ bind(&slow);
+  __ pop(rbx);  // Return address.
+  __ push(rdx);
+  __ push(rbx);
+  Runtime::Function* f =
+      Runtime::FunctionForId(Runtime::kGetArgumentsProperty);
+  __ TailCallRuntime(ExternalReference(f), 1, f->result_size);
+}
+
+
+void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
+  // Check if the calling frame is an arguments adaptor frame.
+  Label adaptor;
+  __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+  __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+  __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(equal, &adaptor);
+
+  // Nothing to do: The formal number of parameters has already been
+  // passed in register rax by calling function. Just return it.
+  __ ret(0);
+
+  // Arguments adaptor case: Read the arguments length from the
+  // adaptor frame and return it.
+  __ bind(&adaptor);
+  __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ ret(0);
+}
+
+
+int CEntryStub::MinorKey() {
+  ASSERT(result_size_ <= 2);
+#ifdef _WIN64
+  // Simple results returned in rax (using default code).
+  // Complex results must be written to address passed as first argument.
+  // Use even numbers for minor keys, reserving the odd numbers for
+  // CEntryDebugBreakStub.
+  return (result_size_ < 2) ? 0 : result_size_ * 2;
+#else
+  // Single results returned in rax (both AMD64 and Win64 calling conventions)
+  // and a struct of two pointers in rax+rdx (AMD64 calling convention only)
+  // by default.
+  return 0;
+#endif
+}
+
+
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+  // Check that the stack contains the next handler, frame pointer, state
+  // and return address, in that order.
+  ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
+            StackHandlerConstants::kStateOffset);
+  ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
+            StackHandlerConstants::kPCOffset);
+
+  ExternalReference handler_address(Top::k_handler_address);
+  __ movq(kScratchRegister, handler_address);
+  __ movq(rsp, Operand(kScratchRegister, 0));
+  // get next in chain
+  __ pop(rcx);
+  __ movq(Operand(kScratchRegister, 0), rcx);
+  __ pop(rbp);  // pop frame pointer
+  __ pop(rdx);  // remove state
+
+  // Before returning we restore the context from the frame pointer if not NULL.
+  // The frame pointer is NULL in the exception handler of a JS entry frame.
+  __ xor_(rsi, rsi);  // tentatively set context pointer to NULL
+  Label skip;
+  __ cmpq(rbp, Immediate(0));
+  __ j(equal, &skip);
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  __ bind(&skip);
+  __ ret(0);
+}
+
+
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+                              Label* throw_normal_exception,
+                              Label* throw_termination_exception,
+                              Label* throw_out_of_memory_exception,
+                              StackFrame::Type frame_type,
+                              bool do_gc,
+                              bool always_allocate_scope) {
+  // rax: result parameter for PerformGC, if any.
+  // rbx: pointer to C function  (C callee-saved).
+  // rbp: frame pointer  (restored after C call).
+  // rsp: stack pointer  (restored after C call).
+  // r14: number of arguments including receiver (C callee-saved).
+  // r15: pointer to the first argument (C callee-saved).
+  //      This pointer is reused in LeaveExitFrame(), so it is stored in a
+  //      callee-saved register.
+
+  if (do_gc) {
+    // Pass failure code returned from last attempt as first argument to GC.
+#ifdef _WIN64
+    __ movq(rcx, rax);
+#else  // ! defined(_WIN64)
+    __ movq(rdi, rax);
+#endif
+    __ movq(kScratchRegister,
+            FUNCTION_ADDR(Runtime::PerformGC),
+            RelocInfo::RUNTIME_ENTRY);
+    __ call(kScratchRegister);
+  }
+
+  ExternalReference scope_depth =
+      ExternalReference::heap_always_allocate_scope_depth();
+  if (always_allocate_scope) {
+    __ movq(kScratchRegister, scope_depth);
+    __ incl(Operand(kScratchRegister, 0));
+  }
+
+  // Call C function.
+#ifdef _WIN64
+  // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
+  // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
+  __ movq(Operand(rsp, 4 * kPointerSize), r14);  // argc.
+  __ movq(Operand(rsp, 5 * kPointerSize), r15);  // argv.
+  if (result_size_ < 2) {
+    // Pass a pointer to the Arguments object as the first argument.
+    // Return result in single register (rax).
+    __ lea(rcx, Operand(rsp, 4 * kPointerSize));
+  } else {
+    ASSERT_EQ(2, result_size_);
+    // Pass a pointer to the result location as the first argument.
+    __ lea(rcx, Operand(rsp, 6 * kPointerSize));
+    // Pass a pointer to the Arguments object as the second argument.
+    __ lea(rdx, Operand(rsp, 4 * kPointerSize));
+  }
+
+#else  // ! defined(_WIN64)
+  // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
+  __ movq(rdi, r14);  // argc.
+  __ movq(rsi, r15);  // argv.
+#endif
+  __ call(rbx);
+  // Result is in rax - do not destroy this register!
+
+  if (always_allocate_scope) {
+    __ movq(kScratchRegister, scope_depth);
+    __ decl(Operand(kScratchRegister, 0));
+  }
+
+  // Check for failure result.
+  Label failure_returned;
+  ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+  __ lea(rcx, Operand(rax, 1));
+  // Lower 2 bits of rcx are 0 iff rax has failure tag.
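+  // (Failure objects have both low tag bits set, so adding one clears them.)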
+  __ testl(rcx, Immediate(kFailureTagMask));
+  __ j(zero, &failure_returned);
+
+  // Exit the JavaScript to C++ exit frame.
+  __ LeaveExitFrame(frame_type, result_size_);
+  __ ret(0);
+
+  // Handling of failure.
+  __ bind(&failure_returned);
+
+  Label retry;
+  // If the returned exception is RETRY_AFTER_GC continue at retry label
+  ASSERT(Failure::RETRY_AFTER_GC == 0);
+  __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
+  __ j(zero, &retry);
+
+  // Special handling of out of memory exceptions.
+  __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
+  __ cmpq(rax, kScratchRegister);
+  __ j(equal, throw_out_of_memory_exception);
+
+  // Retrieve the pending exception and clear the variable.
+  ExternalReference pending_exception_address(Top::k_pending_exception_address);
+  __ movq(kScratchRegister, pending_exception_address);
+  __ movq(rax, Operand(kScratchRegister, 0));
+  __ movq(rdx, ExternalReference::the_hole_value_location());
+  __ movq(rdx, Operand(rdx, 0));
+  __ movq(Operand(kScratchRegister, 0), rdx);
+
+  // Special handling of termination exceptions which are uncatchable
+  // by javascript code.
+  __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
+  __ j(equal, throw_termination_exception);
+
+  // Handle normal exception.
+  __ jmp(throw_normal_exception);
+
+  // Retry.
+  __ bind(&retry);
+}
+
+
+void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
+                                          UncatchableExceptionType type) {
+  // Fetch top stack handler.
+  ExternalReference handler_address(Top::k_handler_address);
+  __ movq(kScratchRegister, handler_address);
+  __ movq(rsp, Operand(kScratchRegister, 0));
+
+  // Unwind the handlers until the ENTRY handler is found.
+  Label loop, done;
+  __ bind(&loop);
+  // Load the type of the current stack handler.
+  const int kStateOffset = StackHandlerConstants::kStateOffset;
+  __ cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
+  __ j(equal, &done);
+  // Fetch the next handler in the list.
+  const int kNextOffset = StackHandlerConstants::kNextOffset;
+  __ movq(rsp, Operand(rsp, kNextOffset));
+  __ jmp(&loop);
+  __ bind(&done);
+
+  // Set the top handler address to the next handler past the current
+  // ENTRY handler.
+  __ movq(kScratchRegister, handler_address);
+  __ pop(Operand(kScratchRegister, 0));
+
+  if (type == OUT_OF_MEMORY) {
+    // Set external caught exception to false.
+    ExternalReference external_caught(Top::k_external_caught_exception_address);
+    __ movq(rax, Immediate(false));
+    __ store_rax(external_caught);
+
+    // Set pending exception and rax to out of memory exception.
+    ExternalReference pending_exception(Top::k_pending_exception_address);
+    __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
+    __ store_rax(pending_exception);
+  }
+
+  // Clear the context pointer.
+  __ xor_(rsi, rsi);
+
+  // Restore registers from handler.
+  ASSERT_EQ(StackHandlerConstants::kNextOffset + kPointerSize,
+            StackHandlerConstants::kFPOffset);
+  __ pop(rbp);  // FP
+  ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
+            StackHandlerConstants::kStateOffset);
+  __ pop(rdx);  // State
+
+  ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
+            StackHandlerConstants::kPCOffset);
+  __ ret(0);
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+  Label slow;
+
+  // Get the function to call from the stack.
+  // +2 ~ receiver, return address
+  __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
+
+  // Check that the function really is a JavaScript function.
+  __ JumpIfSmi(rdi, &slow);
+  // Goto slow case if we do not have a function.
+  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+  __ j(not_equal, &slow);
+
+  // Fast-case: Just invoke the function.
+  ParameterCount actual(argc_);
+  __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+
+  // Slow-case: Non-function called.
+  __ bind(&slow);
+  __ Set(rax, argc_);
+  __ Set(rbx, 0);
+  __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
+  Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+  __ Jump(adaptor, RelocInfo::CODE_TARGET);
+}
+
+
+void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
+  // rax: number of arguments including receiver
+  // rbx: pointer to C function  (C callee-saved)
+  // rbp: frame pointer of calling JS frame (restored after C call)
+  // rsp: stack pointer  (restored after C call)
+  // rsi: current context (restored)
+
+  // NOTE: Invocations of builtins may return failure objects
+  // instead of a proper result. The builtin entry handles
+  // this by performing a garbage collection and retrying the
+  // builtin once.
+
+  StackFrame::Type frame_type = is_debug_break ?
+      StackFrame::EXIT_DEBUG :
+      StackFrame::EXIT;
+
+  // Enter the exit frame that transitions from JavaScript to C++.
+  __ EnterExitFrame(frame_type, result_size_);
+
+  // rax: Holds the context at this point, but should not be used.
+  //      On entry to code generated by GenerateCore, it must hold
+  //      a failure result if the collect_garbage argument to GenerateCore
+  //      is true.  This failure result can be the result of code
+  //      generated by a previous call to GenerateCore.  The value
+  //      of rax is then passed to Runtime::PerformGC.
+  // rbx: pointer to builtin function  (C callee-saved).
+  // rbp: frame pointer of exit frame  (restored after C call).
+  // rsp: stack pointer (restored after C call).
+  // r14: number of arguments including receiver (C callee-saved).
+  // r15: argv pointer (C callee-saved).
+
+  Label throw_normal_exception;
+  Label throw_termination_exception;
+  Label throw_out_of_memory_exception;
+
+  // Call into the runtime system.
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_termination_exception,
+               &throw_out_of_memory_exception,
+               frame_type,
+               false,
+               false);
+
+  // Do space-specific GC and retry runtime call.
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_termination_exception,
+               &throw_out_of_memory_exception,
+               frame_type,
+               true,
+               false);
+
+  // Do full GC and retry runtime call one final time.
+  Failure* failure = Failure::InternalError();
+  __ movq(rax, failure, RelocInfo::NONE);
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_termination_exception,
+               &throw_out_of_memory_exception,
+               frame_type,
+               true,
+               true);
+
+  __ bind(&throw_out_of_memory_exception);
+  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+
+  __ bind(&throw_termination_exception);
+  GenerateThrowUncatchable(masm, TERMINATION);
+
+  __ bind(&throw_normal_exception);
+  GenerateThrowTOS(masm);
+}
+
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+  Label invoke, exit;
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  Label not_outermost_js, not_outermost_js_2;
+#endif
+
+  // Setup frame.
+  __ push(rbp);
+  __ movq(rbp, rsp);
+
+  // Push the stack frame type marker twice.
+  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+  __ push(Immediate(Smi::FromInt(marker)));  // context slot
+  __ push(Immediate(Smi::FromInt(marker)));  // function slot
+  // Save callee-saved registers (X64 calling conventions).
+  __ push(r12);
+  __ push(r13);
+  __ push(r14);
+  __ push(r15);
+  __ push(rdi);
+  __ push(rsi);
+  __ push(rbx);
+  // TODO(X64): Push XMM6-XMM15 (low 64 bits) as well, or make them
+  // callee-save in JS code as well.
+
+  // Save copies of the top frame descriptor on the stack.
+  ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
+  __ load_rax(c_entry_fp);
+  __ push(rax);
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // If this is the outermost JS call, set js_entry_sp value.
+  ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
+  __ load_rax(js_entry_sp);
+  __ testq(rax, rax);
+  __ j(not_zero, &not_outermost_js);
+  __ movq(rax, rbp);
+  __ store_rax(js_entry_sp);
+  __ bind(&not_outermost_js);
+#endif
+
+  // Call a faked try-block that does the invoke.
+  __ call(&invoke);
+
+  // Caught exception: Store result (exception) in the pending
+  // exception field in the JSEnv and return a failure sentinel.
+  ExternalReference pending_exception(Top::k_pending_exception_address);
+  __ store_rax(pending_exception);
+  __ movq(rax, Failure::Exception(), RelocInfo::NONE);
+  __ jmp(&exit);
+
+  // Invoke: Link this frame into the handler chain.
+  __ bind(&invoke);
+  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+
+  // Clear any pending exceptions.
+  __ load_rax(ExternalReference::the_hole_value_location());
+  __ store_rax(pending_exception);
+
+  // Fake a receiver (NULL).
+  __ push(Immediate(0));  // receiver
+
+  // Invoke the function by calling through JS entry trampoline
+  // builtin and pop the faked function when we return. We load the address
+  // from an external reference instead of inlining the call target address
+  // directly in the code, because the builtin stubs may not have been
+  // generated yet at the time this code is generated.
+  if (is_construct) {
+    ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
+    __ load_rax(construct_entry);
+  } else {
+    ExternalReference entry(Builtins::JSEntryTrampoline);
+    __ load_rax(entry);
+  }
+  __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
+  __ call(kScratchRegister);
+
+  // Unlink this frame from the handler chain.
+  __ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
+  __ pop(Operand(kScratchRegister, 0));
+  // Pop next_sp.
+  __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // If the current rbp value is the same as the js_entry_sp value, it means
+  // that the current function is the outermost.
+  __ movq(kScratchRegister, js_entry_sp);
+  __ cmpq(rbp, Operand(kScratchRegister, 0));
+  __ j(not_equal, &not_outermost_js_2);
+  __ movq(Operand(kScratchRegister, 0), Immediate(0));
+  __ bind(&not_outermost_js_2);
+#endif
+
+  // Restore the top frame descriptor from the stack.
+  __ bind(&exit);
+  __ movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address));
+  __ pop(Operand(kScratchRegister, 0));
+
+  // Restore callee-saved registers (X64 conventions).
+  __ pop(rbx);
+  __ pop(rsi);
+  __ pop(rdi);
+  __ pop(r15);
+  __ pop(r14);
+  __ pop(r13);
+  __ pop(r12);
+  __ addq(rsp, Immediate(2 * kPointerSize));  // remove markers
+
+  // Restore frame pointer and return.
+  __ pop(rbp);
+  __ ret(0);
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of stubs.
+
+//  Stub classes have a public member named masm, not masm_.
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+  // Because builtins always remove the receiver from the stack, we
+  // have to fake one to avoid underflowing the stack. The receiver
+  // must be inserted below the return address on the stack, so we
+  // temporarily store the return address in a register.
+  __ pop(rax);
+  __ push(Immediate(Smi::FromInt(0)));
+  __ push(rax);
+
+  // Do tail-call to runtime routine.
+  Runtime::Function* f = Runtime::FunctionForId(Runtime::kStackGuard);
+  __ TailCallRuntime(ExternalReference(f), 1, f->result_size);
+}
+
+
+void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
+                                             Label* need_gc,
+                                             Register scratch,
+                                             Register result) {
+  // Allocate heap number in new space.
+  __ AllocateInNewSpace(HeapNumber::kSize,
+                        result,
+                        scratch,
+                        no_reg,
+                        need_gc,
+                        TAG_OBJECT);
+
+  // Set the map and tag the result.
+  __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
+  __ movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+}
+
+
+void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
+                                           Register number) {
+  Label load_smi, done;
+
+  __ JumpIfSmi(number, &load_smi);
+  __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
+  __ jmp(&done);
+
+  __ bind(&load_smi);
+  __ SmiToInteger32(number, number);
+  __ push(number);
+  __ fild_s(Operand(rsp, 0));
+  __ pop(number);
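+  // fild_s loads a 32-bit integer from memory, so the untagged smi is
+  // pushed to the stack first and popped off again afterwards.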
+
+  __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
+                                           Register src,
+                                           XMMRegister dst) {
+  Label load_smi, done;
+
+  __ JumpIfSmi(src, &load_smi);
+  __ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
+  __ jmp(&done);
+
+  __ bind(&load_smi);
+  __ SmiToInteger32(src, src);
+  __ cvtlsi2sd(dst, src);
+
+  __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
+                                            XMMRegister dst1,
+                                            XMMRegister dst2) {
+  __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
+  LoadFloatOperand(masm, kScratchRegister, dst1);
+  __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+  LoadFloatOperand(masm, kScratchRegister, dst2);
+}
+
+
+void FloatingPointHelper::LoadInt32Operand(MacroAssembler* masm,
+                                           const Operand& src,
+                                           Register dst) {
+  // TODO(X64): Convert number operands to int32 values.
+  // Don't convert a Smi to a double first.
+  UNIMPLEMENTED();
+}
+
+
+void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm) {
+  Label load_smi_1, load_smi_2, done_load_1, done;
+  __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
+  __ JumpIfSmi(kScratchRegister, &load_smi_1);
+  __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
+  __ bind(&done_load_1);
+
+  __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+  __ JumpIfSmi(kScratchRegister, &load_smi_2);
+  __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
+  __ jmp(&done);
+
+  __ bind(&load_smi_1);
+  __ SmiToInteger32(kScratchRegister, kScratchRegister);
+  __ push(kScratchRegister);
+  __ fild_s(Operand(rsp, 0));
+  __ pop(kScratchRegister);
+  __ jmp(&done_load_1);
+
+  __ bind(&load_smi_2);
+  __ SmiToInteger32(kScratchRegister, kScratchRegister);
+  __ push(kScratchRegister);
+  __ fild_s(Operand(rsp, 0));
+  __ pop(kScratchRegister);
+
+  __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
+                                            Register lhs,
+                                            Register rhs) {
+  Label load_smi_lhs, load_smi_rhs, done_load_lhs, done;
+  __ JumpIfSmi(lhs, &load_smi_lhs);
+  __ fld_d(FieldOperand(lhs, HeapNumber::kValueOffset));
+  __ bind(&done_load_lhs);
+
+  __ JumpIfSmi(rhs, &load_smi_rhs);
+  __ fld_d(FieldOperand(rhs, HeapNumber::kValueOffset));
+  __ jmp(&done);
+
+  __ bind(&load_smi_lhs);
+  __ SmiToInteger64(kScratchRegister, lhs);
+  __ push(kScratchRegister);
+  __ fild_d(Operand(rsp, 0));
+  __ pop(kScratchRegister);
+  __ jmp(&done_load_lhs);
+
+  __ bind(&load_smi_rhs);
+  __ SmiToInteger64(kScratchRegister, rhs);
+  __ push(kScratchRegister);
+  __ fild_d(Operand(rsp, 0));
+  __ pop(kScratchRegister);
+
+  __ bind(&done);
+}
+
+
+void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
+                                             Label* non_float) {
+  Label test_other, done;
+  // Test if both operands are numbers (heap_numbers or smis).
+  // If not, jump to label non_float.
+  __ JumpIfSmi(rdx, &test_other);  // argument in rdx is OK
+  __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), Factory::heap_number_map());
+  __ j(not_equal, non_float);  // The argument in rdx is not a number.
+
+  __ bind(&test_other);
+  __ JumpIfSmi(rax, &done);  // argument in rax is OK
+  __ Cmp(FieldOperand(rax, HeapObject::kMapOffset), Factory::heap_number_map());
+  __ j(not_equal, non_float);  // The argument in rax is not a number.
+
+  // Fall-through: Both operands are numbers.
+  __ bind(&done);
+}
+
+
+const char* GenericBinaryOpStub::GetName() {
+  switch (op_) {
+    case Token::ADD: return "GenericBinaryOpStub_ADD";
+    case Token::SUB: return "GenericBinaryOpStub_SUB";
+    case Token::MUL: return "GenericBinaryOpStub_MUL";
+    case Token::DIV: return "GenericBinaryOpStub_DIV";
+    case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
+    case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
+    case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
+    case Token::SAR: return "GenericBinaryOpStub_SAR";
+    case Token::SHL: return "GenericBinaryOpStub_SHL";
+    case Token::SHR: return "GenericBinaryOpStub_SHR";
+    default:         return "GenericBinaryOpStub";
+  }
+}
+
+
+void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
+  // Perform fast-case smi code for the operation (rax <op> rbx) and
+  // leave result in register rax.
+
+  // Smi check both operands.
+  __ JumpIfNotBothSmi(rax, rbx, slow);
+
+  switch (op_) {
+    case Token::ADD: {
+      __ SmiAdd(rax, rax, rbx, slow);
+      break;
+    }
+
+    case Token::SUB: {
+      __ SmiSub(rax, rax, rbx, slow);
+      break;
+    }
+
+    case Token::MUL:
+      __ SmiMul(rax, rax, rbx, slow);
+      break;
+
+    case Token::DIV:
+      __ SmiDiv(rax, rax, rbx, slow);
+      break;
+
+    case Token::MOD:
+      __ SmiMod(rax, rax, rbx, slow);
+      break;
+
+    case Token::BIT_OR:
+      __ SmiOr(rax, rax, rbx);
+      break;
+
+    case Token::BIT_AND:
+      __ SmiAnd(rax, rax, rbx);
+      break;
+
+    case Token::BIT_XOR:
+      __ SmiXor(rax, rax, rbx);
+      break;
+
+    case Token::SHL:
+    case Token::SHR:
+    case Token::SAR:
+      // Move the second operand into register rcx.
+      __ movl(rcx, rbx);
+      // Perform the operation.
+      switch (op_) {
+        case Token::SAR:
+          __ SmiShiftArithmeticRight(rax, rax, rbx);
+          break;
+        case Token::SHR:
+          __ SmiShiftLogicalRight(rax, rax, rbx, slow);
+          break;
+        case Token::SHL:
+          __ SmiShiftLeft(rax, rax, rbx, slow);
+          break;
+        default:
+          UNREACHABLE();
+      }
+      break;
+
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
+  Label call_runtime;
+  if (flags_ == SMI_CODE_IN_STUB) {
+    // The fast case smi code wasn't inlined in the stub caller
+    // code. Generate it here to speed up common operations.
+    Label slow;
+    __ movq(rbx, Operand(rsp, 1 * kPointerSize));  // get y
+    __ movq(rax, Operand(rsp, 2 * kPointerSize));  // get x
+    GenerateSmiCode(masm, &slow);
+    __ ret(2 * kPointerSize);  // remove both operands
+
+    // Too bad. The fast case smi code didn't succeed.
+    __ bind(&slow);
+  }
+
+  // Setup registers.
+  __ movq(rax, Operand(rsp, 1 * kPointerSize));  // get y
+  __ movq(rdx, Operand(rsp, 2 * kPointerSize));  // get x
+
+  // Floating point case.
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV: {
+      // rax: y
+      // rdx: x
+      FloatingPointHelper::CheckFloatOperands(masm, &call_runtime);
+      // Fast-case: Both operands are numbers.
+      // Allocate a heap number, if needed.
+      Label skip_allocation;
+      switch (mode_) {
+        case OVERWRITE_LEFT:
+          __ movq(rax, rdx);
+          // Fall through!
+        case OVERWRITE_RIGHT:
+          // If the argument in rax is already an object, we skip the
+          // allocation of a heap number.
+          __ JumpIfNotSmi(rax, &skip_allocation);
+          // Fall through!
+        case NO_OVERWRITE:
+          FloatingPointHelper::AllocateHeapNumber(masm,
+                                                  &call_runtime,
+                                                  rcx,
+                                                  rax);
+          __ bind(&skip_allocation);
+          break;
+        default: UNREACHABLE();
+      }
+      // xmm4 and xmm5 are volatile XMM registers.
+      FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
+
+      switch (op_) {
+        case Token::ADD: __ addsd(xmm4, xmm5); break;
+        case Token::SUB: __ subsd(xmm4, xmm5); break;
+        case Token::MUL: __ mulsd(xmm4, xmm5); break;
+        case Token::DIV: __ divsd(xmm4, xmm5); break;
+        default: UNREACHABLE();
+      }
+      __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
+      __ ret(2 * kPointerSize);
+    }
+    case Token::MOD: {
+      // For MOD we go directly to runtime in the non-smi case.
+      break;
+    }
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SAR:
+    case Token::SHL:
+    case Token::SHR: {
+      FloatingPointHelper::CheckFloatOperands(masm, &call_runtime);
+      // TODO(X64): Don't convert a Smi to float and then back to int32
+      // afterwards.
+      FloatingPointHelper::LoadFloatOperands(masm);
+
+      Label skip_allocation, non_smi_result, operand_conversion_failure;
+
+      // Reserve space for converted numbers.
+      __ subq(rsp, Immediate(2 * kPointerSize));
+
+      if (use_sse3_) {
+        // Truncate the operands to 32-bit integers and check for
+        // exceptions in doing so.
+        CpuFeatures::Scope scope(CpuFeatures::SSE3);
+        __ fisttp_s(Operand(rsp, 0 * kPointerSize));
+        __ fisttp_s(Operand(rsp, 1 * kPointerSize));
+        __ fnstsw_ax();
+        __ testl(rax, Immediate(1));
+        __ j(not_zero, &operand_conversion_failure);
+      } else {
+        // Check if right operand is int32.
+        __ fist_s(Operand(rsp, 0 * kPointerSize));
+        __ fild_s(Operand(rsp, 0 * kPointerSize));
+        __ fucompp();
+        __ fnstsw_ax();
+        if (CpuFeatures::IsSupported(CpuFeatures::SAHF)) {
+          __ sahf();
+          __ j(not_zero, &operand_conversion_failure);
+          __ j(parity_even, &operand_conversion_failure);
+        } else {
+          __ and_(rax, Immediate(0x4400));
+          __ cmpl(rax, Immediate(0x4000));
+          __ j(not_zero, &operand_conversion_failure);
+        }
+        // Check if left operand is int32.
+        __ fist_s(Operand(rsp, 1 * kPointerSize));
+        __ fild_s(Operand(rsp, 1 * kPointerSize));
+        __ fucompp();
+        __ fnstsw_ax();
+        if (CpuFeatures::IsSupported(CpuFeatures::SAHF)) {
+          __ sahf();
+          __ j(not_zero, &operand_conversion_failure);
+          __ j(parity_even, &operand_conversion_failure);
+        } else {
+          __ and_(rax, Immediate(0x4400));
+          __ cmpl(rax, Immediate(0x4000));
+          __ j(not_zero, &operand_conversion_failure);
+        }
+      }
+
+      // Get int32 operands and perform bitop.
+      __ pop(rcx);
+      __ pop(rax);
+      switch (op_) {
+        case Token::BIT_OR:  __ or_(rax, rcx); break;
+        case Token::BIT_AND: __ and_(rax, rcx); break;
+        case Token::BIT_XOR: __ xor_(rax, rcx); break;
+        case Token::SAR: __ sarl(rax); break;
+        case Token::SHL: __ shll(rax); break;
+        case Token::SHR: __ shrl(rax); break;
+        default: UNREACHABLE();
+      }
+      if (op_ == Token::SHR) {
+        // Check if result is non-negative and fits in a smi.
+        __ testl(rax, Immediate(0xc0000000));
+        __ j(not_zero, &non_smi_result);
+      } else {
+        // Check if result fits in a smi.
+        __ cmpl(rax, Immediate(0xc0000000));
+        __ j(negative, &non_smi_result);
+      }
+      // Tag smi result and return.
+      __ Integer32ToSmi(rax, rax);
+      __ ret(2 * kPointerSize);
+
+      // All ops except SHR return a signed int32 that we load in a HeapNumber.
+      if (op_ != Token::SHR) {
+        __ bind(&non_smi_result);
+        // Allocate a heap number if needed.
+        __ movsxlq(rbx, rax);  // rbx: sign extended 32-bit result
+        switch (mode_) {
+          case OVERWRITE_LEFT:
+          case OVERWRITE_RIGHT:
+            // If the operand was an object, we skip the
+            // allocation of a heap number.
+            __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
+                                 1 * kPointerSize : 2 * kPointerSize));
+            __ JumpIfNotSmi(rax, &skip_allocation);
+            // Fall through!
+          case NO_OVERWRITE:
+            FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
+                                                    rcx, rax);
+            __ bind(&skip_allocation);
+            break;
+          default: UNREACHABLE();
+        }
+        // Store the result in the HeapNumber and return.
+        __ movq(Operand(rsp, 1 * kPointerSize), rbx);
+        __ fild_s(Operand(rsp, 1 * kPointerSize));
+        __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
+        __ ret(2 * kPointerSize);
+      }
+
+      // Clear the FPU exception flag and reset the stack before calling
+      // the runtime system.
+      __ bind(&operand_conversion_failure);
+      __ addq(rsp, Immediate(2 * kPointerSize));
+      if (use_sse3_) {
+        // If we've used the SSE3 instructions for truncating the
+        // floating point values to integers and it failed, we have a
+        // pending #IA exception. Clear it.
+        __ fnclex();
+      } else {
+        // The non-SSE3 variant does early bailout if the right
+        // operand isn't a 32-bit integer, so we may have a single
+        // value on the FPU stack we need to get rid of.
+        __ ffree(0);
+      }
+
+      // SHR should return uint32 - go to runtime for non-smi/negative result.
+      if (op_ == Token::SHR) {
+        __ bind(&non_smi_result);
+      }
+      __ movq(rax, Operand(rsp, 1 * kPointerSize));
+      __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+      break;
+    }
+    default: UNREACHABLE(); break;
+  }
+
+  // If all else fails, use the runtime system to get the correct
+  // result.
+  __ bind(&call_runtime);
+  switch (op_) {
+    case Token::ADD:
+      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+      break;
+    case Token::SUB:
+      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+      break;
+    case Token::MUL:
+      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+        break;
+    case Token::DIV:
+      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+      break;
+    case Token::MOD:
+      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+      break;
+    case Token::BIT_OR:
+      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+      break;
+    case Token::BIT_AND:
+      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+      break;
+    case Token::BIT_XOR:
+      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+      break;
+    case Token::SAR:
+      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+      break;
+    case Token::SHL:
+      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+      break;
+    case Token::SHR:
+      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+int CompareStub::MinorKey() {
+  // Encode the two parameters in a unique 16 bit value.
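+  // Bit 0 holds the strict flag; the condition code fills the higher bits.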
+  ASSERT(static_cast<unsigned>(cc_) < (1 << 15));
+  return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
new file mode 100644
index 0000000..87db3a9
--- /dev/null
+++ b/src/x64/codegen-x64.h
@@ -0,0 +1,673 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_X64_CODEGEN_X64_H_
+#define V8_X64_CODEGEN_X64_H_
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations
+class DeferredCode;
+class RegisterAllocator;
+class RegisterFile;
+
+enum InitState { CONST_INIT, NOT_CONST_INIT };
+enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+
+
+// -------------------------------------------------------------------------
+// Reference support
+
+// A reference is a C++ stack-allocated object that keeps an ECMA
+// reference on the execution stack while in scope. For variables
+// the reference is empty, indicating that it isn't necessary to
+// store state on the stack for keeping track of references to those.
+// For properties, we keep either one (named) or two (indexed) values
+// on the execution stack to represent the reference.
+
+class Reference BASE_EMBEDDED {
+ public:
+  // The values of the types are important, see size().
+  enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
+  Reference(CodeGenerator* cgen, Expression* expression);
+  ~Reference();
+
+  Expression* expression() const { return expression_; }
+  Type type() const { return type_; }
+  void set_type(Type value) {
+    ASSERT(type_ == ILLEGAL);
+    type_ = value;
+  }
+
+  // The size the reference takes up on the stack.
+  int size() const { return (type_ == ILLEGAL) ? 0 : type_; }
+
+  bool is_illegal() const { return type_ == ILLEGAL; }
+  bool is_slot() const { return type_ == SLOT; }
+  bool is_property() const { return type_ == NAMED || type_ == KEYED; }
+
+  // Return the name.  Only valid for named property references.
+  Handle<String> GetName();
+
+  // Generate code to push the value of the reference on top of the
+  // expression stack.  The reference is expected to be already on top of
+  // the expression stack, and it is left in place with its value above it.
+  void GetValue(TypeofState typeof_state);
+
+  // Like GetValue except that the slot is expected to be written to before
+  // being read from again.  The value of the reference may be invalidated,
+  // causing subsequent attempts to read it to fail.
+  void TakeValue(TypeofState typeof_state);
+
+  // Generate code to store the value on top of the expression stack in the
+  // reference.  The reference is expected to be immediately below the value
+  // on the expression stack.  The stored value is left in place (with the
+  // reference intact below it) to support chained assignments.
+  void SetValue(InitState init_state);
+
+ private:
+  CodeGenerator* cgen_;
+  Expression* expression_;
+  Type type_;
+};
+
+
+// -------------------------------------------------------------------------
+// Control destinations.
+
+// A control destination encapsulates a pair of jump targets and a
+// flag indicating which one is the preferred fall-through.  The
+// preferred fall-through must be unbound, the other may be already
+// bound (ie, a backward target).
+//
+// The true and false targets may be jumped to unconditionally or
+// control may split conditionally.  Unconditional jumping and
+// splitting should be emitted in tail position (as the last thing
+// when compiling an expression) because they can cause either label
+// to be bound or the non-fall through to be jumped to leaving an
+// invalid virtual frame.
+//
+// The labels in the control destination can be extracted and
+// manipulated normally without affecting the state of the
+// destination.
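+//
+// Illustrative usage sketch (assuming a comparison setting the condition
+// flags has just been emitted):
+//
+//   JumpTarget then_target;
+//   JumpTarget else_target;
+//   ControlDestination dest(&then_target, &else_target, true);
+//   dest.Split(less);  // Branches to else_target on greater_equal and binds
+//                      // then_target as the fall-through.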
+
+class ControlDestination BASE_EMBEDDED {
+ public:
+  ControlDestination(JumpTarget* true_target,
+                     JumpTarget* false_target,
+                     bool true_is_fall_through)
+      : true_target_(true_target),
+        false_target_(false_target),
+        true_is_fall_through_(true_is_fall_through),
+        is_used_(false) {
+    ASSERT(true_is_fall_through ? !true_target->is_bound()
+                                : !false_target->is_bound());
+  }
+
+  // Accessors for the jump targets.  Directly jumping or branching to
+  // or binding the targets will not update the destination's state.
+  JumpTarget* true_target() const { return true_target_; }
+  JumpTarget* false_target() const { return false_target_; }
+
+  // True if the destination has been jumped to unconditionally or
+  // control has been split to both targets.  This predicate does not
+  // test whether the targets have been extracted and manipulated as
+  // raw jump targets.
+  bool is_used() const { return is_used_; }
+
+  // True if the destination is used and the true target (respectively
+  // false target) was the fall through.  If the target is backward,
+  // "fall through" included jumping unconditionally to it.
+  bool true_was_fall_through() const {
+    return is_used_ && true_is_fall_through_;
+  }
+
+  bool false_was_fall_through() const {
+    return is_used_ && !true_is_fall_through_;
+  }
+
+  // Emit a branch to one of the true or false targets, and bind the
+  // other target.  Because this binds the fall-through target, it
+  // should be emitted in tail position (as the last thing when
+  // compiling an expression).
+  void Split(Condition cc) {
+    ASSERT(!is_used_);
+    if (true_is_fall_through_) {
+      false_target_->Branch(NegateCondition(cc));
+      true_target_->Bind();
+    } else {
+      true_target_->Branch(cc);
+      false_target_->Bind();
+    }
+    is_used_ = true;
+  }
+
+  // Emit an unconditional jump in tail position, to the true target
+  // (if the argument is true) or the false target.  The "jump" will
+  // actually bind the jump target if it is forward, jump to it if it
+  // is backward.
+  void Goto(bool where) {
+    ASSERT(!is_used_);
+    JumpTarget* target = where ? true_target_ : false_target_;
+    if (target->is_bound()) {
+      target->Jump();
+    } else {
+      target->Bind();
+    }
+    is_used_ = true;
+    true_is_fall_through_ = where;
+  }
+
+  // Mark this jump target as used as if Goto had been called, but
+  // without generating a jump or binding a label (the control effect
+  // should have already happened).  This is used when the left
+  // subexpression of the short-circuit boolean operators are
+  // compiled.
+  void Use(bool where) {
+    ASSERT(!is_used_);
+    ASSERT((where ? true_target_ : false_target_)->is_bound());
+    is_used_ = true;
+    true_is_fall_through_ = where;
+  }
+
+  // Swap the true and false targets but keep the same actual label as
+  // the fall through.  This is used when compiling negated
+  // expressions, where we want to swap the targets but preserve the
+  // state.
+  void Invert() {
+    JumpTarget* temp_target = true_target_;
+    true_target_ = false_target_;
+    false_target_ = temp_target;
+
+    true_is_fall_through_ = !true_is_fall_through_;
+  }
+
+ private:
+  // True and false jump targets.
+  JumpTarget* true_target_;
+  JumpTarget* false_target_;
+
+  // Before using the destination: true if the true target is the
+  // preferred fall through, false if the false target is.  After
+  // using the destination: true if the true target was actually used
+  // as the fall through, false if the false target was.
+  bool true_is_fall_through_;
+
+  // True if the Split or Goto functions have been called.
+  bool is_used_;
+};
+
+
+// -------------------------------------------------------------------------
+// Code generation state
+
+// The state is passed down the AST by the code generator (and back up, in
+// the form of the state of the jump target pair).  It is threaded through
+// the call stack.  Constructing a state implicitly pushes it on the owning
+// code generator's stack of states, and destroying one implicitly pops it.
+//
+// The code generator state is only used for expressions, so statements have
+// the initial state.
+
+class CodeGenState BASE_EMBEDDED {
+ public:
+  // Create an initial code generator state.  Destroying the initial state
+  // leaves the code generator with a NULL state.
+  explicit CodeGenState(CodeGenerator* owner);
+
+  // Create a code generator state based on a code generator's current
+  // state.  The new state may or may not be inside a typeof, and has its
+  // own control destination.
+  CodeGenState(CodeGenerator* owner,
+               TypeofState typeof_state,
+               ControlDestination* destination);
+
+  // Destroy a code generator state and restore the owning code generator's
+  // previous state.
+  ~CodeGenState();
+
+  // Accessors for the state.
+  TypeofState typeof_state() const { return typeof_state_; }
+  ControlDestination* destination() const { return destination_; }
+
+ private:
+  // The owning code generator.
+  CodeGenerator* owner_;
+
+  // A flag indicating whether we are compiling the immediate subexpression
+  // of a typeof expression.
+  TypeofState typeof_state_;
+
+  // A control destination in case the expression has a control-flow
+  // effect.
+  ControlDestination* destination_;
+
+  // The previous state of the owning code generator, restored when
+  // this state is destroyed.
+  CodeGenState* previous_;
+};
+
+
+// -------------------------------------------------------------------------
+// Arguments allocation mode
+
+enum ArgumentsAllocationMode {
+  NO_ARGUMENTS_ALLOCATION,
+  EAGER_ARGUMENTS_ALLOCATION,
+  LAZY_ARGUMENTS_ALLOCATION
+};
+
+
+// -------------------------------------------------------------------------
+// CodeGenerator
+
+class CodeGenerator: public AstVisitor {
+ public:
+  // Takes a function literal and generates code for it. This function should only
+  // be called by compiler.cc.
+  static Handle<Code> MakeCode(FunctionLiteral* fun,
+                               Handle<Script> script,
+                               bool is_eval);
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  static bool ShouldGenerateLog(Expression* type);
+#endif
+
+  static void SetFunctionInfo(Handle<JSFunction> fun,
+                              FunctionLiteral* lit,
+                              bool is_toplevel,
+                              Handle<Script> script);
+
+  // Accessors
+  MacroAssembler* masm() { return masm_; }
+
+  VirtualFrame* frame() const { return frame_; }
+
+  bool has_valid_frame() const { return frame_ != NULL; }
+
+  // Set the virtual frame to be new_frame, with non-frame register
+  // reference counts given by non_frame_registers.  The non-frame
+  // register reference counts of the old frame are returned in
+  // non_frame_registers.
+  void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
+
+  void DeleteFrame();
+
+  RegisterAllocator* allocator() const { return allocator_; }
+
+  CodeGenState* state() { return state_; }
+  void set_state(CodeGenState* state) { state_ = state; }
+
+  void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
+
+  bool in_spilled_code() const { return in_spilled_code_; }
+  void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
+
+ private:
+  // Construction/Destruction
+  CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
+  virtual ~CodeGenerator() { delete masm_; }
+
+  // Accessors
+  Scope* scope() const { return scope_; }
+
+  // Generating deferred code.
+  void ProcessDeferred();
+
+  bool is_eval() { return is_eval_; }
+
+  // State
+  TypeofState typeof_state() const { return state_->typeof_state(); }
+  ControlDestination* destination() const { return state_->destination(); }
+
+  // Track loop nesting level.
+  int loop_nesting() const { return loop_nesting_; }
+  void IncrementLoopNesting() { loop_nesting_++; }
+  void DecrementLoopNesting() { loop_nesting_--; }
+
+
+  // Node visitors.
+  void VisitStatements(ZoneList<Statement*>* statements);
+
+#define DEF_VISIT(type) \
+  void Visit##type(type* node);
+  AST_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+  // Visit a statement and then spill the virtual frame if control flow can
+  // reach the end of the statement (ie, it does not exit via break,
+  // continue, return, or throw).  This function is used temporarily while
+  // the code generator is being transformed.
+  void VisitAndSpill(Statement* statement);
+
+  // Visit a list of statements and then spill the virtual frame if control
+  // flow can reach the end of the list.
+  void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
+
+  // Main code generation function
+  void GenCode(FunctionLiteral* fun);
+
+  // Generate the return sequence code.  Should be called no more than
+  // once per compiled function, immediately after binding the return
+  // target (which cannot be done more than once).
+  void GenerateReturnSequence(Result* return_value);
+
+  // Returns the arguments allocation mode.
+  ArgumentsAllocationMode ArgumentsMode() const;
+
+  // Store the arguments object and allocate it if necessary.
+  Result StoreArgumentsObject(bool initial);
+
+  // The following are used by class Reference.
+  void LoadReference(Reference* ref);
+  void UnloadReference(Reference* ref);
+
+  Operand ContextOperand(Register context, int index) const {
+    return Operand(context, Context::SlotOffset(index));
+  }
+
+  Operand SlotOperand(Slot* slot, Register tmp);
+
+  Operand ContextSlotOperandCheckExtensions(Slot* slot,
+                                            Result tmp,
+                                            JumpTarget* slow);
+
+  // Expressions
+  Operand GlobalObject() const {
+    return ContextOperand(rsi, Context::GLOBAL_INDEX);
+  }
+
+  void LoadCondition(Expression* x,
+                     TypeofState typeof_state,
+                     ControlDestination* destination,
+                     bool force_control);
+  void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+  void LoadGlobal();
+  void LoadGlobalReceiver();
+
+  // Generate code to push the value of an expression on top of the frame
+  // and then spill the frame fully to memory.  This function is used
+  // temporarily while the code generator is being transformed.
+  void LoadAndSpill(Expression* expression,
+                    TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+
+  // Read a value from a slot and leave it on top of the expression stack.
+  void LoadFromSlot(Slot* slot, TypeofState typeof_state);
+  void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
+  Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
+                                           TypeofState typeof_state,
+                                           JumpTarget* slow);
+
+  // Store the value on top of the expression stack into a slot, leaving the
+  // value in place.
+  void StoreToSlot(Slot* slot, InitState init_state);
+
+  // Special code for typeof expressions: Unfortunately, we must
+  // be careful when loading the expression in 'typeof'
+  // expressions. We are not allowed to throw reference errors for
+  // non-existing properties of the global object, so we must make it
+  // look like an explicit property access, instead of an access
+  // through the context chain.
+  void LoadTypeofExpression(Expression* x);
+
+  // Translate the value on top of the frame into control flow to the
+  // control destination.
+  void ToBoolean(ControlDestination* destination);
+
+  void GenericBinaryOperation(
+      Token::Value op,
+      SmiAnalysis* type,
+      OverwriteMode overwrite_mode);
+
+  // If possible, combine two constant smi values using op to produce
+  // a smi result, and push it on the virtual frame, all at compile time.
+  // Returns true if it succeeds.  Otherwise it has no effect.
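+  // For example, the constant expression 2 + 3 can be folded to the smi 5
+  // without emitting any code.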
+  bool FoldConstantSmis(Token::Value op, int left, int right);
+
+  // Emit code to perform a binary operation on a constant
+  // smi and a likely smi.  Consumes the Result *operand.
+  void ConstantSmiBinaryOperation(Token::Value op,
+                                  Result* operand,
+                                  Handle<Object> constant_operand,
+                                  SmiAnalysis* type,
+                                  bool reversed,
+                                  OverwriteMode overwrite_mode);
+
+  // Emit code to perform a binary operation on two likely smis.
+  // The code to handle smi arguments is produced inline.
+  // Consumes the Results *left and *right.
+  void LikelySmiBinaryOperation(Token::Value op,
+                                Result* left,
+                                Result* right,
+                                OverwriteMode overwrite_mode);
+
+  void Comparison(Condition cc,
+                  bool strict,
+                  ControlDestination* destination);
+
+  // To prevent long attacker-controlled byte sequences, integer constants
+  // from the JavaScript source are loaded in two parts if they are larger
+  // than 16 bits.
+  static const int kMaxSmiInlinedBits = 16;
+  bool IsUnsafeSmi(Handle<Object> value);
+  // Load an integer constant x into a register target using
+  // at most 16 bits of user-controlled data per assembly operation.
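+  // For example, a 32-bit constant such as 0x41424344 can conceptually be
+  // materialized as (0x4142 << 16) | 0x4344, so that no single instruction
+  // carries more than 16 attacker-chosen bits.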
+  void LoadUnsafeSmi(Register target, Handle<Object> value);
+
+  void CallWithArguments(ZoneList<Expression*>* arguments, int position);
+
+  // Use an optimized version of Function.prototype.apply that avoids
+  // allocating the arguments object and just copies the arguments
+  // from the stack.
+  void CallApplyLazy(Property* apply,
+                     Expression* receiver,
+                     VariableProxy* arguments,
+                     int position);
+
+  void CheckStack();
+
+  struct InlineRuntimeLUT {
+    void (CodeGenerator::*method)(ZoneList<Expression*>*);
+    const char* name;
+  };
+  static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
+  bool CheckForInlineRuntimeCall(CallRuntime* node);
+  static bool PatchInlineRuntimeEntry(Handle<String> name,
+                                      const InlineRuntimeLUT& new_entry,
+                                      InlineRuntimeLUT* old_entry);
+  Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
+  void ProcessDeclarations(ZoneList<Declaration*>* declarations);
+
+  Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
+
+  // Declare global variables and functions in the given array of
+  // name/value pairs.
+  void DeclareGlobals(Handle<FixedArray> pairs);
+
+  // Instantiate the function boilerplate.
+  void InstantiateBoilerplate(Handle<JSFunction> boilerplate);
+
+  // Support for type checks.
+  void GenerateIsSmi(ZoneList<Expression*>* args);
+  void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
+  void GenerateIsArray(ZoneList<Expression*>* args);
+
+  // Support for construct call checks.
+  void GenerateIsConstructCall(ZoneList<Expression*>* args);
+
+  // Support for arguments.length and arguments[?].
+  void GenerateArgumentsLength(ZoneList<Expression*>* args);
+  void GenerateArgumentsAccess(ZoneList<Expression*>* args);
+
+  // Support for accessing the class and value fields of an object.
+  void GenerateClassOf(ZoneList<Expression*>* args);
+  void GenerateValueOf(ZoneList<Expression*>* args);
+  void GenerateSetValueOf(ZoneList<Expression*>* args);
+
+  // Fast support for charCodeAt(n).
+  void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
+
+  // Fast support for object equality testing.
+  void GenerateObjectEquals(ZoneList<Expression*>* args);
+
+  void GenerateLog(ZoneList<Expression*>* args);
+
+  void GenerateGetFramePointer(ZoneList<Expression*>* args);
+
+  // Fast support for Math.random().
+  void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
+
+  // Fast support for Math.sin and Math.cos.
+  enum MathOp { SIN, COS };
+  void GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args);
+  inline void GenerateMathSin(ZoneList<Expression*>* args);
+  inline void GenerateMathCos(ZoneList<Expression*>* args);
+
+  // Methods used to indicate which part of the source the code is generated
+  // for.  Source positions are collected by the assembler and emitted with the
+  // relocation information.
+  void CodeForFunctionPosition(FunctionLiteral* fun);
+  void CodeForReturnPosition(FunctionLiteral* fun);
+  void CodeForStatementPosition(Statement* node);
+  void CodeForSourcePosition(int pos);
+
+#ifdef DEBUG
+  // True if the registers are valid for entry to a block.  There should
+  // be no frame-external references to (non-reserved) registers.
+  bool HasValidEntryRegisters();
+#endif
+
+  bool is_eval_;  // Tells whether code is generated for eval.
+  Handle<Script> script_;
+  ZoneList<DeferredCode*> deferred_;
+
+  // Assembler
+  MacroAssembler* masm_;  // to generate code
+
+  // Code generation state
+  Scope* scope_;
+  VirtualFrame* frame_;
+  RegisterAllocator* allocator_;
+  CodeGenState* state_;
+  int loop_nesting_;
+
+  // Jump targets.
+  // The target of the return from the function.
+  BreakTarget function_return_;
+
+  // True if the function return is shadowed (ie, jumping to the target
+  // function_return_ does not jump to the true function return, but rather
+  // to some unlinking code).
+  bool function_return_is_shadowed_;
+
+  // True when we are in code that expects the virtual frame to be fully
+  // spilled.  Some virtual frame functions are disabled in DEBUG builds when
+  // called from spilled code, because they do not leave the virtual frame
+  // in a spilled state.
+  bool in_spilled_code_;
+
+  static InlineRuntimeLUT kInlineRuntimeLUT[];
+
+  friend class VirtualFrame;
+  friend class JumpTarget;
+  friend class Reference;
+  friend class Result;
+
+  friend class CodeGeneratorPatcher;  // Used in test-log-stack-tracer.cc
+
+  DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
+};
+
+
+// -------------------------------------------------------------------------
+// Code stubs
+//
+// These independent code objects are created once, and used multiple
+// times by generated code to perform common tasks, often the slow
+// case of a JavaScript operation.  They are all subclasses of CodeStub,
+// which is declared in code-stubs.h.
+
+
+// Flag that indicates whether or not the code that handles smi arguments
+// should be placed in the stub, inlined, or omitted entirely.
+enum GenericBinaryFlags {
+  SMI_CODE_IN_STUB,
+  SMI_CODE_INLINED
+};
+
+
+class GenericBinaryOpStub: public CodeStub {
+ public:
+  GenericBinaryOpStub(Token::Value op,
+                      OverwriteMode mode,
+                      GenericBinaryFlags flags)
+      : op_(op), mode_(mode), flags_(flags) {
+    use_sse3_ = CpuFeatures::IsSupported(CpuFeatures::SSE3);
+    ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+  }
+
+  void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+
+ private:
+  Token::Value op_;
+  OverwriteMode mode_;
+  GenericBinaryFlags flags_;
+  bool use_sse3_;
+
+  const char* GetName();
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
+           Token::String(op_),
+           static_cast<int>(mode_),
+           static_cast<int>(flags_));
+  }
+#endif
+
+  // Minor key encoding in 16 bits FSOOOOOOOOOOOOMM.
+  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+  class OpBits: public BitField<Token::Value, 2, 12> {};
+  class SSE3Bits: public BitField<bool, 14, 1> {};
+  class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
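+  // Stubs that differ in any of these fields get different minor keys; e.g.
+  // the SSE3 and non-SSE3 variants of an operation differ only in bit 14.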
+
+  Major MajorKey() { return GenericBinaryOp; }
+  int MinorKey() {
+    // Encode the parameters in a unique 16 bit value.
+    return OpBits::encode(op_)
+        | ModeBits::encode(mode_)
+        | FlagBits::encode(flags_)
+        | SSE3Bits::encode(use_sse3_);
+  }
+  void Generate(MacroAssembler* masm);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_X64_CODEGEN_X64_H_
diff --git a/src/x64/cpu-x64.cc b/src/x64/cpu-x64.cc
new file mode 100644
index 0000000..8df0ab7
--- /dev/null
+++ b/src/x64/cpu-x64.cc
@@ -0,0 +1,66 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// CPU specific code for x64 independent of OS goes here.
+
+#include "v8.h"
+
+#include "cpu.h"
+#include "macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+void CPU::Setup() {
+  CpuFeatures::Probe();
+}
+
+
+void CPU::FlushICache(void* start, size_t size) {
+  // No need to flush the instruction cache on Intel. On Intel, instruction
+  // cache flushing is only necessary when multiple cores are running the same
+  // code simultaneously. V8 (and JavaScript) is single threaded, and when code
+  // is patched on an Intel CPU the core performing the patching will have its
+  // own instruction cache updated automatically.
+
+  // If flushing of the instruction cache becomes necessary, Windows has the
+  // API function FlushInstructionCache.
+}
+
+
+void CPU::DebugBreak() {
+#ifdef _MSC_VER
+  // To avoid Visual Studio runtime support, the following code can be used
+  // instead:
+  // __asm { int 3 }
+  __debugbreak();
+#else
+  asm("int $3");
+#endif
+}
+
+} }  // namespace v8::internal
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
new file mode 100644
index 0000000..10092c5
--- /dev/null
+++ b/src/x64/debug-x64.cc
@@ -0,0 +1,204 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "debug.h"
+
+
+namespace v8 {
+namespace internal {
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+
+bool Debug::IsDebugBreakAtReturn(v8::internal::RelocInfo* rinfo) {
+  ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+  // 11th byte of patch is 0x49 (REX.WB byte of computed jump/call to r10),
+  // 11th byte of JS return is 0xCC (int3).
+  ASSERT(*(rinfo->pc() + 10) == 0x49 || *(rinfo->pc() + 10) == 0xCC);
+  return (*(rinfo->pc() + 10) != 0xCC);
+}
+
+#define __ ACCESS_MASM(masm)
+
+static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
+                                          RegList pointer_regs,
+                                          bool convert_call_to_jmp) {
+  // Save the content of all general purpose registers in memory. This copy in
+  // memory is later pushed onto the JS expression stack for the generated fake
+  // JS frame and also onto the C frame generated on top of that. In the JS
+  // frame ONLY the registers containing pointers will be pushed on the
+  // expression stack. This causes the GC to update these pointers so that
+  // they will have the correct value when returning from the debugger.
+  __ SaveRegistersToMemory(kJSCallerSaved);
+
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Store the registers containing object pointers on the expression stack to
+  // make sure that these are correctly updated during GC.
+  __ PushRegistersFromMemory(pointer_regs);
+
+#ifdef DEBUG
+  __ RecordComment("// Calling from debug break to runtime - come in - over");
+#endif
+  __ xor_(rax, rax);  // No arguments (argc == 0).
+  __ movq(rbx, ExternalReference::debug_break());
+
+  CEntryDebugBreakStub ceb;
+  __ CallStub(&ceb);
+
+  // Restore the register values containing object pointers from the expression
+  // stack in the reverse order from which they were pushed.
+  __ PopRegistersToMemory(pointer_regs);
+
+  // Get rid of the internal frame.
+  __ LeaveInternalFrame();
+
+  // If this call did not replace a call but patched other code then there will
+  // be an unwanted return address left on the stack. Here we get rid of that.
+  if (convert_call_to_jmp) {
+    __ pop(rax);
+  }
+
+  // Finally restore all registers.
+  __ RestoreRegistersFromMemory(kJSCallerSaved);
+
+  // Now that the break point has been handled, resume normal execution by
+  // jumping to the target address intended by the caller and that was
+  // overwritten by the address of DebugBreakXXX.
+  ExternalReference after_break_target =
+      ExternalReference(Debug_Address::AfterBreakTarget());
+  __ movq(kScratchRegister, after_break_target);
+  __ jmp(Operand(kScratchRegister, 0));
+}
+
+
+void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
+  // Register state for keyed IC call (from ic-x64.cc).
+  // ----------- S t a t e -------------
+  //  -- rax: number of arguments
+  // -----------------------------------
+  // The number of arguments in rax is not smi encoded.
+  Generate_DebugBreakCallHelper(masm, 0, false);
+}
+
+
+void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
+  // Register state for a construct call (from codegen-x64.cc).
+  // rax is the actual number of arguments, not encoded as a smi; see the
+  // comment above the IC call.
+  // ----------- S t a t e -------------
+  //  -- rax: number of arguments
+  // -----------------------------------
+  // The number of arguments in rax is not smi encoded.
+  Generate_DebugBreakCallHelper(masm, 0, false);
+}
+
+
+void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+  // Register state for keyed IC load call (from ic-x64.cc).
+  // ----------- S t a t e -------------
+  //  No registers used on entry.
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, 0, false);
+}
+
+
+void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+  // Register state for keyed IC store call (from ic-x64.cc).
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  // -----------------------------------
+  // Register rax contains an object that needs to be pushed on the
+  // expression stack of the fake JS frame.
+  Generate_DebugBreakCallHelper(masm, rax.bit(), false);
+}
+
+
+void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+  // Register state for IC load call (from ic-x64.cc).
+  // ----------- S t a t e -------------
+  //  -- rcx    : name
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, rcx.bit(), false);
+}
+
+
+void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
+  // Register state just before return from JS function (from codegen-x64.cc).
+  // ----------- S t a t e -------------
+  //  -- rax: return value
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, rax.bit(), true);
+}
+
+
+void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+  // Register state for IC store call (from ic-x64.cc).
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rcx    : name
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, rax.bit() | rcx.bit(), false);
+}
+
+
+void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
+  // Register state for stub CallFunction (from CallFunctionStub in ic-x64.cc).
+  // ----------- S t a t e -------------
+  //  No registers used on entry.
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, 0, false);
+}
+
+
+#undef __
+
+
+void BreakLocationIterator::ClearDebugBreakAtReturn() {
+  rinfo()->PatchCode(original_rinfo()->pc(),
+                     Debug::kX64JSReturnSequenceLength);
+}
+
+
+bool BreakLocationIterator::IsDebugBreakAtReturn()  {
+  return Debug::IsDebugBreakAtReturn(rinfo());
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtReturn()  {
+  ASSERT(Debug::kX64JSReturnSequenceLength >= Debug::kX64CallInstructionLength);
+  rinfo()->PatchCodeWithCall(Debug::debug_break_return()->entry(),
+      Debug::kX64JSReturnSequenceLength - Debug::kX64CallInstructionLength);
+}
+
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
+} }  // namespace v8::internal
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
new file mode 100644
index 0000000..d8d6dbb
--- /dev/null
+++ b/src/x64/disasm-x64.cc
@@ -0,0 +1,1591 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdarg.h>
+
+#include "v8.h"
+#include "disasm.h"
+
+namespace disasm {
+
+enum OperandType {
+  UNSET_OP_ORDER = 0,
+  // Operand size decides between 16, 32 and 64 bit operands.
+  REG_OPER_OP_ORDER = 1,  // Register destination, operand source.
+  OPER_REG_OP_ORDER = 2,  // Operand destination, register source.
+  // Fixed 8-bit operands.
+  BYTE_SIZE_OPERAND_FLAG = 4,
+  BYTE_REG_OPER_OP_ORDER = REG_OPER_OP_ORDER | BYTE_SIZE_OPERAND_FLAG,
+  BYTE_OPER_REG_OP_ORDER = OPER_REG_OP_ORDER | BYTE_SIZE_OPERAND_FLAG
+};
+
+//------------------------------------------------------------------
+// Tables
+//------------------------------------------------------------------
+struct ByteMnemonic {
+  int b;  // -1 terminates, otherwise must be in range (0..255)
+  OperandType op_order_;
+  const char* mnem;
+};
+
+
+static ByteMnemonic two_operands_instr[] = {
+  { 0x00, BYTE_OPER_REG_OP_ORDER, "add" },
+  { 0x01, OPER_REG_OP_ORDER,      "add" },
+  { 0x02, BYTE_REG_OPER_OP_ORDER, "add" },
+  { 0x03, REG_OPER_OP_ORDER,      "add" },
+  { 0x08, BYTE_OPER_REG_OP_ORDER, "or" },
+  { 0x09, OPER_REG_OP_ORDER,      "or" },
+  { 0x0A, BYTE_REG_OPER_OP_ORDER, "or" },
+  { 0x0B, REG_OPER_OP_ORDER,      "or" },
+  { 0x10, BYTE_OPER_REG_OP_ORDER, "adc" },
+  { 0x11, OPER_REG_OP_ORDER,      "adc" },
+  { 0x12, BYTE_REG_OPER_OP_ORDER, "adc" },
+  { 0x13, REG_OPER_OP_ORDER,      "adc" },
+  { 0x18, BYTE_OPER_REG_OP_ORDER, "sbb" },
+  { 0x19, OPER_REG_OP_ORDER,      "sbb" },
+  { 0x1A, BYTE_REG_OPER_OP_ORDER, "sbb" },
+  { 0x1B, REG_OPER_OP_ORDER,      "sbb" },
+  { 0x20, BYTE_OPER_REG_OP_ORDER, "and" },
+  { 0x21, OPER_REG_OP_ORDER,      "and" },
+  { 0x22, BYTE_REG_OPER_OP_ORDER, "and" },
+  { 0x23, REG_OPER_OP_ORDER,      "and" },
+  { 0x28, BYTE_OPER_REG_OP_ORDER, "sub" },
+  { 0x29, OPER_REG_OP_ORDER,      "sub" },
+  { 0x2A, BYTE_REG_OPER_OP_ORDER, "sub" },
+  { 0x2B, REG_OPER_OP_ORDER,      "sub" },
+  { 0x30, BYTE_OPER_REG_OP_ORDER, "xor" },
+  { 0x31, OPER_REG_OP_ORDER,      "xor" },
+  { 0x32, BYTE_REG_OPER_OP_ORDER, "xor" },
+  { 0x33, REG_OPER_OP_ORDER,      "xor" },
+  { 0x38, BYTE_OPER_REG_OP_ORDER, "cmp" },
+  { 0x39, OPER_REG_OP_ORDER,      "cmp" },
+  { 0x3A, BYTE_REG_OPER_OP_ORDER, "cmp" },
+  { 0x3B, REG_OPER_OP_ORDER,      "cmp" },
+  { 0x63, REG_OPER_OP_ORDER,      "movsxlq" },
+  { 0x84, BYTE_REG_OPER_OP_ORDER, "test" },
+  { 0x85, REG_OPER_OP_ORDER,      "test" },
+  { 0x86, BYTE_REG_OPER_OP_ORDER, "xchg" },
+  { 0x87, REG_OPER_OP_ORDER,      "xchg" },
+  { 0x88, BYTE_OPER_REG_OP_ORDER, "mov" },
+  { 0x89, OPER_REG_OP_ORDER,      "mov" },
+  { 0x8A, BYTE_REG_OPER_OP_ORDER, "mov" },
+  { 0x8B, REG_OPER_OP_ORDER,      "mov" },
+  { 0x8D, REG_OPER_OP_ORDER,      "lea" },
+  { -1, UNSET_OP_ORDER, "" }
+};
+
+
+static ByteMnemonic zero_operands_instr[] = {
+  { 0xC3, UNSET_OP_ORDER, "ret" },
+  { 0xC9, UNSET_OP_ORDER, "leave" },
+  { 0xF4, UNSET_OP_ORDER, "hlt" },
+  { 0xCC, UNSET_OP_ORDER, "int3" },
+  { 0x60, UNSET_OP_ORDER, "pushad" },
+  { 0x61, UNSET_OP_ORDER, "popad" },
+  { 0x9C, UNSET_OP_ORDER, "pushfd" },
+  { 0x9D, UNSET_OP_ORDER, "popfd" },
+  { 0x9E, UNSET_OP_ORDER, "sahf" },
+  { 0x99, UNSET_OP_ORDER, "cdq" },
+  { 0x9B, UNSET_OP_ORDER, "fwait" },
+  { -1, UNSET_OP_ORDER, "" }
+};
+
+
+static ByteMnemonic call_jump_instr[] = {
+  { 0xE8, UNSET_OP_ORDER, "call" },
+  { 0xE9, UNSET_OP_ORDER, "jmp" },
+  { -1, UNSET_OP_ORDER, "" }
+};
+
+
+static ByteMnemonic short_immediate_instr[] = {
+  { 0x05, UNSET_OP_ORDER, "add" },
+  { 0x0D, UNSET_OP_ORDER, "or" },
+  { 0x15, UNSET_OP_ORDER, "adc" },
+  { 0x1D, UNSET_OP_ORDER, "sbb" },
+  { 0x25, UNSET_OP_ORDER, "and" },
+  { 0x2D, UNSET_OP_ORDER, "sub" },
+  { 0x35, UNSET_OP_ORDER, "xor" },
+  { 0x3D, UNSET_OP_ORDER, "cmp" },
+  { -1, UNSET_OP_ORDER, "" }
+};
+
+
+static const char* conditional_code_suffix[] = {
+  "o", "no", "c", "nc", "z", "nz", "na", "a",
+  "s", "ns", "pe", "po", "l", "ge", "le", "g"
+};
+
+
+enum InstructionType {
+  NO_INSTR,
+  ZERO_OPERANDS_INSTR,
+  TWO_OPERANDS_INSTR,
+  JUMP_CONDITIONAL_SHORT_INSTR,
+  REGISTER_INSTR,
+  PUSHPOP_INSTR,  // Has implicit 64-bit operand size.
+  MOVE_REG_INSTR,
+  CALL_JUMP_INSTR,
+  SHORT_IMMEDIATE_INSTR
+};
+
+
+struct InstructionDesc {
+  const char* mnem;
+  InstructionType type;
+  OperandType op_order_;
+  bool byte_size_operation;  // Fixed 8-bit operation.
+};
+
+
+class InstructionTable {
+ public:
+  InstructionTable();
+  const InstructionDesc& Get(byte x) const {
+    return instructions_[x];
+  }
+
+ private:
+  InstructionDesc instructions_[256];
+  void Clear();
+  void Init();
+  void CopyTable(ByteMnemonic bm[], InstructionType type);
+  void SetTableRange(InstructionType type, byte start, byte end, bool byte_size,
+                     const char* mnem);
+  void AddJumpConditionalShort();
+};
+
+
+InstructionTable::InstructionTable() {
+  Clear();
+  Init();
+}
+
+
+void InstructionTable::Clear() {
+  for (int i = 0; i < 256; i++) {
+    instructions_[i].mnem = "(bad)";
+    instructions_[i].type = NO_INSTR;
+    instructions_[i].op_order_ = UNSET_OP_ORDER;
+    instructions_[i].byte_size_operation = false;
+  }
+}
+
+
+void InstructionTable::Init() {
+  CopyTable(two_operands_instr, TWO_OPERANDS_INSTR);
+  CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR);
+  CopyTable(call_jump_instr, CALL_JUMP_INSTR);
+  CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR);
+  AddJumpConditionalShort();
+  SetTableRange(PUSHPOP_INSTR, 0x50, 0x57, false, "push");
+  SetTableRange(PUSHPOP_INSTR, 0x58, 0x5F, false, "pop");
+  SetTableRange(MOVE_REG_INSTR, 0xB8, 0xBF, false, "mov");
+}
+
+
+void InstructionTable::CopyTable(ByteMnemonic bm[], InstructionType type) {
+  for (int i = 0; bm[i].b >= 0; i++) {
+    InstructionDesc* id = &instructions_[bm[i].b];
+    id->mnem = bm[i].mnem;
+    OperandType op_order = bm[i].op_order_;
+    id->op_order_ =
+        static_cast<OperandType>(op_order & ~BYTE_SIZE_OPERAND_FLAG);
+    assert(id->type == NO_INSTR);  // Information not already entered
+    id->type = type;
+    id->byte_size_operation = ((op_order & BYTE_SIZE_OPERAND_FLAG) != 0);
+  }
+}
+
+
+void InstructionTable::SetTableRange(InstructionType type,
+                                     byte start,
+                                     byte end,
+                                     bool byte_size,
+                                     const char* mnem) {
+  for (byte b = start; b <= end; b++) {
+    InstructionDesc* id = &instructions_[b];
+    assert(id->type == NO_INSTR);  // Information not already entered
+    id->mnem = mnem;
+    id->type = type;
+    id->byte_size_operation = byte_size;
+  }
+}
+
+
+void InstructionTable::AddJumpConditionalShort() {
+  for (byte b = 0x70; b <= 0x7F; b++) {
+    InstructionDesc* id = &instructions_[b];
+    assert(id->type == NO_INSTR);  // Information not already entered
+    id->mnem = NULL;  // Computed depending on condition code.
+    id->type = JUMP_CONDITIONAL_SHORT_INSTR;
+  }
+}
+
+
+static InstructionTable instruction_table;
+
+static InstructionDesc cmov_instructions[16] = {
+  {"cmovo", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+  {"cmovno", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+  {"cmovc", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+  {"cmovnc", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+  {"cmovz", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+  {"cmovnz", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+  {"cmovna", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+  {"cmova", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+  {"cmovs", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+  {"cmovns", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+  {"cmovpe", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+  {"cmovpo", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+  {"cmovl", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+  {"cmovge", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+  {"cmovle", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+  {"cmovg", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false}
+};
+
+//------------------------------------------------------------------------------
+// DisassemblerX64 implementation.
+
+enum UnimplementedOpcodeAction {
+  CONTINUE_ON_UNIMPLEMENTED_OPCODE,
+  ABORT_ON_UNIMPLEMENTED_OPCODE
+};
+
+// A new DisassemblerX64 object is created to disassemble each instruction.
+// The object can only disassemble a single instruction.
+class DisassemblerX64 {
+ public:
+  DisassemblerX64(const NameConverter& converter,
+                  UnimplementedOpcodeAction unimplemented_action =
+                      ABORT_ON_UNIMPLEMENTED_OPCODE)
+      : converter_(converter),
+        tmp_buffer_pos_(0),
+        abort_on_unimplemented_(
+            unimplemented_action == ABORT_ON_UNIMPLEMENTED_OPCODE),
+        rex_(0),
+        operand_size_(0),
+        group_1_prefix_(0),
+        byte_size_operand_(false) {
+    tmp_buffer_[0] = '\0';
+  }
+
+  virtual ~DisassemblerX64() {
+  }
+
+  // Writes one disassembled instruction into 'buffer' (0-terminated).
+  // Returns the length of the disassembled machine instruction in bytes.
+  int InstructionDecode(v8::internal::Vector<char> buffer, byte* instruction);
+
+ private:
+  enum OperandSize {
+    BYTE_SIZE = 0,
+    WORD_SIZE = 1,
+    DOUBLEWORD_SIZE = 2,
+    QUADWORD_SIZE = 3
+  };
+
+  const NameConverter& converter_;
+  v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
+  unsigned int tmp_buffer_pos_;
+  bool abort_on_unimplemented_;
+  // Prefixes parsed
+  byte rex_;
+  byte operand_size_;  // 0x66 or (if no group 3 prefix is present) 0x0.
+  byte group_1_prefix_;  // 0xF2, 0xF3, or (if no group 1 prefix is present) 0.
+  // Byte size operand override.
+  bool byte_size_operand_;
+
+  void setRex(byte rex) {
+    ASSERT_EQ(0x40, rex & 0xF0);
+    rex_ = rex;
+  }
+
+  bool rex() { return rex_ != 0; }
+
+  bool rex_b() { return (rex_ & 0x01) != 0; }
+
+  // Actual base register number given the low bits and the rex.b state.
+  int base_reg(int low_bits) { return low_bits | ((rex_ & 0x01) << 3); }
+
+  bool rex_x() { return (rex_ & 0x02) != 0; }
+
+  bool rex_r() { return (rex_ & 0x04) != 0; }
+
+  bool rex_w() { return (rex_ & 0x08) != 0; }
+
+  OperandSize operand_size() {
+    if (byte_size_operand_) return BYTE_SIZE;
+    if (rex_w()) return QUADWORD_SIZE;
+    if (operand_size_ != 0) return WORD_SIZE;
+    return DOUBLEWORD_SIZE;
+  }
+
+  char operand_size_code() {
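+    // For example, a REX.W-prefixed add disassembles as "addq", a
+    // 0x66-prefixed one as "addw", and the 32-bit default as "addl".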
+    return "bwlq"[operand_size()];
+  }
+
+  const char* NameOfCPURegister(int reg) const {
+    return converter_.NameOfCPURegister(reg);
+  }
+
+  const char* NameOfByteCPURegister(int reg) const {
+    return converter_.NameOfByteCPURegister(reg);
+  }
+
+  const char* NameOfXMMRegister(int reg) const {
+    return converter_.NameOfXMMRegister(reg);
+  }
+
+  const char* NameOfAddress(byte* addr) const {
+    return converter_.NameOfAddress(addr);
+  }
+
+  // Disassembler helper functions.
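+  // For example, the ModRM byte 0xD9 (binary 11 011 001) decodes to mod == 3,
+  // regop == 3 and rm == 1; REX.R and REX.B extend regop and rm to 11 and 9.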
+  void get_modrm(byte data,
+                 int* mod,
+                 int* regop,
+                 int* rm) {
+    *mod = (data >> 6) & 3;
+    *regop = ((data & 0x38) >> 3) | (rex_r() ? 8 : 0);
+    *rm = (data & 7) | (rex_b() ? 8 : 0);
+  }
+
+  void get_sib(byte data,
+               int* scale,
+               int* index,
+               int* base) {
+    *scale = (data >> 6) & 3;
+    *index = ((data >> 3) & 7) | (rex_x() ? 8 : 0);
+    *base = (data & 7) | (rex_b() ? 8 : 0);
+  }
+
+  typedef const char* (DisassemblerX64::*RegisterNameMapping)(int reg) const;
+
+  int PrintRightOperandHelper(byte* modrmp,
+                              RegisterNameMapping register_name);
+  int PrintRightOperand(byte* modrmp);
+  int PrintRightByteOperand(byte* modrmp);
+  int PrintOperands(const char* mnem,
+                    OperandType op_order,
+                    byte* data);
+  int PrintImmediate(byte* data, OperandSize size);
+  int PrintImmediateOp(byte* data);
+  const char* TwoByteMnemonic(byte opcode);
+  int TwoByteOpcodeInstruction(byte* data);
+  int F7Instruction(byte* data);
+  int ShiftInstruction(byte* data);
+  int JumpShort(byte* data);
+  int JumpConditional(byte* data);
+  int JumpConditionalShort(byte* data);
+  int SetCC(byte* data);
+  int FPUInstruction(byte* data);
+  void AppendToBuffer(const char* format, ...);
+
+  void UnimplementedInstruction() {
+    if (abort_on_unimplemented_) {
+      CHECK(false);
+    } else {
+      AppendToBuffer("'Unimplemented Instruction'");
+    }
+  }
+};
+
+
+void DisassemblerX64::AppendToBuffer(const char* format, ...) {
+  v8::internal::Vector<char> buf = tmp_buffer_ + tmp_buffer_pos_;
+  va_list args;
+  va_start(args, format);
+  int result = v8::internal::OS::VSNPrintF(buf, format, args);
+  va_end(args);
+  tmp_buffer_pos_ += result;
+}
+
+
+int DisassemblerX64::PrintRightOperandHelper(
+    byte* modrmp,
+    RegisterNameMapping register_name) {
+  int mod, regop, rm;
+  get_modrm(*modrmp, &mod, &regop, &rm);
+  switch (mod) {
+    case 0:
+      if ((rm & 7) == 5) {
+        int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 1);
+        AppendToBuffer("[0x%x]", disp);
+        return 5;
+      } else if ((rm & 7) == 4) {
+        // Codes for SIB byte.
+        byte sib = *(modrmp + 1);
+        int scale, index, base;
+        get_sib(sib, &scale, &index, &base);
+        if (index == 4 && (base & 7) == 4 && scale == 0 /*times_1*/) {
+          // index == rsp means no index. Only use sib byte with no index for
+          // rsp and r12 base.
+          AppendToBuffer("[%s]", (this->*register_name)(base));
+          return 2;
+        } else if (base == 5) {
+          // base == rbp means no base register (when mod == 0).
+          int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
+          AppendToBuffer("[%s*%d+0x%x]",
+                         (this->*register_name)(index),
+                         1 << scale, disp);
+          return 6;
+        } else if (index != 4 && base != 5) {
+          // [base+index*scale]
+          AppendToBuffer("[%s+%s*%d]",
+                         (this->*register_name)(base),
+                         (this->*register_name)(index),
+                         1 << scale);
+          return 2;
+        } else {
+          UnimplementedInstruction();
+          return 1;
+        }
+      } else {
+        AppendToBuffer("[%s]", (this->*register_name)(rm));
+        return 1;
+      }
+      break;
+    case 1:  // fall through
+    case 2:
+      if ((rm & 7) == 4) {
+        byte sib = *(modrmp + 1);
+        int scale, index, base;
+        get_sib(sib, &scale, &index, &base);
+        int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 2)
+                              : *reinterpret_cast<char*>(modrmp + 2);
+        if (index == 4 && (base & 7) == 4 && scale == 0 /*times_1*/) {
+          if (-disp > 0) {
+            AppendToBuffer("[%s-0x%x]", (this->*register_name)(base), -disp);
+          } else {
+            AppendToBuffer("[%s+0x%x]", (this->*register_name)(base), disp);
+          }
+        } else {
+          if (-disp > 0) {
+            AppendToBuffer("[%s+%s*%d-0x%x]",
+                           (this->*register_name)(base),
+                           (this->*register_name)(index),
+                           1 << scale,
+                           -disp);
+          } else {
+            AppendToBuffer("[%s+%s*%d+0x%x]",
+                           (this->*register_name)(base),
+                           (this->*register_name)(index),
+                           1 << scale,
+                           disp);
+          }
+        }
+        return mod == 2 ? 6 : 3;
+      } else {
+        // No sib.
+        int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 1)
+                              : *reinterpret_cast<char*>(modrmp + 1);
+        if (-disp > 0) {
+          AppendToBuffer("[%s-0x%x]", (this->*register_name)(rm), -disp);
+        } else {
+          AppendToBuffer("[%s+0x%x]", (this->*register_name)(rm), disp);
+        }
+        return (mod == 2) ? 5 : 2;
+      }
+      break;
+    case 3:
+      AppendToBuffer("%s", (this->*register_name)(rm));
+      return 1;
+    default:
+      UnimplementedInstruction();
+      return 1;
+  }
+  UNREACHABLE();
+}
+
+
+int DisassemblerX64::PrintImmediate(byte* data, OperandSize size) {
+  int64_t value;
+  int count;
+  switch (size) {
+    case BYTE_SIZE:
+      value = *data;
+      count = 1;
+      break;
+    case WORD_SIZE:
+      value = *reinterpret_cast<int16_t*>(data);
+      count = 2;
+      break;
+    case DOUBLEWORD_SIZE:
+      value = *reinterpret_cast<uint32_t*>(data);
+      count = 4;
+      break;
+    case QUADWORD_SIZE:
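+      // 64-bit operations here take a sign-extended 32-bit immediate.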
+      value = *reinterpret_cast<int32_t*>(data);
+      count = 4;
+      break;
+    default:
+      UNREACHABLE();
+      value = 0;  // Initialize variables on all paths to satisfy the compiler.
+      count = 0;
+  }
+  AppendToBuffer("%" V8_PTR_PREFIX "x", value);
+  return count;
+}
+
+
+int DisassemblerX64::PrintRightOperand(byte* modrmp) {
+  return PrintRightOperandHelper(modrmp,
+                                 &DisassemblerX64::NameOfCPURegister);
+}
+
+
+int DisassemblerX64::PrintRightByteOperand(byte* modrmp) {
+  return PrintRightOperandHelper(modrmp,
+                                 &DisassemblerX64::NameOfByteCPURegister);
+}
+
+
+// Returns number of bytes used including the current *data.
+// Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
+int DisassemblerX64::PrintOperands(const char* mnem,
+                                   OperandType op_order,
+                                   byte* data) {
+  byte modrm = *data;
+  int mod, regop, rm;
+  get_modrm(modrm, &mod, &regop, &rm);
+  int advance = 0;
+  const char* register_name =
+      byte_size_operand_ ? NameOfByteCPURegister(regop)
+                         : NameOfCPURegister(regop);
+  switch (op_order) {
+    case REG_OPER_OP_ORDER: {
+      AppendToBuffer("%s%c %s,",
+                     mnem,
+                     operand_size_code(),
+                     register_name);
+      advance = byte_size_operand_ ? PrintRightByteOperand(data)
+                                   : PrintRightOperand(data);
+      break;
+    }
+    case OPER_REG_OP_ORDER: {
+      AppendToBuffer("%s%c ", mnem, operand_size_code());
+      advance = byte_size_operand_ ? PrintRightByteOperand(data)
+                                   : PrintRightOperand(data);
+      AppendToBuffer(",%s", register_name);
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+  return advance;
+}
+
+
+// Returns number of bytes used by machine instruction, including *data byte.
+// Writes immediate instructions to 'tmp_buffer_'.
+int DisassemblerX64::PrintImmediateOp(byte* data) {
+  bool byte_size_immediate = (*data & 0x02) != 0;
+  byte modrm = *(data + 1);
+  int mod, regop, rm;
+  get_modrm(modrm, &mod, &regop, &rm);
+  const char* mnem = "Imm???";
+  switch (regop) {
+    case 0:
+      mnem = "add";
+      break;
+    case 1:
+      mnem = "or";
+      break;
+    case 2:
+      mnem = "adc";
+      break;
+    case 4:
+      mnem = "and";
+      break;
+    case 5:
+      mnem = "sub";
+      break;
+    case 6:
+      mnem = "xor";
+      break;
+    case 7:
+      mnem = "cmp";
+      break;
+    default:
+      UnimplementedInstruction();
+  }
+  AppendToBuffer("%s%c ", mnem, operand_size_code());
+  int count = PrintRightOperand(data + 1);
+  AppendToBuffer(",0x");
+  OperandSize immediate_size = byte_size_immediate ? BYTE_SIZE : operand_size();
+  count += PrintImmediate(data + 1 + count, immediate_size);
+  return 1 + count;
+}
+
+
+// Returns number of bytes used, including *data.
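+// The reg field of the ModR/M byte selects the operation (test, not, neg,
+// mul, idiv); only register forms and 'test' with a memory operand are
+// handled.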
+int DisassemblerX64::F7Instruction(byte* data) {
+  assert(*data == 0xF7);
+  byte modrm = *(data + 1);
+  int mod, regop, rm;
+  get_modrm(modrm, &mod, &regop, &rm);
+  if (mod == 3 && regop != 0) {
+    const char* mnem = NULL;
+    switch (regop) {
+      case 2:
+        mnem = "not";
+        break;
+      case 3:
+        mnem = "neg";
+        break;
+      case 4:
+        mnem = "mul";
+        break;
+      case 7:
+        mnem = "idiv";
+        break;
+      default:
+        UnimplementedInstruction();
+    }
+    AppendToBuffer("%s%c %s",
+                   mnem,
+                   operand_size_code(),
+                   NameOfCPURegister(rm));
+    return 2;
+  } else if (mod == 3 && regop == 0) {
+    int32_t imm = *reinterpret_cast<int32_t*>(data + 2);
+    AppendToBuffer("test%c %s,0x%x",
+                   operand_size_code(),
+                   NameOfCPURegister(rm),
+                   imm);
+    return 6;
+  } else if (regop == 0) {
+    AppendToBuffer("test%c ", operand_size_code());
+    int count = PrintRightOperand(data + 1);
+    int32_t imm = *reinterpret_cast<int32_t*>(data + 1 + count);
+    AppendToBuffer(",0x%x", imm);
+    return 1 + count + 4 /*int32_t*/;
+  } else {
+    UnimplementedInstruction();
+    return 2;
+  }
+}
+
+
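+// Returns number of bytes used, including *data.
+// Shift/rotate group: 0xD0/0xD1 shift by one, 0xD2/0xD3 shift by CL, and
+// 0xC0/0xC1 shift by an 8-bit immediate; the reg field of the ModR/M byte
+// selects the operation.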
+int DisassemblerX64::ShiftInstruction(byte* data) {
+  byte op = *data & (~1);
+  if (op != 0xD0 && op != 0xD2 && op != 0xC0) {
+    UnimplementedInstruction();
+    return 1;
+  }
+  byte modrm = *(data + 1);
+  int mod, regop, rm;
+  get_modrm(modrm, &mod, &regop, &rm);
+  regop &= 0x7;  // The REX.R bit does not affect the operation.
+  int imm8 = -1;
+  int num_bytes = 2;
+  if (mod != 3) {
+    UnimplementedInstruction();
+    return num_bytes;
+  }
+  const char* mnem = NULL;
+  switch (regop) {
+    case 0:
+      mnem = "rol";
+      break;
+    case 1:
+      mnem = "ror";
+      break;
+    case 2:
+      mnem = "rcl";
+      break;
+    case 3:
+      mnem = "rcr";
+      break;
+    case 4:
+      mnem = "shl";
+      break;
+    case 5:
+      mnem = "shr";
+      break;
+    case 7:
+      mnem = "sar";
+      break;
+    default:
+      UnimplementedInstruction();
+      return num_bytes;
+  }
+  assert(mnem != NULL);
+  if (op == 0xD0) {
+    imm8 = 1;
+  } else if (op == 0xC0) {
+    imm8 = *(data + 2);
+    num_bytes = 3;
+  }
+  AppendToBuffer("%s%c %s,",
+                 mnem,
+                 operand_size_code(),
+                 byte_size_operand_ ? NameOfByteCPURegister(rm)
+                                    : NameOfCPURegister(rm));
+  if (op == 0xD2) {
+    AppendToBuffer("cl");
+  } else {
+    AppendToBuffer("%d", imm8);
+  }
+  return num_bytes;
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerX64::JumpShort(byte* data) {
+  assert(*data == 0xEB);
+  byte b = *(data + 1);
+  byte* dest = data + static_cast<int8_t>(b) + 2;
+  AppendToBuffer("jmp %s", NameOfAddress(dest));
+  return 2;
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerX64::JumpConditional(byte* data) {
+  assert(*data == 0x0F);
+  byte cond = *(data + 1) & 0x0F;
+  byte* dest = data + *reinterpret_cast<int32_t*>(data + 2) + 6;
+  const char* mnem = conditional_code_suffix[cond];
+  AppendToBuffer("j%s %s", mnem, NameOfAddress(dest));
+  return 6;  // includes 0x0F
+}
+
+
+// Returns number of bytes used, including *data.
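+// Short form: a 0x70-0x7F opcode followed by an 8-bit signed displacement.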
+int DisassemblerX64::JumpConditionalShort(byte* data) {
+  byte cond = *data & 0x0F;
+  byte b = *(data + 1);
+  byte* dest = data + static_cast<int8_t>(b) + 2;
+  const char* mnem = conditional_code_suffix[cond];
+  AppendToBuffer("j%s %s", mnem, NameOfAddress(dest));
+  return 2;
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerX64::SetCC(byte* data) {
+  assert(*data == 0x0F);
+  byte cond = *(data + 1) & 0x0F;
+  const char* mnem = conditional_code_suffix[cond];
+  AppendToBuffer("set%s%c ", mnem, operand_size_code());
+  PrintRightByteOperand(data + 2);
+  return 3;  // includes 0x0F
+}
+
+
+// Returns number of bytes used, including *data.
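+// Decodes a subset of the x87 instructions (first byte 0xD9-0xDF);
+// unrecognized first bytes fall through to "Unknown FP instruction".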
+int DisassemblerX64::FPUInstruction(byte* data) {
+  byte b1 = *data;
+  byte b2 = *(data + 1);
+  if (b1 == 0xD9) {
+    const char* mnem = NULL;
+    switch (b2) {
+      case 0xE0:
+        mnem = "fchs";
+        break;
+      case 0xE1:
+        mnem = "fabs";
+        break;
+      case 0xE4:
+        mnem = "ftst";
+        break;
+      case 0xF5:
+        mnem = "fprem1";
+        break;
+      case 0xF7:
+        mnem = "fincstp";
+        break;
+      case 0xE8:
+        mnem = "fld1";
+        break;
+      case 0xEE:
+        mnem = "fldz";
+        break;
+      case 0xF8:
+        mnem = "fprem";
+        break;
+    }
+    if (mnem != NULL) {
+      AppendToBuffer("%s", mnem);
+      return 2;
+    } else if ((b2 & 0xF8) == 0xC8) {
+      AppendToBuffer("fxch st%d", b2 & 0x7);
+      return 2;
+    } else {
+      int mod, regop, rm;
+      get_modrm(*(data + 1), &mod, &regop, &rm);
+      const char* mnem = "?";
+      switch (regop) {
+        case 0:
+          mnem = "fld_s";
+          break;
+        case 3:
+          mnem = "fstp_s";
+          break;
+        default:
+          UnimplementedInstruction();
+      }
+      AppendToBuffer("%s ", mnem);
+      int count = PrintRightOperand(data + 1);
+      return count + 1;
+    }
+  } else if (b1 == 0xDD) {
+    if ((b2 & 0xF8) == 0xC0) {
+      AppendToBuffer("ffree st%d", b2 & 0x7);
+      return 2;
+    } else {
+      int mod, regop, rm;
+      get_modrm(*(data + 1), &mod, &regop, &rm);
+      const char* mnem = "?";
+      switch (regop) {
+        case 0:
+          mnem = "fld_d";
+          break;
+        case 3:
+          mnem = "fstp_d";
+          break;
+        default:
+          UnimplementedInstruction();
+      }
+      AppendToBuffer("%s ", mnem);
+      int count = PrintRightOperand(data + 1);
+      return count + 1;
+    }
+  } else if (b1 == 0xDB) {
+    int mod, regop, rm;
+    get_modrm(*(data + 1), &mod, &regop, &rm);
+    const char* mnem = "?";
+    switch (regop) {
+      case 0:
+        mnem = "fild_s";
+        break;
+      case 2:
+        mnem = "fist_s";
+        break;
+      case 3:
+        mnem = "fistp_s";
+        break;
+      default:
+        UnimplementedInstruction();
+    }
+    AppendToBuffer("%s ", mnem);
+    int count = PrintRightOperand(data + 1);
+    return count + 1;
+  } else if (b1 == 0xDF) {
+    if (b2 == 0xE0) {
+      AppendToBuffer("fnstsw_ax");
+      return 2;
+    }
+    int mod, regop, rm;
+    get_modrm(*(data + 1), &mod, &regop, &rm);
+    const char* mnem = "?";
+    switch (regop) {
+      case 5:
+        mnem = "fild_d";
+        break;
+      case 7:
+        mnem = "fistp_d";
+        break;
+      default:
+        UnimplementedInstruction();
+    }
+    AppendToBuffer("%s ", mnem);
+    int count = PrintRightOperand(data + 1);
+    return count + 1;
+  } else if (b1 == 0xDC || b1 == 0xDE) {
+    bool is_pop = (b1 == 0xDE);
+    if (is_pop && b2 == 0xD9) {
+      AppendToBuffer("fcompp");
+      return 2;
+    }
+    const char* mnem = "FP0xDC";
+    switch (b2 & 0xF8) {
+      case 0xC0:
+        mnem = "fadd";
+        break;
+      case 0xE8:
+        mnem = "fsub";
+        break;
+      case 0xC8:
+        mnem = "fmul";
+        break;
+      case 0xF8:
+        mnem = "fdiv";
+        break;
+      default:
+        UnimplementedInstruction();
+    }
+    AppendToBuffer("%s%s st%d", mnem, is_pop ? "p" : "", b2 & 0x7);
+    return 2;
+  } else if (b1 == 0xDA && b2 == 0xE9) {
+    const char* mnem = "fucompp";
+    AppendToBuffer("%s", mnem);
+    return 2;
+  }
+  AppendToBuffer("Unknown FP instruction");
+  return 2;
+}
+
+
+// Handle all two-byte opcodes, which start with 0x0F.
+// These instructions may be affected by an 0x66, 0xF2, or 0xF3 prefix.
+// We do not use any three-byte opcodes, which start with 0x0F38 or 0x0F3A.
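+// For the SSE instructions handled here, a group 1 prefix of 0xF2 selects
+// the scalar double-precision forms (movsd, cvtsi2sd, addsd, ...) and 0xF3
+// selects cvttss2si.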
+int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
+  byte opcode = *(data + 1);
+  byte* current = data + 2;
+  // At return, "current" points to the start of the next instruction.
+  const char* mnemonic = TwoByteMnemonic(opcode);
+  if (opcode == 0x1F) {
+    // NOP
+    int mod, regop, rm;
+    get_modrm(*current, &mod, &regop, &rm);
+    current++;
+    if (regop == 4) {  // SIB byte present.
+      current++;
+    }
+    if (mod == 1) {  // Byte displacement.
+      current += 1;
+    } else if (mod == 2) {  // 32-bit displacement.
+      current += 4;
+    }  // else no immediate displacement.
+    AppendToBuffer("nop");
+
+  } else if (opcode == 0xA2 || opcode == 0x31) {
+    // RDTSC or CPUID
+    AppendToBuffer("%s", mnemonic);
+
+  } else if ((opcode & 0xF0) == 0x40) {
+    // CMOVcc: conditional move.
+    int condition = opcode & 0x0F;
+    const InstructionDesc& idesc = cmov_instructions[condition];
+    byte_size_operand_ = idesc.byte_size_operation;
+    current += PrintOperands(idesc.mnem, idesc.op_order_, current);
+
+  } else if ((opcode & 0xF0) == 0x80) {
+    // Jcc: Conditional jump (branch).
+    current = data + JumpConditional(data);
+
+  } else if (opcode == 0xBE || opcode == 0xBF || opcode == 0xB6 ||
+             opcode == 0xB7 || opcode == 0xAF) {
+    // Size-extending moves, IMUL.
+    current += PrintOperands(mnemonic, REG_OPER_OP_ORDER, current);
+
+  } else if ((opcode & 0xF0) == 0x90) {
+    // SETcc: Set byte on condition. Needs pointer to beginning of instruction.
+    current = data + SetCC(data);
+
+  } else if (opcode == 0xAB || opcode == 0xA5 || opcode == 0xAD) {
+    // SHLD, SHRD (double-precision shift), BTS (bit set).
+    AppendToBuffer("%s ", mnemonic);
+    int mod, regop, rm;
+    get_modrm(*current, &mod, &regop, &rm);
+    current += PrintRightOperand(current);
+    if (opcode == 0xAB) {
+      AppendToBuffer(",%s", NameOfCPURegister(regop));
+    } else {
+      AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
+    }
+  } else if (group_1_prefix_ == 0xF2) {
+    // Beginning of instructions with prefix 0xF2.
+
+    if (opcode == 0x11 || opcode == 0x10) {
+      // MOVSD: Move scalar double-precision fp to/from/between XMM registers.
+      AppendToBuffer("movsd ");
+      int mod, regop, rm;
+      get_modrm(*current, &mod, &regop, &rm);
+      if (opcode == 0x11) {
+        current += PrintRightOperand(current);
+        AppendToBuffer(",%s", NameOfXMMRegister(regop));
+      } else {
+        AppendToBuffer("%s,", NameOfXMMRegister(regop));
+        current += PrintRightOperand(current);
+      }
+    } else if (opcode == 0x2A) {
+      // CVTSI2SD: integer to XMM double conversion.
+      int mod, regop, rm;
+      get_modrm(*current, &mod, &regop, &rm);
+      AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
+      current += PrintRightOperand(current);
+    } else if ((opcode & 0xF8) == 0x58) {
+      // XMM arithmetic. Mnemonic was retrieved at the start of this function.
+      int mod, regop, rm;
+      get_modrm(*current, &mod, &regop, &rm);
+      AppendToBuffer("%s %s,%s", mnemonic, NameOfXMMRegister(regop),
+                     NameOfXMMRegister(rm));
+    } else {
+      UnimplementedInstruction();
+    }
+  } else if (opcode == 0x2C && group_1_prefix_ == 0xF3) {
+    // Instruction with prefix 0xF3.
+
+    // CVTTSS2SI: Convert scalar single-precision FP to dword integer.
+    // Assert that mod is not 3, so source is memory, not an XMM register.
+    ASSERT((*current & 0xC0) != 0xC0);
+    current += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, current);
+  } else {
+    UnimplementedInstruction();
+  }
+  return current - data;
+}
+
+
+// Mnemonics for two-byte opcode instructions starting with 0x0F.
+// The argument is the second byte of the two-byte opcode.
+// Returns NULL if the instruction is not handled here.
+const char* DisassemblerX64::TwoByteMnemonic(byte opcode) {
+  switch (opcode) {
+    case 0x1F:
+      return "nop";
+    case 0x2A:  // F2 prefix.
+      return "cvtsi2sd";
+    case 0x31:
+      return "rdtsc";
+    case 0x58:  // F2 prefix.
+      return "addsd";
+    case 0x59:  // F2 prefix.
+      return "mulsd";
+    case 0x5C:  // F2 prefix.
+      return "subsd";
+    case 0x5E:  // F2 prefix.
+      return "divsd";
+    case 0xA2:
+      return "cpuid";
+    case 0xA5:
+      return "shld";
+    case 0xAB:
+      return "bts";
+    case 0xAD:
+      return "shrd";
+    case 0xAF:
+      return "imul";
+    case 0xB6:
+      return "movzxb";
+    case 0xB7:
+      return "movzxw";
+    case 0xBE:
+      return "movsxb";
+    case 0xBF:
+      return "movsxw";
+    default:
+      return NULL;
+  }
+}
+
+
+// Disassembles the instruction at instr, and writes it into out_buffer.
+int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
+                                       byte* instr) {
+  tmp_buffer_pos_ = 0;  // Start writing at position 0.
+  byte* data = instr;
+  bool processed = true;  // Will be set to false if the current instruction
+                          // is not in 'instructions' table.
+  byte current;
+
+  // Scan for prefixes.
+  while (true) {
+    current = *data;
+    if (current == 0x66) {  // Group 3 prefix.
+      operand_size_ = current;
+    } else if ((current & 0xF0) == 0x40) {  // REX prefix.
+      setRex(current);
+      if (rex_w()) AppendToBuffer("REX.W ");
+    } else if ((current & 0xFE) == 0xF2) {  // Group 1 prefix.
+      group_1_prefix_ = current;
+    } else {  // Not a prefix - an opcode.
+      break;
+    }
+    data++;
+  }
+
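+  // 'current' now holds the first non-prefix byte; look it up in the
+  // one-byte opcode table.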
+  const InstructionDesc& idesc = instruction_table.Get(current);
+  byte_size_operand_ = idesc.byte_size_operation;
+  switch (idesc.type) {
+    case ZERO_OPERANDS_INSTR:
+      AppendToBuffer(idesc.mnem);
+      data++;
+      break;
+
+    case TWO_OPERANDS_INSTR:
+      data++;
+      data += PrintOperands(idesc.mnem, idesc.op_order_, data);
+      break;
+
+    case JUMP_CONDITIONAL_SHORT_INSTR:
+      data += JumpConditionalShort(data);
+      break;
+
+    case REGISTER_INSTR:
+      AppendToBuffer("%s%c %s",
+                     idesc.mnem,
+                     operand_size_code(),
+                     NameOfCPURegister(base_reg(current & 0x07)));
+      data++;
+      break;
+    case PUSHPOP_INSTR:
+      AppendToBuffer("%s %s",
+                     idesc.mnem,
+                     NameOfCPURegister(base_reg(current & 0x07)));
+      data++;
+      break;
+    case MOVE_REG_INSTR: {
+      byte* addr = NULL;
+      switch (operand_size()) {
+        case WORD_SIZE:
+          addr = reinterpret_cast<byte*>(*reinterpret_cast<int16_t*>(data + 1));
+          data += 3;
+          break;
+        case DOUBLEWORD_SIZE:
+          addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
+          data += 5;
+          break;
+        case QUADWORD_SIZE:
+          addr = reinterpret_cast<byte*>(*reinterpret_cast<int64_t*>(data + 1));
+          data += 9;
+          break;
+        default:
+          UNREACHABLE();
+      }
+      AppendToBuffer("mov%c %s,%s",
+                     operand_size_code(),
+                     NameOfCPURegister(base_reg(current & 0x07)),
+                     NameOfAddress(addr));
+      break;
+    }
+
+    case CALL_JUMP_INSTR: {
+      byte* addr = data + *reinterpret_cast<int32_t*>(data + 1) + 5;
+      AppendToBuffer("%s %s", idesc.mnem, NameOfAddress(addr));
+      data += 5;
+      break;
+    }
+
+    case SHORT_IMMEDIATE_INSTR: {
+      byte* addr =
+          reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
+      AppendToBuffer("%s rax, %s", idesc.mnem, NameOfAddress(addr));
+      data += 5;
+      break;
+    }
+
+    case NO_INSTR:
+      processed = false;
+      break;
+
+    default:
+      UNIMPLEMENTED();  // This type is not implemented.
+  }
+
+  // The first byte didn't match any of the simple opcodes, so we
+  // need to do special processing on it.
+  if (!processed) {
+    switch (*data) {
+      case 0xC2:
+        AppendToBuffer("ret 0x%x", *reinterpret_cast<uint16_t*>(data + 1));
+        data += 3;
+        break;
+
+      case 0x69:  // fall through
+      case 0x6B: {
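+        // imul reg,r/m,imm: 0x6B takes an 8-bit immediate, 0x69 a 32-bit one.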
+        int mod, regop, rm;
+        get_modrm(*(data + 1), &mod, &regop, &rm);
+        int32_t imm = *data == 0x6B ? *(data + 2)
+            : *reinterpret_cast<int32_t*>(data + 2);
+        AppendToBuffer("imul %s,%s,0x%x", NameOfCPURegister(regop),
+                       NameOfCPURegister(rm), imm);
+        data += 2 + (*data == 0x6B ? 1 : 4);
+        break;
+      }
+
+      case 0xF6: {
+        int mod, regop, rm;
+        get_modrm(*(data + 1), &mod, &regop, &rm);
+        if (mod == 3 && regop == 0) {
+          AppendToBuffer("testb %s,%d", NameOfCPURegister(rm), *(data + 2));
+        } else {
+          UnimplementedInstruction();
+        }
+        data += 3;
+        break;
+      }
+
+      case 0x81:  // fall through
+      case 0x83:  // 0x81 with sign extension bit set
+        data += PrintImmediateOp(data);
+        break;
+
+      case 0x0F:
+        data += TwoByteOpcodeInstruction(data);
+        break;
+
+      case 0x8F: {
+        data++;
+        int mod, regop, rm;
+        get_modrm(*data, &mod, &regop, &rm);
+        if (regop == 0) {
+          AppendToBuffer("pop ");
+          data += PrintRightOperand(data);
+        }
+      }
+        break;
+
+      case 0xFF: {
+        data++;
+        int mod, regop, rm;
+        get_modrm(*data, &mod, &regop, &rm);
+        const char* mnem = NULL;
+        switch (regop) {
+          case 0:
+            mnem = "inc";
+            break;
+          case 1:
+            mnem = "dec";
+            break;
+          case 2:
+            mnem = "call";
+            break;
+          case 4:
+            mnem = "jmp";
+            break;
+          case 6:
+            mnem = "push";
+            break;
+          default:
+            mnem = "???";
+        }
+        AppendToBuffer(((regop <= 1) ? "%s%c " : "%s "),
+                       mnem,
+                       operand_size_code());
+        data += PrintRightOperand(data);
+      }
+        break;
+
+      case 0xC7:  // imm32, fall through
+      case 0xC6:  // imm8
+      {
+        bool is_byte = *data == 0xC6;
+        data++;
+
+        AppendToBuffer("mov%c ", is_byte ? 'b' : operand_size_code());
+        data += PrintRightOperand(data);
+        int32_t imm = is_byte ? *data : *reinterpret_cast<int32_t*>(data);
+        AppendToBuffer(",0x%x", imm);
+        data += is_byte ? 1 : 4;
+      }
+        break;
+
+      case 0x80: {
+        data++;
+        AppendToBuffer("cmpb ");
+        data += PrintRightOperand(data);
+        int32_t imm = *data;
+        AppendToBuffer(",0x%x", imm);
+        data++;
+      }
+        break;
+
+      case 0x88:  // 8bit, fall through
+      case 0x89:  // 32bit
+      {
+        bool is_byte = *data == 0x88;
+        int mod, regop, rm;
+        data++;
+        get_modrm(*data, &mod, &regop, &rm);
+        AppendToBuffer("mov%c ", is_byte ? 'b' : operand_size_code());
+        data += PrintRightOperand(data);
+        AppendToBuffer(",%s", NameOfCPURegister(regop));
+      }
+        break;
+
+      case 0x90:
+      case 0x91:
+      case 0x92:
+      case 0x93:
+      case 0x94:
+      case 0x95:
+      case 0x96:
+      case 0x97: {
+        int reg = (current & 0x7) | (rex_b() ? 8 : 0);
+        if (reg == 0) {
+          AppendToBuffer("nop");  // Common name for xchg rax,rax.
+        } else {
+          AppendToBuffer("xchg%c rax, %s",
+                         operand_size_code(),
+                         NameOfCPURegister(reg));
+        }
+        data++;
+      }
+        break;
+
+      case 0xFE: {
+        data++;
+        int mod, regop, rm;
+        get_modrm(*data, &mod, &regop, &rm);
+        if (mod == 3 && regop == 1) {
+          AppendToBuffer("decb %s", NameOfCPURegister(rm));
+        } else {
+          UnimplementedInstruction();
+        }
+        data++;
+      }
+        break;
+
+      case 0x68:
+        AppendToBuffer("push 0x%x", *reinterpret_cast<int32_t*>(data + 1));
+        data += 5;
+        break;
+
+      case 0x6A:
+        AppendToBuffer("push 0x%x", *reinterpret_cast<int8_t*>(data + 1));
+        data += 2;
+        break;
+
+      case 0xA1:  // Fall through.
+      case 0xA3:
+        switch (operand_size()) {
+          case DOUBLEWORD_SIZE: {
+            const char* memory_location = NameOfAddress(
+                reinterpret_cast<byte*>(
+                    *reinterpret_cast<int32_t*>(data + 1)));
+            if (*data == 0xA1) {  // Opcode 0xA1
+              AppendToBuffer("movzxlq rax,(%s)", memory_location);
+            } else {  // Opcode 0xA3
+              AppendToBuffer("movzxlq (%s),rax", memory_location);
+            }
+            data += 5;
+            break;
+          }
+          case QUADWORD_SIZE: {
+            // New x64 instruction mov rax,(imm_64).
+            const char* memory_location = NameOfAddress(
+                *reinterpret_cast<byte**>(data + 1));
+            if (*data == 0xA1) {  // Opcode 0xA1
+              AppendToBuffer("movq rax,(%s)", memory_location);
+            } else {  // Opcode 0xA3
+              AppendToBuffer("movq (%s),rax", memory_location);
+            }
+            data += 9;
+            break;
+          }
+          default:
+            UnimplementedInstruction();
+            data += 2;
+        }
+        break;
+
+      case 0xA8:
+        AppendToBuffer("test al,0x%x", *reinterpret_cast<uint8_t*>(data + 1));
+        data += 2;
+        break;
+
+      case 0xA9: {
+        int64_t value = 0;
+        switch (operand_size()) {
+          case WORD_SIZE:
+            value = *reinterpret_cast<uint16_t*>(data + 1);
+            data += 3;
+            break;
+          case DOUBLEWORD_SIZE:
+            value = *reinterpret_cast<uint32_t*>(data + 1);
+            data += 5;
+            break;
+          case QUADWORD_SIZE:
+            value = *reinterpret_cast<int32_t*>(data + 1);
+            data += 5;
+            break;
+          default:
+            UNREACHABLE();
+        }
+        AppendToBuffer("test%c rax,0x%"V8_PTR_PREFIX"x",
+                       operand_size_code(),
+                       value);
+        break;
+      }
+      case 0xD1:  // fall through
+      case 0xD3:  // fall through
+      case 0xC1:
+        data += ShiftInstruction(data);
+        break;
+      case 0xD0:  // fall through
+      case 0xD2:  // fall through
+      case 0xC0:
+        byte_size_operand_ = true;
+        data += ShiftInstruction(data);
+        break;
+
+      case 0xD9:  // fall through
+      case 0xDA:  // fall through
+      case 0xDB:  // fall through
+      case 0xDC:  // fall through
+      case 0xDD:  // fall through
+      case 0xDE:  // fall through
+      case 0xDF:
+        data += FPUInstruction(data);
+        break;
+
+      case 0xEB:
+        data += JumpShort(data);
+        break;
+
+      case 0xF7:
+        data += F7Instruction(data);
+        break;
+
+      default:
+        UnimplementedInstruction();
+        data += 1;
+    }
+  }  // !processed
+
+  if (tmp_buffer_pos_ < sizeof tmp_buffer_) {
+    tmp_buffer_[tmp_buffer_pos_] = '\0';
+  }
+
+  int instr_len = data - instr;
+  ASSERT(instr_len > 0);  // Ensure progress.
+
+  int outp = 0;
+  // Instruction bytes.
+  for (byte* bp = instr; bp < data; bp++) {
+    outp += v8::internal::OS::SNPrintF(out_buffer + outp, "%02x", *bp);
+  }
+  for (int i = 6 - instr_len; i >= 0; i--) {
+    outp += v8::internal::OS::SNPrintF(out_buffer + outp, "  ");
+  }
+
+  outp += v8::internal::OS::SNPrintF(out_buffer + outp, " %s",
+                                     tmp_buffer_.start());
+  return instr_len;
+}
+
+//------------------------------------------------------------------------------
+
+
+static const char* cpu_regs[16] = {
+  "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
+  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
+};
+
+
+static const char* byte_cpu_regs[16] = {
+  "al", "cl", "dl", "bl", "spl", "bpl", "sil", "dil",
+  "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l"
+};
+
+
+static const char* xmm_regs[16] = {
+  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
+  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
+};
+
+
+const char* NameConverter::NameOfAddress(byte* addr) const {
+  static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
+  v8::internal::OS::SNPrintF(tmp_buffer, "%p", addr);
+  return tmp_buffer.start();
+}
+
+
+const char* NameConverter::NameOfConstant(byte* addr) const {
+  return NameOfAddress(addr);
+}
+
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+  if (0 <= reg && reg < 16)
+    return cpu_regs[reg];
+  return "noreg";
+}
+
+
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+  if (0 <= reg && reg < 16)
+    return byte_cpu_regs[reg];
+  return "noreg";
+}
+
+
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+  if (0 <= reg && reg < 16)
+    return xmm_regs[reg];
+  return "noxmmreg";
+}
+
+
+const char* NameConverter::NameInCode(byte* addr) const {
+  // X64 does not embed debug strings at the moment.
+  UNREACHABLE();
+  return "";
+}
+
+//------------------------------------------------------------------------------
+
+Disassembler::Disassembler(const NameConverter& converter)
+    : converter_(converter) { }
+
+Disassembler::~Disassembler() { }
+
+
+int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
+                                    byte* instruction) {
+  DisassemblerX64 d(converter_, CONTINUE_ON_UNIMPLEMENTED_OPCODE);
+  return d.InstructionDecode(buffer, instruction);
+}
+
+
+// The X64 assembler does not use constant pools.
+int Disassembler::ConstantPoolSizeAt(byte* instruction) {
+  return -1;
+}
+
+
+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
+  NameConverter converter;
+  Disassembler d(converter);
+  for (byte* pc = begin; pc < end;) {
+    v8::internal::EmbeddedVector<char, 128> buffer;
+    buffer[0] = '\0';
+    byte* prev_pc = pc;
+    pc += d.InstructionDecode(buffer, pc);
+    fprintf(f, "%p", prev_pc);
+    fprintf(f, "    ");
+
+    for (byte* bp = prev_pc; bp < pc; bp++) {
+      fprintf(f, "%02x", *bp);
+    }
+    for (int i = 6 - (pc - prev_pc); i >= 0; i--) {
+      fprintf(f, "  ");
+    }
+    fprintf(f, "  %s\n", buffer.start());
+  }
+}
+
+}  // namespace disasm
diff --git a/src/x64/frames-x64.cc b/src/x64/frames-x64.cc
new file mode 100644
index 0000000..fe224ad
--- /dev/null
+++ b/src/x64/frames-x64.cc
@@ -0,0 +1,113 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "frames-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+StackFrame::Type StackFrame::ComputeType(State* state) {
+  ASSERT(state->fp != NULL);
+  if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
+    return ARGUMENTS_ADAPTOR;
+  }
+  // The marker and function offsets overlap. If the marker isn't a
+  // smi then the frame is a JavaScript frame -- and the marker is
+  // really the function.
+  const int offset = StandardFrameConstants::kMarkerOffset;
+  Object* marker = Memory::Object_at(state->fp + offset);
+  if (!marker->IsSmi()) return JAVA_SCRIPT;
+  return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
+}
+
+
+StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
+  if (fp == 0) return NONE;
+  // Compute the stack pointer.
+  Address sp = Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
+  // Fill in the state.
+  state->fp = fp;
+  state->sp = sp;
+  state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
+  // Determine frame type.
+  if (Memory::Address_at(fp + ExitFrameConstants::kDebugMarkOffset) != 0) {
+    return EXIT_DEBUG;
+  } else {
+    return EXIT;
+  }
+}
+
+int JavaScriptFrame::GetProvidedParametersCount() const {
+  return ComputeParametersCount();
+}
+
+
+void ExitFrame::Iterate(ObjectVisitor* a) const {
+  // Exit frames on X64 do not contain any pointers. The arguments
+  // are traversed as part of the expression stack of the calling
+  // frame.
+}
+
+byte* InternalFrame::GetCallerStackPointer() const {
+  // Internal frames have no arguments. The stack pointer of the
+  // caller is at a fixed offset from the frame pointer.
+  return fp() + StandardFrameConstants::kCallerSPOffset;
+}
+
+byte* JavaScriptFrame::GetCallerStackPointer() const {
+  int arguments;
+  if (Heap::gc_state() != Heap::NOT_IN_GC || disable_heap_access_) {
+    // The arguments for cooked frames are traversed as if they were
+    // expression stack elements of the calling frame. The reason for
+    // this rather strange decision is that we cannot access the
+    // function during mark-compact GCs when the stack is cooked.
+    // In fact accessing heap objects (like function->shared() below)
+    // at all during GC is problematic.
+    arguments = 0;
+  } else {
+    // Compute the number of arguments by getting the number of formal
+    // parameters of the function. We must remember to take the
+    // receiver into account (+1).
+    JSFunction* function = JSFunction::cast(this->function());
+    arguments = function->shared()->formal_parameter_count() + 1;
+  }
+  const int offset = StandardFrameConstants::kCallerSPOffset;
+  return fp() + offset + (arguments * kPointerSize);
+}
+
+
+byte* ArgumentsAdaptorFrame::GetCallerStackPointer() const {
+  const int arguments = Smi::cast(GetExpression(0))->value();
+  const int offset = StandardFrameConstants::kCallerSPOffset;
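+  // Add one to include the receiver, as in
+  // JavaScriptFrame::GetCallerStackPointer above.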
+  return fp() + offset + (arguments + 1) * kPointerSize;
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
new file mode 100644
index 0000000..5442be9
--- /dev/null
+++ b/src/x64/frames-x64.h
@@ -0,0 +1,124 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_X64_FRAMES_X64_H_
+#define V8_X64_FRAMES_X64_H_
+
+namespace v8 {
+namespace internal {
+
+// TODO(x64): This is a stub, mostly just a copy of the ia32 version.
+// This might all need to change to be correct for x64.
+
+static const int kNumRegs = 8;
+static const RegList kJSCallerSaved =
+    1 << 0 |  // rax
+    1 << 1 |  // rcx
+    1 << 2 |  // rdx
+    1 << 3 |  // rbx - used as a caller-saved register in JavaScript code
+    1 << 7;   // rdi - callee function
+
+static const int kNumJSCallerSaved = 5;
+
+typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
+
+class StackHandlerConstants : public AllStatic {
+ public:
+  static const int kNextOffset  = 0 * kPointerSize;
+  static const int kFPOffset    = 1 * kPointerSize;
+  static const int kStateOffset = 2 * kPointerSize;
+  static const int kPCOffset    = 3 * kPointerSize;
+
+  static const int kSize = 4 * kPointerSize;
+};
+
+
+class EntryFrameConstants : public AllStatic {
+ public:
+  static const int kCallerFPOffset      = -10 * kPointerSize;
+  static const int kArgvOffset          = 6 * kPointerSize;
+};
+
+
+class ExitFrameConstants : public AllStatic {
+ public:
+  static const int kDebugMarkOffset = -2 * kPointerSize;
+  static const int kSPOffset        = -1 * kPointerSize;
+
+  static const int kCallerFPOffset  = +0 * kPointerSize;
+  static const int kCallerPCOffset  = +1 * kPointerSize;
+
+  // FP-relative displacement of the caller's SP.  It points just
+  // below the saved PC.
+  static const int kCallerSPDisplacement = +2 * kPointerSize;
+};
+
+
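+// The offsets below describe the standard frame layout relative to the
+// frame pointer:
+//   fp + 2 * kPointerSize : caller SP
+//   fp + 1 * kPointerSize : caller PC (return address)
+//   fp + 0                : caller FP
+//   fp - 1 * kPointerSize : context
+//   fp - 2 * kPointerSize : marker / function
+//   fp - 3 * kPointerSize : expressions (locals and expression stack)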
+class StandardFrameConstants : public AllStatic {
+ public:
+  static const int kExpressionsOffset = -3 * kPointerSize;
+  static const int kMarkerOffset      = -2 * kPointerSize;
+  static const int kContextOffset     = -1 * kPointerSize;
+  static const int kCallerFPOffset    =  0 * kPointerSize;
+  static const int kCallerPCOffset    = +1 * kPointerSize;
+  static const int kCallerSPOffset    = +2 * kPointerSize;
+};
+
+
+class JavaScriptFrameConstants : public AllStatic {
+ public:
+  // FP-relative.
+  static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
+  static const int kSavedRegistersOffset = +2 * kPointerSize;
+  static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+
+  // Caller SP-relative.
+  static const int kParam0Offset   = -2 * kPointerSize;
+  static const int kReceiverOffset = -1 * kPointerSize;
+};
+
+
+class ArgumentsAdaptorFrameConstants : public AllStatic {
+ public:
+  static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+class InternalFrameConstants : public AllStatic {
+ public:
+  static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+inline Object* JavaScriptFrame::function_slot_object() const {
+  const int offset = JavaScriptFrameConstants::kFunctionOffset;
+  return Memory::Object_at(fp() + offset);
+}
+
+} }  // namespace v8::internal
+
+#endif  // V8_X64_FRAMES_X64_H_
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
new file mode 100644
index 0000000..8209091
--- /dev/null
+++ b/src/x64/ic-x64.cc
@@ -0,0 +1,1021 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "ic-inl.h"
+#include "runtime.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+#define __ ACCESS_MASM(masm)
+
+
+// Helper function used to load a property from a dictionary backing storage.
+// This function may return false negatives, so miss_label
+// must always call a backup property load that is complete.
+// This function is safe to call if the receiver has fast properties,
+// or if name is not a symbol, and will jump to the miss_label in that case.
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
+                                   Register r0, Register r1, Register r2,
+                                   Register name) {
+  // Register use:
+  //
+  // r0   - used to hold the property dictionary.
+  //
+  // r1   - initially the receiver
+  //      - used for the index into the property dictionary
+  //      - holds the result on exit.
+  //
+  // r2   - used to hold the capacity of the property dictionary.
+  //
+  // name - holds the name of the property and is unchanged.
+
+  Label done;
+
+  // Check for the absence of an interceptor.
+  // Load the map into r0.
+  __ movq(r0, FieldOperand(r1, JSObject::kMapOffset));
+  // Test the has_named_interceptor bit in the map.
+  __ testl(FieldOperand(r0, Map::kInstanceAttributesOffset),
+          Immediate(1 << (Map::kHasNamedInterceptor + (3 * 8))));
+
+  // Jump to miss if the interceptor bit is set.
+  __ j(not_zero, miss_label);
+
+  // Bail out if we have a JS global proxy object.
+  __ movzxbq(r0, FieldOperand(r0, Map::kInstanceTypeOffset));
+  __ cmpb(r0, Immediate(JS_GLOBAL_PROXY_TYPE));
+  __ j(equal, miss_label);
+
+  // Possible work-around for http://crbug.com/16276.
+  __ cmpb(r0, Immediate(JS_GLOBAL_OBJECT_TYPE));
+  __ j(equal, miss_label);
+  __ cmpb(r0, Immediate(JS_BUILTINS_OBJECT_TYPE));
+  __ j(equal, miss_label);
+
+  // Check that the properties array is a dictionary.
+  __ movq(r0, FieldOperand(r1, JSObject::kPropertiesOffset));
+  __ Cmp(FieldOperand(r0, HeapObject::kMapOffset), Factory::hash_table_map());
+  __ j(not_equal, miss_label);
+
+  // Compute the capacity mask.
+  const int kCapacityOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kCapacityIndex * kPointerSize;
+  __ movq(r2, FieldOperand(r0, kCapacityOffset));
+  __ SmiToInteger32(r2, r2);
+  __ decl(r2);
+
+  // Generate an unrolled loop that performs a few probes before
+  // giving up. Measurements done on Gmail indicate that 2 probes
+  // cover ~93% of loads from dictionaries.
+  static const int kProbes = 4;
+  const int kElementsStartOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
+  for (int i = 0; i < kProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    __ movl(r1, FieldOperand(name, String::kLengthOffset));
+    __ shrl(r1, Immediate(String::kHashShift));
+    if (i > 0) {
+      __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
+    }
+    __ and_(r1, r2);
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    __ lea(r1, Operand(r1, r1, times_2, 0));  // r1 = r1 * 3
+
+    // Check if the key is identical to the name.
+    __ cmpq(name, Operand(r0, r1, times_pointer_size,
+                          kElementsStartOffset - kHeapObjectTag));
+    if (i != kProbes - 1) {
+      __ j(equal, &done);
+    } else {
+      __ j(not_equal, miss_label);
+    }
+  }
+
+  // Check that the value is a normal property.
+  __ bind(&done);
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  __ testl(Operand(r0, r1, times_pointer_size, kDetailsOffset - kHeapObjectTag),
+           Immediate(Smi::FromInt(PropertyDetails::TypeField::mask())));
+  __ j(not_zero, miss_label);
+
+  // Get the value at the masked, scaled index.
+  const int kValueOffset = kElementsStartOffset + kPointerSize;
+  __ movq(r1,
+          Operand(r0, r1, times_pointer_size, kValueOffset - kHeapObjectTag));
+}
+
+
+// Helper function used to check that a value is either not an object
+// or is loaded if it is an object.
+static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss,
+                                           Register value) {
+  Label done;
+  // Check if the value is a Smi.
+  __ JumpIfSmi(value, &done);
+  // Check if the object has been loaded.
+  __ movq(kScratchRegister, FieldOperand(value, JSFunction::kMapOffset));
+  __ testb(FieldOperand(kScratchRegister, Map::kBitField2Offset),
+           Immediate(1 << Map::kNeedsLoading));
+  __ j(not_zero, miss);
+  __ bind(&done);
+}
+
+
+// One byte opcode for test eax,0xXXXXXXXX.
+static const byte kTestEaxByte = 0xA9;
+
+
+static bool PatchInlinedMapCheck(Address address, Object* map) {
+  // The address argument is the start of the call sequence that called
+  // the IC.
+  Address test_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+  // The keyed load has a fast inlined case if the IC call instruction
+  // is immediately followed by a test instruction.
+  if (*test_instruction_address != kTestEaxByte) return false;
+
+  // Fetch the offset from the test instruction to the map compare
+  // instructions (starting with the 64-bit immediate mov of the map
+  // address). This offset is stored in the last 4 bytes of the 5
+  // byte test instruction.
+  Address delta_address = test_instruction_address + 1;
+  int delta = *reinterpret_cast<int*>(delta_address);
+  // Compute the map address.  The map address is in the last 8 bytes
+  // of the 10-byte immediate mov instruction (incl. REX prefix), so we add 2
+  // to the offset to get the map address.
+  Address map_address = test_instruction_address + delta + 2;
+  // Patch the map check.
+  *(reinterpret_cast<Object**>(map_address)) = map;
+  return true;
+}
+
+
+bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
+  return PatchInlinedMapCheck(address, map);
+}
+
+
+bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
+  return PatchInlinedMapCheck(address, map);
+}
+
+
+void KeyedLoadIC::ClearInlinedVersion(Address address) {
+  // Insert null as the map to check for to make sure the map check fails
+  // sending control flow to the IC instead of the inlined version.
+  PatchInlinedLoad(address, Heap::null_value());
+}
+
+
+void KeyedStoreIC::ClearInlinedVersion(Address address) {
+  // Insert null as the elements map to check for.  This will make
+  // sure that the elements fast-case map check fails so that control
+  // flows to the IC instead of the inlined version.
+  PatchInlinedStore(address, Heap::null_value());
+}
+
+
+void KeyedStoreIC::RestoreInlinedVersion(Address address) {
+  // Restore the fast-case elements map check so that the inlined
+  // version can be used again.
+  PatchInlinedStore(address, Heap::fixed_array_map());
+}
+
+
+void KeyedLoadIC::Generate(MacroAssembler* masm,
+                           ExternalReference const& f) {
+  // ----------- S t a t e -------------
+  //  -- rsp[0]  : return address
+  //  -- rsp[8]  : name
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+  __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+  __ pop(rbx);
+  __ push(rcx);  // receiver
+  __ push(rax);  // name
+  __ push(rbx);  // return address
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(f, 2, 1);
+}
+
+
+#ifdef DEBUG
+// For use in assert below.
+static int TenToThe(int exponent) {
+  ASSERT(exponent <= 9);
+  ASSERT(exponent >= 1);
+  int answer = 10;
+  for (int i = 1; i < exponent; i++) answer *= 10;
+  return answer;
+}
+#endif
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : name
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+  Label slow, check_string, index_int, index_string, check_pixel_array;
+
+  // Load name and receiver.
+  __ movq(rax, Operand(rsp, kPointerSize));
+  __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(rcx, &slow);
+
+  // Check that the object is some kind of JS object EXCEPT JS Value type.
+  // In the case that the object is a value-wrapper object,
+  // we enter the runtime system to make sure that indexing
+  // into string objects work as intended.
+  ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+  __ CmpObjectType(rcx, JS_OBJECT_TYPE, rdx);
+  __ j(below, &slow);
+  // Check that the receiver does not require access checks.  We need
+  // to check this explicitly since this generic stub does not perform
+  // map checks.  The map is already in rdx.
+  __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
+           Immediate(1 << Map::kIsAccessCheckNeeded));
+  __ j(not_zero, &slow);
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(rax, &check_string);
+  __ SmiToInteger32(rax, rax);
+  // Get the elements array of the object.
+  __ bind(&index_int);
+  __ movq(rcx, FieldOperand(rcx, JSObject::kElementsOffset));
+  // Check that the object is in fast mode (not dictionary).
+  __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+                 Heap::kFixedArrayMapRootIndex);
+  __ j(not_equal, &check_pixel_array);
+  // Check that the key (index) is within bounds.
+  __ cmpl(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
+  __ j(above_equal, &slow);  // Unsigned comparison rejects negative indices.
+  // Fast case: Do the load.
+  __ movq(rax, Operand(rcx, rax, times_pointer_size,
+                      FixedArray::kHeaderSize - kHeapObjectTag));
+  __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+  // In case the loaded value is the_hole we have to consult GetProperty
+  // to ensure the prototype chain is searched.
+  __ j(equal, &slow);
+  __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
+  __ ret(0);
+
+  // Check whether the elements object is a pixel array.
+  // rax: untagged index
+  // rcx: elements array
+  __ bind(&check_pixel_array);
+  __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+                 Heap::kPixelArrayMapRootIndex);
+  __ j(not_equal, &slow);
+  __ cmpl(rax, FieldOperand(rcx, PixelArray::kLengthOffset));
+  __ j(above_equal, &slow);
+  __ movq(rcx, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
+  __ movb(rax, Operand(rcx, rax, times_1, 0));
+  __ Integer32ToSmi(rax, rax);
+  __ ret(0);
+
+  // Slow case: Load name and receiver from stack and jump to runtime.
+  __ bind(&slow);
+  __ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
+  KeyedLoadIC::Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+  __ bind(&check_string);
+  // The key is not a smi.
+  // Is it a string?
+  __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
+  __ j(above_equal, &slow);
+  // Is the string an array index, with cached numeric value?
+  __ movl(rbx, FieldOperand(rax, String::kLengthOffset));
+  __ testl(rbx, Immediate(String::kIsArrayIndexMask));
+
+  // If the string is a symbol, do a quick inline probe of the receiver's
+  // dictionary, if it exists.
+  __ j(not_zero, &index_string);  // The value in rbx is used at jump target.
+  __ testb(FieldOperand(rdx, Map::kInstanceTypeOffset),
+           Immediate(kIsSymbolMask));
+  __ j(zero, &slow);
+  // Probe the dictionary leaving result in rcx.
+  GenerateDictionaryLoad(masm, &slow, rbx, rcx, rdx, rax);
+  GenerateCheckNonObjectOrLoaded(masm, &slow, rcx);
+  __ movq(rax, rcx);
+  __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
+  __ ret(0);
+  // Array index string: If short enough use cache in length/hash field (rbx).
+  // We assert that there are enough bits in an int32_t after the hash shift
+  // bits have been subtracted to allow space for the length and the cached
+  // array index.
+  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+         (1 << (String::kShortLengthShift - String::kHashShift)));
+  __ bind(&index_string);
+  const int kLengthFieldLimit =
+      (String::kMaxCachedArrayIndexLength + 1) << String::kShortLengthShift;
+  __ cmpl(rbx, Immediate(kLengthFieldLimit));
+  __ j(above_equal, &slow);
+  __ movl(rax, rbx);
+  __ and_(rax, Immediate((1 << String::kShortLengthShift) - 1));
+  __ shrl(rax, Immediate(String::kLongLengthShift));
+  __ jmp(&index_int);
+}
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : name
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+  Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
+}
+
+
+void KeyedStoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
+  // ----------- S t a t e -------------
+  //  -- rax     : value
+  //  -- rsp[0]  : return address
+  //  -- rsp[8]  : key
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+
+  __ pop(rcx);
+  __ push(Operand(rsp, 1 * kPointerSize));  // receiver
+  __ push(Operand(rsp, 1 * kPointerSize));  // key
+  __ push(rax);  // value
+  __ push(rcx);  // return address
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(f, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax     : value
+  //  -- rcx     : transition map
+  //  -- rsp[0]  : return address
+  //  -- rsp[8]  : key
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+
+  __ pop(rbx);
+  __ push(Operand(rsp, 1 * kPointerSize));  // receiver
+  __ push(rcx);  // transition map
+  __ push(rax);  // value
+  __ push(rbx);  // return address
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(
+      ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : key
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+  Label slow, fast, array, extra, check_pixel_array;
+
+  // Get the receiver from the stack.
+  __ movq(rdx, Operand(rsp, 2 * kPointerSize));  // 2 ~ return address, key
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(rdx, &slow);
+  // Get the map from the receiver.
+  __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+  // Check that the receiver does not require access checks.  We need
+  // to do this because this generic stub does not perform map checks.
+  __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+           Immediate(1 << Map::kIsAccessCheckNeeded));
+  __ j(not_zero, &slow);
+  // Get the key from the stack.
+  __ movq(rbx, Operand(rsp, 1 * kPointerSize));  // 1 ~ return address
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(rbx, &slow);
+  // If it is a smi, make sure it is zero-extended, so it can be
+  // used as an index in a memory operand.
+  __ movl(rbx, rbx);  // Clear the high bits of rbx.
+
+  __ CmpInstanceType(rcx, JS_ARRAY_TYPE);
+  __ j(equal, &array);
+  // Check that the object is some kind of JS object.
+  __ CmpInstanceType(rcx, FIRST_JS_OBJECT_TYPE);
+  __ j(below, &slow);
+
+  // Object case: Check key against length in the elements array.
+  // rax: value
+  // rdx: JSObject
+  // rbx: index (as a smi), zero-extended.
+  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+  // Check that the object is in fast mode (not dictionary).
+  __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+                 Heap::kFixedArrayMapRootIndex);
+  __ j(not_equal, &check_pixel_array);
+  // Untag the key (for checking against untagged length in the fixed array).
+  __ SmiToInteger32(rdx, rbx);
+  __ cmpl(rdx, FieldOperand(rcx, Array::kLengthOffset));
+  // rax: value
+  // rcx: FixedArray
+  // rbx: index (as a smi)
+  __ j(below, &fast);
+
+  // Slow case: Push extra copies of the arguments (3).
+  __ bind(&slow);
+  __ pop(rcx);
+  __ push(Operand(rsp, 1 * kPointerSize));
+  __ push(Operand(rsp, 1 * kPointerSize));
+  __ push(rax);
+  __ push(rcx);
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
+
+  // Check whether the elements object is a pixel array.
+  // rax: value
+  // rcx: elements array
+  // rbx: index (as a smi), zero-extended.
+  __ bind(&check_pixel_array);
+  __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+                 Heap::kPixelArrayMapRootIndex);
+  __ j(not_equal, &slow);
+  // Check that the value is a smi. If a conversion is needed call into the
+  // runtime to convert and clamp.
+  __ JumpIfNotSmi(rax, &slow);
+  __ SmiToInteger32(rbx, rbx);
+  __ cmpl(rbx, FieldOperand(rcx, PixelArray::kLengthOffset));
+  __ j(above_equal, &slow);
+  __ movq(rdx, rax);  // Save the value.
+  __ SmiToInteger32(rax, rax);
+  {  // Clamp the value to [0..255].
+    Label done, is_negative;
+    __ testl(rax, Immediate(0xFFFFFF00));
+    __ j(zero, &done);
+    __ j(negative, &is_negative);
+    __ movl(rax, Immediate(255));
+    __ jmp(&done);
+    __ bind(&is_negative);
+    __ xorl(rax, rax);  // Clear rax.
+    __ bind(&done);
+  }
+  __ movq(rcx, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
+  __ movb(Operand(rcx, rbx, times_1, 0), rax);
+  __ movq(rax, rdx);  // Return the original value.
+  __ ret(0);
+
+  // Extra capacity case: Check if there is extra capacity to
+  // perform the store and update the length. Used for adding one
+  // element to the array by writing to array[array.length].
+  __ bind(&extra);
+  // rax: value
+  // rdx: JSArray
+  // rcx: FixedArray
+  // rbx: index (as a smi)
+  // flags: compare (rbx, rdx.length())
+  __ j(not_equal, &slow);  // do not leave holes in the array
+  __ SmiToInteger64(rbx, rbx);
+  __ cmpl(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+  __ j(above_equal, &slow);
+  // Increment and restore smi-tag.
+  __ Integer64AddToSmi(rbx, rbx, 1);
+  __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rbx);
+  __ SmiSubConstant(rbx, rbx, 1, NULL);
+  __ jmp(&fast);
+
+  // Array case: Get the length and the elements array from the JS
+  // array. Check that the array is in fast mode; if it is the
+  // length is always a smi.
+  __ bind(&array);
+  // rax: value
+  // rdx: JSArray
+  // rbx: index (as a smi)
+  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map());
+  __ j(not_equal, &slow);
+
+  // Check the key against the length in the array, compute the
+  // address to store into and fall through to fast case.
+  __ cmpl(rbx, FieldOperand(rdx, JSArray::kLengthOffset));
+  __ j(above_equal, &extra);
+
+  // Fast case: Do the store.
+  __ bind(&fast);
+  // rax: value
+  // rcx: FixedArray
+  // rbx: index (as a smi)
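+  // rbx is a smi, i.e. the index times two, so scaling it by half a pointer
+  // size yields index * kPointerSize into the elements array.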
+  __ movq(Operand(rcx, rbx, times_half_pointer_size,
+                  FixedArray::kHeaderSize - kHeapObjectTag),
+         rax);
+  // Update write barrier for the elements array address.
+  __ movq(rdx, rax);
+  __ RecordWrite(rcx, 0, rdx, rbx);
+  __ ret(0);
+}
+
+
+void CallIC::Generate(MacroAssembler* masm,
+                      int argc,
+                      ExternalReference const& f) {
+  // Get the receiver of the function from the stack; 1 ~ return address.
+  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+  // Get the name of the function to call from the stack.
+  // 2 ~ receiver, return address.
+  __ movq(rbx, Operand(rsp, (argc + 2) * kPointerSize));
+
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Push the receiver and the name of the function.
+  __ push(rdx);
+  __ push(rbx);
+
+  // Call the entry.
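+  // The C entry stub expects the argument count in rax and the entry point
+  // in rbx (the same convention used by TailCallRuntime/JumpToRuntime).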
+  CEntryStub stub(1);
+  __ movq(rax, Immediate(2));
+  __ movq(rbx, f);
+  __ CallStub(&stub);
+
+  // Move result to rdi and exit the internal frame.
+  __ movq(rdi, rax);
+  __ LeaveInternalFrame();
+
+  // Check if the receiver is a global object of some sort.
+  Label invoke, global;
+  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));  // receiver
+  __ JumpIfSmi(rdx, &invoke);
+  __ CmpObjectType(rdx, JS_GLOBAL_OBJECT_TYPE, rcx);
+  __ j(equal, &global);
+  __ CmpInstanceType(rcx, JS_BUILTINS_OBJECT_TYPE);
+  __ j(not_equal, &invoke);
+
+  // Patch the receiver on the stack.
+  __ bind(&global);
+  __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
+  __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+
+  // Invoke the function.
+  ParameterCount actual(argc);
+  __ bind(&invoke);
+  __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+}
+
+
+// Defined in ic.cc.
+Object* CallIC_Miss(Arguments args);
+
+void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+  // ----------- S t a t e -------------
+  // rsp[0] return address
+  // rsp[8] argument argc
+  // rsp[16] argument argc - 1
+  // ...
+  // rsp[argc * 8] argument 1
+  // rsp[(argc + 1) * 8] argument 0 = receiver
+  // rsp[(argc + 2) * 8] function name
+  // -----------------------------------
+  Label number, non_number, non_string, boolean, probe, miss;
+
+  // Get the receiver of the function from the stack; 1 ~ return address.
+  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+  // Get the name of the function from the stack; 2 ~ return address, receiver
+  __ movq(rcx, Operand(rsp, (argc + 2) * kPointerSize));
+
+  // Probe the stub cache.
+  Code::Flags flags =
+      Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
+  StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, rax);
+
+  // If the stub cache probing failed, the receiver might be a value.
+  // For value objects, the cache is keyed on the map of the prototype
+  // object of the corresponding JSValue, so that is the object we need
+  // to probe with.
+  //
+  // Check for number.
+  __ JumpIfSmi(rdx, &number);
+  __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rbx);
+  __ j(not_equal, &non_number);
+  __ bind(&number);
+  StubCompiler::GenerateLoadGlobalFunctionPrototype(
+      masm, Context::NUMBER_FUNCTION_INDEX, rdx);
+  __ jmp(&probe);
+
+  // Check for string.
+  __ bind(&non_number);
+  __ CmpInstanceType(rbx, FIRST_NONSTRING_TYPE);
+  __ j(above_equal, &non_string);
+  StubCompiler::GenerateLoadGlobalFunctionPrototype(
+      masm, Context::STRING_FUNCTION_INDEX, rdx);
+  __ jmp(&probe);
+
+  // Check for boolean.
+  __ bind(&non_string);
+  __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
+  __ j(equal, &boolean);
+  __ CompareRoot(rdx, Heap::kFalseValueRootIndex);
+  __ j(not_equal, &miss);
+  __ bind(&boolean);
+  StubCompiler::GenerateLoadGlobalFunctionPrototype(
+      masm, Context::BOOLEAN_FUNCTION_INDEX, rdx);
+
+  // Probe the stub cache for the value object.
+  __ bind(&probe);
+  StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, no_reg);
+
+  // Cache miss: Jump to runtime.
+  __ bind(&miss);
+  Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+}
+
+
+static void GenerateNormalHelper(MacroAssembler* masm,
+                                 int argc,
+                                 bool is_global_object,
+                                 Label* miss) {
+  // Search the dictionary and put the result in register rdx.
+  GenerateDictionaryLoad(masm, miss, rax, rdx, rbx, rcx);
+
+  // Move the result to register rdi and check that it isn't a smi.
+  __ movq(rdi, rdx);
+  __ JumpIfSmi(rdx, miss);
+
+  // Check that the value is a JavaScript function.
+  __ CmpObjectType(rdx, JS_FUNCTION_TYPE, rdx);
+  __ j(not_equal, miss);
+  // Check that the function has been loaded.
+  __ testb(FieldOperand(rdx, Map::kBitField2Offset),
+           Immediate(1 << Map::kNeedsLoading));
+  __ j(not_zero, miss);
+
+  // Patch the receiver with the global proxy if necessary.
+  if (is_global_object) {
+    __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+    __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
+    __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+  }
+
+  // Invoke the function.
+  ParameterCount actual(argc);
+  __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+}
+
+
+void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+  // ----------- S t a t e -------------
+  // rsp[0] return address
+  // rsp[8] argument argc
+  // rsp[16] argument argc - 1
+  // ...
+  // rsp[argc * 8] argument 1
+  // rsp[(argc + 1) * 8] argument 0 = receiver
+  // rsp[(argc + 2) * 8] function name
+  // -----------------------------------
+
+  Label miss, global_object, non_global_object;
+
+  // Get the receiver of the function from the stack.
+  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+  // Get the name of the function from the stack.
+  __ movq(rcx, Operand(rsp, (argc + 2) * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(rdx, &miss);
+
+  // Check that the receiver is a valid JS object.
+  // Because there are so many map checks and type checks, do not
+  // use CmpObjectType, but load map and type into registers.
+  __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+  __ movb(rax, FieldOperand(rbx, Map::kInstanceTypeOffset));
+  __ cmpb(rax, Immediate(FIRST_JS_OBJECT_TYPE));
+  __ j(below, &miss);
+
+  // If this assert fails, we have to check upper bound too.
+  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+
+  // Check for access to global object.
+  __ cmpb(rax, Immediate(JS_GLOBAL_OBJECT_TYPE));
+  __ j(equal, &global_object);
+  __ cmpb(rax, Immediate(JS_BUILTINS_OBJECT_TYPE));
+  __ j(not_equal, &non_global_object);
+
+  // Accessing global object: Load and invoke.
+  __ bind(&global_object);
+  // Check that the global object does not require access checks.
+  __ movb(rbx, FieldOperand(rbx, Map::kBitFieldOffset));
+  __ testb(rbx, Immediate(1 << Map::kIsAccessCheckNeeded));
+  __ j(not_equal, &miss);
+  GenerateNormalHelper(masm, argc, true, &miss);
+
+  // Accessing non-global object: Check for access to global proxy.
+  Label global_proxy, invoke;
+  __ bind(&non_global_object);
+  __ cmpb(rax, Immediate(JS_GLOBAL_PROXY_TYPE));
+  __ j(equal, &global_proxy);
+  // Check that the non-global, non-global-proxy object does not
+  // require access checks.
+  __ movb(rbx, FieldOperand(rbx, Map::kBitFieldOffset));
+  __ testb(rbx, Immediate(1 << Map::kIsAccessCheckNeeded));
+  __ j(not_equal, &miss);
+  __ bind(&invoke);
+  GenerateNormalHelper(masm, argc, false, &miss);
+
+  // Global object proxy access: Check access rights.
+  __ bind(&global_proxy);
+  __ CheckAccessGlobalProxy(rdx, rax, &miss);
+  __ jmp(&invoke);
+
+  // Cache miss: Jump to runtime.
+  __ bind(&miss);
+  Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+}
+
+
+// The offset from the inlined patch site to the start of the
+// inlined load instruction.
+const int LoadIC::kOffsetToLoadInstruction = 20;
+
+
+void LoadIC::ClearInlinedVersion(Address address) {
+  // Reset the map check of the inlined inobject property load (if
+  // present) to guarantee failure by holding an invalid map (the null
+  // value).  The offset can be patched to anything.
+  PatchInlinedLoad(address, Heap::null_value(), kMaxInt);
+}
+
+
+void LoadIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
+  // ----------- S t a t e -------------
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+
+  __ pop(rbx);
+  __ push(rax);  // receiver
+  __ push(rcx);  // name
+  __ push(rbx);  // return address
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(f, 2, 1);
+}
+
+
+void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+
+  Label miss;
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+
+  StubCompiler::GenerateLoadArrayLength(masm, rax, rdx, &miss);
+  __ bind(&miss);
+  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
+}
+
+
+void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+
+  Label miss;
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+
+  StubCompiler::GenerateLoadFunctionPrototype(masm, rax, rdx, rbx, &miss);
+  __ bind(&miss);
+  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
+}
+
+
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+
+  // Probe the stub cache.
+  Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
+                                         NOT_IN_LOOP,
+                                         MONOMORPHIC);
+  StubCache::GenerateProbe(masm, flags, rax, rcx, rbx, rdx);
+
+  // Cache miss: Jump to runtime.
+  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
+}
+
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+
+  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+}
+
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+
+  Label miss, probe, global;
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(rax, &miss);
+
+  // Check that the receiver is a valid JS object.
+  __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
+  __ j(below, &miss);
+
+  // If this assert fails, we have to check upper bound too.
+  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+
+  // Check for access to global object (unlikely).
+  __ CmpInstanceType(rbx, JS_GLOBAL_PROXY_TYPE);
+  __ j(equal, &global);
+
+  // Check for non-global object that requires access check.
+  __ testl(FieldOperand(rbx, Map::kBitFieldOffset),
+          Immediate(1 << Map::kIsAccessCheckNeeded));
+  __ j(not_zero, &miss);
+
+  // Search the dictionary, placing the result in rax.
+  __ bind(&probe);
+  GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx, rcx);
+  GenerateCheckNonObjectOrLoaded(masm, &miss, rax);
+  __ ret(0);
+
+  // Global object access: Check access rights.
+  __ bind(&global);
+  __ CheckAccessGlobalProxy(rax, rdx, &miss);
+  __ jmp(&probe);
+
+  // Cache miss: Restore receiver from stack and jump to runtime.
+  __ bind(&miss);
+  __ movq(rax, Operand(rsp, 1 * kPointerSize));
+  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+}
+
+
+void LoadIC::GenerateStringLength(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+
+  Label miss;
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+
+  StubCompiler::GenerateLoadStringLength(masm, rax, rdx, &miss);
+  __ bind(&miss);
+  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
+}
+
+
+bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
+  // The address of the instruction following the call.
+  Address test_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+  // If the instruction following the call is not a test eax, nothing
+  // was inlined.
+  if (*test_instruction_address != kTestEaxByte) return false;
+
+  Address delta_address = test_instruction_address + 1;
+  // The delta to the start of the map check instruction.
+  int delta = *reinterpret_cast<int*>(delta_address);
+
+  // The map address is the last 8 bytes of the 10-byte
+  // immediate move instruction, so we add 2 to get the
+  // offset to the last 8 bytes.
+  Address map_address = test_instruction_address + delta + 2;
+  *(reinterpret_cast<Object**>(map_address)) = map;
+
+  // The offset is in the 32-bit displacement of a seven byte
+  // memory-to-register move instruction (REX.W 0x88 ModR/M disp32),
+  // so we add 3 to get the offset of the displacement.
+  Address offset_address =
+      test_instruction_address + delta + kOffsetToLoadInstruction + 3;
+  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
+  return true;
+}
+
+void StoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+  __ pop(rbx);
+  __ push(Operand(rsp, 0));  // receiver
+  __ push(rcx);  // name
+  __ push(rax);  // value
+  __ push(rbx);  // return address
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(f, 3, 1);
+}
+
+void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rcx    : Map (target of map transition)
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+
+  __ pop(rbx);
+  __ push(Operand(rsp, 0));  // receiver
+  __ push(rcx);  // transition map
+  __ push(rax);  // value
+  __ push(rbx);  // return address
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(
+      ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
+}
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+
+  // Get the receiver from the stack and probe the stub cache.
+  __ movq(rdx, Operand(rsp, kPointerSize));
+  Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
+                                         NOT_IN_LOOP,
+                                         MONOMORPHIC);
+  StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, no_reg);
+
+  // Cache miss: Jump to runtime.
+  Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
+}
+
+
+#undef __
+
+
+} }  // namespace v8::internal
diff --git a/src/x64/jump-target-x64.cc b/src/x64/jump-target-x64.cc
new file mode 100644
index 0000000..dd2f6d6
--- /dev/null
+++ b/src/x64/jump-target-x64.cc
@@ -0,0 +1,432 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "jump-target-inl.h"
+#include "register-allocator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// JumpTarget implementation.
+
+#define __ ACCESS_MASM(cgen()->masm())
+
+void JumpTarget::DoJump() {
+  ASSERT(cgen()->has_valid_frame());
+  // Live non-frame registers are not allowed at unconditional jumps
+  // because we have no way of invalidating the corresponding results
+  // which are still live in the C++ code.
+  ASSERT(cgen()->HasValidEntryRegisters());
+
+  if (is_bound()) {
+    // Backward jump.  There is an expected frame to merge to.
+    ASSERT(direction_ == BIDIRECTIONAL);
+    cgen()->frame()->PrepareMergeTo(entry_frame_);
+    cgen()->frame()->MergeTo(entry_frame_);
+    cgen()->DeleteFrame();
+    __ jmp(&entry_label_);
+  } else if (entry_frame_ != NULL) {
+    // Forward jump with a preconfigured entry frame.  Assert the
+    // current frame matches the expected one and jump to the block.
+    ASSERT(cgen()->frame()->Equals(entry_frame_));
+    cgen()->DeleteFrame();
+    __ jmp(&entry_label_);
+  } else {
+    // Forward jump.  Remember the current frame and emit a jump to
+    // its merge code.
+    AddReachingFrame(cgen()->frame());
+    RegisterFile empty;
+    cgen()->SetFrame(NULL, &empty);
+    __ jmp(&merge_labels_.last());
+  }
+}
+
+
+void JumpTarget::DoBranch(Condition cc, Hint b) {
+  ASSERT(cgen() != NULL);
+  ASSERT(cgen()->has_valid_frame());
+
+  if (is_bound()) {
+    ASSERT(direction_ == BIDIRECTIONAL);
+    // Backward branch.  We have an expected frame to merge to on the
+    // backward edge.
+
+    // Swap the current frame for a copy (we do the swapping to get
+    // the off-frame registers off the fall through) to use for the
+    // branch.
+    VirtualFrame* fall_through_frame = cgen()->frame();
+    VirtualFrame* branch_frame = new VirtualFrame(fall_through_frame);
+    RegisterFile non_frame_registers;
+    cgen()->SetFrame(branch_frame, &non_frame_registers);
+
+    // Check if we can avoid merge code.
+    cgen()->frame()->PrepareMergeTo(entry_frame_);
+    if (cgen()->frame()->Equals(entry_frame_)) {
+      // Branch right in to the block.
+      cgen()->DeleteFrame();
+      __ j(cc, &entry_label_);
+      cgen()->SetFrame(fall_through_frame, &non_frame_registers);
+      return;
+    }
+
+    // Check if we can reuse existing merge code.
+    for (int i = 0; i < reaching_frames_.length(); i++) {
+      if (reaching_frames_[i] != NULL &&
+          cgen()->frame()->Equals(reaching_frames_[i])) {
+        // Branch to the merge code.
+        cgen()->DeleteFrame();
+        __ j(cc, &merge_labels_[i]);
+        cgen()->SetFrame(fall_through_frame, &non_frame_registers);
+        return;
+      }
+    }
+
+    // To emit the merge code here, we negate the condition and branch
+    // around the merge code on the fall through path.
+    Label original_fall_through;
+    __ j(NegateCondition(cc), &original_fall_through);
+    cgen()->frame()->MergeTo(entry_frame_);
+    cgen()->DeleteFrame();
+    __ jmp(&entry_label_);
+    cgen()->SetFrame(fall_through_frame, &non_frame_registers);
+    __ bind(&original_fall_through);
+
+  } else if (entry_frame_ != NULL) {
+    // Forward branch with a preconfigured entry frame.  Assert the
+    // current frame matches the expected one and branch to the block.
+    ASSERT(cgen()->frame()->Equals(entry_frame_));
+    // Explicitly use the macro assembler instead of __ as forward
+    // branches are expected to be a fixed size (no inserted
+    // coverage-checking instructions please).  This is used in
+    // Reference::GetValue.
+    cgen()->masm()->j(cc, &entry_label_);
+
+  } else {
+    // Forward branch.  A copy of the current frame is remembered and
+    // a branch to the merge code is emitted.  Explicitly use the
+    // macro assembler instead of __ as forward branches are expected
+    // to be a fixed size (no inserted coverage-checking instructions
+    // please).  This is used in Reference::GetValue.
+    AddReachingFrame(new VirtualFrame(cgen()->frame()));
+    cgen()->masm()->j(cc, &merge_labels_.last());
+  }
+}
+
+
+void JumpTarget::Call() {
+  // Call is used to push the address of the catch block on the stack as
+  // a return address when compiling try/catch and try/finally.  We
+  // fully spill the frame before making the call.  The expected frame
+  // at the label (which should be the only one) is the spilled current
+  // frame plus an in-memory return address.  The "fall-through" frame
+  // at the return site is the spilled current frame.
+  ASSERT(cgen() != NULL);
+  ASSERT(cgen()->has_valid_frame());
+  // There are no non-frame references across the call.
+  ASSERT(cgen()->HasValidEntryRegisters());
+  ASSERT(!is_linked());
+
+  cgen()->frame()->SpillAll();
+  VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
+  target_frame->Adjust(1);
+  // We do not expect a call with a preconfigured entry frame.
+  ASSERT(entry_frame_ == NULL);
+  AddReachingFrame(target_frame);
+  __ call(&merge_labels_.last());
+}
+
+
+void JumpTarget::DoBind() {
+  ASSERT(cgen() != NULL);
+  ASSERT(!is_bound());
+
+  // Live non-frame registers are not allowed at the start of a basic
+  // block.
+  ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
+
+  // Fast case: the jump target was manually configured with an entry
+  // frame to use.
+  if (entry_frame_ != NULL) {
+    // Assert no reaching frames to deal with.
+    ASSERT(reaching_frames_.is_empty());
+    ASSERT(!cgen()->has_valid_frame());
+
+    RegisterFile empty;
+    if (direction_ == BIDIRECTIONAL) {
+      // Copy the entry frame so the original can be used for a
+      // possible backward jump.
+      cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
+    } else {
+      // Take ownership of the entry frame.
+      cgen()->SetFrame(entry_frame_, &empty);
+      entry_frame_ = NULL;
+    }
+    __ bind(&entry_label_);
+    return;
+  }
+
+  if (!is_linked()) {
+    ASSERT(cgen()->has_valid_frame());
+    if (direction_ == FORWARD_ONLY) {
+      // Fast case: no forward jumps and no possible backward jumps.
+      // The stack pointer can be floating above the top of the
+      // virtual frame before the bind.  Afterward, it should not.
+      VirtualFrame* frame = cgen()->frame();
+      int difference = frame->stack_pointer_ - (frame->element_count() - 1);
+      if (difference > 0) {
+        frame->stack_pointer_ -= difference;
+        __ addq(rsp, Immediate(difference * kPointerSize));
+      }
+    } else {
+      ASSERT(direction_ == BIDIRECTIONAL);
+      // Fast case: no forward jumps, possible backward ones.  Remove
+      // constants and copies above the watermark on the fall-through
+      // frame and use it as the entry frame.
+      cgen()->frame()->MakeMergable();
+      entry_frame_ = new VirtualFrame(cgen()->frame());
+    }
+    __ bind(&entry_label_);
+    return;
+  }
+
+  if (direction_ == FORWARD_ONLY &&
+      !cgen()->has_valid_frame() &&
+      reaching_frames_.length() == 1) {
+    // Fast case: no fall-through, a single forward jump, and no
+    // possible backward jumps.  Pick up the only reaching frame, take
+    // ownership of it, and use it for the block about to be emitted.
+    VirtualFrame* frame = reaching_frames_[0];
+    RegisterFile empty;
+    cgen()->SetFrame(frame, &empty);
+    reaching_frames_[0] = NULL;
+    __ bind(&merge_labels_[0]);
+
+    // The stack pointer can be floating above the top of the
+    // virtual frame before the bind.  Afterward, it should not.
+    int difference = frame->stack_pointer_ - (frame->element_count() - 1);
+    if (difference > 0) {
+      frame->stack_pointer_ -= difference;
+      __ addq(rsp, Immediate(difference * kPointerSize));
+    }
+
+    __ bind(&entry_label_);
+    return;
+  }
+
+  // If there is a current frame, record it as the fall-through.  It
+  // is owned by the reaching frames for now.
+  bool had_fall_through = false;
+  if (cgen()->has_valid_frame()) {
+    had_fall_through = true;
+    AddReachingFrame(cgen()->frame());  // Return value ignored.
+    RegisterFile empty;
+    cgen()->SetFrame(NULL, &empty);
+  }
+
+  // Compute the frame to use for entry to the block.
+  ComputeEntryFrame();
+
+  // Some moves required to merge to an expected frame involve only
+  // frame state changes and do not require any code generation.
+  // Perform those first to increase the possibility of finding equal
+  // frames below.
+  for (int i = 0; i < reaching_frames_.length(); i++) {
+    if (reaching_frames_[i] != NULL) {
+      reaching_frames_[i]->PrepareMergeTo(entry_frame_);
+    }
+  }
+
+  if (is_linked()) {
+    // There were forward jumps.  Handle merging the reaching frames
+    // to the entry frame.
+
+    // Loop over the (non-null) reaching frames and process any that
+    // need merge code.  Iterate backwards through the list to handle
+    // the fall-through frame first.  Set frames that will be
+    // processed after 'i' to NULL if we want to avoid processing
+    // them.
+    for (int i = reaching_frames_.length() - 1; i >= 0; i--) {
+      VirtualFrame* frame = reaching_frames_[i];
+
+      if (frame != NULL) {
+        // Does the frame (probably) need merge code?
+        if (!frame->Equals(entry_frame_)) {
+          // We could have a valid frame as the fall through to the
+          // binding site or as the fall through from a previous merge
+          // code block.  Jump around the code we are about to
+          // generate.
+          if (cgen()->has_valid_frame()) {
+            cgen()->DeleteFrame();
+            __ jmp(&entry_label_);
+          }
+          // Pick up the frame for this block.  Assume ownership if
+          // there cannot be backward jumps.
+          RegisterFile empty;
+          if (direction_ == BIDIRECTIONAL) {
+            cgen()->SetFrame(new VirtualFrame(frame), &empty);
+          } else {
+            cgen()->SetFrame(frame, &empty);
+            reaching_frames_[i] = NULL;
+          }
+          __ bind(&merge_labels_[i]);
+
+          // Loop over the remaining (non-null) reaching frames,
+          // looking for any that can share merge code with this one.
+          for (int j = 0; j < i; j++) {
+            VirtualFrame* other = reaching_frames_[j];
+            if (other != NULL && other->Equals(cgen()->frame())) {
+              // Set the reaching frame element to null to avoid
+              // processing it later, and then bind its entry label.
+              reaching_frames_[j] = NULL;
+              __ bind(&merge_labels_[j]);
+            }
+          }
+
+          // Emit the merge code.
+          cgen()->frame()->MergeTo(entry_frame_);
+        } else if (i == reaching_frames_.length() - 1 && had_fall_through) {
+          // If this is the fall through frame, and it didn't need
+          // merge code, we need to pick up the frame so we can jump
+          // around subsequent merge blocks if necessary.
+          RegisterFile empty;
+          cgen()->SetFrame(frame, &empty);
+          reaching_frames_[i] = NULL;
+        }
+      }
+    }
+
+    // The code generator may not have a current frame if there was no
+    // fall through and none of the reaching frames needed merging.
+    // In that case, clone the entry frame as the current frame.
+    if (!cgen()->has_valid_frame()) {
+      RegisterFile empty;
+      cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
+    }
+
+    // There may be unprocessed reaching frames that did not need
+    // merge code.  They will have unbound merge labels.  Bind their
+    // merge labels to be the same as the entry label and deallocate
+    // them.
+    for (int i = 0; i < reaching_frames_.length(); i++) {
+      if (!merge_labels_[i].is_bound()) {
+        reaching_frames_[i] = NULL;
+        __ bind(&merge_labels_[i]);
+      }
+    }
+
+    // There are non-NULL reaching frames with bound labels for each
+    // merge block, but only on backward targets.
+  } else {
+    // There were no forward jumps.  There must be a current frame and
+    // this must be a bidirectional target.
+    ASSERT(reaching_frames_.length() == 1);
+    ASSERT(reaching_frames_[0] != NULL);
+    ASSERT(direction_ == BIDIRECTIONAL);
+
+    // Use a copy of the reaching frame so the original can be saved
+    // for possible reuse as a backward merge block.
+    RegisterFile empty;
+    cgen()->SetFrame(new VirtualFrame(reaching_frames_[0]), &empty);
+    __ bind(&merge_labels_[0]);
+    cgen()->frame()->MergeTo(entry_frame_);
+  }
+
+  __ bind(&entry_label_);
+}
+
+
+void BreakTarget::Jump() {
+  // Drop leftover statement state from the frame before merging, without
+  // emitting code.
+  ASSERT(cgen()->has_valid_frame());
+  int count = cgen()->frame()->height() - expected_height_;
+  cgen()->frame()->ForgetElements(count);
+  DoJump();
+}
+
+
+void BreakTarget::Jump(Result* arg) {
+  // Drop leftover statement state from the frame before merging, without
+  // emitting code.
+  ASSERT(cgen()->has_valid_frame());
+  int count = cgen()->frame()->height() - expected_height_;
+  cgen()->frame()->ForgetElements(count);
+  cgen()->frame()->Push(arg);
+  DoJump();
+}
+
+
+void BreakTarget::Bind() {
+#ifdef DEBUG
+  // All the forward-reaching frames should have been adjusted at the
+  // jumps to this target.
+  for (int i = 0; i < reaching_frames_.length(); i++) {
+    ASSERT(reaching_frames_[i] == NULL ||
+           reaching_frames_[i]->height() == expected_height_);
+  }
+#endif
+  // Drop leftover statement state from the frame before merging, even on
+  // the fall through.  This is so we can bind the return target with state
+  // on the frame.
+  if (cgen()->has_valid_frame()) {
+    int count = cgen()->frame()->height() - expected_height_;
+    cgen()->frame()->ForgetElements(count);
+  }
+  DoBind();
+}
+
+
+void BreakTarget::Bind(Result* arg) {
+#ifdef DEBUG
+  // All the forward-reaching frames should have been adjusted at the
+  // jumps to this target.
+  for (int i = 0; i < reaching_frames_.length(); i++) {
+    ASSERT(reaching_frames_[i] == NULL ||
+           reaching_frames_[i]->height() == expected_height_ + 1);
+  }
+#endif
+  // Drop leftover statement state from the frame before merging, even on
+  // the fall through.  This is so we can bind the return target with state
+  // on the frame.
+  if (cgen()->has_valid_frame()) {
+    int count = cgen()->frame()->height() - expected_height_;
+    cgen()->frame()->ForgetElements(count);
+    cgen()->frame()->Push(arg);
+  }
+  DoBind();
+  *arg = cgen()->frame()->Pop();
+}
+
+
+#undef __
+
+
+} }  // namespace v8::internal
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
new file mode 100644
index 0000000..38ada92
--- /dev/null
+++ b/src/x64/macro-assembler-x64.cc
@@ -0,0 +1,2186 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "assembler-x64.h"
+#include "macro-assembler-x64.h"
+#include "serialize.h"
+#include "debug.h"
+
+namespace v8 {
+namespace internal {
+
+MacroAssembler::MacroAssembler(void* buffer, int size)
+  : Assembler(buffer, size),
+    unresolved_(0),
+    generating_stub_(false),
+    allow_stub_calls_(true),
+    code_object_(Heap::undefined_value()) {
+}
+
+
+void MacroAssembler::LoadRoot(Register destination,
+                              Heap::RootListIndex index) {
+  movq(destination, Operand(r13, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::PushRoot(Heap::RootListIndex index) {
+  push(Operand(r13, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::CompareRoot(Register with,
+                                 Heap::RootListIndex index) {
+  cmpq(with, Operand(r13, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::CompareRoot(Operand with,
+                                 Heap::RootListIndex index) {
+  LoadRoot(kScratchRegister, index);
+  cmpq(with, kScratchRegister);
+}
+
+
+static void RecordWriteHelper(MacroAssembler* masm,
+                              Register object,
+                              Register addr,
+                              Register scratch) {
+  Label fast;
+
+  // Compute the page start address from the heap object pointer, and reuse
+  // the 'object' register for it.
+  ASSERT(is_int32(~Page::kPageAlignmentMask));
+  masm->and_(object,
+             Immediate(static_cast<int32_t>(~Page::kPageAlignmentMask)));
+  Register page_start = object;
+
+  // Compute the bit addr in the remembered set/index of the pointer in the
+  // page. Reuse 'addr' as pointer_offset.
+  masm->subq(addr, page_start);
+  masm->shr(addr, Immediate(kPointerSizeLog2));
+  Register pointer_offset = addr;
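+  // A pointer stored N bytes past the page start thus maps to bit
+  // N / kPointerSize of the page's remembered set.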
+
+  // If the bit offset lies beyond the normal remembered set range, it is in
+  // the extra remembered set area of a large object.
+  masm->cmpq(pointer_offset, Immediate(Page::kPageSize / kPointerSize));
+  masm->j(less, &fast);
+
+  // Adjust 'page_start' so that addressing using 'pointer_offset' hits the
+  // extra remembered set after the large object.
+
+  // Load the array length into 'scratch'.
+  masm->movl(scratch,
+             Operand(page_start,
+                     Page::kObjectStartOffset + FixedArray::kLengthOffset));
+  Register array_length = scratch;
+
+  // Extra remembered set starts right after the large object (a FixedArray), at
+  //   page_start + kObjectStartOffset + objectSize
+  // where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length.
+  // Add the delta between the end of the normal RSet and the start of the
+  // extra RSet to 'page_start', so that addressing the bit using
+  // 'pointer_offset' hits the extra RSet words.
+  masm->lea(page_start,
+            Operand(page_start, array_length, times_pointer_size,
+                    Page::kObjectStartOffset + FixedArray::kHeaderSize
+                        - Page::kRSetEndOffset));
+
+  // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
+  // to limit code size. We should probably evaluate this decision by
+  // measuring the performance of an equivalent implementation using
+  // "simpler" instructions
+  masm->bind(&fast);
+  masm->bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
+}
+
+
+class RecordWriteStub : public CodeStub {
+ public:
+  RecordWriteStub(Register object, Register addr, Register scratch)
+      : object_(object), addr_(addr), scratch_(scratch) { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Register object_;
+  Register addr_;
+  Register scratch_;
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
+           object_.code(), addr_.code(), scratch_.code());
+  }
+#endif
+
+  // Minor key encoding in 12 bits of three registers (object, address and
+  // scratch) OOOOAAAASSSS.
+  class ScratchBits: public BitField<uint32_t, 0, 4> {};
+  class AddressBits: public BitField<uint32_t, 4, 4> {};
+  class ObjectBits: public BitField<uint32_t, 8, 4> {};
+
+  Major MajorKey() { return RecordWrite; }
+
+  int MinorKey() {
+    // Encode the registers.
+    return ObjectBits::encode(object_.code()) |
+           AddressBits::encode(addr_.code()) |
+           ScratchBits::encode(scratch_.code());
+  }
+};
+
+
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+  RecordWriteHelper(masm, object_, addr_, scratch_);
+  masm->ret(0);
+}
+
+
+// Set the remembered set bit for [object+offset].
+// object is the object being stored into, value is the object being stored.
+// If offset is zero, then the scratch register contains the array index into
+// the elements array represented as a Smi.
+// All registers are clobbered by the operation.
+void MacroAssembler::RecordWrite(Register object,
+                                 int offset,
+                                 Register value,
+                                 Register scratch) {
+  // First, check if a remembered set write is even needed. The tests below
+  // catch stores of Smis and stores into young gen (which does not have space
+  // for the remembered set bits).
+  Label done;
+
+  // Test that the object address is not in the new space.  We cannot
+  // set remembered set bits in the new space.
+  movq(value, object);
+  ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
+  and_(value, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
+  movq(kScratchRegister, ExternalReference::new_space_start());
+  cmpq(value, kScratchRegister);
+  j(equal, &done);
+
+  if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
+    // Compute the bit offset in the remembered set, leave it in 'value'.
+    lea(value, Operand(object, offset));
+    ASSERT(is_int32(Page::kPageAlignmentMask));
+    and_(value, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
+    shr(value, Immediate(kObjectAlignmentBits));
+
+    // Compute the page address from the heap object pointer, leave it in
+    // 'object' (immediate value is sign extended).
+    and_(object, Immediate(~Page::kPageAlignmentMask));
+
+    // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
+    // to limit code size. We should probably evaluate this decision by
+    // measuring the performance of an equivalent implementation using
+    // "simpler" instructions
+    bts(Operand(object, Page::kRSetOffset), value);
+  } else {
+    Register dst = scratch;
+    if (offset != 0) {
+      lea(dst, Operand(object, offset));
+    } else {
+      // array access: calculate the destination address in the same manner as
+      // KeyedStoreIC::GenerateGeneric.  Multiply a smi by 4 to get an offset
+      // into an array of pointers.
+      lea(dst, Operand(object, dst, times_half_pointer_size,
+                       FixedArray::kHeaderSize - kHeapObjectTag));
+    }
+    // If we are already generating a shared stub, not inlining the
+    // record write code isn't going to save us any memory.
+    if (generating_stub()) {
+      RecordWriteHelper(this, object, dst, value);
+    } else {
+      RecordWriteStub stub(object, dst, value);
+      CallStub(&stub);
+    }
+  }
+
+  bind(&done);
+}
+
+
+void MacroAssembler::Assert(Condition cc, const char* msg) {
+  if (FLAG_debug_code) Check(cc, msg);
+}
+
+
+void MacroAssembler::Check(Condition cc, const char* msg) {
+  Label L;
+  j(cc, &L);
+  Abort(msg);
+  // will not return here
+  bind(&L);
+}
+
+
+void MacroAssembler::NegativeZeroTest(Register result,
+                                      Register op,
+                                      Label* then_label) {
+  Label ok;
+  testl(result, result);
+  j(not_zero, &ok);
+  testl(op, op);
+  j(sign, then_label);
+  bind(&ok);
+}
+
+
+void MacroAssembler::Abort(const char* msg) {
+  // We want to pass the msg string like a smi to avoid GC
+  // problems, however msg is not guaranteed to be aligned
+  // properly. Instead, we pass an aligned pointer that is
+  // a proper v8 smi, but also pass the alignment difference
+  // from the real pointer as a smi.
+  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
+  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
+  // Note: p0 might not be a valid Smi *value*, but it has a valid Smi tag.
+  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
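+  // The runtime side can then recover the real message pointer as p0 plus the
+  // untagged difference pushed below.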
+#ifdef DEBUG
+  if (msg != NULL) {
+    RecordComment("Abort message: ");
+    RecordComment(msg);
+  }
+#endif
+  push(rax);
+  movq(kScratchRegister, p0, RelocInfo::NONE);
+  push(kScratchRegister);
+  movq(kScratchRegister,
+       reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0)),
+       RelocInfo::NONE);
+  push(kScratchRegister);
+  CallRuntime(Runtime::kAbort, 2);
+  // will not return here
+}
+
+
+void MacroAssembler::CallStub(CodeStub* stub) {
+  ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
+  Call(stub->GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::StubReturn(int argc) {
+  ASSERT(argc >= 1 && generating_stub());
+  ret((argc - 1) * kPointerSize);
+}
+
+
+void MacroAssembler::IllegalOperation(int num_arguments) {
+  if (num_arguments > 0) {
+    addq(rsp, Immediate(num_arguments * kPointerSize));
+  }
+  LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+}
+
+
+void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
+  CallRuntime(Runtime::FunctionForId(id), num_arguments);
+}
+
+
+void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
+  // If the expected number of arguments of the runtime function is
+  // constant, we check that the actual number of arguments match the
+  // expectation.
+  if (f->nargs >= 0 && f->nargs != num_arguments) {
+    IllegalOperation(num_arguments);
+    return;
+  }
+
+  Runtime::FunctionId function_id =
+      static_cast<Runtime::FunctionId>(f->stub_id);
+  RuntimeStub stub(function_id, num_arguments);
+  CallStub(&stub);
+}
+
+
+void MacroAssembler::TailCallRuntime(ExternalReference const& ext,
+                                     int num_arguments,
+                                     int result_size) {
+  // ----------- S t a t e -------------
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : argument num_arguments - 1
+  //  ...
+  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
+  // -----------------------------------
+
+  // TODO(1236192): Most runtime routines don't need the number of
+  // arguments passed in because it is constant. At some point we
+  // should remove this need and make the runtime routine entry code
+  // smarter.
+  movq(rax, Immediate(num_arguments));
+  JumpToRuntime(ext, result_size);
+}
+
+
+void MacroAssembler::JumpToRuntime(const ExternalReference& ext,
+                                   int result_size) {
+  // Set the entry point and jump to the C entry runtime stub.
+  movq(rbx, ext);
+  CEntryStub ces(result_size);
+  movq(kScratchRegister, ces.GetCode(), RelocInfo::CODE_TARGET);
+  jmp(kScratchRegister);
+}
+
+
+void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+  bool resolved;
+  Handle<Code> code = ResolveBuiltin(id, &resolved);
+
+  const char* name = Builtins::GetName(id);
+  int argc = Builtins::GetArgumentsCount(id);
+
+  movq(target, code, RelocInfo::EMBEDDED_OBJECT);
+  if (!resolved) {
+    uint32_t flags =
+        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
+        Bootstrapper::FixupFlagsIsPCRelative::encode(false) |
+        Bootstrapper::FixupFlagsUseCodeObject::encode(true);
+    Unresolved entry = { pc_offset() - sizeof(intptr_t), flags, name };
+    unresolved_.Add(entry);
+  }
+  addq(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+}
+
+
+Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
+                                            bool* resolved) {
+  // Move the builtin function into the temporary function slot by
+  // reading it from the builtins object. NOTE: We should be able to
+  // reduce this to two instructions by putting the function table in
+  // the global object instead of the "builtins" object and by using a
+  // real register for the function.
+  movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  movq(rdx, FieldOperand(rdx, GlobalObject::kBuiltinsOffset));
+  int builtins_offset =
+      JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
+  movq(rdi, FieldOperand(rdx, builtins_offset));
+
+  return Builtins::GetCode(id, resolved);
+}
+
+
+void MacroAssembler::Set(Register dst, int64_t x) {
+  if (x == 0) {
+    xor_(dst, dst);
+  } else if (is_int32(x)) {
+    movq(dst, Immediate(x));
+  } else if (is_uint32(x)) {
+    movl(dst, Immediate(x));
+  } else {
+    movq(dst, x, RelocInfo::NONE);
+  }
+}
+
+
+void MacroAssembler::Set(const Operand& dst, int64_t x) {
+  if (x == 0) {
+    xor_(kScratchRegister, kScratchRegister);
+    movq(dst, kScratchRegister);
+  } else if (is_int32(x)) {
+    movq(dst, Immediate(x));
+  } else if (is_uint32(x)) {
+    movl(dst, Immediate(x));
+  } else {
+    movq(kScratchRegister, x, RelocInfo::NONE);
+    movq(dst, kScratchRegister);
+  }
+}
+
+
+// ----------------------------------------------------------------------------
+// Smi tagging, untagging and tag detection.
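+//
+// With kSmiTag == 0 and kSmiTagSize == 1 (asserted throughout below), an
+// integer n in the range [-2^30 .. 2^30-1] is tagged as n * 2 in the low
+// 32 bits, so tagging is an add (or lea) and untagging is an arithmetic
+// shift right by one.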
+
+
+void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
+  ASSERT_EQ(1, kSmiTagSize);
+  ASSERT_EQ(0, kSmiTag);
+#ifdef DEBUG
+    cmpq(src, Immediate(0xC0000000u));
+    Check(positive, "Smi conversion overflow");
+#endif
+  if (dst.is(src)) {
+    addl(dst, src);
+  } else {
+    lea(dst, Operand(src, src, times_1, 0));
+  }
+}
+
+
+void MacroAssembler::Integer32ToSmi(Register dst,
+                                    Register src,
+                                    Label* on_overflow) {
+  ASSERT_EQ(1, kSmiTagSize);
+  ASSERT_EQ(0, kSmiTag);
+  if (!dst.is(src)) {
+    movl(dst, src);
+  }
+  addl(dst, src);
+  j(overflow, on_overflow);
+}
+
+
+void MacroAssembler::Integer64AddToSmi(Register dst,
+                                       Register src,
+                                       int constant) {
+#ifdef DEBUG
+  movl(kScratchRegister, src);
+  addl(kScratchRegister, Immediate(constant));
+  Check(no_overflow, "Add-and-smi-convert overflow");
+  Condition valid = CheckInteger32ValidSmiValue(kScratchRegister);
+  Check(valid, "Add-and-smi-convert overflow");
+#endif
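+  // lea computes src + src + 2 * constant, i.e. the smi encoding of
+  // src + constant.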
+  lea(dst, Operand(src, src, times_1, constant << kSmiTagSize));
+}
+
+
+void MacroAssembler::SmiToInteger32(Register dst, Register src) {
+  ASSERT_EQ(1, kSmiTagSize);
+  ASSERT_EQ(0, kSmiTag);
+  if (!dst.is(src)) {
+    movl(dst, src);
+  }
+  sarl(dst, Immediate(kSmiTagSize));
+}
+
+
+void MacroAssembler::SmiToInteger64(Register dst, Register src) {
+  ASSERT_EQ(1, kSmiTagSize);
+  ASSERT_EQ(0, kSmiTag);
+  movsxlq(dst, src);
+  sar(dst, Immediate(kSmiTagSize));
+}
+
+
+void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
+                                                           Register src,
+                                                           int power) {
+  ASSERT(power >= 0);
+  ASSERT(power < 64);
+  if (power == 0) {
+    SmiToInteger64(dst, src);
+    return;
+  }
+  movsxlq(dst, src);
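+  // dst now holds the smi, i.e. the value times two, so shifting left by
+  // power - 1 produces value * 2^power.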
+  shl(dst, Immediate(power - 1));
+}
+
+void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) {
+  ASSERT_EQ(0, kSmiTag);
+  testl(src, Immediate(kSmiTagMask));
+  j(zero, on_smi);
+}
+
+
+void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) {
+  Condition not_smi = CheckNotSmi(src);
+  j(not_smi, on_not_smi);
+}
+
+
+void MacroAssembler::JumpIfNotPositiveSmi(Register src,
+                                          Label* on_not_positive_smi) {
+  Condition not_positive_smi = CheckNotPositiveSmi(src);
+  j(not_positive_smi, on_not_positive_smi);
+}
+
+
+void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
+                                             int constant,
+                                             Label* on_equals) {
+  if (Smi::IsValid(constant)) {
+    Condition are_equal = CheckSmiEqualsConstant(src, constant);
+    j(are_equal, on_equals);
+  }
+}
+
+
+void MacroAssembler::JumpIfSmiGreaterEqualsConstant(Register src,
+                                                    int constant,
+                                                    Label* on_greater_equals) {
+  if (Smi::IsValid(constant)) {
+    Condition are_greater_equal = CheckSmiGreaterEqualsConstant(src, constant);
+    j(are_greater_equal, on_greater_equals);
+  } else if (constant < Smi::kMinValue) {
+    jmp(on_greater_equals);
+  }
+}
+
+
+void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
+  Condition is_valid = CheckInteger32ValidSmiValue(src);
+  j(ReverseCondition(is_valid), on_invalid);
+}
+
+
+void MacroAssembler::JumpIfNotBothSmi(Register src1,
+                                      Register src2,
+                                      Label* on_not_both_smi) {
+  Condition not_both_smi = CheckNotBothSmi(src1, src2);
+  j(not_both_smi, on_not_both_smi);
+}
+
+Condition MacroAssembler::CheckSmi(Register src) {
+  testb(src, Immediate(kSmiTagMask));
+  return zero;
+}
+
+
+Condition MacroAssembler::CheckNotSmi(Register src) {
+  ASSERT_EQ(0, kSmiTag);
+  testb(src, Immediate(kSmiTagMask));
+  return not_zero;
+}
+
+
+Condition MacroAssembler::CheckPositiveSmi(Register src) {
+  ASSERT_EQ(0, kSmiTag);
+  testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask)));
+  return zero;
+}
+
+
+Condition MacroAssembler::CheckNotPositiveSmi(Register src) {
+  ASSERT_EQ(0, kSmiTag);
+  testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask)));
+  return not_zero;
+}
+
+
+Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
+  if (first.is(second)) {
+    return CheckSmi(first);
+  }
+  movl(kScratchRegister, first);
+  orl(kScratchRegister, second);
+  return CheckSmi(kScratchRegister);
+}
+
+
+Condition MacroAssembler::CheckNotBothSmi(Register first, Register second) {
+  ASSERT_EQ(0, kSmiTag);
+  if (first.is(second)) {
+    return CheckNotSmi(first);
+  }
+  movl(kScratchRegister, first);
+  or_(kScratchRegister, second);
+  return CheckNotSmi(kScratchRegister);
+}
+
+
+Condition MacroAssembler::CheckIsMinSmi(Register src) {
+  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  cmpl(src, Immediate(0x40000000));
+  return equal;
+}
+
+Condition MacroAssembler::CheckSmiEqualsConstant(Register src, int constant) {
+  if (constant == 0) {
+    testl(src, src);
+    return zero;
+  }
+  if (Smi::IsValid(constant)) {
+    cmpl(src, Immediate(Smi::FromInt(constant)));
+    return zero;
+  }
+  // Can't be equal.
+  UNREACHABLE();
+  return no_condition;
+}
+
+
+Condition MacroAssembler::CheckSmiGreaterEqualsConstant(Register src,
+                                                        int constant) {
+  if (constant == 0) {
+    testl(src, Immediate(static_cast<uint32_t>(0x80000000u)));
+    return positive;
+  }
+  if (Smi::IsValid(constant)) {
+    cmpl(src, Immediate(Smi::FromInt(constant)));
+    return greater_equal;
+  }
+  // Can't be equal.
+  UNREACHABLE();
+  return no_condition;
+}
+
+
+Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
+  // A 32-bit integer value can be converted to a smi if it is in the
+  // range [-2^30 .. 2^30-1]. Equivalently, bits 30 and 31 of its 32-bit
+  // representation must be equal.
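+  // cmpl computes src - 0xC0000000, i.e. src + 2^30 modulo 2^32, whose sign
+  // bit is clear exactly when src lies in that range.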
+  cmpl(src, Immediate(0xC0000000u));
+  return positive;
+}
+
+
+void MacroAssembler::SmiNeg(Register dst,
+                            Register src,
+                            Label* on_not_smi_result) {
+  if (!dst.is(src)) {
+    movl(dst, src);
+  }
+  negl(dst);
+  testl(dst, Immediate(0x7fffffff));
+  // If the result is zero or 0x80000000, negation failed to create a smi.
+  j(equal, on_not_smi_result);
+}
+
+
+void MacroAssembler::SmiAdd(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result) {
+  ASSERT(!dst.is(src2));
+  if (!dst.is(src1)) {
+    movl(dst, src1);
+  }
+  addl(dst, src2);
+  if (!dst.is(src1)) {
+    j(overflow, on_not_smi_result);
+  } else {
+    Label smi_result;
+    j(no_overflow, &smi_result);
+    // Restore src1.
+    subl(src1, src2);
+    jmp(on_not_smi_result);
+    bind(&smi_result);
+  }
+}
+
+
+void MacroAssembler::SmiSub(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result) {
+  ASSERT(!dst.is(src2));
+  if (!dst.is(src1)) {
+    movl(dst, src1);
+  }
+  subl(dst, src2);
+  if (!dst.is(src1)) {
+    j(overflow, on_not_smi_result);
+  } else {
+    Label smi_result;
+    j(no_overflow, &smi_result);
+    // Restore src1.
+    addl(src1, src2);
+    jmp(on_not_smi_result);
+    bind(&smi_result);
+  }
+}
+
+
+void MacroAssembler::SmiMul(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result) {
+  ASSERT(!dst.is(src2));
+
+  if (dst.is(src1)) {
+    movq(kScratchRegister, src1);
+  }
+  SmiToInteger32(dst, src1);
+
+  imull(dst, src2);
+  j(overflow, on_not_smi_result);
+
+  // Check for negative zero result.  If product is zero, and one
+  // argument is negative, go to slow case.  The frame is unchanged
+  // in this block, so local control flow can use a Label rather
+  // than a JumpTarget.
+  Label non_zero_result;
+  testl(dst, dst);
+  j(not_zero, &non_zero_result);
+
+  // Test whether either operand is negative (the other must be zero).
+  orl(kScratchRegister, src2);
+  j(negative, on_not_smi_result);
+  bind(&non_zero_result);
+}
+
+
+void MacroAssembler::SmiTryAddConstant(Register dst,
+                                       Register src,
+                                       int32_t constant,
+                                       Label* on_not_smi_result) {
+  // Does not assume that src is a smi.
+  ASSERT_EQ(1, kSmiTagMask);
+  ASSERT_EQ(0, kSmiTag);
+  ASSERT(Smi::IsValid(constant));
+
+  Register tmp = (src.is(dst) ? kScratchRegister : dst);
+  movl(tmp, src);
+  addl(tmp, Immediate(Smi::FromInt(constant)));
+  if (tmp.is(kScratchRegister)) {
+    j(overflow, on_not_smi_result);
+    testl(tmp, Immediate(kSmiTagMask));
+    j(not_zero, on_not_smi_result);
+    movl(dst, tmp);
+  } else {
+    movl(kScratchRegister, Immediate(kSmiTagMask));
+    cmovl(overflow, dst, kScratchRegister);
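+    // On overflow, dst now holds kSmiTagMask, so the tag test below is
+    // non-zero and control goes to the slow case; otherwise the test
+    // checks the smi tag bit of the result.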
+    testl(dst, kScratchRegister);
+    j(not_zero, on_not_smi_result);
+  }
+}
+
+
+void MacroAssembler::SmiAddConstant(Register dst,
+                                    Register src,
+                                    int32_t constant,
+                                    Label* on_not_smi_result) {
+  ASSERT(Smi::IsValid(constant));
+  if (on_not_smi_result == NULL) {
+    if (dst.is(src)) {
+      addl(dst, Immediate(Smi::FromInt(constant)));
+    } else {
+      lea(dst, Operand(src, constant << kSmiTagSize));
+    }
+  } else {
+    if (!dst.is(src)) {
+      movl(dst, src);
+    }
+    addl(dst, Immediate(Smi::FromInt(constant)));
+    if (!dst.is(src)) {
+      j(overflow, on_not_smi_result);
+    } else {
+      Label result_ok;
+      j(no_overflow, &result_ok);
+      subl(dst, Immediate(Smi::FromInt(constant)));
+      jmp(on_not_smi_result);
+      bind(&result_ok);
+    }
+  }
+}
+
+
+void MacroAssembler::SmiSubConstant(Register dst,
+                                    Register src,
+                                    int32_t constant,
+                                    Label* on_not_smi_result) {
+  ASSERT(Smi::IsValid(constant));
+  Smi* smi_value = Smi::FromInt(constant);
+  if (dst.is(src)) {
+    // Optimistic subtract - may change value of dst register,
+    // if it has garbage bits in the higher half, but will not change
+    // the value as a tagged smi.
+    subl(dst, Immediate(smi_value));
+    if (on_not_smi_result != NULL) {
+      Label add_success;
+      j(no_overflow, &add_success);
+      addl(dst, Immediate(smi_value));
+      jmp(on_not_smi_result);
+      bind(&add_success);
+    }
+  } else {
+    UNIMPLEMENTED();  // Not used yet.
+  }
+}
+
+
+void MacroAssembler::SmiDiv(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result) {
+  ASSERT(!src2.is(rax));
+  ASSERT(!src2.is(rdx));
+  ASSERT(!src1.is(rdx));
+
+  // Check for 0 divisor (result is +/-Infinity).
+  Label positive_divisor;
+  testl(src2, src2);
+  j(zero, on_not_smi_result);
+  j(positive, &positive_divisor);
+  // Check for negative zero result.  If the dividend is zero, and the
+  // divisor is negative, return a floating point negative zero.
+  testl(src1, src1);
+  j(zero, on_not_smi_result);
+  bind(&positive_divisor);
+
+  // Sign extend src1 into edx:eax.
+  if (!src1.is(rax)) {
+    movl(rax, src1);
+  }
+  cdq();
+
+  idivl(src2);
+  // Check for the corner case of dividing the most negative smi by
+  // -1. We cannot use the overflow flag, since it is not set by
+  // idiv instruction.
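+  // (The operands are still tagged here, so Smi::kMinValue / -1 appears
+  // as 0x80000000 / 0xfffffffe, producing the quotient 0x40000000, which
+  // is one past the largest value that can be tagged as a smi.)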
+  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  cmpl(rax, Immediate(0x40000000));
+  j(equal, on_not_smi_result);
+  // Check that the remainder is zero.
+  testl(rdx, rdx);
+  j(not_zero, on_not_smi_result);
+  // Tag the result and store it in the destination register.
+  Integer32ToSmi(dst, rax);
+}
+
+
+void MacroAssembler::SmiMod(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!src2.is(rax));
+  ASSERT(!src2.is(rdx));
+  ASSERT(!src1.is(rdx));
+
+  testl(src2, src2);
+  j(zero, on_not_smi_result);
+
+  if (src1.is(rax)) {
+    // Must remember the value to see if a zero result should
+    // be a negative zero.
+    movl(kScratchRegister, rax);
+  } else {
+    movl(rax, src1);
+  }
+  // Sign extend eax into edx:eax.
+  cdq();
+  idivl(src2);
+  // Check for a negative zero result.  If the result is zero, and the
+  // dividend is negative, return a floating point negative zero.
+  Label non_zero_result;
+  testl(rdx, rdx);
+  j(not_zero, &non_zero_result);
+  if (src1.is(rax)) {
+    testl(kScratchRegister, kScratchRegister);
+  } else {
+    testl(src1, src1);
+  }
+  j(negative, on_not_smi_result);
+  bind(&non_zero_result);
+  if (!dst.is(rdx)) {
+    movl(dst, rdx);
+  }
+}
+
+
+void MacroAssembler::SmiNot(Register dst, Register src) {
+  if (dst.is(src)) {
+    not_(dst);
+    // Remove inverted smi-tag.  The mask is sign-extended to 64 bits.
+    xor_(src, Immediate(kSmiTagMask));
+  } else {
+    ASSERT_EQ(0, kSmiTag);
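+    // With kSmiTag == 0, a tagged smi is its value shifted left by one,
+    // and ~(2 * v + 1) == 2 * (~v), so adding the tag mask before the
+    // bitwise not yields the smi-tagged complement directly.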
+    lea(dst, Operand(src, kSmiTagMask));
+    not_(dst);
+  }
+}
+
+
+void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
+  if (!dst.is(src1)) {
+    movl(dst, src1);
+  }
+  and_(dst, src2);
+}
+
+
+void MacroAssembler::SmiAndConstant(Register dst, Register src, int constant) {
+  ASSERT(Smi::IsValid(constant));
+  if (!dst.is(src)) {
+    movl(dst, src);
+  }
+  and_(dst, Immediate(Smi::FromInt(constant)));
+}
+
+
+void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
+  if (!dst.is(src1)) {
+    movl(dst, src1);
+  }
+  or_(dst, src2);
+}
+
+
+void MacroAssembler::SmiOrConstant(Register dst, Register src, int constant) {
+  ASSERT(Smi::IsValid(constant));
+  if (!dst.is(src)) {
+    movl(dst, src);
+  }
+  or_(dst, Immediate(Smi::FromInt(constant)));
+}
+
+void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
+  if (!dst.is(src1)) {
+    movl(dst, src1);
+  }
+  xor_(dst, src2);
+}
+
+
+void MacroAssembler::SmiXorConstant(Register dst, Register src, int constant) {
+  ASSERT(Smi::IsValid(constant));
+  if (!dst.is(src)) {
+    movl(dst, src);
+  }
+  xor_(dst, Immediate(Smi::FromInt(constant)));
+}
+
+
+
+void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
+                                                     Register src,
+                                                     int shift_value) {
+  if (shift_value > 0) {
+    if (dst.is(src)) {
+      sarl(dst, Immediate(shift_value));
+      and_(dst, Immediate(~kSmiTagMask));
+    } else {
+      UNIMPLEMENTED();  // Not used.
+    }
+  }
+}
+
+
+void MacroAssembler::SmiShiftLogicalRightConstant(Register dst,
+                                                  Register src,
+                                                  int shift_value,
+                                                  Label* on_not_smi_result) {
+  // Logical right shift interprets its result as an *unsigned* number.
+  if (dst.is(src)) {
+    UNIMPLEMENTED();  // Not used.
+  } else {
+    movl(dst, src);
+    // Untag the smi.
+    sarl(dst, Immediate(kSmiTagSize));
+    if (shift_value < 2) {
+      // A negative smi shifted logically right by two or more bits is
+      // always in the positive smi range, but shifted by only zero or
+      // one bit it never is, so bail out to the slow case here.
+      j(negative, on_not_smi_result);
+    }
+    if (shift_value > 0) {
+      // Do the right shift on the integer value.
+      shrl(dst, Immediate(shift_value));
+    }
+    // Re-tag the result.
+    addl(dst, dst);
+  }
+}
+
+
+void MacroAssembler::SmiShiftLeftConstant(Register dst,
+                                          Register src,
+                                          int shift_value,
+                                          Label* on_not_smi_result) {
+  if (dst.is(src)) {
+    UNIMPLEMENTED();  // Not used.
+  } else {
+    movl(dst, src);
+    if (shift_value > 0) {
+      // Treat dst as an untagged integer value equal to two times the
+      // smi value of src, i.e., already shifted left by one.
+      if (shift_value > 1) {
+        shll(dst, Immediate(shift_value - 1));
+      }
+      // Convert int result to Smi, checking that it is in smi range.
+      ASSERT(kSmiTagSize == 1);  // adjust code if not the case
+      Integer32ToSmi(dst, dst, on_not_smi_result);
+    }
+  }
+}
+
+
+void MacroAssembler::SmiShiftLeft(Register dst,
+                                  Register src1,
+                                  Register src2,
+                                  Label* on_not_smi_result) {
+  ASSERT(!dst.is(rcx));
+  Label result_ok;
+  // Untag both operands.
+  SmiToInteger32(dst, src1);
+  SmiToInteger32(rcx, src2);
+  shll(dst);
+  // Check that the *signed* result fits in a smi.
+  Condition is_valid = CheckInteger32ValidSmiValue(dst);
+  j(is_valid, &result_ok);
+  // Restore the relevant bits of the source registers
+  // and call the slow version.
+  if (dst.is(src1)) {
+    shrl(dst);
+    Integer32ToSmi(dst, dst);
+  }
+  Integer32ToSmi(rcx, rcx);
+  jmp(on_not_smi_result);
+  bind(&result_ok);
+  Integer32ToSmi(dst, dst);
+}
+
+
+void MacroAssembler::SmiShiftLogicalRight(Register dst,
+                                          Register src1,
+                                          Register src2,
+                                          Label* on_not_smi_result) {
+  ASSERT(!dst.is(rcx));
+  Label result_ok;
+  // Untag both operands.
+  SmiToInteger32(dst, src1);
+  SmiToInteger32(rcx, src2);
+
+  shrl(dst);
+  // Check that the *unsigned* result fits in a smi.
+  // I.e., that it is a valid positive smi value. The positive smi
+  // values are 0..0x3fffffff, i.e., neither of the top-most two
+  // bits can be set.
+  //
+  // Those top bits can only be set after shifts by 0 or 1 when
+  // handed a valid smi.  If the answer cannot be represented by a
+  // smi, restore the left and right arguments, and jump to slow
+  // case.  The low bit of the left argument may be lost, but only
+  // in a case where it is dropped anyway.
+  testl(dst, Immediate(0xc0000000));
+  j(zero, &result_ok);
+  if (dst.is(src1)) {
+    shll(dst);
+    Integer32ToSmi(dst, dst);
+  }
+  Integer32ToSmi(rcx, rcx);
+  jmp(on_not_smi_result);
+  bind(&result_ok);
+  // Smi-tag the result in answer.
+  Integer32ToSmi(dst, dst);
+}
+
+
+void MacroAssembler::SmiShiftArithmeticRight(Register dst,
+                                             Register src1,
+                                             Register src2) {
+  ASSERT(!dst.is(rcx));
+  // Untag both operands.
+  SmiToInteger32(dst, src1);
+  SmiToInteger32(rcx, src2);
+  // Shift as integer.
+  sarl(dst);
+  // Retag result.
+  Integer32ToSmi(dst, dst);
+}
+
+
+void MacroAssembler::SelectNonSmi(Register dst,
+                                  Register src1,
+                                  Register src2,
+                                  Label* on_not_smis) {
+  ASSERT(!dst.is(src1));
+  ASSERT(!dst.is(src2));
+  // Both operands must not be smis.
+#ifdef DEBUG
+  Condition not_both_smis = CheckNotBothSmi(src1, src2);
+  Check(not_both_smis, "Both registers were smis.");
+#endif
+  ASSERT_EQ(0, kSmiTag);
+  ASSERT_EQ(0, Smi::FromInt(0));
+  movq(kScratchRegister, Immediate(kSmiTagMask));
+  and_(kScratchRegister, src1);
+  testl(kScratchRegister, src2);
+  j(not_zero, on_not_smis);
+  // One operand is a smi.
+
+  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
+  // kScratchRegister still holds src1 & kSmiTagMask, which is either
+  // zero or one.
+  subq(kScratchRegister, Immediate(1));
+  // If src1 is a smi, the scratch register is now all 1s, else it is all 0s.
+  movq(dst, src1);
+  xor_(dst, src2);
+  and_(dst, kScratchRegister);
+  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
+  xor_(dst, src1);
+  // If src1 is a smi, dst is src2, else it is src1, i.e., a non-smi.
+}
+
+
+SmiIndex MacroAssembler::SmiToIndex(Register dst, Register src, int shift) {
+  ASSERT(is_uint6(shift));
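+  // 'shift' is the total left shift to apply to the untagged smi value.
+  // The tagged value is already the smi value shifted left by kSmiTagSize,
+  // so only shift - kSmiTagSize remains to be applied, either as a
+  // ScaleFactor where possible or as an explicit shift otherwise.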
+  if (shift == 0) {  // times_1.
+    SmiToInteger32(dst, src);
+    return SmiIndex(dst, times_1);
+  }
+  if (shift <= 4) {  // Multipliers of 2 to 16 are handled using ScaleFactor.
+    // We expect that all smis are actually zero-padded. If this holds after
+    // checking, this line can be omitted.
+    movl(dst, src);  // Ensure that the smi is zero-padded.
+    return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiTagSize));
+  }
+  // Shift by shift-kSmiTagSize.
+  movl(dst, src);  // Ensure that the smi is zero-padded.
+  shl(dst, Immediate(shift - kSmiTagSize));
+  return SmiIndex(dst, times_1);
+}
+
+
+SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
+                                            Register src,
+                                            int shift) {
+  // Register src holds a positive smi.
+  ASSERT(is_uint6(shift));
+  if (shift == 0) {  // times_1.
+    SmiToInteger32(dst, src);
+    neg(dst);
+    return SmiIndex(dst, times_1);
+  }
+  if (shift <= 4) {  // Multipliers of 2 to 16 are handled using ScaleFactor.
+    movl(dst, src);
+    neg(dst);
+    return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiTagSize));
+  }
+  // Shift by shift-kSmiTagSize.
+  movl(dst, src);
+  neg(dst);
+  shl(dst, Immediate(shift - kSmiTagSize));
+  return SmiIndex(dst, times_1);
+}
+
+
+
+bool MacroAssembler::IsUnsafeSmi(Smi* value) {
+  return false;
+}
+
+void MacroAssembler::LoadUnsafeSmi(Register dst, Smi* source) {
+  UNIMPLEMENTED();
+}
+
+
+void MacroAssembler::Move(Register dst, Handle<Object> source) {
+  ASSERT(!source->IsFailure());
+  if (source->IsSmi()) {
+    if (IsUnsafeSmi(source)) {
+      LoadUnsafeSmi(dst, source);
+    } else {
+      int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source));
+      movq(dst, Immediate(smi));
+    }
+  } else {
+    movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
+  }
+}
+
+
+void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
+  if (source->IsSmi()) {
+    int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source));
+    movq(dst, Immediate(smi));
+  } else {
+    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
+    movq(dst, kScratchRegister);
+  }
+}
+
+
+void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
+  Move(kScratchRegister, source);
+  cmpq(dst, kScratchRegister);
+}
+
+
+void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
+  if (source->IsSmi()) {
+    if (IsUnsafeSmi(source)) {
+      LoadUnsafeSmi(kScratchRegister, source);
+      cmpl(dst, kScratchRegister);
+    } else {
+      // For smi-comparison, it suffices to compare the low 32 bits.
+      int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source));
+      cmpl(dst, Immediate(smi));
+    }
+  } else {
+    ASSERT(source->IsHeapObject());
+    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
+    cmpq(dst, kScratchRegister);
+  }
+}
+
+
+void MacroAssembler::Push(Handle<Object> source) {
+  if (source->IsSmi()) {
+    if (IsUnsafeSmi(source)) {
+      LoadUnsafeSmi(kScratchRegister, source);
+      push(kScratchRegister);
+    } else {
+      int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source));
+      push(Immediate(smi));
+    }
+  } else {
+    ASSERT(source->IsHeapObject());
+    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
+    push(kScratchRegister);
+  }
+}
+
+
+void MacroAssembler::Push(Smi* source) {
+  if (IsUnsafeSmi(source)) {
+    LoadUnsafeSmi(kScratchRegister, source);
+    push(kScratchRegister);
+  } else {
+    int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(source));
+    push(Immediate(smi));
+  }
+}
+
+
+void MacroAssembler::Jump(ExternalReference ext) {
+  movq(kScratchRegister, ext);
+  jmp(kScratchRegister);
+}
+
+
+void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
+  movq(kScratchRegister, destination, rmode);
+  jmp(kScratchRegister);
+}
+
+
+void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
+  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  movq(kScratchRegister, code_object, rmode);
+#ifdef DEBUG
+  Label target;
+  bind(&target);
+#endif
+  jmp(kScratchRegister);
+#ifdef DEBUG
+  ASSERT_EQ(kCallTargetAddressOffset,
+            SizeOfCodeGeneratedSince(&target) + kPointerSize);
+#endif
+}
+
+
+void MacroAssembler::Call(ExternalReference ext) {
+  movq(kScratchRegister, ext);
+  call(kScratchRegister);
+}
+
+
+void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
+  movq(kScratchRegister, destination, rmode);
+  call(kScratchRegister);
+}
+
+
+void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
+  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  WriteRecordedPositions();
+  movq(kScratchRegister, code_object, rmode);
+#ifdef DEBUG
+  // Patch target is kPointerSize bytes *before* the target label.
+  Label target;
+  bind(&target);
+#endif
+  call(kScratchRegister);
+#ifdef DEBUG
+  ASSERT_EQ(kCallTargetAddressOffset,
+            SizeOfCodeGeneratedSince(&target) + kPointerSize);
+#endif
+}
+
+
+void MacroAssembler::PushTryHandler(CodeLocation try_location,
+                                    HandlerType type) {
+  // Adjust this code if not the case.
+  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+  // The pc (return address) is already on TOS.  This code pushes state,
+  // frame pointer and current handler.  Check that they are expected
+  // next on the stack, in that order.
+  ASSERT_EQ(StackHandlerConstants::kStateOffset,
+            StackHandlerConstants::kPCOffset - kPointerSize);
+  ASSERT_EQ(StackHandlerConstants::kFPOffset,
+            StackHandlerConstants::kStateOffset - kPointerSize);
+  ASSERT_EQ(StackHandlerConstants::kNextOffset,
+            StackHandlerConstants::kFPOffset - kPointerSize);
+
+  if (try_location == IN_JAVASCRIPT) {
+    if (type == TRY_CATCH_HANDLER) {
+      push(Immediate(StackHandler::TRY_CATCH));
+    } else {
+      push(Immediate(StackHandler::TRY_FINALLY));
+    }
+    push(rbp);
+  } else {
+    ASSERT(try_location == IN_JS_ENTRY);
+    // The frame pointer does not point to a JS frame so we save NULL
+    // for rbp. We expect the code throwing an exception to check rbp
+    // before dereferencing it to restore the context.
+    push(Immediate(StackHandler::ENTRY));
+    push(Immediate(0));  // NULL frame pointer.
+  }
+  // Save the current handler.
+  movq(kScratchRegister, ExternalReference(Top::k_handler_address));
+  push(Operand(kScratchRegister, 0));
+  // Link this handler.
+  movq(Operand(kScratchRegister, 0), rsp);
+}
+
+
+void MacroAssembler::Ret() {
+  ret(0);
+}
+
+
+void MacroAssembler::FCmp() {
+  fucompp();
+  push(rax);
+  fnstsw_ax();
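+  // The FPU condition codes are now in AH.  SAHF copies them into the
+  // CPU flags directly; when SAHF is unavailable, the same byte is
+  // shifted down and moved into the low byte of RFLAGS via push/popfq.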
+  if (CpuFeatures::IsSupported(CpuFeatures::SAHF)) {
+    sahf();
+  } else {
+    shrl(rax, Immediate(8));
+    and_(rax, Immediate(0xFF));
+    push(rax);
+    popfq();
+  }
+  pop(rax);
+}
+
+
+void MacroAssembler::CmpObjectType(Register heap_object,
+                                   InstanceType type,
+                                   Register map) {
+  movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+  CmpInstanceType(map, type);
+}
+
+
+void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
+  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
+       Immediate(static_cast<int8_t>(type)));
+}
+
+
+void MacroAssembler::TryGetFunctionPrototype(Register function,
+                                             Register result,
+                                             Label* miss) {
+  // Check that the receiver isn't a smi.
+  testl(function, Immediate(kSmiTagMask));
+  j(zero, miss);
+
+  // Check that the function really is a function.
+  CmpObjectType(function, JS_FUNCTION_TYPE, result);
+  j(not_equal, miss);
+
+  // Make sure that the function has an instance prototype.
+  Label non_instance;
+  testb(FieldOperand(result, Map::kBitFieldOffset),
+        Immediate(1 << Map::kHasNonInstancePrototype));
+  j(not_zero, &non_instance);
+
+  // Get the prototype or initial map from the function.
+  movq(result,
+       FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // If the prototype or initial map is the hole, don't return it and
+  // simply miss the cache instead. This will allow us to allocate a
+  // prototype object on-demand in the runtime system.
+  CompareRoot(result, Heap::kTheHoleValueRootIndex);
+  j(equal, miss);
+
+  // If the function does not have an initial map, we're done.
+  Label done;
+  CmpObjectType(result, MAP_TYPE, kScratchRegister);
+  j(not_equal, &done);
+
+  // Get the prototype from the initial map.
+  movq(result, FieldOperand(result, Map::kPrototypeOffset));
+  jmp(&done);
+
+  // Non-instance prototype: Fetch prototype from constructor field
+  // in initial map.
+  bind(&non_instance);
+  movq(result, FieldOperand(result, Map::kConstructorOffset));
+
+  // All done.
+  bind(&done);
+}
+
+
+void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    movq(kScratchRegister, ExternalReference(counter));
+    movl(Operand(kScratchRegister, 0), Immediate(value));
+  }
+}
+
+
+void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
+  ASSERT(value > 0);
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    movq(kScratchRegister, ExternalReference(counter));
+    Operand operand(kScratchRegister, 0);
+    if (value == 1) {
+      incl(operand);
+    } else {
+      addl(operand, Immediate(value));
+    }
+  }
+}
+
+
+void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
+  ASSERT(value > 0);
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    movq(kScratchRegister, ExternalReference(counter));
+    Operand operand(kScratchRegister, 0);
+    if (value == 1) {
+      decl(operand);
+    } else {
+      subl(operand, Immediate(value));
+    }
+  }
+}
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+
+void MacroAssembler::PushRegistersFromMemory(RegList regs) {
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  // Push the content of the memory location to the stack.
+  for (int i = 0; i < kNumJSCallerSaved; i++) {
+    int r = JSCallerSavedCode(i);
+    if ((regs & (1 << r)) != 0) {
+      ExternalReference reg_addr =
+          ExternalReference(Debug_Address::Register(i));
+      movq(kScratchRegister, reg_addr);
+      push(Operand(kScratchRegister, 0));
+    }
+  }
+}
+
+void MacroAssembler::SaveRegistersToMemory(RegList regs) {
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  // Copy the content of registers to memory location.
+  for (int i = 0; i < kNumJSCallerSaved; i++) {
+    int r = JSCallerSavedCode(i);
+    if ((regs & (1 << r)) != 0) {
+      Register reg = { r };
+      ExternalReference reg_addr =
+          ExternalReference(Debug_Address::Register(i));
+      movq(kScratchRegister, reg_addr);
+      movq(Operand(kScratchRegister, 0), reg);
+    }
+  }
+}
+
+
+void MacroAssembler::RestoreRegistersFromMemory(RegList regs) {
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  // Copy the content of memory location to registers.
+  for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
+    int r = JSCallerSavedCode(i);
+    if ((regs & (1 << r)) != 0) {
+      Register reg = { r };
+      ExternalReference reg_addr =
+          ExternalReference(Debug_Address::Register(i));
+      movq(kScratchRegister, reg_addr);
+      movq(reg, Operand(kScratchRegister, 0));
+    }
+  }
+}
+
+
+void MacroAssembler::PopRegistersToMemory(RegList regs) {
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  // Pop the content from the stack to the memory location.
+  for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
+    int r = JSCallerSavedCode(i);
+    if ((regs & (1 << r)) != 0) {
+      ExternalReference reg_addr =
+          ExternalReference(Debug_Address::Register(i));
+      movq(kScratchRegister, reg_addr);
+      pop(Operand(kScratchRegister, 0));
+    }
+  }
+}
+
+
+void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
+                                                    Register scratch,
+                                                    RegList regs) {
+  ASSERT(!scratch.is(kScratchRegister));
+  ASSERT(!base.is(kScratchRegister));
+  ASSERT(!base.is(scratch));
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  // Copy the content of the stack to the memory location and adjust base.
+  for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
+    int r = JSCallerSavedCode(i);
+    if ((regs & (1 << r)) != 0) {
+      movq(scratch, Operand(base, 0));
+      ExternalReference reg_addr =
+          ExternalReference(Debug_Address::Register(i));
+      movq(kScratchRegister, reg_addr);
+      movq(Operand(kScratchRegister, 0), scratch);
+      lea(base, Operand(base, kPointerSize));
+    }
+  }
+}
+
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
+
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
+  bool resolved;
+  Handle<Code> code = ResolveBuiltin(id, &resolved);
+
+  // Calls are not allowed in some stubs.
+  ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
+
+  // Rely on the assertion to check that the number of provided
+  // arguments match the expected number of arguments. Fake a
+  // parameter count to avoid emitting code to do the check.
+  ParameterCount expected(0);
+  InvokeCode(Handle<Code>(code), expected, expected,
+             RelocInfo::CODE_TARGET, flag);
+
+  const char* name = Builtins::GetName(id);
+  int argc = Builtins::GetArgumentsCount(id);
+  // The target address for the call (or jump) is stored as an immediate
+  // kCallTargetAddressOffset bytes back from the current pc offset.
+  if (!resolved) {
+    uint32_t flags =
+        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
+        Bootstrapper::FixupFlagsIsPCRelative::encode(false) |
+        Bootstrapper::FixupFlagsUseCodeObject::encode(false);
+    Unresolved entry =
+        { pc_offset() - kCallTargetAddressOffset, flags, name };
+    unresolved_.Add(entry);
+  }
+}
+
+
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+                                    const ParameterCount& actual,
+                                    Handle<Code> code_constant,
+                                    Register code_register,
+                                    Label* done,
+                                    InvokeFlag flag) {
+  bool definitely_matches = false;
+  Label invoke;
+  if (expected.is_immediate()) {
+    ASSERT(actual.is_immediate());
+    if (expected.immediate() == actual.immediate()) {
+      definitely_matches = true;
+    } else {
+      movq(rax, Immediate(actual.immediate()));
+      if (expected.immediate() ==
+          SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+        // Don't worry about adapting arguments for built-ins that
+        // don't want that done. Skip adaptation code by making it look
+        // like we have a match between expected and actual number of
+        // arguments.
+        definitely_matches = true;
+      } else {
+        movq(rbx, Immediate(expected.immediate()));
+      }
+    }
+  } else {
+    if (actual.is_immediate()) {
+      // Expected is in register, actual is immediate. This is the
+      // case when we invoke function values without going through the
+      // IC mechanism.
+      cmpq(expected.reg(), Immediate(actual.immediate()));
+      j(equal, &invoke);
+      ASSERT(expected.reg().is(rbx));
+      movq(rax, Immediate(actual.immediate()));
+    } else if (!expected.reg().is(actual.reg())) {
+      // Both expected and actual are in (different) registers. This
+      // is the case when we invoke functions using call and apply.
+      cmpq(expected.reg(), actual.reg());
+      j(equal, &invoke);
+      ASSERT(actual.reg().is(rax));
+      ASSERT(expected.reg().is(rbx));
+    }
+  }
+
+  if (!definitely_matches) {
+    Handle<Code> adaptor =
+        Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+    if (!code_constant.is_null()) {
+      movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
+      addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+    } else if (!code_register.is(rdx)) {
+      movq(rdx, code_register);
+    }
+
+    if (flag == CALL_FUNCTION) {
+      Call(adaptor, RelocInfo::CODE_TARGET);
+      jmp(done);
+    } else {
+      Jump(adaptor, RelocInfo::CODE_TARGET);
+    }
+    bind(&invoke);
+  }
+}
+
+
+void MacroAssembler::InvokeCode(Register code,
+                                const ParameterCount& expected,
+                                const ParameterCount& actual,
+                                InvokeFlag flag) {
+  Label done;
+  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
+  if (flag == CALL_FUNCTION) {
+    call(code);
+  } else {
+    ASSERT(flag == JUMP_FUNCTION);
+    jmp(code);
+  }
+  bind(&done);
+}
+
+
+void MacroAssembler::InvokeCode(Handle<Code> code,
+                                const ParameterCount& expected,
+                                const ParameterCount& actual,
+                                RelocInfo::Mode rmode,
+                                InvokeFlag flag) {
+  Label done;
+  Register dummy = rax;
+  InvokePrologue(expected, actual, code, dummy, &done, flag);
+  if (flag == CALL_FUNCTION) {
+    Call(code, rmode);
+  } else {
+    ASSERT(flag == JUMP_FUNCTION);
+    Jump(code, rmode);
+  }
+  bind(&done);
+}
+
+
+void MacroAssembler::InvokeFunction(Register function,
+                                    const ParameterCount& actual,
+                                    InvokeFlag flag) {
+  ASSERT(function.is(rdi));
+  movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+  movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
+  movsxlq(rbx,
+          FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
+  movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+  // Advances rdx to the end of the Code object header, to the start of
+  // the executable code.
+  lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
+
+  ParameterCount expected(rbx);
+  InvokeCode(rdx, expected, actual, flag);
+}
+
+
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
+  push(rbp);
+  movq(rbp, rsp);
+  push(rsi);  // Context.
+  push(Immediate(Smi::FromInt(type)));
+  movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
+  push(kScratchRegister);
+  if (FLAG_debug_code) {
+    movq(kScratchRegister,
+         Factory::undefined_value(),
+         RelocInfo::EMBEDDED_OBJECT);
+    cmpq(Operand(rsp, 0), kScratchRegister);
+    Check(not_equal, "code object not properly patched");
+  }
+}
+
+
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
+  if (FLAG_debug_code) {
+    movq(kScratchRegister, Immediate(Smi::FromInt(type)));
+    cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
+    Check(equal, "stack frame types must match");
+  }
+  movq(rsp, rbp);
+  pop(rbp);
+}
+
+
+
+void MacroAssembler::EnterExitFrame(StackFrame::Type type, int result_size) {
+  ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
+
+  // Setup the frame structure on the stack.
+  // All constants are relative to the frame pointer of the exit frame.
+  ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
+  ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
+  ASSERT(ExitFrameConstants::kCallerFPOffset ==  0 * kPointerSize);
+  push(rbp);
+  movq(rbp, rsp);
+
+  // Reserve room for entry stack pointer and push the debug marker.
+  ASSERT(ExitFrameConstants::kSPOffset  == -1 * kPointerSize);
+  push(Immediate(0));  // saved entry sp, patched before call
+  push(Immediate(type == StackFrame::EXIT_DEBUG ? 1 : 0));
+
+  // Save the frame pointer and the context in top.
+  ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
+  ExternalReference context_address(Top::k_context_address);
+  movq(r14, rax);  // Backup rax before we use it.
+
+  movq(rax, rbp);
+  store_rax(c_entry_fp_address);
+  movq(rax, rsi);
+  store_rax(context_address);
+
+  // Setup argv in callee-saved register r15. It is reused in LeaveExitFrame,
+  // so it must be retained across the C-call.
+  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
+  lea(r15, Operand(rbp, r14, times_pointer_size, offset));
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Save the state of all registers to the stack from the memory
+  // location. This is needed to allow nested break points.
+  if (type == StackFrame::EXIT_DEBUG) {
+    // TODO(1243899): This should be symmetric to
+    // CopyRegistersFromStackToMemory() but it isn't! esp is assumed
+    // correct here, but computed for the other call. Very error
+    // prone! FIX THIS.  Actually there are deeper problems with
+    // register saving than this asymmetry (see the bug report
+    // associated with this issue).
+    PushRegistersFromMemory(kJSCallerSaved);
+  }
+#endif
+
+#ifdef _WIN64
+  // Reserve space on stack for result and argument structures, if necessary.
+  int result_stack_space = (result_size < 2) ? 0 : result_size * kPointerSize;
+  // Reserve space for the Arguments object.  The Windows 64-bit ABI
+  // requires us to pass this structure as a pointer to its location on
+  // the stack.  The structure contains 2 values.
+  int argument_stack_space = 2 * kPointerSize;
+  // We also need backing space for 4 parameters, even though
+  // we only pass one or two parameters, and they are passed in registers.
+  int argument_mirror_space = 4 * kPointerSize;
+  int total_stack_space =
+      argument_mirror_space + argument_stack_space + result_stack_space;
+  subq(rsp, Immediate(total_stack_space));
+#endif
+
+  // Get the required frame alignment for the OS.
+  static const int kFrameAlignment = OS::ActivationFrameAlignment();
+  if (kFrameAlignment > 0) {
+    ASSERT(IsPowerOf2(kFrameAlignment));
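+    // For a power-of-two alignment, masking with -kFrameAlignment rounds
+    // rsp down to the nearest aligned address.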
+    movq(kScratchRegister, Immediate(-kFrameAlignment));
+    and_(rsp, kScratchRegister);
+  }
+
+  // Patch the saved entry sp.
+  movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
+}
+
+
+void MacroAssembler::LeaveExitFrame(StackFrame::Type type, int result_size) {
+  // Registers:
+  // r15 : argv
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Restore the memory copy of the registers by digging them out from
+  // the stack. This is needed to allow nested break points.
+  if (type == StackFrame::EXIT_DEBUG) {
+    // It's okay to clobber register rbx below because we don't need
+    // the function pointer after this.
+    const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
+    int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
+    lea(rbx, Operand(rbp, kOffset));
+    CopyRegistersFromStackToMemory(rbx, rcx, kJSCallerSaved);
+  }
+#endif
+
+  // Get the return address from the stack and restore the frame pointer.
+  movq(rcx, Operand(rbp, 1 * kPointerSize));
+  movq(rbp, Operand(rbp, 0 * kPointerSize));
+
+#ifdef _WIN64
+  // If return value is on the stack, pop it to registers.
+  if (result_size > 1) {
+    ASSERT_EQ(2, result_size);
+    // Position above 4 argument mirrors and arguments object.
+    movq(rax, Operand(rsp, 6 * kPointerSize));
+    movq(rdx, Operand(rsp, 7 * kPointerSize));
+  }
+#endif
+
+  // Pop everything up to and including the arguments and the receiver
+  // from the caller stack.
+  lea(rsp, Operand(r15, 1 * kPointerSize));
+
+  // Restore current context from top and clear it in debug mode.
+  ExternalReference context_address(Top::k_context_address);
+  movq(kScratchRegister, context_address);
+  movq(rsi, Operand(kScratchRegister, 0));
+#ifdef DEBUG
+  movq(Operand(kScratchRegister, 0), Immediate(0));
+#endif
+
+  // Push the return address to get ready to return.
+  push(rcx);
+
+  // Clear the top frame.
+  ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
+  movq(kScratchRegister, c_entry_fp_address);
+  movq(Operand(kScratchRegister, 0), Immediate(0));
+}
+
+
+Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
+                                   JSObject* holder, Register holder_reg,
+                                   Register scratch,
+                                   Label* miss) {
+  // Make sure there's no overlap between scratch and the other
+  // registers.
+  ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));
+
+  // Keep track of the current object in register reg.  On the first
+  // iteration, reg is an alias for object_reg, on later iterations,
+  // it is an alias for holder_reg.
+  Register reg = object_reg;
+  int depth = 1;
+
+  // Check the maps in the prototype chain.
+  // Traverse the prototype chain from the object and do map checks.
+  while (object != holder) {
+    depth++;
+
+    // Only global objects and objects that do not require access
+    // checks are allowed in stubs.
+    ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+    JSObject* prototype = JSObject::cast(object->GetPrototype());
+    if (Heap::InNewSpace(prototype)) {
+      // Get the map of the current object.
+      movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+      Cmp(scratch, Handle<Map>(object->map()));
+      // Branch on the result of the map check.
+      j(not_equal, miss);
+      // Check access rights to the global object.  This has to happen
+      // after the map check so that we know that the object is
+      // actually a global object.
+      if (object->IsJSGlobalProxy()) {
+        CheckAccessGlobalProxy(reg, scratch, miss);
+
+        // Restore scratch register to be the map of the object.
+        // We load the prototype from the map in the scratch register.
+        movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+      }
+      // The prototype is in new space; we cannot store a reference
+      // to it in the code. Load it from the map.
+      reg = holder_reg;  // from now the object is in holder_reg
+      movq(reg, FieldOperand(scratch, Map::kPrototypeOffset));
+
+    } else {
+      // Check the map of the current object.
+      Cmp(FieldOperand(reg, HeapObject::kMapOffset),
+          Handle<Map>(object->map()));
+      // Branch on the result of the map check.
+      j(not_equal, miss);
+      // Check access rights to the global object.  This has to happen
+      // after the map check so that we know that the object is
+      // actually a global object.
+      if (object->IsJSGlobalProxy()) {
+        CheckAccessGlobalProxy(reg, scratch, miss);
+      }
+      // The prototype is in old space; load it directly.
+      reg = holder_reg;  // from now the object is in holder_reg
+      Move(reg, Handle<JSObject>(prototype));
+    }
+
+    // Go to the next object in the prototype chain.
+    object = prototype;
+  }
+
+  // Check the holder map.
+  Cmp(FieldOperand(reg, HeapObject::kMapOffset),
+      Handle<Map>(holder->map()));
+  j(not_equal, miss);
+
+  // Log the check depth.
+  LOG(IntEvent("check-maps-depth", depth));
+
+  // Perform security check for access to the global object and return
+  // the holder register.
+  ASSERT(object == holder);
+  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+  if (object->IsJSGlobalProxy()) {
+    CheckAccessGlobalProxy(reg, scratch, miss);
+  }
+  return reg;
+}
+
+
+
+
+void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
+                                            Register scratch,
+                                            Label* miss) {
+  Label same_contexts;
+
+  ASSERT(!holder_reg.is(scratch));
+  ASSERT(!scratch.is(kScratchRegister));
+  // Load current lexical context from the stack frame.
+  movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
+
+  // When generating debug code, make sure the lexical context is set.
+  if (FLAG_debug_code) {
+    cmpq(scratch, Immediate(0));
+    Check(not_equal, "we should not have an empty lexical context");
+  }
+  // Load the global context of the current context.
+  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+  movq(scratch, FieldOperand(scratch, offset));
+  movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
+
+  // Check the context is a global context.
+  if (FLAG_debug_code) {
+    Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
+        Factory::global_context_map());
+    Check(equal, "JSGlobalObject::global_context should be a global context.");
+  }
+
+  // Check if both contexts are the same.
+  cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
+  j(equal, &same_contexts);
+
+  // Compare security tokens.
+  // Check that the security token in the calling global object is
+  // compatible with the security token in the receiving global
+  // object.
+
+  // Check the context is a global context.
+  if (FLAG_debug_code) {
+    // Preserve original value of holder_reg.
+    push(holder_reg);
+    movq(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
+    CompareRoot(holder_reg, Heap::kNullValueRootIndex);
+    Check(not_equal, "JSGlobalProxy::context() should not be null.");
+
+    // Read the first word and compare to global_context_map().
+    movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
+    CompareRoot(holder_reg, Heap::kGlobalContextMapRootIndex);
+    Check(equal, "JSGlobalObject::global_context should be a global context.");
+    pop(holder_reg);
+  }
+
+  movq(kScratchRegister,
+       FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
+  int token_offset = Context::kHeaderSize +
+                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
+  movq(scratch, FieldOperand(scratch, token_offset));
+  cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
+  j(not_equal, miss);
+
+  bind(&same_contexts);
+}
+
+
+void MacroAssembler::LoadAllocationTopHelper(Register result,
+                                             Register result_end,
+                                             Register scratch,
+                                             AllocationFlags flags) {
+  ExternalReference new_space_allocation_top =
+      ExternalReference::new_space_allocation_top_address();
+
+  // Just return if allocation top is already known.
+  if ((flags & RESULT_CONTAINS_TOP) != 0) {
+    // No use of scratch if allocation top is provided.
+    ASSERT(scratch.is(no_reg));
+#ifdef DEBUG
+    // Assert that result actually contains top on entry.
+    movq(kScratchRegister, new_space_allocation_top);
+    cmpq(result, Operand(kScratchRegister, 0));
+    Check(equal, "Unexpected allocation top");
+#endif
+    return;
+  }
+
+  // Move address of new object to result. Use scratch register if available.
+  if (scratch.is(no_reg)) {
+    movq(kScratchRegister, new_space_allocation_top);
+    movq(result, Operand(kScratchRegister, 0));
+  } else {
+    ASSERT(!scratch.is(result_end));
+    movq(scratch, new_space_allocation_top);
+    movq(result, Operand(scratch, 0));
+  }
+}
+
+
+void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
+                                               Register scratch) {
+  ExternalReference new_space_allocation_top =
+      ExternalReference::new_space_allocation_top_address();
+
+  // Update new top.
+  if (result_end.is(rax)) {
+    // rax can be stored directly to a memory location.
+    store_rax(new_space_allocation_top);
+  } else {
+    // Register required - use scratch provided if available.
+    if (scratch.is(no_reg)) {
+      movq(kScratchRegister, new_space_allocation_top);
+      movq(Operand(kScratchRegister, 0), result_end);
+    } else {
+      movq(Operand(scratch, 0), result_end);
+    }
+  }
+}
+
+
+void MacroAssembler::AllocateInNewSpace(int object_size,
+                                        Register result,
+                                        Register result_end,
+                                        Register scratch,
+                                        Label* gc_required,
+                                        AllocationFlags flags) {
+  ASSERT(!result.is(result_end));
+
+  // Load address of new object into result.
+  LoadAllocationTopHelper(result, result_end, scratch, flags);
+
+  // Calculate new top and bail out if new space is exhausted.
+  ExternalReference new_space_allocation_limit =
+      ExternalReference::new_space_allocation_limit_address();
+  lea(result_end, Operand(result, object_size));
+  movq(kScratchRegister, new_space_allocation_limit);
+  cmpq(result_end, Operand(kScratchRegister, 0));
+  j(above, gc_required);
+
+  // Update allocation top.
+  UpdateAllocationTopHelper(result_end, scratch);
+
+  // Tag the result if requested.
+  if ((flags & TAG_OBJECT) != 0) {
+    addq(result, Immediate(kHeapObjectTag));
+  }
+}
+
+
+void MacroAssembler::AllocateInNewSpace(int header_size,
+                                        ScaleFactor element_size,
+                                        Register element_count,
+                                        Register result,
+                                        Register result_end,
+                                        Register scratch,
+                                        Label* gc_required,
+                                        AllocationFlags flags) {
+  ASSERT(!result.is(result_end));
+
+  // Load address of new object into result.
+  LoadAllocationTopHelper(result, result_end, scratch, flags);
+
+  // Calculate new top and bail out if new space is exhausted.
+  ExternalReference new_space_allocation_limit =
+      ExternalReference::new_space_allocation_limit_address();
+  lea(result_end, Operand(result, element_count, element_size, header_size));
+  movq(kScratchRegister, new_space_allocation_limit);
+  cmpq(result_end, Operand(kScratchRegister, 0));
+  j(above, gc_required);
+
+  // Update allocation top.
+  UpdateAllocationTopHelper(result_end, scratch);
+
+  // Tag the result if requested.
+  if ((flags & TAG_OBJECT) != 0) {
+    addq(result, Immediate(kHeapObjectTag));
+  }
+}
+
+
+void MacroAssembler::AllocateInNewSpace(Register object_size,
+                                        Register result,
+                                        Register result_end,
+                                        Register scratch,
+                                        Label* gc_required,
+                                        AllocationFlags flags) {
+  // Load address of new object into result.
+  LoadAllocationTopHelper(result, result_end, scratch, flags);
+
+  // Calculate new top and bail out if new space is exhausted.
+  ExternalReference new_space_allocation_limit =
+      ExternalReference::new_space_allocation_limit_address();
+  if (!object_size.is(result_end)) {
+    movq(result_end, object_size);
+  }
+  addq(result_end, result);
+  movq(kScratchRegister, new_space_allocation_limit);
+  cmpq(result_end, Operand(kScratchRegister, 0));
+  j(above, gc_required);
+
+  // Update allocation top.
+  UpdateAllocationTopHelper(result_end, scratch);
+
+  // Tag the result if requested.
+  if ((flags & TAG_OBJECT) != 0) {
+    addq(result, Immediate(kHeapObjectTag));
+  }
+}
+
+
+void MacroAssembler::UndoAllocationInNewSpace(Register object) {
+  ExternalReference new_space_allocation_top =
+      ExternalReference::new_space_allocation_top_address();
+
+  // Make sure the object has no tag before resetting top.
+  and_(object, Immediate(~kHeapObjectTagMask));
+  movq(kScratchRegister, new_space_allocation_top);
+#ifdef DEBUG
+  cmpq(object, Operand(kScratchRegister, 0));
+  Check(below, "Undo allocation of non allocated memory");
+#endif
+  movq(Operand(kScratchRegister, 0), object);
+}
+
+
+CodePatcher::CodePatcher(byte* address, int size)
+    : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
+  // Create a new macro assembler pointing to the address of the code to patch.
+  // The size is adjusted with kGap in order for the assembler to generate size
+  // bytes of instructions without failing with buffer size constraints.
+  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+CodePatcher::~CodePatcher() {
+  // Indicate that code has changed.
+  CPU::FlushICache(address_, size_);
+
+  // Check that the code was patched as expected.
+  ASSERT(masm_.pc_ == address_ + size_);
+  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
new file mode 100644
index 0000000..adc136a
--- /dev/null
+++ b/src/x64/macro-assembler-x64.h
@@ -0,0 +1,694 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
+#define V8_X64_MACRO_ASSEMBLER_X64_H_
+
+#include "assembler.h"
+
+namespace v8 {
+namespace internal {
+
+// Default scratch register used by MacroAssembler (and other code that needs
+// a spare register). The register isn't callee-saved, and is not used by
+// the function calling convention.
+static const Register kScratchRegister = r10;
+
+// Forward declaration.
+class JumpTarget;
+
+struct SmiIndex {
+  SmiIndex(Register index_register, ScaleFactor scale)
+      : reg(index_register),
+        scale(scale) {}
+  Register reg;
+  ScaleFactor scale;
+};
+
+// MacroAssembler implements a collection of frequently used macros.
+class MacroAssembler: public Assembler {
+ public:
+  MacroAssembler(void* buffer, int size);
+
+  void LoadRoot(Register destination, Heap::RootListIndex index);
+  void CompareRoot(Register with, Heap::RootListIndex index);
+  void CompareRoot(Operand with, Heap::RootListIndex index);
+  void PushRoot(Heap::RootListIndex index);
+
+  // ---------------------------------------------------------------------------
+  // GC Support
+
+  // Set the remembered set bit for [object+offset].
+  // object is the object being stored into, value is the object being stored.
+  // If offset is zero, then the scratch register contains the array index into
+  // the elements array represented as a Smi.
+  // All registers are clobbered by the operation.
+  void RecordWrite(Register object,
+                   int offset,
+                   Register value,
+                   Register scratch);
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // ---------------------------------------------------------------------------
+  // Debugger Support
+
+  void SaveRegistersToMemory(RegList regs);
+  void RestoreRegistersFromMemory(RegList regs);
+  void PushRegistersFromMemory(RegList regs);
+  void PopRegistersToMemory(RegList regs);
+  void CopyRegistersFromStackToMemory(Register base,
+                                      Register scratch,
+                                      RegList regs);
+#endif
+
+  // ---------------------------------------------------------------------------
+  // Activation frames
+
+  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
+  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
+
+  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
+  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
+
+  // Enter specific kind of exit frame; either EXIT or
+  // EXIT_DEBUG. Expects the number of arguments in register rax and
+  // sets up the number of arguments in register rdi and the pointer
+  // to the first argument in register rsi.
+  void EnterExitFrame(StackFrame::Type type, int result_size = 1);
+
+  // Leave the current exit frame. Expects/provides the return value in
+  // register rax:rdx (untouched) and the pointer to the first
+  // argument in register rsi.
+  void LeaveExitFrame(StackFrame::Type type, int result_size = 1);
+
+
+  // ---------------------------------------------------------------------------
+  // JavaScript invokes
+
+  // Invoke the JavaScript function code by either calling or jumping.
+  void InvokeCode(Register code,
+                  const ParameterCount& expected,
+                  const ParameterCount& actual,
+                  InvokeFlag flag);
+
+  void InvokeCode(Handle<Code> code,
+                  const ParameterCount& expected,
+                  const ParameterCount& actual,
+                  RelocInfo::Mode rmode,
+                  InvokeFlag flag);
+
+  // Invoke the JavaScript function in the given register. Changes the
+  // current context to the context in the function before invoking.
+  void InvokeFunction(Register function,
+                      const ParameterCount& actual,
+                      InvokeFlag flag);
+
+  // Invoke specified builtin JavaScript function. Adds an entry to
+  // the unresolved list if the name does not resolve.
+  void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);
+
+  // Store the code object for the given builtin in the target register.
+  void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+
+
+  // ---------------------------------------------------------------------------
+  // Smi tagging, untagging and operations on tagged smis.
+
+  // Conversions between tagged smi values and non-tagged integer values.
+
+  // Tag an integer value. The result must be known to be a valid smi value.
+  // Only uses the low 32 bits of the src register.
+  void Integer32ToSmi(Register dst, Register src);
+
+  // Tag an integer value if possible, or jump if the integer value cannot be
+  // represented as a smi. Only uses the low 32 bits of the src register.
+  void Integer32ToSmi(Register dst, Register src, Label* on_overflow);
+
+  // Adds constant to src and tags the result as a smi.
+  // Result must be a valid smi.
+  void Integer64AddToSmi(Register dst, Register src, int constant);
+
+  // Convert smi to 32-bit integer. I.e., not sign extended into
+  // high 32 bits of destination.
+  void SmiToInteger32(Register dst, Register src);
+
+  // Convert smi to 64-bit integer (sign extended if necessary).
+  void SmiToInteger64(Register dst, Register src);
+
+  // Multiply a positive smi's integer value by a power of two.
+  // Provides result as 64-bit integer value.
+  void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
+                                             Register src,
+                                             int power);
+
+  // Functions performing a check on a known or potential smi. Each returns
+  // a condition that is satisfied if the check is successful.
+
+  // Is the value a tagged smi.
+  Condition CheckSmi(Register src);
+
+  // Is the value not a tagged smi.
+  Condition CheckNotSmi(Register src);
+
+  // Is the value a positive tagged smi.
+  Condition CheckPositiveSmi(Register src);
+
+  // Is the value not a positive tagged smi.
+  Condition CheckNotPositiveSmi(Register src);
+
+  // Are both values tagged smis.
+  Condition CheckBothSmi(Register first, Register second);
+
+  // Is one of the values not a tagged smi.
+  Condition CheckNotBothSmi(Register first, Register second);
+
+  // Is the value the minimum smi value (since we are using
+  // two's complement numbers, negating the value is known to yield
+  // a non-smi value).
+  Condition CheckIsMinSmi(Register src);
+
+  // Check whether a tagged smi is equal to a constant.
+  Condition CheckSmiEqualsConstant(Register src, int constant);
+
+  // Check whether a tagged smi is greater than or equal to a constant.
+  Condition CheckSmiGreaterEqualsConstant(Register src, int constant);
+
+  // Checks whether a 32-bit integer value is valid for conversion
+  // to a smi.
+  Condition CheckInteger32ValidSmiValue(Register src);
+
+  // Test-and-jump functions. These typically combine a check function
+  // above with a conditional jump.
+
+  // Jump if the value cannot be represented by a smi.
+  void JumpIfNotValidSmiValue(Register src, Label* on_invalid);
+
+  // Jump to label if the value is a tagged smi.
+  void JumpIfSmi(Register src, Label* on_smi);
+
+  // Jump to label if the value is not a tagged smi.
+  void JumpIfNotSmi(Register src, Label* on_not_smi);
+
+  // Jump to label if the value is not a positive tagged smi.
+  void JumpIfNotPositiveSmi(Register src, Label* on_not_smi);
+
+  // Jump to label if the value is a tagged smi with value equal
+  // to the constant.
+  void JumpIfSmiEqualsConstant(Register src, int constant, Label* on_equals);
+
+  // Jump to label if the value is a tagged smi with value greater than or
+  // equal to the constant.
+  void JumpIfSmiGreaterEqualsConstant(Register src,
+                                      int constant,
+                                      Label* on_equals);
+
+  // Jump if either or both register are not smi values.
+  void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);
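+
+  // Hedged sketch of a typical guard before inlined smi arithmetic; the
+  // registers and the slow-case label are illustrative only:
+  //   __ JumpIfNotBothSmi(rax, rbx, &slow_case);
+  //   __ SmiAdd(rcx, rax, rbx, &slow_case);  // dst != src1, so rax survives.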
+
+  // Operations on tagged smi values.
+
+  // Smis represent a subset of integers. The subset is always equivalent to
+  // a two's complement interpretation of a fixed number of bits.
+
+  // Optimistically adds an integer constant to a supposed smi.
+  // If the src is not a smi, or the result is not a smi, jump to
+  // the label.
+  void SmiTryAddConstant(Register dst,
+                         Register src,
+                         int32_t constant,
+                         Label* on_not_smi_result);
+
+  // Add an integer constant to a tagged smi, giving a tagged smi as result,
+  // or jumping to a label if the result cannot be represented by a smi.
+  // If the label is NULL, no testing on the result is done.
+  void SmiAddConstant(Register dst,
+                      Register src,
+                      int32_t constant,
+                      Label* on_not_smi_result);
+
+  // Subtract an integer constant from a tagged smi, giving a tagged smi as
+  // result, or jumping to a label if the result cannot be represented by a smi.
+  // If the label is NULL, no testing on the result is done.
+  void SmiSubConstant(Register dst,
+                      Register src,
+                      int32_t constant,
+                      Label* on_not_smi_result);
+
+  // Negating a smi can give a negative zero or too large positive value.
+  void SmiNeg(Register dst,
+              Register src,
+              Label* on_not_smi_result);
+
+  // Adds smi values and returns the result as a smi.
+  // If dst is src1, then src1 will be destroyed, even if
+  // the operation is unsuccessful.
+  void SmiAdd(Register dst,
+              Register src1,
+              Register src2,
+              Label* on_not_smi_result);
+
+  // Subtracts smi values and returns the result as a smi.
+  // If dst is src1, then src1 will be destroyed, even if
+  // the operation is unsuccessful.
+  void SmiSub(Register dst,
+              Register src1,
+              Register src2,
+              Label* on_not_smi_result);
+
+  // Multiplies smi values and returns the result as a smi,
+  // if possible.
+  // If dst is src1, then src1 will be destroyed, even if
+  // the operation is unsuccessful.
+  void SmiMul(Register dst,
+              Register src1,
+              Register src2,
+              Label* on_not_smi_result);
+
+  // Divides one smi by another and returns the quotient.
+  // Clobbers rax and rdx registers.
+  void SmiDiv(Register dst,
+              Register src1,
+              Register src2,
+              Label* on_not_smi_result);
+
+  // Divides one smi by another and returns the remainder.
+  // Clobbers rax and rdx registers.
+  void SmiMod(Register dst,
+              Register src1,
+              Register src2,
+              Label* on_not_smi_result);
+
+  // Bitwise operations.
+  void SmiNot(Register dst, Register src);
+  void SmiAnd(Register dst, Register src1, Register src2);
+  void SmiOr(Register dst, Register src1, Register src2);
+  void SmiXor(Register dst, Register src1, Register src2);
+  void SmiAndConstant(Register dst, Register src1, int constant);
+  void SmiOrConstant(Register dst, Register src1, int constant);
+  void SmiXorConstant(Register dst, Register src1, int constant);
+
+  void SmiShiftLeftConstant(Register dst,
+                            Register src,
+                            int shift_value,
+                            Label* on_not_smi_result);
+  void SmiShiftLogicalRightConstant(Register dst,
+                                    Register src,
+                                    int shift_value,
+                                    Label* on_not_smi_result);
+  void SmiShiftArithmeticRightConstant(Register dst,
+                                       Register src,
+                                       int shift_value);
+
+  // Shifts a smi value to the left, and returns the result if that is a smi.
+  // Uses and clobbers rcx, so dst may not be rcx.
+  void SmiShiftLeft(Register dst,
+                    Register src1,
+                    Register src2,
+                    Label* on_not_smi_result);
+  // Shifts a smi value to the right, shifting in zero bits at the top, and
+  // returns the unsigned interpretation of the result if that is a smi.
+  // Uses and clobbers rcx, so dst may not be rcx.
+  void SmiShiftLogicalRight(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result);
+  // Shifts a smi value to the right, sign extending the top, and
+  // returns the signed interpretation of the result. That will always
+  // be a valid smi value, since it's numerically smaller than the
+  // original.
+  // Uses and clobbers rcx, so dst may not be rcx.
+  void SmiShiftArithmeticRight(Register dst,
+                               Register src1,
+                               Register src2);
+
+  // Specialized operations
+
+  // Select the non-smi register of two registers where exactly one is a
+  // smi. If neither is a smi, jump to the failure label.
+  void SelectNonSmi(Register dst,
+                    Register src1,
+                    Register src2,
+                    Label* on_not_smis);
+
+  // Converts, if necessary, a smi to a combination of number and
+  // multiplier to be used as a scaled index.
+  // The src register contains a *positive* smi value. The shift is the
+  // power of two to multiply the index value by (e.g.
+  // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
+  // The returned index register may be either src or dst, depending
+  // on what is most efficient. If src and dst are different registers,
+  // src is always unchanged.
+  SmiIndex SmiToIndex(Register dst, Register src, int shift);
+
+  // Converts a positive smi to a negative index.
+  SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
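+
+  // Hedged sketch of indexing a FixedArray element by a smi index; registers
+  // are illustrative and the SmiIndex members (reg, scale) are assumed:
+  //   SmiIndex index = masm->SmiToIndex(rbx, rcx, kPointerSizeLog2);
+  //   __ movq(rax, FieldOperand(rdx, index.reg, index.scale,
+  //                             FixedArray::kHeaderSize));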
+
+  // ---------------------------------------------------------------------------
+  // Macro instructions
+
+  // Expression support
+  void Set(Register dst, int64_t x);
+  void Set(const Operand& dst, int64_t x);
+
+  // Handle support
+  bool IsUnsafeSmi(Smi* value);
+  bool IsUnsafeSmi(Handle<Object> value) {
+    return IsUnsafeSmi(Smi::cast(*value));
+  }
+
+  void LoadUnsafeSmi(Register dst, Smi* source);
+  void LoadUnsafeSmi(Register dst, Handle<Object> source) {
+    LoadUnsafeSmi(dst, Smi::cast(*source));
+  }
+
+  void Move(Register dst, Handle<Object> source);
+  void Move(const Operand& dst, Handle<Object> source);
+  void Cmp(Register dst, Handle<Object> source);
+  void Cmp(const Operand& dst, Handle<Object> source);
+  void Push(Handle<Object> source);
+  void Push(Smi* smi);
+
+  // Control Flow
+  void Jump(Address destination, RelocInfo::Mode rmode);
+  void Jump(ExternalReference ext);
+  void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
+
+  void Call(Address destination, RelocInfo::Mode rmode);
+  void Call(ExternalReference ext);
+  void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
+
+  // Compare object type for heap object.
+  // Always use unsigned comparisons: above and below, not less and greater.
+  // Incoming register is heap_object and outgoing register is map.
+  // They may be the same register, and may be kScratchRegister.
+  void CmpObjectType(Register heap_object, InstanceType type, Register map);
+
+  // Compare instance type for map.
+  // Always use unsigned comparisons: above and below, not less and greater.
+  void CmpInstanceType(Register map, InstanceType type);
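+
+  // Hedged sketch of a heap-object type check built from the helpers above;
+  // registers and labels are illustrative only:
+  //   __ JumpIfSmi(rax, &not_string);
+  //   __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rbx);
+  //   __ j(above_equal, &not_string);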
+
+  // FCmp is similar to integer cmp, but requires unsigned
+  // jcc instructions (je, ja, jae, jb, jbe, and jz).
+  void FCmp();
+
+  // ---------------------------------------------------------------------------
+  // Exception handling
+
+  // Push a new try handler and link into try handler chain.  The return
+  // address must be pushed before calling this helper.
+  void PushTryHandler(CodeLocation try_location, HandlerType type);
+
+
+  // ---------------------------------------------------------------------------
+  // Inline caching support
+
+  // Generates code that verifies that the maps of objects in the
+  // prototype chain of object haven't changed since the code was
+  // generated and branches to the miss label if any map has. If
+  // necessary the function also generates code for a security check
+  // in the case of global object holders. The scratch and holder
+  // registers are always clobbered, but the object register is only
+  // clobbered if it is the same as the holder register. The function
+  // returns a register containing the holder - either object_reg or
+  // holder_reg.
+  Register CheckMaps(JSObject* object, Register object_reg,
+                     JSObject* holder, Register holder_reg,
+                     Register scratch, Label* miss);
+
+  // Generate code for checking access rights - used for security checks
+  // on access to global objects across environments. The holder register
+  // is left untouched, but the scratch register and kScratchRegister,
+  // which must be different, are clobbered.
+  void CheckAccessGlobalProxy(Register holder_reg,
+                              Register scratch,
+                              Label* miss);
+
+
+  // ---------------------------------------------------------------------------
+  // Allocation support
+
+  // Allocate an object in new space. If the new space is exhausted control
+  // continues at the gc_required label. The allocated object is returned in
+  // result and end of the new object is returned in result_end. The register
+  // scratch can be passed as no_reg in which case an additional object
+  // reference will be added to the reloc info. The returned pointers in result
+  // and result_end have not yet been tagged as heap objects. If
+  // result_contains_top_on_entry is true the content of result is known to be
+  // the allocation top on entry (could be result_end from a previous call to
+  // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
+  // should be no_reg as it is never used.
+  void AllocateInNewSpace(int object_size,
+                          Register result,
+                          Register result_end,
+                          Register scratch,
+                          Label* gc_required,
+                          AllocationFlags flags);
+
+  void AllocateInNewSpace(int header_size,
+                          ScaleFactor element_size,
+                          Register element_count,
+                          Register result,
+                          Register result_end,
+                          Register scratch,
+                          Label* gc_required,
+                          AllocationFlags flags);
+
+  void AllocateInNewSpace(Register object_size,
+                          Register result,
+                          Register result_end,
+                          Register scratch,
+                          Label* gc_required,
+                          AllocationFlags flags);
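+
+  // Hedged sketch of a fixed-size allocation; size, registers, label and
+  // flags are illustrative, and the result is tagged by hand because the
+  // returned pointer is untagged (see comment above):
+  //   __ AllocateInNewSpace(HeapNumber::kSize, rax, rbx, no_reg,
+  //                         &gc_required, NO_ALLOCATION_FLAGS);
+  //   __ addq(rax, Immediate(kHeapObjectTag));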
+
+  // Undo allocation in new space. The object passed and objects allocated after
+  // it will no longer be allocated. Make sure that no pointers are left to the
+  // object(s) no longer allocated as they would be invalid when allocation is
+  // un-done.
+  void UndoAllocationInNewSpace(Register object);
+
+  // ---------------------------------------------------------------------------
+  // Support functions.
+
+  // Check if result is zero and op is negative.
+  void NegativeZeroTest(Register result, Register op, Label* then_label);
+
+  // Check if result is zero and op is negative in code using jump targets.
+  void NegativeZeroTest(CodeGenerator* cgen,
+                        Register result,
+                        Register op,
+                        JumpTarget* then_target);
+
+  // Check if result is zero and any of op1 and op2 are negative.
+  // Register scratch is destroyed, and it must be different from op2.
+  void NegativeZeroTest(Register result, Register op1, Register op2,
+                        Register scratch, Label* then_label);
+
+  // Try to get the function prototype of a function and put the value in
+  // the result register. Checks that the function really is a
+  // function and jumps to the miss label if the fast checks fail. The
+  // function register will be untouched; the other register may be
+  // clobbered.
+  void TryGetFunctionPrototype(Register function,
+                               Register result,
+                               Label* miss);
+
+  // Generates code for reporting that an illegal operation has
+  // occurred.
+  void IllegalOperation(int num_arguments);
+
+  // ---------------------------------------------------------------------------
+  // Runtime calls
+
+  // Call a code stub.
+  void CallStub(CodeStub* stub);
+
+  // Return from a code stub after popping its arguments.
+  void StubReturn(int argc);
+
+  // Call a runtime routine.
+  // Eventually this should be used for all C calls.
+  void CallRuntime(Runtime::Function* f, int num_arguments);
+
+  // Convenience function: Same as above, but takes the fid instead.
+  void CallRuntime(Runtime::FunctionId id, int num_arguments);
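+
+  // Hedged usage sketch; the runtime id and argument count are illustrative
+  // and must match the arity declared for the function in runtime.h:
+  //   __ CallRuntime(Runtime::kAbort, 2);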
+
+  // Tail call of a runtime routine (jump).
+  // Like JumpToRuntime, but also takes care of passing the number
+  // of arguments.
+  void TailCallRuntime(const ExternalReference& ext,
+                       int num_arguments,
+                       int result_size);
+
+  // Jump to a runtime routine.
+  void JumpToRuntime(const ExternalReference& ext, int result_size);
+
+
+  // ---------------------------------------------------------------------------
+  // Utilities
+
+  void Ret();
+
+  struct Unresolved {
+    int pc;
+    uint32_t flags;  // see Bootstrapper::FixupFlags decoders/encoders.
+    const char* name;
+  };
+  List<Unresolved>* unresolved() { return &unresolved_; }
+
+  Handle<Object> CodeObject() { return code_object_; }
+
+
+  // ---------------------------------------------------------------------------
+  // StatsCounter support
+
+  void SetCounter(StatsCounter* counter, int value);
+  void IncrementCounter(StatsCounter* counter, int value);
+  void DecrementCounter(StatsCounter* counter, int value);
+
+
+  // ---------------------------------------------------------------------------
+  // Debugging
+
+  // Calls Abort(msg) if the condition cc is not satisfied.
+  // Use --debug_code to enable.
+  void Assert(Condition cc, const char* msg);
+
+  // Like Assert(), but always enabled.
+  void Check(Condition cc, const char* msg);
+
+  // Print a message to stdout and abort execution.
+  void Abort(const char* msg);
+
+  // Verify restrictions about code generated in stubs.
+  void set_generating_stub(bool value) { generating_stub_ = value; }
+  bool generating_stub() { return generating_stub_; }
+  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
+  bool allow_stub_calls() { return allow_stub_calls_; }
+
+ private:
+  List<Unresolved> unresolved_;
+  bool generating_stub_;
+  bool allow_stub_calls_;
+  Handle<Object> code_object_;  // This handle will be patched with the code
+                                // object on installation.
+
+  // Helper functions for generating invokes.
+  void InvokePrologue(const ParameterCount& expected,
+                      const ParameterCount& actual,
+                      Handle<Code> code_constant,
+                      Register code_register,
+                      Label* done,
+                      InvokeFlag flag);
+
+  // Prepares for a call or jump to a builtin by doing two things:
+  // 1. Emits code that fetches the builtin's function object from the context
+  //    at runtime, and puts it in the register rdi.
+  // 2. Fetches the builtin's code object, and returns it in a handle, at
+  //    compile time, so that later code can emit instructions to jump or call
+  //    the builtin directly.  If the code object has not yet been created, it
+  //    returns the builtin code object for IllegalFunction, and sets the
+  //    output parameter "resolved" to false.  Code that uses the return value
+  //    should then add the address and the builtin name to the list of fixups
+  //    called unresolved_, which is fixed up by the bootstrapper.
+  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
+
+  // Activation support.
+  void EnterFrame(StackFrame::Type type);
+  void LeaveFrame(StackFrame::Type type);
+
+  // Allocation support helpers.
+  void LoadAllocationTopHelper(Register result,
+                               Register result_end,
+                               Register scratch,
+                               AllocationFlags flags);
+  void UpdateAllocationTopHelper(Register result_end, Register scratch);
+};
+
+
+// The code patcher is used to patch (typically) small parts of code, e.g. for
+// debugging and other types of instrumentation. When using the code patcher,
+// the exact number of bytes specified must be emitted. It is not legal to emit
+// relocation information. If any of these constraints are violated, it causes
+// an assertion failure.
+class CodePatcher {
+ public:
+  CodePatcher(byte* address, int size);
+  virtual ~CodePatcher();
+
+  // Macro assembler to emit code.
+  MacroAssembler* masm() { return &masm_; }
+
+ private:
+  byte* address_;  // The address of the code being patched.
+  int size_;  // Number of bytes of the expected patch size.
+  MacroAssembler masm_;  // Macro assembler used to generate the code.
+};
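+
+// Hedged usage sketch of CodePatcher: emit exactly the promised number of
+// bytes at the given address (here a single one-byte int3 breakpoint):
+//   CodePatcher patcher(address, 1);
+//   patcher.masm()->int3();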
+
+
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+// Generate an Operand for loading a field from an object.
+static inline Operand FieldOperand(Register object, int offset) {
+  return Operand(object, offset - kHeapObjectTag);
+}
+
+
+// Generate an Operand for loading an indexed field from an object.
+static inline Operand FieldOperand(Register object,
+                                   Register index,
+                                   ScaleFactor scale,
+                                   int offset) {
+  return Operand(object, index, scale, offset - kHeapObjectTag);
+}
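+
+// Illustrative (hypothetical registers) use of the FieldOperand helpers, e.g.
+// loading an object's map:
+//   __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));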
+
+
+#ifdef GENERATED_CODE_COVERAGE
+extern void LogGeneratedCodeCoverage(const char* file_line);
+#define CODE_COVERAGE_STRINGIFY(x) #x
+#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
+#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
+#define ACCESS_MASM(masm) {                                               \
+    byte* x64_coverage_function =                                         \
+        reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
+    masm->pushfd();                                                       \
+    masm->pushad();                                                       \
+    masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__)));         \
+    masm->call(x64_coverage_function, RelocInfo::RUNTIME_ENTRY);          \
+    masm->pop(rax);                                                       \
+    masm->popad();                                                        \
+    masm->popfd();                                                        \
+  }                                                                       \
+  masm->
+#else
+#define ACCESS_MASM(masm) masm->
+#endif
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_X64_MACRO_ASSEMBLER_X64_H_
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
new file mode 100644
index 0000000..5d17a2d
--- /dev/null
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -0,0 +1,1299 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+#include "serialize.h"
+#include "unicode.h"
+#include "log.h"
+#include "ast.h"
+#include "regexp-stack.h"
+#include "macro-assembler.h"
+#include "regexp-macro-assembler.h"
+#include "x64/macro-assembler-x64.h"
+#include "x64/regexp-macro-assembler-x64.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef V8_NATIVE_REGEXP
+
+/*
+ * This assembler uses the following register assignment convention
+ * - rdx : currently loaded character(s) as ASCII or UC16. Must be loaded using
+ *         LoadCurrentCharacter before using any of the dispatch methods.
+ * - rdi : current position in input, as negative offset from end of string.
+ *         Please notice that this is the byte offset, not the character
+ *         offset! It is always a 32-bit signed (negative) offset, but must
+ *         be maintained sign-extended to 64 bits, since it is used as an
+ *         index.
+ * - rsi : end of input (points to byte after last character in input),
+ *         so that rsi+rdi points to the current character.
+ * - rbp : frame pointer. Used to access arguments, local variables and
+ *         RegExp registers.
+ * - rsp : points to tip of C stack.
+ * - rcx : points to tip of backtrack stack. The backtrack stack contains
+ *         only 32-bit values. Most are offsets from some base (e.g., character
+ *         positions from end of string or code location from Code* pointer).
+ * - r8  : code object pointer. Used to convert between absolute and
+ *         code-object-relative addresses.
+ *
+ * The registers rax, rbx, rcx, r9 and r11 are free to use for computations.
+ * If changed to use r12+, they should be saved as callee-save registers.
+ *
+ * Each call to a C++ method should retain these registers.
+ *
+ * The stack will have the following content, in some order, indexable from the
+ * frame pointer (see, e.g., kStackHighEnd):
+ *    - stack_area_base       (High end of the memory area to use as
+ *                             backtracking stack)
+ *    - at_start              (if 1, start at start of string, if 0, don't)
+ *    - int* capture_array    (int[num_saved_registers_], for output).
+ *    - end of input          (Address of end of string)
+ *    - start of input        (Address of first character in string)
+ *    - String** input_string (location of a handle containing the string)
+ *    - return address
+ *    - backup of callee save registers (rbx, possibly rsi and rdi).
+ *    - Offset of location before start of input (effectively character
+ *      position -1). Used to initialize capture registers to a non-position.
+ *    - register 0  rbp[-n]   (Only positions must be stored in the first
+ *    - register 1  rbp[-n-8]  num_saved_registers_ registers)
+ *    - ...
+ *
+ * The first num_saved_registers_ registers are initialized to point to
+ * "character -1" in the string (i.e., char_size() bytes before the first
+ * character of the string). The remaining registers start out uninitialized.
+ *
+ * The first seven values must be provided by the calling code by
+ * calling the code's entry address cast to a function pointer with the
+ * following signature:
+ * int (*match)(String* input_string,
+ *              Address start,
+ *              Address end,
+ *              int* capture_output_array,
+ *              bool at_start,
+ *              byte* stack_area_base)
+ */
+
+#define __ ACCESS_MASM(masm_)
+
+RegExpMacroAssemblerX64::RegExpMacroAssemblerX64(
+    Mode mode,
+    int registers_to_save)
+    : masm_(new MacroAssembler(NULL, kRegExpCodeSize)),
+      code_relative_fixup_positions_(4),
+      mode_(mode),
+      num_registers_(registers_to_save),
+      num_saved_registers_(registers_to_save),
+      entry_label_(),
+      start_label_(),
+      success_label_(),
+      backtrack_label_(),
+      exit_label_() {
+  ASSERT_EQ(0, registers_to_save % 2);
+  __ jmp(&entry_label_);   // We'll write the entry code when we know more.
+  __ bind(&start_label_);  // And then continue from here.
+}
+
+
+RegExpMacroAssemblerX64::~RegExpMacroAssemblerX64() {
+  delete masm_;
+  // Unuse labels in case we throw away the assembler without calling GetCode.
+  entry_label_.Unuse();
+  start_label_.Unuse();
+  success_label_.Unuse();
+  backtrack_label_.Unuse();
+  exit_label_.Unuse();
+  check_preempt_label_.Unuse();
+  stack_overflow_label_.Unuse();
+}
+
+
+int RegExpMacroAssemblerX64::stack_limit_slack()  {
+  return RegExpStack::kStackLimitSlack;
+}
+
+
+void RegExpMacroAssemblerX64::AdvanceCurrentPosition(int by) {
+  if (by != 0) {
+    __ addq(rdi, Immediate(by * char_size()));
+  }
+}
+
+
+void RegExpMacroAssemblerX64::AdvanceRegister(int reg, int by) {
+  ASSERT(reg >= 0);
+  ASSERT(reg < num_registers_);
+  if (by != 0) {
+    __ addq(register_location(reg), Immediate(by));
+  }
+}
+
+
+void RegExpMacroAssemblerX64::Backtrack() {
+  CheckPreemption();
+  // Pop Code* offset from backtrack stack, add Code* and jump to location.
+  Pop(rbx);
+  __ addq(rbx, code_object_pointer());
+  __ jmp(rbx);
+}
+
+
+void RegExpMacroAssemblerX64::Bind(Label* label) {
+  __ bind(label);
+}
+
+
+void RegExpMacroAssemblerX64::CheckCharacter(uint32_t c, Label* on_equal) {
+  __ cmpl(current_character(), Immediate(c));
+  BranchOrBacktrack(equal, on_equal);
+}
+
+
+void RegExpMacroAssemblerX64::CheckCharacterGT(uc16 limit, Label* on_greater) {
+  __ cmpl(current_character(), Immediate(limit));
+  BranchOrBacktrack(greater, on_greater);
+}
+
+
+void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
+  Label not_at_start;
+  // Did we start the match at the start of the string at all?
+  __ cmpb(Operand(rbp, kAtStart), Immediate(0));
+  BranchOrBacktrack(equal, &not_at_start);
+  // If we did, are we still at the start of the input?
+  __ lea(rax, Operand(rsi, rdi, times_1, 0));
+  __ cmpq(rax, Operand(rbp, kInputStart));
+  BranchOrBacktrack(equal, on_at_start);
+  __ bind(&not_at_start);
+}
+
+
+void RegExpMacroAssemblerX64::CheckNotAtStart(Label* on_not_at_start) {
+  // Did we start the match at the start of the string at all?
+  __ cmpb(Operand(rbp, kAtStart), Immediate(0));
+  BranchOrBacktrack(equal, on_not_at_start);
+  // If we did, are we still at the start of the input?
+  __ lea(rax, Operand(rsi, rdi, times_1, 0));
+  __ cmpq(rax, Operand(rbp, kInputStart));
+  BranchOrBacktrack(not_equal, on_not_at_start);
+}
+
+
+void RegExpMacroAssemblerX64::CheckCharacterLT(uc16 limit, Label* on_less) {
+  __ cmpl(current_character(), Immediate(limit));
+  BranchOrBacktrack(less, on_less);
+}
+
+
+void RegExpMacroAssemblerX64::CheckCharacters(Vector<const uc16> str,
+                                              int cp_offset,
+                                              Label* on_failure,
+                                              bool check_end_of_string) {
+  int byte_length = str.length() * char_size();
+  int byte_offset = cp_offset * char_size();
+  if (check_end_of_string) {
+    // Check that there are at least str.length() characters left in the input.
+    __ cmpl(rdi, Immediate(-(byte_offset + byte_length)));
+    BranchOrBacktrack(greater, on_failure);
+  }
+
+  if (on_failure == NULL) {
+    // Instead of inlining a backtrack, (re)use the global backtrack target.
+    on_failure = &backtrack_label_;
+  }
+
+  // TODO(lrn): Test multiple characters at a time by loading 4 or 8 bytes
+  // at a time.
+  for (int i = 0; i < str.length(); i++) {
+    if (mode_ == ASCII) {
+      __ cmpb(Operand(rsi, rdi, times_1, byte_offset + i),
+              Immediate(static_cast<int8_t>(str[i])));
+    } else {
+      ASSERT(mode_ == UC16);
+      __ cmpw(Operand(rsi, rdi, times_1, byte_offset + i * sizeof(uc16)),
+              Immediate(str[i]));
+    }
+    BranchOrBacktrack(not_equal, on_failure);
+  }
+}
+
+
+void RegExpMacroAssemblerX64::CheckGreedyLoop(Label* on_equal) {
+  Label fallthrough;
+  __ cmpl(rdi, Operand(backtrack_stackpointer(), 0));
+  __ j(not_equal, &fallthrough);
+  Drop();
+  BranchOrBacktrack(no_condition, on_equal);
+  __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
+    int start_reg,
+    Label* on_no_match) {
+  Label fallthrough;
+  __ movq(rdx, register_location(start_reg));  // Offset of start of capture
+  __ movq(rbx, register_location(start_reg + 1));  // Offset of end of capture
+  __ subq(rbx, rdx);  // Length of capture.
+
+  // -----------------------
+  // rdx  = Start offset of capture.
+  // rbx = Length of capture
+
+  // If length is negative, this code will fail (it's a symptom of a partial
+  // or illegal capture where the start of the capture is after the end).
+  // This must not happen (no back-reference can reference a capture that wasn't
+  // closed before in the reg-exp, and we must not generate code that can cause
+  // this condition).
+
+  // If length is zero, either the capture is empty or it is nonparticipating.
+  // In either case succeed immediately.
+  __ j(equal, &fallthrough);
+
+  if (mode_ == ASCII) {
+    Label loop_increment;
+    if (on_no_match == NULL) {
+      on_no_match = &backtrack_label_;
+    }
+
+    __ lea(r9, Operand(rsi, rdx, times_1, 0));
+    __ lea(r11, Operand(rsi, rdi, times_1, 0));
+    __ addq(rbx, r9);  // End of capture
+    // ---------------------
+    // r11 - current input character address
+    // r9 - current capture character address
+    // rbx - end of capture
+
+    Label loop;
+    __ bind(&loop);
+    __ movzxbl(rdx, Operand(r9, 0));
+    __ movzxbl(rax, Operand(r11, 0));
+    // al - input character
+    // dl - capture character
+    __ cmpb(rax, rdx);
+    __ j(equal, &loop_increment);
+
+    // Mismatch, try case-insensitive match (converting letters to lower-case).
+    // I.e., if or-ing with 0x20 makes values equal and in range 'a'-'z', it's
+    // a match.
+    __ or_(rax, Immediate(0x20));  // Convert match character to lower-case.
+    __ or_(rdx, Immediate(0x20));  // Convert capture character to lower-case.
+    __ cmpb(rax, rdx);
+    __ j(not_equal, on_no_match);  // Definitely not equal.
+    __ subb(rax, Immediate('a'));
+    __ cmpb(rax, Immediate('z' - 'a'));
+    __ j(above, on_no_match);  // Weren't letters anyway.
+
+    __ bind(&loop_increment);
+    // Increment pointers into match and capture strings.
+    __ addq(r11, Immediate(1));
+    __ addq(r9, Immediate(1));
+    // Compare to end of capture, and loop if not done.
+    __ cmpq(r9, rbx);
+    __ j(below, &loop);
+
+    // Compute new value of character position after the matched part.
+    __ movq(rdi, r11);
+    __ subq(rdi, rsi);
+  } else {
+    ASSERT(mode_ == UC16);
+    // Save important/volatile registers before calling C function.
+#ifndef _WIN64
+    // rsi and rdi are callee-save on Win64, but not in the AMD64 ABI.
+    __ push(rsi);
+    __ push(rdi);
+#endif
+    __ push(backtrack_stackpointer());
+
+    int num_arguments = 3;
+    FrameAlign(num_arguments);
+
+    // Put arguments into parameter registers. Parameters are
+    //   Address byte_offset1 - Address captured substring's start.
+    //   Address byte_offset2 - Address of current character position.
+    //   size_t byte_length - length of capture in bytes(!)
+#ifdef _WIN64
+    // Compute and set byte_offset1 (start of capture).
+    __ lea(rcx, Operand(rsi, rdx, times_1, 0));
+    // Set byte_offset2.
+    __ lea(rdx, Operand(rsi, rdi, times_1, 0));
+    // Set byte_length.
+    __ movq(r8, rbx);
+#else  // AMD64 calling convention
+    // Compute byte_offset2 (current position = rsi+rdi).
+    __ lea(rax, Operand(rsi, rdi, times_1, 0));
+    // Compute and set byte_offset1 (start of capture).
+    __ lea(rdi, Operand(rsi, rdx, times_1, 0));
+    // Set byte_offset2.
+    __ movq(rsi, rax);
+    // Set byte_length.
+    __ movq(rdx, rbx);
+#endif
+    ExternalReference compare =
+        ExternalReference::re_case_insensitive_compare_uc16();
+    CallCFunction(compare, num_arguments);
+
+    // Restore original values before reacting on result value.
+    __ Move(code_object_pointer(), masm_->CodeObject());
+    __ pop(backtrack_stackpointer());
+#ifndef _WIN64
+    __ pop(rdi);
+    __ pop(rsi);
+#endif
+
+    // Check if function returned non-zero for success or zero for failure.
+    __ testq(rax, rax);
+    BranchOrBacktrack(zero, on_no_match);
+    // On success, increment position by length of capture.
+    // Requires that rbx is callee save (true for both Win64 and AMD64 ABIs).
+    __ addq(rdi, rbx);
+  }
+  __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerX64::CheckNotBackReference(
+    int start_reg,
+    Label* on_no_match) {
+  Label fallthrough;
+
+  // Find length of back-referenced capture.
+  __ movq(rdx, register_location(start_reg));
+  __ movq(rax, register_location(start_reg + 1));
+  __ subq(rax, rdx);  // Length to check.
+
+  // Fail on partial or illegal capture (start of capture after end of capture).
+  // This must not happen (no back-reference can reference a capture that wasn't
+  // closed before in the reg-exp).
+  __ Check(greater_equal, "Invalid capture referenced");
+
+  // Succeed on empty capture (including non-participating capture)
+  __ j(equal, &fallthrough);
+
+  // -----------------------
+  // rdx - Start of capture
+  // rax - length of capture
+
+  // Check that there are sufficient characters left in the input.
+  __ movl(rbx, rdi);
+  __ addl(rbx, rax);
+  BranchOrBacktrack(greater, on_no_match);
+
+  // Compute pointers to match string and capture string
+  __ lea(rbx, Operand(rsi, rdi, times_1, 0));  // Start of match.
+  __ addq(rdx, rsi);  // Start of capture.
+  __ lea(r9, Operand(rdx, rax, times_1, 0));  // End of capture
+
+  // -----------------------
+  // rdx - current capture character address.
+  // rbx - current input character address.
+  // r9 - end of capture (rdx plus capture length).
+
+  Label loop;
+  __ bind(&loop);
+  if (mode_ == ASCII) {
+    __ movzxbl(rax, Operand(rdx, 0));
+    __ cmpb(rax, Operand(rbx, 0));
+  } else {
+    ASSERT(mode_ == UC16);
+    __ movzxwl(rax, Operand(rdx, 0));
+    __ cmpw(rax, Operand(rbx, 0));
+  }
+  BranchOrBacktrack(not_equal, on_no_match);
+  // Increment pointers into capture and match string.
+  __ addq(rbx, Immediate(char_size()));
+  __ addq(rdx, Immediate(char_size()));
+  // Check if we have reached end of match area.
+  __ cmpq(rdx, r9);
+  __ j(below, &loop);
+
+  // Success.
+  // Set current character position to position after match.
+  __ movq(rdi, rbx);
+  __ subq(rdi, rsi);
+
+  __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerX64::CheckNotRegistersEqual(int reg1,
+                                                     int reg2,
+                                                     Label* on_not_equal) {
+  __ movq(rax, register_location(reg1));
+  __ cmpq(rax, register_location(reg2));
+  BranchOrBacktrack(not_equal, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerX64::CheckNotCharacter(uint32_t c,
+                                                Label* on_not_equal) {
+  __ cmpl(current_character(), Immediate(c));
+  BranchOrBacktrack(not_equal, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerX64::CheckCharacterAfterAnd(uint32_t c,
+                                                     uint32_t mask,
+                                                     Label* on_equal) {
+  __ movl(rax, current_character());
+  __ and_(rax, Immediate(mask));
+  __ cmpl(rax, Immediate(c));
+  BranchOrBacktrack(equal, on_equal);
+}
+
+
+void RegExpMacroAssemblerX64::CheckNotCharacterAfterAnd(uint32_t c,
+                                                        uint32_t mask,
+                                                        Label* on_not_equal) {
+  __ movl(rax, current_character());
+  __ and_(rax, Immediate(mask));
+  __ cmpl(rax, Immediate(c));
+  BranchOrBacktrack(not_equal, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerX64::CheckNotCharacterAfterMinusAnd(
+    uc16 c,
+    uc16 minus,
+    uc16 mask,
+    Label* on_not_equal) {
+  ASSERT(minus < String::kMaxUC16CharCode);
+  __ lea(rax, Operand(current_character(), -minus));
+  __ and_(rax, Immediate(mask));
+  __ cmpl(rax, Immediate(c));
+  BranchOrBacktrack(not_equal, on_not_equal);
+}
+
+
+bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
+                                                         int cp_offset,
+                                                         bool check_offset,
+                                                         Label* on_no_match) {
+  // Range checks (c in min..max) are generally implemented by an unsigned
+  // (c - min) <= (max - min) check
+  switch (type) {
+  case 's':
+    // Match space-characters
+    if (mode_ == ASCII) {
+      // ASCII space characters are '\t'..'\r' and ' '.
+      if (check_offset) {
+        LoadCurrentCharacter(cp_offset, on_no_match);
+      } else {
+        LoadCurrentCharacterUnchecked(cp_offset, 1);
+      }
+      Label success;
+      __ cmpl(current_character(), Immediate(' '));
+      __ j(equal, &success);
+      // Check range 0x09..0x0d
+      __ subl(current_character(), Immediate('\t'));
+      __ cmpl(current_character(), Immediate('\r' - '\t'));
+      BranchOrBacktrack(above, on_no_match);
+      __ bind(&success);
+      return true;
+    }
+    return false;
+  case 'S':
+    // Match non-space characters.
+    if (check_offset) {
+      LoadCurrentCharacter(cp_offset, on_no_match, 1);
+    } else {
+      LoadCurrentCharacterUnchecked(cp_offset, 1);
+    }
+    if (mode_ == ASCII) {
+      // ASCII space characters are '\t'..'\r' and ' '.
+      __ cmpl(current_character(), Immediate(' '));
+      BranchOrBacktrack(equal, on_no_match);
+      __ subl(current_character(), Immediate('\t'));
+      __ cmpl(current_character(), Immediate('\r' - '\t'));
+      BranchOrBacktrack(below_equal, on_no_match);
+      return true;
+    }
+    return false;
+  case 'd':
+    // Match ASCII digits ('0'..'9')
+    if (check_offset) {
+      LoadCurrentCharacter(cp_offset, on_no_match, 1);
+    } else {
+      LoadCurrentCharacterUnchecked(cp_offset, 1);
+    }
+    __ subl(current_character(), Immediate('0'));
+    __ cmpl(current_character(), Immediate('9' - '0'));
+    BranchOrBacktrack(above, on_no_match);
+    return true;
+  case 'D':
+  // Match anything but ASCII digits ('0'..'9')
+    if (check_offset) {
+      LoadCurrentCharacter(cp_offset, on_no_match, 1);
+    } else {
+      LoadCurrentCharacterUnchecked(cp_offset, 1);
+    }
+    __ subl(current_character(), Immediate('0'));
+    __ cmpl(current_character(), Immediate('9' - '0'));
+    BranchOrBacktrack(below_equal, on_no_match);
+    return true;
+  case '.': {
+    // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+    if (check_offset) {
+      LoadCurrentCharacter(cp_offset, on_no_match, 1);
+    } else {
+      LoadCurrentCharacterUnchecked(cp_offset, 1);
+    }
+    __ xor_(current_character(), Immediate(0x01));
+    // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
+    __ subl(current_character(), Immediate(0x0b));
+    __ cmpl(current_character(), Immediate(0x0c - 0x0b));
+    BranchOrBacktrack(below_equal, on_no_match);
+    if (mode_ == UC16) {
+      // Compare original value to 0x2028 and 0x2029, using the already
+      // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+      // 0x201d (0x2028 - 0x0b) or 0x201e.
+      __ subl(current_character(), Immediate(0x2028 - 0x0b));
+      __ cmpl(current_character(), Immediate(1));
+      BranchOrBacktrack(below_equal, on_no_match);
+    }
+    return true;
+  }
+  case '*':
+    // Match any character.
+    if (check_offset) {
+      CheckPosition(cp_offset, on_no_match);
+    }
+    return true;
+  // No custom implementation (yet): w, W, s(UC16), S(UC16).
+  default:
+    return false;
+  }
+}
+
+
+void RegExpMacroAssemblerX64::Fail() {
+  ASSERT(FAILURE == 0);  // Return value for failure is zero.
+  __ xor_(rax, rax);  // zero rax.
+  __ jmp(&exit_label_);
+}
+
+
+Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
+  // Finalize code - write the entry point code now that we know how many
+  // registers we need.
+
+  // Entry code:
+  __ bind(&entry_label_);
+  // Start new stack frame.
+  __ push(rbp);
+  __ movq(rbp, rsp);
+  // Save parameters and callee-save registers. Order here should correspond
+  //  to order of kBackup_rbx etc.
+#ifdef _WIN64
+  // MSVC passes arguments in rcx, rdx, r8, r9, with backing stack slots.
+  // Store register parameters in pre-allocated stack slots.
+  __ movq(Operand(rbp, kInputString), rcx);
+  __ movq(Operand(rbp, kStartIndex), rdx);  // Passed as int32 in edx.
+  __ movq(Operand(rbp, kInputStart), r8);
+  __ movq(Operand(rbp, kInputEnd), r9);
+  // Callee-save on Win64.
+  __ push(rsi);
+  __ push(rdi);
+  __ push(rbx);
+#else
+  // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9 (and then on stack).
+  // Push register parameters on stack for reference.
+  ASSERT_EQ(kInputString, -1 * kPointerSize);
+  ASSERT_EQ(kStartIndex, -2 * kPointerSize);
+  ASSERT_EQ(kInputStart, -3 * kPointerSize);
+  ASSERT_EQ(kInputEnd, -4 * kPointerSize);
+  ASSERT_EQ(kRegisterOutput, -5 * kPointerSize);
+  ASSERT_EQ(kAtStart, -6 * kPointerSize);
+  __ push(rdi);
+  __ push(rsi);
+  __ push(rdx);
+  __ push(rcx);
+  __ push(r8);
+  __ push(r9);
+
+  __ push(rbx);  // Callee-save
+#endif
+  __ push(Immediate(0));  // Make room for "input start - 1" constant.
+
+  // Check if we have space on the stack for registers.
+  Label stack_limit_hit;
+  Label stack_ok;
+
+  ExternalReference stack_guard_limit =
+      ExternalReference::address_of_stack_guard_limit();
+  __ movq(rcx, rsp);
+  __ movq(kScratchRegister, stack_guard_limit);
+  __ subq(rcx, Operand(kScratchRegister, 0));
+  // Handle it if the stack pointer is already below the stack limit.
+  __ j(below_equal, &stack_limit_hit);
+  // Check if there is room for the variable number of registers above
+  // the stack limit.
+  __ cmpq(rcx, Immediate(num_registers_ * kPointerSize));
+  __ j(above_equal, &stack_ok);
+  // Exit with OutOfMemory exception. There is not enough space on the stack
+  // for our working registers.
+  __ movq(rax, Immediate(EXCEPTION));
+  __ jmp(&exit_label_);
+
+  __ bind(&stack_limit_hit);
+  __ Move(code_object_pointer(), masm_->CodeObject());
+  CallCheckStackGuardState();  // Preserves no registers beside rbp and rsp.
+  __ testq(rax, rax);
+  // If returned value is non-zero, we exit with the returned value as result.
+  __ j(not_zero, &exit_label_);
+
+  __ bind(&stack_ok);
+
+  // Allocate space on stack for registers.
+  __ subq(rsp, Immediate(num_registers_ * kPointerSize));
+  // Load string length.
+  __ movq(rsi, Operand(rbp, kInputEnd));
+  // Load input position.
+  __ movq(rdi, Operand(rbp, kInputStart));
+  // Set up rdi to be negative offset from string end.
+  __ subq(rdi, rsi);
+  // Set rax to address of char before start of input
+  // (effectively string position -1).
+  __ lea(rax, Operand(rdi, -char_size()));
+  // Store this value in a local variable, for use when clearing
+  // position registers.
+  __ movq(Operand(rbp, kInputStartMinusOne), rax);
+  if (num_saved_registers_ > 0) {
+    // Fill saved registers with initial value = start offset - 1
+    // Fill in stack push order, to avoid accessing across an unwritten
+    // page (a problem on Windows).
+    __ movq(rcx, Immediate(kRegisterZero));
+    Label init_loop;
+    __ bind(&init_loop);
+    __ movq(Operand(rbp, rcx, times_1, 0), rax);
+    __ subq(rcx, Immediate(kPointerSize));
+    __ cmpq(rcx,
+            Immediate(kRegisterZero - num_saved_registers_ * kPointerSize));
+    __ j(greater, &init_loop);
+  }
+  // Ensure that we have written to each stack page, in order. Skipping a page
+  // on Windows can cause segmentation faults. Assuming page size is 4k.
+  const int kPageSize = 4096;
+  const int kRegistersPerPage = kPageSize / kPointerSize;
+  for (int i = num_saved_registers_ + kRegistersPerPage - 1;
+      i < num_registers_;
+      i += kRegistersPerPage) {
+    __ movq(register_location(i), rax);  // One write every page.
+  }
+
+  // Initialize backtrack stack pointer.
+  __ movq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
+  // Initialize code object pointer.
+  __ Move(code_object_pointer(), masm_->CodeObject());
+  // Load previous char as initial value of current-character.
+  Label at_start;
+  __ cmpb(Operand(rbp, kAtStart), Immediate(0));
+  __ j(not_equal, &at_start);
+  LoadCurrentCharacterUnchecked(-1, 1);  // Load previous char.
+  __ jmp(&start_label_);
+  __ bind(&at_start);
+  __ movq(current_character(), Immediate('\n'));
+  __ jmp(&start_label_);
+
+
+  // Exit code:
+  if (success_label_.is_linked()) {
+    // Save captures when successful.
+    __ bind(&success_label_);
+    if (num_saved_registers_ > 0) {
+      // Copy captures to output.
+      __ movq(rbx, Operand(rbp, kRegisterOutput));
+      __ movq(rcx, Operand(rbp, kInputEnd));
+      __ subq(rcx, Operand(rbp, kInputStart));
+      for (int i = 0; i < num_saved_registers_; i++) {
+        __ movq(rax, register_location(i));
+        __ addq(rax, rcx);  // Convert to index from start, not end.
+        if (mode_ == UC16) {
+          __ sar(rax, Immediate(1));  // Convert byte index to character index.
+        }
+        __ movl(Operand(rbx, i * kIntSize), rax);
+      }
+    }
+    __ movq(rax, Immediate(SUCCESS));
+  }
+
+  // Exit and return rax
+  __ bind(&exit_label_);
+
+#ifdef _WIN64
+  // Restore callee save registers.
+  __ lea(rsp, Operand(rbp, kLastCalleeSaveRegister));
+  __ pop(rbx);
+  __ pop(rdi);
+  __ pop(rsi);
+  // Stack now at rbp.
+#else
+  // Restore callee save register.
+  __ movq(rbx, Operand(rbp, kBackup_rbx));
+  // Skip rsp to rbp.
+  __ movq(rsp, rbp);
+#endif
+  // Exit function frame, restore previous one.
+  __ pop(rbp);
+  __ ret(0);
+
+  // Backtrack code (branch target for conditional backtracks).
+  if (backtrack_label_.is_linked()) {
+    __ bind(&backtrack_label_);
+    Backtrack();
+  }
+
+  Label exit_with_exception;
+
+  // Preempt-code
+  if (check_preempt_label_.is_linked()) {
+    SafeCallTarget(&check_preempt_label_);
+
+    __ push(backtrack_stackpointer());
+    __ push(rdi);
+
+    CallCheckStackGuardState();
+    __ testq(rax, rax);
+    // If returning non-zero, we should end execution with the given
+    // result as return value.
+    __ j(not_zero, &exit_label_);
+
+    // Restore registers.
+    __ Move(code_object_pointer(), masm_->CodeObject());
+    __ pop(rdi);
+    __ pop(backtrack_stackpointer());
+    // String might have moved: Reload rsi from frame.
+    __ movq(rsi, Operand(rbp, kInputEnd));
+    SafeReturn();
+  }
+
+  // Backtrack stack overflow code.
+  if (stack_overflow_label_.is_linked()) {
+    SafeCallTarget(&stack_overflow_label_);
+    // Reached if the backtrack-stack limit has been hit.
+
+    Label grow_failed;
+    // Save registers before calling C function
+#ifndef _WIN64
+    // Callee-save in Microsoft 64-bit ABI, but not in AMD64 ABI.
+    __ push(rsi);
+    __ push(rdi);
+#endif
+
+    // Call GrowStack(backtrack_stackpointer())
+    int num_arguments = 2;
+    FrameAlign(num_arguments);
+#ifdef _WIN64
+    // Microsoft passes parameters in rcx, rdx.
+    // First argument, backtrack stackpointer, is already in rcx.
+    __ lea(rdx, Operand(rbp, kStackHighEnd));  // Second argument
+#else
+    // AMD64 ABI passes parameters in rdi, rsi.
+    __ movq(rdi, backtrack_stackpointer());   // First argument.
+    __ lea(rsi, Operand(rbp, kStackHighEnd));  // Second argument.
+#endif
+    ExternalReference grow_stack = ExternalReference::re_grow_stack();
+    CallCFunction(grow_stack, num_arguments);
+    // If the returned value is NULL, we have failed to grow the stack, and
+    // must exit with a stack-overflow exception.
+    __ testq(rax, rax);
+    __ j(equal, &exit_with_exception);
+    // Otherwise use return value as new stack pointer.
+    __ movq(backtrack_stackpointer(), rax);
+    // Restore saved registers and continue.
+    __ Move(code_object_pointer(), masm_->CodeObject());
+#ifndef _WIN64
+    __ pop(rdi);
+    __ pop(rsi);
+#endif
+    SafeReturn();
+  }
+
+  if (exit_with_exception.is_linked()) {
+    // Reached if any of the code above needed to exit with an exception.
+    __ bind(&exit_with_exception);
+    // Exit with Result EXCEPTION(-1) to signal thrown exception.
+    __ movq(rax, Immediate(EXCEPTION));
+    __ jmp(&exit_label_);
+  }
+
+  FixupCodeRelativePositions();
+
+  CodeDesc code_desc;
+  masm_->GetCode(&code_desc);
+  Handle<Code> code = Factory::NewCode(code_desc,
+                                       NULL,
+                                       Code::ComputeFlags(Code::REGEXP),
+                                       masm_->CodeObject());
+  LOG(RegExpCodeCreateEvent(*code, *source));
+  return Handle<Object>::cast(code);
+}
+
+
+void RegExpMacroAssemblerX64::GoTo(Label* to) {
+  BranchOrBacktrack(no_condition, to);
+}
+
+
+void RegExpMacroAssemblerX64::IfRegisterGE(int reg,
+                                           int comparand,
+                                           Label* if_ge) {
+  __ cmpq(register_location(reg), Immediate(comparand));
+  BranchOrBacktrack(greater_equal, if_ge);
+}
+
+
+void RegExpMacroAssemblerX64::IfRegisterLT(int reg,
+                                           int comparand,
+                                           Label* if_lt) {
+  __ cmpq(register_location(reg), Immediate(comparand));
+  BranchOrBacktrack(less, if_lt);
+}
+
+
+void RegExpMacroAssemblerX64::IfRegisterEqPos(int reg,
+                                              Label* if_eq) {
+  __ cmpq(rdi, register_location(reg));
+  BranchOrBacktrack(equal, if_eq);
+}
+
+
+RegExpMacroAssembler::IrregexpImplementation
+    RegExpMacroAssemblerX64::Implementation() {
+  return kX64Implementation;
+}
+
+
+void RegExpMacroAssemblerX64::LoadCurrentCharacter(int cp_offset,
+                                                   Label* on_end_of_input,
+                                                   bool check_bounds,
+                                                   int characters) {
+  ASSERT(cp_offset >= -1);      // ^ and \b can look behind one character.
+  ASSERT(cp_offset < (1<<30));  // Be sane! (And ensure negation works)
+  if (check_bounds) {
+    CheckPosition(cp_offset + characters - 1, on_end_of_input);
+  }
+  LoadCurrentCharacterUnchecked(cp_offset, characters);
+}
+
+
+void RegExpMacroAssemblerX64::PopCurrentPosition() {
+  Pop(rdi);
+}
+
+
+void RegExpMacroAssemblerX64::PopRegister(int register_index) {
+  Pop(rax);
+  __ movq(register_location(register_index), rax);
+}
+
+
+void RegExpMacroAssemblerX64::PushBacktrack(Label* label) {
+  Push(label);
+  CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerX64::PushCurrentPosition() {
+  Push(rdi);
+}
+
+
+void RegExpMacroAssemblerX64::PushRegister(int register_index,
+                                           StackCheckFlag check_stack_limit) {
+  __ movq(rax, register_location(register_index));
+  Push(rax);
+  if (check_stack_limit) CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerX64::ReadCurrentPositionFromRegister(int reg) {
+  __ movq(rdi, register_location(reg));
+}
+
+
+void RegExpMacroAssemblerX64::ReadStackPointerFromRegister(int reg) {
+  __ movq(backtrack_stackpointer(), register_location(reg));
+  __ addq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
+}
+
+
+void RegExpMacroAssemblerX64::SetRegister(int register_index, int to) {
+  ASSERT(register_index >= num_saved_registers_);  // Reserved for positions!
+  __ movq(register_location(register_index), Immediate(to));
+}
+
+
+void RegExpMacroAssemblerX64::Succeed() {
+  __ jmp(&success_label_);
+}
+
+
+void RegExpMacroAssemblerX64::WriteCurrentPositionToRegister(int reg,
+                                                             int cp_offset) {
+  if (cp_offset == 0) {
+    __ movq(register_location(reg), rdi);
+  } else {
+    __ lea(rax, Operand(rdi, cp_offset * char_size()));
+    __ movq(register_location(reg), rax);
+  }
+}
+
+
+void RegExpMacroAssemblerX64::ClearRegisters(int reg_from, int reg_to) {
+  ASSERT(reg_from <= reg_to);
+  __ movq(rax, Operand(rbp, kInputStartMinusOne));
+  for (int reg = reg_from; reg <= reg_to; reg++) {
+    __ movq(register_location(reg), rax);
+  }
+}
+
+
+void RegExpMacroAssemblerX64::WriteStackPointerToRegister(int reg) {
+  __ movq(rax, backtrack_stackpointer());
+  __ subq(rax, Operand(rbp, kStackHighEnd));
+  __ movq(register_location(reg), rax);
+}
+
+
+// Private methods:
+
+void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
+  // This function call preserves no register values. Callers should save
+  // anything that is volatile across a C call or overwritten by this function.
+  int num_arguments = 3;
+  FrameAlign(num_arguments);
+#ifdef _WIN64
+  // Second argument: Code* of self. (Do this before overwriting r8).
+  __ movq(rdx, code_object_pointer());
+  // Third argument: RegExp code frame pointer.
+  __ movq(r8, rbp);
+  // First argument: Next address on the stack (will be address of
+  // return address).
+  __ lea(rcx, Operand(rsp, -kPointerSize));
+#else
+  // Third argument: RegExp code frame pointer.
+  __ movq(rdx, rbp);
+  // Second argument: Code* of self.
+  __ movq(rsi, code_object_pointer());
+  // First argument: Next address on the stack (will be address of
+  // return address).
+  __ lea(rdi, Operand(rsp, -kPointerSize));
+#endif
+  ExternalReference stack_check =
+      ExternalReference::re_check_stack_guard_state();
+  CallCFunction(stack_check, num_arguments);
+}
+
+
+// Helper function for reading a value out of a stack frame.
+template <typename T>
+static T& frame_entry(Address re_frame, int frame_offset) {
+  return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
+}
+
+
+int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
+                                                  Code* re_code,
+                                                  Address re_frame) {
+  if (StackGuard::IsStackOverflow()) {
+    Top::StackOverflow();
+    return EXCEPTION;
+  }
+
+  // If it is not a real stack overflow, the stack guard was used to
+  // interrupt execution for another purpose.
+
+  // Prepare for possible GC.
+  HandleScope handles;
+  Handle<Code> code_handle(re_code);
+
+  Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
+  // Current string.
+  bool is_ascii = subject->IsAsciiRepresentation();
+
+  ASSERT(re_code->instruction_start() <= *return_address);
+  ASSERT(*return_address <=
+      re_code->instruction_start() + re_code->instruction_size());
+
+  Object* result = Execution::HandleStackGuardInterrupt();
+
+  if (*code_handle != re_code) {  // Return address no longer valid
+    intptr_t delta = *code_handle - re_code;
+    // Overwrite the return address on the stack.
+    *return_address += delta;
+  }
+
+  if (result->IsException()) {
+    return EXCEPTION;
+  }
+
+  // String might have changed.
+  if (subject->IsAsciiRepresentation() != is_ascii) {
+    // If we changed between an ASCII and a UC16 string, the specialized
+    // code cannot be used, and we need to restart regexp matching from
+    // scratch (including, potentially, compiling a new version of the code).
+    return RETRY;
+  }
+
+  // Otherwise, the content of the string might have moved. It must still
+  // be a sequential or external string with the same content.
+  // Update the start and end pointers in the stack frame to the current
+  // location (whether it has actually moved or not).
+  ASSERT(StringShape(*subject).IsSequential() ||
+      StringShape(*subject).IsExternal());
+
+  // The original start address of the characters to match.
+  const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
+
+  // Find the current start address of the same character at the current string
+  // position.
+  int start_index = frame_entry<int>(re_frame, kStartIndex);
+  const byte* new_address = StringCharacterPosition(*subject, start_index);
+
+  if (start_address != new_address) {
+    // If there is a difference, update the object pointer and start and end
+    // addresses in the RegExp stack frame to match the new value.
+    const byte* end_address = frame_entry<const byte*>(re_frame, kInputEnd);
+    int byte_length = end_address - start_address;
+    frame_entry<const String*>(re_frame, kInputString) = *subject;
+    frame_entry<const byte*>(re_frame, kInputStart) = new_address;
+    frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+  }
+
+  return 0;
+}
+
+
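+// Regexp registers live consecutively below kRegisterZero on the frame, so
+// register i ends up at rbp + kRegisterZero - i * kPointerSize; num_registers_
+// tracks one past the highest index used.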
+Operand RegExpMacroAssemblerX64::register_location(int register_index) {
+  ASSERT(register_index < (1<<30));
+  if (num_registers_ <= register_index) {
+    num_registers_ = register_index + 1;
+  }
+  return Operand(rbp, kRegisterZero - register_index * kPointerSize);
+}
+
+
+void RegExpMacroAssemblerX64::CheckPosition(int cp_offset,
+                                            Label* on_outside_input) {
+  __ cmpl(rdi, Immediate(-cp_offset * char_size()));
+  BranchOrBacktrack(greater_equal, on_outside_input);
+}
+
+
+void RegExpMacroAssemblerX64::BranchOrBacktrack(Condition condition,
+                                                Label* to) {
+  if (condition < 0) {  // No condition
+    if (to == NULL) {
+      Backtrack();
+      return;
+    }
+    __ jmp(to);
+    return;
+  }
+  if (to == NULL) {
+    __ j(condition, &backtrack_label_);
+    return;
+  }
+  __ j(condition, to);
+}
+
+
+void RegExpMacroAssemblerX64::SafeCall(Label* to) {
+  __ call(to);
+}
+
+
+void RegExpMacroAssemblerX64::SafeCallTarget(Label* label) {
+  __ bind(label);
+  __ subq(Operand(rsp, 0), code_object_pointer());
+}
+
+
+void RegExpMacroAssemblerX64::SafeReturn() {
+  __ addq(Operand(rsp, 0), code_object_pointer());
+  __ ret(0);
+}
+
+
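+// The backtrack stack grows downwards and holds 32-bit entries (input
+// positions and code-relative offsets), which is why the operations below
+// adjust the stack pointer by kIntSize and use 32-bit moves.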
+void RegExpMacroAssemblerX64::Push(Register source) {
+  ASSERT(!source.is(backtrack_stackpointer()));
+  // Notice: This updates flags, unlike normal Push.
+  __ subq(backtrack_stackpointer(), Immediate(kIntSize));
+  __ movl(Operand(backtrack_stackpointer(), 0), source);
+}
+
+
+void RegExpMacroAssemblerX64::Push(Immediate value) {
+  // Notice: This updates flags, unlike normal Push.
+  __ subq(backtrack_stackpointer(), Immediate(kIntSize));
+  __ movl(Operand(backtrack_stackpointer(), 0), value);
+}
+
+
+void RegExpMacroAssemblerX64::FixupCodeRelativePositions() {
+  for (int i = 0, n = code_relative_fixup_positions_.length(); i < n; i++) {
+    int position = code_relative_fixup_positions_[i];
+    // Each recorded position is just past a 32-bit offset that is relative
+    // to that position. Patch the offset to be relative to the Code object
+    // pointer instead.
+    int patch_position = position - kIntSize;
+    int offset = masm_->long_at(patch_position);
+    masm_->long_at_put(patch_position,
+                       offset
+                       + position
+                       + Code::kHeaderSize
+                       - kHeapObjectTag);
+  }
+  code_relative_fixup_positions_.Clear();
+}
+
+
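+// Pushes the label's offset as a 32-bit value and records the patch position,
+// so that FixupCodeRelativePositions can later rewrite the value into an
+// offset relative to the Code object, keeping absolute code addresses off the
+// backtrack stack.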
+void RegExpMacroAssemblerX64::Push(Label* backtrack_target) {
+  __ subq(backtrack_stackpointer(), Immediate(kIntSize));
+  __ movl(Operand(backtrack_stackpointer(), 0), backtrack_target);
+  MarkPositionForCodeRelativeFixup();
+}
+
+
+void RegExpMacroAssemblerX64::Pop(Register target) {
+  ASSERT(!target.is(backtrack_stackpointer()));
+  __ movsxlq(target, Operand(backtrack_stackpointer(), 0));
+  // Notice: This updates flags, unlike normal Pop.
+  __ addq(backtrack_stackpointer(), Immediate(kIntSize));
+}
+
+
+void RegExpMacroAssemblerX64::Drop() {
+  __ addq(backtrack_stackpointer(), Immediate(kIntSize));
+}
+
+
+void RegExpMacroAssemblerX64::CheckPreemption() {
+  // Check for preemption.
+  Label no_preempt;
+  ExternalReference stack_guard_limit =
+      ExternalReference::address_of_stack_guard_limit();
+  __ load_rax(stack_guard_limit);
+  __ cmpq(rsp, rax);
+  __ j(above, &no_preempt);
+
+  SafeCall(&check_preempt_label_);
+
+  __ bind(&no_preempt);
+}
+
+
+void RegExpMacroAssemblerX64::CheckStackLimit() {
+  if (FLAG_check_stack) {
+    Label no_stack_overflow;
+    ExternalReference stack_limit =
+        ExternalReference::address_of_regexp_stack_limit();
+    __ load_rax(stack_limit);
+    __ cmpq(backtrack_stackpointer(), rax);
+    __ j(above, &no_stack_overflow);
+
+    SafeCall(&stack_overflow_label_);
+
+    __ bind(&no_stack_overflow);
+  }
+}
+
+
+void RegExpMacroAssemblerX64::FrameAlign(int num_arguments) {
+  // TODO(lrn): Since we no longer use the system stack arbitrarily (but we do
+  // use it, e.g., for SafeCall), we know the number of elements on the stack
+  // since the last frame alignment. We might be able to do this more simply then.
+  int frameAlignment = OS::ActivationFrameAlignment();
+  ASSERT(frameAlignment != 0);
+  // Make stack end at alignment and make room for num_arguments pointers
+  // (on Win64 only) and the original value of rsp.
+  __ movq(kScratchRegister, rsp);
+  ASSERT(IsPowerOf2(frameAlignment));
+#ifdef _WIN64
+  // Allocate space for parameters and old rsp.
+  __ subq(rsp, Immediate((num_arguments + 1) * kPointerSize));
+  __ and_(rsp, Immediate(-frameAlignment));
+  __ movq(Operand(rsp, num_arguments * kPointerSize), kScratchRegister);
+#else
+  // Allocate space for old rsp.
+  __ subq(rsp, Immediate(kPointerSize));
+  __ and_(rsp, Immediate(-frameAlignment));
+  __ movq(Operand(rsp, 0), kScratchRegister);
+#endif
+}
+
+
+void RegExpMacroAssemblerX64::CallCFunction(ExternalReference function,
+                                            int num_arguments) {
+  __ movq(rax, function);
+  __ call(rax);
+  ASSERT(OS::ActivationFrameAlignment() != 0);
+#ifdef _WIN64
+  __ movq(rsp, Operand(rsp, num_arguments * kPointerSize));
+#else
+  // All arguments passed in registers.
+  ASSERT(num_arguments <= 6);
+  __ pop(rsp);
+#endif
+}
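+// A typical call sequence, as in CallCheckStackGuardState above, is:
+//   FrameAlign(num_arguments);
+//   <store the arguments in the argument registers / home slots>
+//   CallCFunction(ext_ref, num_arguments);
+// FrameAlign saves rsp and aligns the stack; CallCFunction restores rsp after
+// the call returns.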
+
+
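+// rsi points at the end of the input and rdi holds the current position as a
+// negative byte offset from that end, so rsi + rdi + cp_offset * char_size()
+// addresses the requested characters directly.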
+void RegExpMacroAssemblerX64::LoadCurrentCharacterUnchecked(int cp_offset,
+                                                            int characters) {
+  if (mode_ == ASCII) {
+    if (characters == 4) {
+      __ movl(current_character(), Operand(rsi, rdi, times_1, cp_offset));
+    } else if (characters == 2) {
+      __ movzxwl(current_character(), Operand(rsi, rdi, times_1, cp_offset));
+    } else {
+      ASSERT(characters == 1);
+      __ movzxbl(current_character(), Operand(rsi, rdi, times_1, cp_offset));
+    }
+  } else {
+    ASSERT(mode_ == UC16);
+    if (characters == 2) {
+      __ movl(current_character(),
+              Operand(rsi, rdi, times_1, cp_offset * sizeof(uc16)));
+    } else {
+      ASSERT(characters == 1);
+      __ movzxwl(current_character(),
+                 Operand(rsi, rdi, times_1, cp_offset * sizeof(uc16)));
+    }
+  }
+}
+
+
+void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
+  __ int3();  // Unused on x64.
+}
+
+#undef __
+
+#endif  // V8_NATIVE_REGEXP
+
+}}  // namespace v8::internal
diff --git a/src/x64/regexp-macro-assembler-x64.h b/src/x64/regexp-macro-assembler-x64.h
new file mode 100644
index 0000000..3e6720d
--- /dev/null
+++ b/src/x64/regexp-macro-assembler-x64.h
@@ -0,0 +1,294 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
+#define V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
+
+namespace v8 {
+namespace internal {
+
+#ifdef V8_NATIVE_REGEXP
+
+class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
+ public:
+  RegExpMacroAssemblerX64(Mode mode, int registers_to_save);
+  virtual ~RegExpMacroAssemblerX64();
+  virtual int stack_limit_slack();
+  virtual void AdvanceCurrentPosition(int by);
+  virtual void AdvanceRegister(int reg, int by);
+  virtual void Backtrack();
+  virtual void Bind(Label* label);
+  virtual void CheckAtStart(Label* on_at_start);
+  virtual void CheckCharacter(uint32_t c, Label* on_equal);
+  virtual void CheckCharacterAfterAnd(uint32_t c,
+                                      uint32_t mask,
+                                      Label* on_equal);
+  virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
+  virtual void CheckCharacterLT(uc16 limit, Label* on_less);
+  virtual void CheckCharacters(Vector<const uc16> str,
+                               int cp_offset,
+                               Label* on_failure,
+                               bool check_end_of_string);
+  // A "greedy loop" is a loop that is both greedy and with a simple
+  // body. It has a particularly simple implementation.
+  virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+  virtual void CheckNotAtStart(Label* on_not_at_start);
+  virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+  virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+                                               Label* on_no_match);
+  virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
+  virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
+  virtual void CheckNotCharacterAfterAnd(uint32_t c,
+                                         uint32_t mask,
+                                         Label* on_not_equal);
+  virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
+                                              uc16 minus,
+                                              uc16 mask,
+                                              Label* on_not_equal);
+  // Checks whether the given offset from the current position is before
+  // the end of the string.
+  virtual void CheckPosition(int cp_offset, Label* on_outside_input);
+  virtual bool CheckSpecialCharacterClass(uc16 type,
+                                          int cp_offset,
+                                          bool check_offset,
+                                          Label* on_no_match);
+  virtual void Fail();
+  virtual Handle<Object> GetCode(Handle<String> source);
+  virtual void GoTo(Label* label);
+  virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+  virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+  virtual void IfRegisterEqPos(int reg, Label* if_eq);
+  virtual IrregexpImplementation Implementation();
+  virtual void LoadCurrentCharacter(int cp_offset,
+                                    Label* on_end_of_input,
+                                    bool check_bounds = true,
+                                    int characters = 1);
+  virtual void PopCurrentPosition();
+  virtual void PopRegister(int register_index);
+  virtual void PushBacktrack(Label* label);
+  virtual void PushCurrentPosition();
+  virtual void PushRegister(int register_index,
+                            StackCheckFlag check_stack_limit);
+  virtual void ReadCurrentPositionFromRegister(int reg);
+  virtual void ReadStackPointerFromRegister(int reg);
+  virtual void SetRegister(int register_index, int to);
+  virtual void Succeed();
+  virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+  virtual void ClearRegisters(int reg_from, int reg_to);
+  virtual void WriteStackPointerToRegister(int reg);
+
+  static Result Match(Handle<Code> regexp,
+                      Handle<String> subject,
+                      int* offsets_vector,
+                      int offsets_vector_length,
+                      int previous_index);
+
+  static Result Execute(Code* code,
+                        String* input,
+                        int start_offset,
+                        const byte* input_start,
+                        const byte* input_end,
+                        int* output,
+                        bool at_start);
+
+  // Called from RegExp if the stack-guard is triggered.
+  // If the code object is relocated, the return address is fixed before
+  // returning.
+  static int CheckStackGuardState(Address* return_address,
+                                  Code* re_code,
+                                  Address re_frame);
+
+ private:
+  // Offsets from rbp of function parameters and stored registers.
+  static const int kFramePointer = 0;
+  // Above the frame pointer - function parameters and return address.
+  static const int kReturn_eip = kFramePointer + kPointerSize;
+  static const int kFrameAlign = kReturn_eip + kPointerSize;
+
+#ifdef _WIN64
+  // Parameters (first four passed as registers, but with room on stack).
+  // In the Microsoft 64-bit calling convention, there is room on the caller's
+  // stack (before the return address) to spill parameter registers. We
+  // use this space to store the register-passed parameters.
+  static const int kInputString = kFrameAlign;
+  // StartIndex is passed as 32 bit int.
+  static const int kStartIndex = kInputString + kPointerSize;
+  static const int kInputStart = kStartIndex + kPointerSize;
+  static const int kInputEnd = kInputStart + kPointerSize;
+  static const int kRegisterOutput = kInputEnd + kPointerSize;
+  // AtStart is passed as 32 bit int (values 0 or 1).
+  static const int kAtStart = kRegisterOutput + kPointerSize;
+  static const int kStackHighEnd = kAtStart + kPointerSize;
+#else
+  // In AMD64 ABI Calling Convention, the first six integer parameters
+  // are passed as registers, and caller must allocate space on the stack
+  // if it wants them stored. We push the parameters after the frame pointer.
+  static const int kInputString = kFramePointer - kPointerSize;
+  static const int kStartIndex = kInputString - kPointerSize;
+  static const int kInputStart = kStartIndex - kPointerSize;
+  static const int kInputEnd = kInputStart - kPointerSize;
+  static const int kRegisterOutput = kInputEnd - kPointerSize;
+  static const int kAtStart = kRegisterOutput - kPointerSize;
+  static const int kStackHighEnd = kFrameAlign;
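+  // With 8-byte pointers this puts, for example, kInputString at rbp - 8,
+  // kAtStart at rbp - 48 and kStackHighEnd at rbp + 16.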
+#endif
+
+#ifdef _WIN64
+  // The Microsoft calling convention has three callee-saved registers that
+  // we use. We push these after the frame pointer.
+  static const int kBackup_rsi = kFramePointer - kPointerSize;
+  static const int kBackup_rdi = kBackup_rsi - kPointerSize;
+  static const int kBackup_rbx = kBackup_rdi - kPointerSize;
+  static const int kLastCalleeSaveRegister = kBackup_rbx;
+#else
+  // The AMD64 calling convention has only one callee-saved register that
+  // we use. We push this after the frame pointer (and after the
+  // parameters).
+  static const int kBackup_rbx = kAtStart - kPointerSize;
+  static const int kLastCalleeSaveRegister = kBackup_rbx;
+#endif
+
+  // When adding local variables remember to push space for them in
+  // the frame in GetCode.
+  static const int kInputStartMinusOne =
+      kLastCalleeSaveRegister - kPointerSize;
+
+  // First register address. Following registers are below it on the stack.
+  static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+
+  // Initial size of code buffer.
+  static const size_t kRegExpCodeSize = 1024;
+
+  // Load a number of characters at the given offset from the
+  // current position, into the current-character register.
+  void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
+
+  // Check whether preemption has been requested.
+  void CheckPreemption();
+
+  // Check whether we are exceeding the stack limit on the backtrack stack.
+  void CheckStackLimit();
+
+  // Generate a call to CheckStackGuardState.
+  void CallCheckStackGuardState();
+
+  // The rbp-relative location of a regexp register.
+  Operand register_location(int register_index);
+
+  // The register containing the current character after LoadCurrentCharacter.
+  inline Register current_character() { return rdx; }
+
+  // The register containing the backtrack stack top. Provides a meaningful
+  // name to the register.
+  inline Register backtrack_stackpointer() { return rcx; }
+
+  // The register containing a pointer to this code's own Code object.
+  inline Register code_object_pointer() { return r8; }
+
+  // Byte size of characters in the string to match (decided by the Mode
+  // argument): 1 for ASCII, 2 for UC16.
+  inline int char_size() { return static_cast<int>(mode_); }
+
+  // Equivalent to a conditional branch to the label, unless the label
+  // is NULL, in which case it is a conditional Backtrack.
+  void BranchOrBacktrack(Condition condition, Label* to);
+
+  void MarkPositionForCodeRelativeFixup() {
+    code_relative_fixup_positions_.Add(masm_->pc_offset());
+  }
+
+  void FixupCodeRelativePositions();
+
+  // Call and return internally in the generated code in a way that
+  // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
+  inline void SafeCall(Label* to);
+  inline void SafeCallTarget(Label* label);
+  inline void SafeReturn();
+
+  // Pushes the value of a register on the backtrack stack. Decrements the
+  // stack pointer (rcx) by a word size and stores the register's value there.
+  inline void Push(Register source);
+
+  // Pushes a value on the backtrack stack. Decrements the stack pointer (rcx)
+  // by a word size and stores the value there.
+  inline void Push(Immediate value);
+
+  // Pushes the Code object relative offset of a label on the backtrack stack
+  // (i.e., a backtrack target). Decrements the stack pointer (rcx)
+  // by a word size and stores the value there.
+  inline void Push(Label* label);
+
+  // Pops a value from the backtrack stack. Reads the word at the stack pointer
+  // (rcx) and increments it by a word size.
+  inline void Pop(Register target);
+
+  // Drops the top value from the backtrack stack without reading it.
+  // Increments the stack pointer (rcx) by a word size.
+  inline void Drop();
+
+  // Before calling a C-function from generated code, align arguments on the
+  // stack. After aligning the frame, arguments must be stored at rsp[0],
+  // rsp[8], etc., not pushed. The argument count assumes all arguments are
+  // word sized. Some compilers/platforms require the stack to be aligned
+  // when calling C++ code.
+  // Needs a scratch register to do some arithmetic. This register will be
+  // trashed.
+  inline void FrameAlign(int num_arguments);
+
+  // Calls a C function and cleans up the space for arguments allocated
+  // by FrameAlign. The called function is not allowed to trigger a garbage
+  // collection, since that might move the code and invalidate the return
+  // address (unless this is somehow accounted for by the called function).
+  inline void CallCFunction(ExternalReference function, int num_arguments);
+
+  MacroAssembler* masm_;
+
+  ZoneList<int> code_relative_fixup_positions_;
+
+  // Which mode to generate code for (ASCII or UC16).
+  Mode mode_;
+
+  // One greater than maximal register index actually used.
+  int num_registers_;
+
+  // Number of registers to output at the end (the saved registers
+  // are always 0..num_saved_registers_-1)
+  int num_saved_registers_;
+
+  // Labels used internally.
+  Label entry_label_;
+  Label start_label_;
+  Label success_label_;
+  Label backtrack_label_;
+  Label exit_label_;
+  Label check_preempt_label_;
+  Label stack_overflow_label_;
+};
+
+#endif  // V8_NATIVE_REGEXP
+
+}}  // namespace v8::internal
+
+#endif  // V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
diff --git a/src/x64/register-allocator-x64-inl.h b/src/x64/register-allocator-x64-inl.h
new file mode 100644
index 0000000..d630b33
--- /dev/null
+++ b/src/x64/register-allocator-x64-inl.h
@@ -0,0 +1,86 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
+#define V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
+
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+bool RegisterAllocator::IsReserved(Register reg) {
+  return reg.is(rsp) || reg.is(rbp) || reg.is(rsi) ||
+      reg.is(kScratchRegister);
+}
+
+
+// The register allocator uses small integers to represent the
+// non-reserved assembler registers.
+int RegisterAllocator::ToNumber(Register reg) {
+  ASSERT(reg.is_valid() && !IsReserved(reg));
+  const int kNumbers[] = {
+    0,   // rax
+    2,   // rcx
+    3,   // rdx
+    1,   // rbx
+    -1,  // rsp  Stack pointer.
+    -1,  // rbp  Frame pointer.
+    -1,  // rsi  Context.
+    4,   // rdi
+    5,   // r8
+    6,   // r9
+    -1,  // r10  Scratch register.
+    9,   // r11
+    10,  // r12
+    -1,  // r13  Roots array.  This is callee saved.
+    7,   // r14
+    8    // r15
+  };
+  return kNumbers[reg.code()];
+}
+
+
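+// ToRegister is the inverse of ToNumber for the allocatable registers, so,
+// for example, ToNumber(rcx) == 2 and ToRegister(2).is(rcx).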
+Register RegisterAllocator::ToRegister(int num) {
+  ASSERT(num >= 0 && num < kNumRegisters);
+  const Register kRegisters[] =
+      { rax, rbx, rcx, rdx, rdi, r8, r9, r14, r15, r11, r12 };
+  return kRegisters[num];
+}
+
+
+void RegisterAllocator::Initialize() {
+  Reset();
+  // The non-reserved rdi register is live on JS function entry.
+  Use(rdi);  // JS function.
+}
+} }  // namespace v8::internal
+
+#endif  // V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
diff --git a/src/x64/register-allocator-x64.cc b/src/x64/register-allocator-x64.cc
new file mode 100644
index 0000000..deb2318
--- /dev/null
+++ b/src/x64/register-allocator-x64.cc
@@ -0,0 +1,84 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "register-allocator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// Result implementation.
+
+void Result::ToRegister() {
+  ASSERT(is_valid());
+  if (is_constant()) {
+    Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate();
+    ASSERT(fresh.is_valid());
+    CodeGeneratorScope::Current()->masm()->Move(fresh.reg(), handle());
+    // This result becomes a copy of the fresh one.
+    *this = fresh;
+  }
+  ASSERT(is_register());
+}
+
+
+void Result::ToRegister(Register target) {
+  ASSERT(is_valid());
+  if (!is_register() || !reg().is(target)) {
+    Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate(target);
+    ASSERT(fresh.is_valid());
+    if (is_register()) {
+      CodeGeneratorScope::Current()->masm()->movq(fresh.reg(), reg());
+    } else {
+      ASSERT(is_constant());
+      CodeGeneratorScope::Current()->masm()->Move(fresh.reg(), handle());
+    }
+    *this = fresh;
+  } else if (is_register() && reg().is(target)) {
+    ASSERT(CodeGeneratorScope::Current()->has_valid_frame());
+    CodeGeneratorScope::Current()->frame()->Spill(target);
+    ASSERT(CodeGeneratorScope::Current()->allocator()->count(target) == 1);
+  }
+  ASSERT(is_register());
+  ASSERT(reg().is(target));
+}
+
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
+  // This function is not used in 64-bit code.
+  UNREACHABLE();
+  return Result();
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/x64/register-allocator-x64.h b/src/x64/register-allocator-x64.h
new file mode 100644
index 0000000..8d666d2
--- /dev/null
+++ b/src/x64/register-allocator-x64.h
@@ -0,0 +1,43 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_X64_REGISTER_ALLOCATOR_X64_H_
+#define V8_X64_REGISTER_ALLOCATOR_X64_H_
+
+namespace v8 {
+namespace internal {
+
+class RegisterAllocatorConstants : public AllStatic {
+ public:
+  static const int kNumRegisters = 11;
+  static const int kInvalidRegister = -1;
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_X64_REGISTER_ALLOCATOR_X64_H_
diff --git a/src/x64/simulator-x64.cc b/src/x64/simulator-x64.cc
new file mode 100644
index 0000000..209aa2d
--- /dev/null
+++ b/src/x64/simulator-x64.cc
@@ -0,0 +1,27 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/src/x64/simulator-x64.h b/src/x64/simulator-x64.h
new file mode 100644
index 0000000..998c909
--- /dev/null
+++ b/src/x64/simulator-x64.h
@@ -0,0 +1,54 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_X64_SIMULATOR_X64_H_
+#define V8_X64_SIMULATOR_X64_H_
+
+#include "allocation.h"
+
+// Since there is no simulator for the x64 architecture the only thing we can
+// do is to call the entry directly.
+// TODO(X64): Don't pass p0, since it isn't used?
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+  entry(p0, p1, p2, p3, p4);
+
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on x64 uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+    return c_limit;
+  }
+};
+
+// Call the generated regexp code directly. The entry function pointer should
+// expect seven int/pointer sized arguments and return an int.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+  entry(p0, p1, p2, p3, p4, p5, p6)
+
+#endif  // V8_X64_SIMULATOR_X64_H_
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
new file mode 100644
index 0000000..0994230
--- /dev/null
+++ b/src/x64/stub-cache-x64.cc
@@ -0,0 +1,1860 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "v8.h"
+
+#include "ic-inl.h"
+#include "codegen-inl.h"
+#include "stub-cache.h"
+#include "macro-assembler-x64.h"
+
+namespace v8 {
+namespace internal {
+
+//-----------------------------------------------------------------------------
+// StubCompiler static helper functions
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(MacroAssembler* masm,
+                       Code::Flags flags,
+                       StubCache::Table table,
+                       Register name,
+                       Register offset) {
+  // The offset register must hold a *positive* smi.
+  ExternalReference key_offset(SCTableReference::keyReference(table));
+  Label miss;
+
+  __ movq(kScratchRegister, key_offset);
+  SmiIndex index = masm->SmiToIndex(offset, offset, kPointerSizeLog2);
+  // Check that the key in the entry matches the name.
+  __ cmpl(name, Operand(kScratchRegister, index.reg, index.scale, 0));
+  __ j(not_equal, &miss);
+  // Get the code entry from the cache.
+  // Use key_offset + kPointerSize, rather than loading value_offset.
+  __ movq(kScratchRegister,
+          Operand(kScratchRegister, index.reg, index.scale, kPointerSize));
+  // Check that the flags match what we're looking for.
+  __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
+  __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
+  __ cmpl(offset, Immediate(flags));
+  __ j(not_equal, &miss);
+
+  // Jump to the first instruction in the code stub.
+  __ addq(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
+  __ jmp(kScratchRegister);
+
+  __ bind(&miss);
+}
+
+
+void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
+  ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
+  Code* code = NULL;
+  if (kind == Code::LOAD_IC) {
+    code = Builtins::builtin(Builtins::LoadIC_Miss);
+  } else {
+    code = Builtins::builtin(Builtins::KeyedLoadIC_Miss);
+  }
+
+  Handle<Code> ic(code);
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+}
+
+
+void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
+                                                       int index,
+                                                       Register prototype) {
+  // Load the global or builtins object from the current context.
+  __ movq(prototype,
+             Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  // Load the global context from the global or builtins object.
+  __ movq(prototype,
+             FieldOperand(prototype, GlobalObject::kGlobalContextOffset));
+  // Load the function from the global context.
+  __ movq(prototype, Operand(prototype, Context::SlotOffset(index)));
+  // Load the initial map.  The global functions all have initial maps.
+  __ movq(prototype,
+             FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
+  // Load the prototype from the initial map.
+  __ movq(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+// Load a fast property out of a holder object (src). In-object properties
+// are loaded directly; otherwise the property is loaded from the properties
+// fixed array.
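+// For example, a holder with four in-object properties keeps property index 2
+// inside the object itself, while property index 5 is read from slot 1 of its
+// properties fixed array.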
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+                                            Register dst, Register src,
+                                            JSObject* holder, int index) {
+  // Adjust for the number of properties stored in the holder.
+  index -= holder->map()->inobject_properties();
+  if (index < 0) {
+    // Get the property straight out of the holder.
+    int offset = holder->map()->instance_size() + (index * kPointerSize);
+    __ movq(dst, FieldOperand(src, offset));
+  } else {
+    // Calculate the offset into the properties array.
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    __ movq(dst, FieldOperand(src, JSObject::kPropertiesOffset));
+    __ movq(dst, FieldOperand(dst, offset));
+  }
+}
+
+
+template <typename Pushable>
+static void PushInterceptorArguments(MacroAssembler* masm,
+                                     Register receiver,
+                                     Register holder,
+                                     Pushable name,
+                                     JSObject* holder_obj) {
+  __ push(receiver);
+  __ push(holder);
+  __ push(name);
+  InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
+  __ movq(kScratchRegister, Handle<Object>(interceptor),
+          RelocInfo::EMBEDDED_OBJECT);
+  __ push(kScratchRegister);
+  __ push(FieldOperand(kScratchRegister, InterceptorInfo::kDataOffset));
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm,
+                              Code::Flags flags,
+                              Register receiver,
+                              Register name,
+                              Register scratch,
+                              Register extra) {
+  Label miss;
+  USE(extra);  // The register extra is not used on the X64 platform.
+  // Make sure that code is valid. The shifting code relies on the
+  // entry size being 16.
+  ASSERT(sizeof(Entry) == 16);
+
+  // Make sure the flags do not name a specific type.
+  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+  // Make sure that there are no register conflicts.
+  ASSERT(!scratch.is(receiver));
+  ASSERT(!scratch.is(name));
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, &miss);
+
+  // Get the map of the receiver and compute the hash.
+  __ movl(scratch, FieldOperand(name, String::kLengthOffset));
+  // Use only the low 32 bits of the map pointer.
+  __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ xor_(scratch, Immediate(flags));
+  __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+
+  // Probe the primary table.
+  ProbeTable(masm, flags, kPrimary, name, scratch);
+
+  // Primary miss: Compute hash for secondary probe.
+  __ movl(scratch, FieldOperand(name, String::kLengthOffset));
+  __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ xor_(scratch, Immediate(flags));
+  __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+  __ subl(scratch, name);
+  __ addl(scratch, Immediate(flags));
+  __ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+
+  // Probe the secondary table.
+  ProbeTable(masm, flags, kSecondary, name, scratch);
+
+  // Cache miss: Fall-through and let caller handle the miss by
+  // entering the runtime system.
+  __ bind(&miss);
+}
+
+
+void StubCompiler::GenerateStoreField(MacroAssembler* masm,
+                                      Builtins::Name storage_extend,
+                                      JSObject* object,
+                                      int index,
+                                      Map* transition,
+                                      Register receiver_reg,
+                                      Register name_reg,
+                                      Register scratch,
+                                      Label* miss_label) {
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver_reg, miss_label);
+
+  // Check that the map of the object hasn't changed.
+  __ Cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
+         Handle<Map>(object->map()));
+  __ j(not_equal, miss_label);
+
+  // Perform global security token check if needed.
+  if (object->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
+  }
+
+  // Stub never generated for non-global objects that require access
+  // checks.
+  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+  // Perform map transition for the receiver if necessary.
+  if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+    // The properties must be extended before we can store the value.
+    // We jump to a runtime call that extends the properties array.
+    __ Move(rcx, Handle<Map>(transition));
+    Handle<Code> ic(Builtins::builtin(storage_extend));
+    __ Jump(ic, RelocInfo::CODE_TARGET);
+    return;
+  }
+
+  if (transition != NULL) {
+    // Update the map of the object; no write barrier updating is
+    // needed because the map is never in new space.
+    __ Move(FieldOperand(receiver_reg, HeapObject::kMapOffset),
+            Handle<Map>(transition));
+  }
+
+  // Adjust for the number of properties stored in the object. Even in the
+  // face of a transition we can use the old map here because the size of the
+  // object and the number of in-object properties is not going to change.
+  index -= object->map()->inobject_properties();
+
+  if (index < 0) {
+    // Set the property straight into the object.
+    int offset = object->map()->instance_size() + (index * kPointerSize);
+    __ movq(FieldOperand(receiver_reg, offset), rax);
+
+    // Update the write barrier for the array address.
+    // Pass the value being stored in the now unused name_reg.
+    __ movq(name_reg, rax);
+    __ RecordWrite(receiver_reg, offset, name_reg, scratch);
+  } else {
+    // Write to the properties array.
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    // Get the properties array (optimistically).
+    __ movq(scratch, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+    __ movq(FieldOperand(scratch, offset), rax);
+
+    // Update the write barrier for the array address.
+    // Pass the value being stored in the now unused name_reg.
+    __ movq(name_reg, rax);
+    __ RecordWrite(scratch, offset, name_reg, receiver_reg);
+  }
+
+  // Return the value (register rax).
+  __ ret(0);
+}
+
+
+void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
+                                           Register receiver,
+                                           Register scratch,
+                                           Label* miss_label) {
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, miss_label);
+
+  // Check that the object is a JS array.
+  __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
+  __ j(not_equal, miss_label);
+
+  // Load length directly from the JS array.
+  __ movq(rax, FieldOperand(receiver, JSArray::kLengthOffset));
+  __ ret(0);
+}
+
+
+// Generate code to check if an object is a string.  If the object is
+// a string, the map's instance type is left in the scratch register.
+static void GenerateStringCheck(MacroAssembler* masm,
+                                Register receiver,
+                                Register scratch,
+                                Label* smi,
+                                Label* non_string_object) {
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver, smi);
+
+  // Check that the object is a string.
+  __ movq(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ movzxbq(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+  ASSERT(kNotStringTag != 0);
+  __ testl(scratch, Immediate(kNotStringTag));
+  __ j(not_zero, non_string_object);
+}
+
+
+void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
+                                            Register receiver,
+                                            Register scratch,
+                                            Label* miss) {
+  Label load_length, check_wrapper;
+
+  // Check if the object is a string leaving the instance type in the
+  // scratch register.
+  GenerateStringCheck(masm, receiver, scratch, miss, &check_wrapper);
+
+  // Load length directly from the string.
+  __ bind(&load_length);
+  __ and_(scratch, Immediate(kStringSizeMask));
+  __ movl(rax, FieldOperand(receiver, String::kLengthOffset));
+  // rcx is also the receiver.
+  __ lea(rcx, Operand(scratch, String::kLongLengthShift));
+  __ shr(rax);  // rcx is implicit shift register.
+  __ Integer32ToSmi(rax, rax);
+  __ ret(0);
+
+  // Check if the object is a JSValue wrapper.
+  __ bind(&check_wrapper);
+  __ cmpl(scratch, Immediate(JS_VALUE_TYPE));
+  __ j(not_equal, miss);
+
+  // Check if the wrapped value is a string and load the length
+  // directly if it is.
+  __ movq(receiver, FieldOperand(receiver, JSValue::kValueOffset));
+  GenerateStringCheck(masm, receiver, scratch, miss, miss);
+  __ jmp(&load_length);
+}
+
+
+template <class Pushable>
+static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
+                                                   Register receiver,
+                                                   Register holder,
+                                                   Pushable name,
+                                                   JSObject* holder_obj) {
+  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+  ExternalReference ref =
+      ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly));
+  __ movq(rax, Immediate(5));
+  __ movq(rbx, ref);
+
+  CEntryStub stub(1);
+  __ CallStub(&stub);
+}
+
+
+
+void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
+                                                 Register receiver,
+                                                 Register result,
+                                                 Register scratch,
+                                                 Label* miss_label) {
+  __ TryGetFunctionPrototype(receiver, result, miss_label);
+  if (!result.is(rax)) __ movq(rax, result);
+  __ ret(0);
+}
+
+
+static void LookupPostInterceptor(JSObject* holder,
+                                  String* name,
+                                  LookupResult* lookup) {
+  holder->LocalLookupRealNamedProperty(name, lookup);
+  if (lookup->IsNotFound()) {
+    Object* proto = holder->GetPrototype();
+    if (proto != Heap::null_value()) {
+      proto->Lookup(name, lookup);
+    }
+  }
+}
+
+
+class LoadInterceptorCompiler BASE_EMBEDDED {
+ public:
+  explicit LoadInterceptorCompiler(Register name) : name_(name) {}
+
+  void CompileCacheable(MacroAssembler* masm,
+                        StubCompiler* stub_compiler,
+                        Register receiver,
+                        Register holder,
+                        Register scratch1,
+                        Register scratch2,
+                        JSObject* holder_obj,
+                        LookupResult* lookup,
+                        String* name,
+                        Label* miss_label) {
+    AccessorInfo* callback = 0;
+    bool optimize = false;
+    // So far the most common follow-ups for interceptor loads are FIELD
+    // and CALLBACKS, so only those are inlined; other cases may be added
+    // later.
+    if (lookup->type() == FIELD) {
+      optimize = true;
+    } else if (lookup->type() == CALLBACKS) {
+      Object* callback_object = lookup->GetCallbackObject();
+      if (callback_object->IsAccessorInfo()) {
+        callback = AccessorInfo::cast(callback_object);
+        optimize = callback->getter() != NULL;
+      }
+    }
+
+    if (!optimize) {
+      CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+      return;
+    }
+
+    // Note: starting a frame here makes GC aware of pointers pushed below.
+    __ EnterInternalFrame();
+
+    if (lookup->type() == CALLBACKS) {
+      __ push(receiver);
+    }
+    __ push(holder);
+    __ push(name_);
+
+    CompileCallLoadPropertyWithInterceptor(masm,
+                                           receiver,
+                                           holder,
+                                           name_,
+                                           holder_obj);
+
+    Label interceptor_failed;
+    __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
+    __ j(equal, &interceptor_failed);
+    __ LeaveInternalFrame();
+    __ ret(0);
+
+    __ bind(&interceptor_failed);
+    __ pop(name_);
+    __ pop(holder);
+    if (lookup->type() == CALLBACKS) {
+      __ pop(receiver);
+    }
+
+    __ LeaveInternalFrame();
+
+    if (lookup->type() == FIELD) {
+      holder = stub_compiler->CheckPrototypes(holder_obj,
+                                              holder,
+                                              lookup->holder(),
+                                              scratch1,
+                                              scratch2,
+                                              name,
+                                              miss_label);
+      stub_compiler->GenerateFastPropertyLoad(masm,
+                                              rax,
+                                              holder,
+                                              lookup->holder(),
+                                              lookup->GetFieldIndex());
+      __ ret(0);
+    } else {
+      ASSERT(lookup->type() == CALLBACKS);
+      ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
+      ASSERT(callback != NULL);
+      ASSERT(callback->getter() != NULL);
+
+      Label cleanup;
+      __ pop(scratch2);
+      __ push(receiver);
+      __ push(scratch2);
+
+      holder = stub_compiler->CheckPrototypes(holder_obj, holder,
+                                              lookup->holder(), scratch1,
+                                              scratch2,
+                                              name,
+                                              &cleanup);
+
+      __ pop(scratch2);  // save old return address
+      __ push(holder);
+      __ Move(holder, Handle<AccessorInfo>(callback));
+      __ push(holder);
+      __ push(FieldOperand(holder, AccessorInfo::kDataOffset));
+      __ push(name_);
+      __ push(scratch2);  // restore old return address
+
+      ExternalReference ref =
+          ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+      __ TailCallRuntime(ref, 5, 1);
+
+      __ bind(&cleanup);
+      __ pop(scratch1);
+      __ pop(scratch2);
+      __ push(scratch1);
+    }
+  }
+
+
+  void CompileRegular(MacroAssembler* masm,
+                      Register receiver,
+                      Register holder,
+                      Register scratch,
+                      JSObject* holder_obj,
+                      Label* miss_label) {
+    __ pop(scratch);  // save old return address
+    PushInterceptorArguments(masm, receiver, holder, name_, holder_obj);
+    __ push(scratch);  // restore old return address
+
+    ExternalReference ref = ExternalReference(
+        IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
+    __ TailCallRuntime(ref, 5, 1);
+  }
+
+ private:
+  Register name_;
+};
+
+
+template <class Compiler>
+static void CompileLoadInterceptor(Compiler* compiler,
+                                   StubCompiler* stub_compiler,
+                                   MacroAssembler* masm,
+                                   JSObject* object,
+                                   JSObject* holder,
+                                   String* name,
+                                   LookupResult* lookup,
+                                   Register receiver,
+                                   Register scratch1,
+                                   Register scratch2,
+                                   Label* miss) {
+  ASSERT(holder->HasNamedInterceptor());
+  ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, miss);
+
+  // Check that the maps haven't changed.
+  Register reg =
+      stub_compiler->CheckPrototypes(object, receiver, holder,
+                                     scratch1, scratch2, name, miss);
+
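+  // If the lookup behind the interceptor found a cacheable property,
+  // compile the fast path for it; otherwise fall back to calling the
+  // interceptor through the runtime.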
+  if (lookup->IsValid() && lookup->IsCacheable()) {
+    compiler->CompileCacheable(masm,
+                               stub_compiler,
+                               receiver,
+                               reg,
+                               scratch1,
+                               scratch2,
+                               holder,
+                               lookup,
+                               name,
+                               miss);
+  } else {
+    compiler->CompileRegular(masm,
+                             receiver,
+                             reg,
+                             scratch2,
+                             holder,
+                             miss);
+  }
+}
+
+
+class CallInterceptorCompiler BASE_EMBEDDED {
+ public:
+  explicit CallInterceptorCompiler(const ParameterCount& arguments)
+      : arguments_(arguments), argc_(arguments.immediate()) {}
+
+  void CompileCacheable(MacroAssembler* masm,
+                        StubCompiler* stub_compiler,
+                        Register receiver,
+                        Register holder,
+                        Register scratch1,
+                        Register scratch2,
+                        JSObject* holder_obj,
+                        LookupResult* lookup,
+                        String* name,
+                        Label* miss_label) {
+    JSFunction* function = 0;
+    bool optimize = false;
+    // So far the most common case when the interceptor fails to produce a
+    // result is a CONSTANT_FUNCTION property sitting below it.
+    if (lookup->type() == CONSTANT_FUNCTION) {
+      function = lookup->GetConstantFunction();
+      // JSArray holder is a special case for call constant function
+      // (see the corresponding code).
+      if (function->is_compiled() && !holder_obj->IsJSArray()) {
+        optimize = true;
+      }
+    }
+
+    if (!optimize) {
+      CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+      return;
+    }
+
+    __ EnterInternalFrame();
+    __ push(holder);  // save the holder
+
+    CompileCallLoadPropertyWithInterceptor(
+        masm,
+        receiver,
+        holder,
+        // Under EnterInternalFrame this refers to name.
+        Operand(rbp, (argc_ + 3) * kPointerSize),
+        holder_obj);
+
+    __ pop(receiver);  // restore the holder (now in the receiver register)
+    __ LeaveInternalFrame();
+
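+    // If the interceptor produced a result (anything but the sentinel),
+    // skip the constant function fast path; the caller checks that the
+    // value in rax is a function and invokes it.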
+    __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
+    Label invoke;
+    __ j(not_equal, &invoke);
+
+    stub_compiler->CheckPrototypes(holder_obj, receiver,
+                                   lookup->holder(), scratch1,
+                                   scratch2,
+                                   name,
+                                   miss_label);
+    if (lookup->holder()->IsGlobalObject()) {
+      __ movq(rdx, Operand(rsp, (argc_ + 1) * kPointerSize));
+      __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
+      __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdx);
+    }
+
+    ASSERT(function->is_compiled());
+    // Get the function and set up the context.
+    __ Move(rdi, Handle<JSFunction>(function));
+    __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+    // Jump to the cached code (tail call).
+    ASSERT(function->is_compiled());
+    Handle<Code> code(function->code());
+    ParameterCount expected(function->shared()->formal_parameter_count());
+    __ InvokeCode(code, expected, arguments_,
+                  RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+
+    __ bind(&invoke);
+  }
+
+  void CompileRegular(MacroAssembler* masm,
+                      Register receiver,
+                      Register holder,
+                      Register scratch,
+                      JSObject* holder_obj,
+                      Label* miss_label) {
+    __ EnterInternalFrame();
+
+    PushInterceptorArguments(masm,
+                             receiver,
+                             holder,
+                             Operand(rbp, (argc_ + 3) * kPointerSize),
+                             holder_obj);
+
+    ExternalReference ref = ExternalReference(
+        IC_Utility(IC::kLoadPropertyWithInterceptorForCall));
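+    // Set up the CEntryStub arguments: the argument count in rax and the
+    // runtime entry point in rbx.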
+    __ movq(rax, Immediate(5));
+    __ movq(rbx, ref);
+
+    CEntryStub stub(1);
+    __ CallStub(&stub);
+
+    __ LeaveInternalFrame();
+  }
+
+ private:
+  const ParameterCount& arguments_;
+  int argc_;
+};
+
+
+#undef __
+
+#define __ ACCESS_MASM((masm()))
+
+
+Object* CallStubCompiler::CompileCallConstant(Object* object,
+                                              JSObject* holder,
+                                              JSFunction* function,
+                                              String* name,
+                                              StubCompiler::CheckType check) {
+  // ----------- S t a t e -------------
+  // -----------------------------------
+  // rsp[0] return address
+  // rsp[8] argument argc
+  // rsp[16] argument argc - 1
+  // ...
+  // rsp[argc * 8] argument 1
+  // rsp[(argc + 1) * 8] argument 0 = receiver
+  // rsp[(argc + 2) * 8] function name
+
+  Label miss;
+
+  // Get the receiver from the stack.
+  const int argc = arguments().immediate();
+  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  if (check != NUMBER_CHECK) {
+    __ JumpIfSmi(rdx, &miss);
+  }
+
+  // Make sure that it's okay not to patch the on stack receiver
+  // unless we're doing a receiver map check.
+  ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
+
+  switch (check) {
+    case RECEIVER_MAP_CHECK:
+      // Check that the maps haven't changed.
+      CheckPrototypes(JSObject::cast(object), rdx, holder,
+                      rbx, rcx, name, &miss);
+
+      // Patch the receiver on the stack with the global proxy if
+      // necessary.
+      if (object->IsGlobalObject()) {
+        __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
+        __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+      }
+      break;
+
+    case STRING_CHECK:
+      // Check that the object is a two-byte string or a symbol.
+      __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rcx);
+      __ j(above_equal, &miss);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateLoadGlobalFunctionPrototype(masm(),
+                                          Context::STRING_FUNCTION_INDEX,
+                                          rcx);
+      CheckPrototypes(JSObject::cast(object->GetPrototype()), rcx, holder,
+                      rbx, rdx, name, &miss);
+      break;
+
+    case NUMBER_CHECK: {
+      Label fast;
+      // Check that the object is a smi or a heap number.
+      __ JumpIfSmi(rdx, &fast);
+      __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
+      __ j(not_equal, &miss);
+      __ bind(&fast);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateLoadGlobalFunctionPrototype(masm(),
+                                          Context::NUMBER_FUNCTION_INDEX,
+                                          rcx);
+      CheckPrototypes(JSObject::cast(object->GetPrototype()), rcx, holder,
+                      rbx, rdx, name, &miss);
+      break;
+    }
+
+    case BOOLEAN_CHECK: {
+      Label fast;
+      // Check that the object is a boolean.
+      __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
+      __ j(equal, &fast);
+      __ CompareRoot(rdx, Heap::kFalseValueRootIndex);
+      __ j(not_equal, &miss);
+      __ bind(&fast);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateLoadGlobalFunctionPrototype(masm(),
+                                          Context::BOOLEAN_FUNCTION_INDEX,
+                                          rcx);
+      CheckPrototypes(JSObject::cast(object->GetPrototype()), rcx, holder,
+                      rbx, rdx, name, &miss);
+      break;
+    }
+
+    case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
+      CheckPrototypes(JSObject::cast(object), rdx, holder,
+                      rbx, rcx, name, &miss);
+      // Make sure object->HasFastElements().
+      // Get the elements array of the object.
+      __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+      // Check that the object is in fast mode (not dictionary).
+      __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+             Factory::fixed_array_map());
+      __ j(not_equal, &miss);
+      break;
+
+    default:
+      UNREACHABLE();
+  }
+
+  // Get the function and set up the context.
+  __ Move(rdi, Handle<JSFunction>(function));
+  __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+  // Jump to the cached code (tail call).
+  ASSERT(function->is_compiled());
+  Handle<Code> code(function->code());
+  ParameterCount expected(function->shared()->formal_parameter_count());
+  __ InvokeCode(code, expected, arguments(),
+                RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  String* function_name = NULL;
+  if (function->shared()->name()->IsString()) {
+    function_name = String::cast(function->shared()->name());
+  }
+  return GetCode(CONSTANT_FUNCTION, function_name);
+}
+
+
+Object* CallStubCompiler::CompileCallField(Object* object,
+                                           JSObject* holder,
+                                           int index,
+                                           String* name) {
+  // ----------- S t a t e -------------
+  // -----------------------------------
+  // rsp[0] return address
+  // rsp[8] argument argc
+  // rsp[16] argument argc - 1
+  // ...
+  // rsp[argc * 8] argument 1
+  // rsp[(argc + 1) * 8] argument 0 = receiver
+  // rsp[(argc + 2) * 8] function name
+  Label miss;
+
+  // Get the receiver from the stack.
+  const int argc = arguments().immediate();
+  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(rdx, &miss);
+
+  // Do the right check and compute the holder register.
+  Register reg =
+      CheckPrototypes(JSObject::cast(object), rdx, holder,
+                      rbx, rcx, name, &miss);
+
+  GenerateFastPropertyLoad(masm(), rdi, reg, holder, index);
+
+  // Check that the function really is a function.
+  __ JumpIfSmi(rdi, &miss);
+  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rbx);
+  __ j(not_equal, &miss);
+
+  // Patch the receiver on the stack with the global proxy if
+  // necessary.
+  if (object->IsGlobalObject()) {
+    __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
+    __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+  }
+
+  // Invoke the function.
+  __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(FIELD, name);
+}
+
+
+Object* CallStubCompiler::CompileCallInterceptor(Object* object,
+                                                 JSObject* holder,
+                                                 String* name) {
+  // ----------- S t a t e -------------
+  // -----------------------------------
+  Label miss;
+
+  // Get the number of arguments.
+  const int argc = arguments().immediate();
+
+  LookupResult lookup;
+  LookupPostInterceptor(holder, name, &lookup);
+
+  // Get the receiver from the stack.
+  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+  CallInterceptorCompiler compiler(arguments());
+  CompileLoadInterceptor(&compiler,
+                         this,
+                         masm(),
+                         JSObject::cast(object),
+                         holder,
+                         name,
+                         &lookup,
+                         rdx,
+                         rbx,
+                         rcx,
+                         &miss);
+
+  // Restore receiver.
+  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+  // Check that the function really is a function.
+  __ JumpIfSmi(rax, &miss);
+  __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
+  __ j(not_equal, &miss);
+
+  // Patch the receiver on the stack with the global proxy if
+  // necessary.
+  if (object->IsGlobalObject()) {
+    __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
+    __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+  }
+
+  // Invoke the function.
+  __ movq(rdi, rax);
+  __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  Handle<Code> ic = ComputeCallMiss(argc);
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR, name);
+}
+
+
+Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
+                                            GlobalObject* holder,
+                                            JSGlobalPropertyCell* cell,
+                                            JSFunction* function,
+                                            String* name) {
+  // ----------- S t a t e -------------
+  // -----------------------------------
+  // rsp[0] return address
+  // rsp[8] argument argc
+  // rsp[16] argument argc - 1
+  // ...
+  // rsp[argc * 8] argument 1
+  // rsp[(argc + 1) * 8] argument 0 = receiver
+  // rsp[(argc + 2) * 8] function name
+  Label miss;
+
+  // Get the number of arguments.
+  const int argc = arguments().immediate();
+
+  // Get the receiver from the stack.
+  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+  // If the object is the holder then we know that it's a global
+  // object which can only happen for contextual calls. In this case,
+  // the receiver cannot be a smi.
+  if (object != holder) {
+    __ JumpIfSmi(rdx, &miss);
+  }
+
+  // Check that the maps haven't changed.
+  CheckPrototypes(object, rdx, holder, rbx, rcx, name, &miss);
+
+  // Get the value from the cell.
+  __ Move(rdi, Handle<JSGlobalPropertyCell>(cell));
+  __ movq(rdi, FieldOperand(rdi, JSGlobalPropertyCell::kValueOffset));
+
+  // Check that the cell contains the same function.
+  __ Cmp(rdi, Handle<JSFunction>(function));
+  __ j(not_equal, &miss);
+
+  // Patch the receiver on the stack with the global proxy.
+  if (object->IsGlobalObject()) {
+    __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
+    __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+  }
+
+  // Set up the context (function already in rdi).
+  __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+  // Jump to the cached code (tail call).
+  __ IncrementCounter(&Counters::call_global_inline, 1);
+  ASSERT(function->is_compiled());
+  Handle<Code> code(function->code());
+  ParameterCount expected(function->shared()->formal_parameter_count());
+  __ InvokeCode(code, expected, arguments(),
+                RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  __ IncrementCounter(&Counters::call_global_inline_miss, 1);
+  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, name);
+}
+
+
+Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
+                                              JSObject* holder,
+                                              AccessorInfo* callback,
+                                              String* name) {
+  // ----------- S t a t e -------------
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+  GenerateLoadCallback(object, holder, rax, rcx, rbx, rdx,
+                       callback, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
+}
+
+
+Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
+                                              JSObject* holder,
+                                              Object* value,
+                                              String* name) {
+  // ----------- S t a t e -------------
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+  GenerateLoadConstant(object, holder, rax, rbx, rdx, value, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CONSTANT_FUNCTION, name);
+}
+
+
+Object* LoadStubCompiler::CompileLoadField(JSObject* object,
+                                           JSObject* holder,
+                                           int index,
+                                           String* name) {
+  // ----------- S t a t e -------------
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+  GenerateLoadField(object, holder, rax, rbx, rdx, index, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(FIELD, name);
+}
+
+
+Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+                                                 JSObject* holder,
+                                                 String* name) {
+  // ----------- S t a t e -------------
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  LookupResult lookup;
+  LookupPostInterceptor(holder, name, &lookup);
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+  // TODO(368): Compile in the whole chain: all the interceptors in
+  // prototypes and ultimate answer.
+  GenerateLoadInterceptor(receiver,
+                          holder,
+                          &lookup,
+                          rax,
+                          rcx,
+                          rdx,
+                          rbx,
+                          name,
+                          &miss);
+
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR, name);
+}
+
+
+Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
+                                            GlobalObject* holder,
+                                            JSGlobalPropertyCell* cell,
+                                            String* name,
+                                            bool is_dont_delete) {
+  // ----------- S t a t e -------------
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Get the receiver from the stack.
+  __ movq(rax, Operand(rsp, kPointerSize));
+
+  // If the object is the holder then we know that it's a global
+  // object which can only happen for contextual loads. In this case,
+  // the receiver cannot be a smi.
+  if (object != holder) {
+    __ JumpIfSmi(rax, &miss);
+  }
+
+  // Check that the maps haven't changed.
+  CheckPrototypes(object, rax, holder, rbx, rdx, name, &miss);
+
+  // Get the value from the cell.
+  __ Move(rax, Handle<JSGlobalPropertyCell>(cell));
+  __ movq(rax, FieldOperand(rax, JSGlobalPropertyCell::kValueOffset));
+
+  // Check for deleted property if property can actually be deleted.
+  if (!is_dont_delete) {
+    __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+    __ j(equal, &miss);
+  } else if (FLAG_debug_code) {
+    __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+    __ Check(not_equal, "DontDelete cells can't contain the hole");
+  }
+
+  __ IncrementCounter(&Counters::named_load_global_inline, 1);
+  __ ret(0);
+
+  __ bind(&miss);
+  __ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(NORMAL, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
+                                                   JSObject* receiver,
+                                                   JSObject* holder,
+                                                   AccessorInfo* callback) {
+  // ----------- S t a t e -------------
+  //  -- rsp[0]  : return address
+  //  -- rsp[8]  : name
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+  __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+  __ IncrementCounter(&Counters::keyed_load_callback, 1);
+
+  // Check that the name has not changed.
+  __ Cmp(rax, Handle<String>(name));
+  __ j(not_equal, &miss);
+
+  GenerateLoadCallback(receiver, holder, rcx, rax, rbx, rdx,
+                       callback, name, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_callback, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+  // ----------- S t a t e -------------
+  //  -- rsp[0]  : return address
+  //  -- rsp[8]  : name
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+  __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+  __ IncrementCounter(&Counters::keyed_load_array_length, 1);
+
+  // Check that the name has not changed.
+  __ Cmp(rax, Handle<String>(name));
+  __ j(not_equal, &miss);
+
+  GenerateLoadArrayLength(masm(), rcx, rdx, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_array_length, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
+                                                   JSObject* receiver,
+                                                   JSObject* holder,
+                                                   Object* value) {
+  // ----------- S t a t e -------------
+  //  -- rsp[0]  : return address
+  //  -- rsp[8]  : name
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+  __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+  __ IncrementCounter(&Counters::keyed_load_constant_function, 1);
+
+  // Check that the name has not changed.
+  __ Cmp(rax, Handle<String>(name));
+  __ j(not_equal, &miss);
+
+  GenerateLoadConstant(receiver, holder, rcx, rbx, rdx,
+                       value, name, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_constant_function, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CONSTANT_FUNCTION, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+  // ----------- S t a t e -------------
+  //  -- rsp[0]  : return address
+  //  -- rsp[8]  : name
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+  __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+  __ IncrementCounter(&Counters::keyed_load_function_prototype, 1);
+
+  // Check that the name has not changed.
+  __ Cmp(rax, Handle<String>(name));
+  __ j(not_equal, &miss);
+
+  GenerateLoadFunctionPrototype(masm(), rcx, rdx, rbx, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_function_prototype, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+                                                      JSObject* holder,
+                                                      String* name) {
+  // ----------- S t a t e -------------
+  //  -- rsp[0]  : return address
+  //  -- rsp[8]  : name
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+  __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+  __ IncrementCounter(&Counters::keyed_load_interceptor, 1);
+
+  // Check that the name has not changed.
+  __ Cmp(rax, Handle<String>(name));
+  __ j(not_equal, &miss);
+
+  LookupResult lookup;
+  LookupPostInterceptor(holder, name, &lookup);
+  GenerateLoadInterceptor(receiver,
+                          holder,
+                          &lookup,
+                          rcx,
+                          rax,
+                          rdx,
+                          rbx,
+                          name,
+                          &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_interceptor, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+  // ----------- S t a t e -------------
+  //  -- rsp[0]  : return address
+  //  -- rsp[8]  : name
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+  __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+  __ IncrementCounter(&Counters::keyed_load_string_length, 1);
+
+  // Check that the name has not changed.
+  __ Cmp(rax, Handle<String>(name));
+  __ j(not_equal, &miss);
+
+  GenerateLoadStringLength(masm(), rcx, rdx, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_string_length, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
+}
+
+
+Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
+                                                AccessorInfo* callback,
+                                                String* name) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Get the object from the stack.
+  __ movq(rbx, Operand(rsp, 1 * kPointerSize));
+
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(rbx, &miss);
+
+  // Check that the map of the object hasn't changed.
+  __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+         Handle<Map>(object->map()));
+  __ j(not_equal, &miss);
+
+  // Perform global security token check if needed.
+  if (object->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(rbx, rdx, &miss);
+  }
+
+  // Stub never generated for non-global objects that require access
+  // checks.
+  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+  __ pop(rbx);  // remove the return address
+  __ push(Operand(rsp, 0));  // receiver
+  __ Push(Handle<AccessorInfo>(callback));  // callback info
+  __ push(rcx);  // name
+  __ push(rax);  // value
+  __ push(rbx);  // restore return address
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_callback_property =
+      ExternalReference(IC_Utility(IC::kStoreCallbackProperty));
+  __ TailCallRuntime(store_callback_property, 4, 1);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  __ Move(rcx, Handle<String>(name));  // restore name
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
+}
+
+
+Object* StoreStubCompiler::CompileStoreField(JSObject* object,
+                                             int index,
+                                             Map* transition,
+                                             String* name) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Get the object from the stack.
+  __ movq(rbx, Operand(rsp, 1 * kPointerSize));
+
+  // Generate store field code.  Trashes the name register.
+  GenerateStoreField(masm(),
+                     Builtins::StoreIC_ExtendStorage,
+                     object,
+                     index,
+                     transition,
+                     rbx, rcx, rdx,
+                     &miss);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  __ Move(rcx, Handle<String>(name));  // restore name
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+}
+
+
+Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
+                                                   String* name) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Get the object from the stack.
+  __ movq(rbx, Operand(rsp, 1 * kPointerSize));
+
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(rbx, &miss);
+
+  // Check that the map of the object hasn't changed.
+  __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+         Handle<Map>(receiver->map()));
+  __ j(not_equal, &miss);
+
+  // Perform global security token check if needed.
+  if (receiver->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(rbx, rdx, &miss);
+  }
+
+  // Stub never generated for non-global objects that require access
+  // checks.
+  ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
+
+  __ pop(rbx);  // remove the return address
+  __ push(Operand(rsp, 0));  // receiver
+  __ push(rcx);  // name
+  __ push(rax);  // value
+  __ push(rbx);  // restore return address
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_ic_property =
+      ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
+  __ TailCallRuntime(store_ic_property, 3, 1);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  __ Move(rcx, Handle<String>(name));  // restore name
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR, name);
+}
+
+
+Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
+                                              JSGlobalPropertyCell* cell,
+                                              String* name) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Check that the map of the global has not changed.
+  __ movq(rbx, Operand(rsp, kPointerSize));
+  __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+         Handle<Map>(object->map()));
+  __ j(not_equal, &miss);
+
+  // Store the value in the cell.
+  __ Move(rcx, Handle<JSGlobalPropertyCell>(cell));
+  __ movq(FieldOperand(rcx, JSGlobalPropertyCell::kValueOffset), rax);
+
+  // Return the value (register rax).
+  __ IncrementCounter(&Counters::named_store_global_inline, 1);
+  __ ret(0);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  __ IncrementCounter(&Counters::named_store_global_inline_miss, 1);
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
+                                                JSObject* receiver,
+                                                JSObject* holder,
+                                                int index) {
+  // ----------- S t a t e -------------
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : name
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+  __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+  __ IncrementCounter(&Counters::keyed_load_field, 1);
+
+  // Check that the name has not changed.
+  __ Cmp(rax, Handle<String>(name));
+  __ j(not_equal, &miss);
+
+  GenerateLoadField(receiver, holder, rcx, rbx, rdx, index, name, &miss);
+
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_field, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(FIELD, name);
+}
+
+
+Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+                                                  int index,
+                                                  Map* transition,
+                                                  String* name) {
+  // ----------- S t a t e -------------
+  //  -- rax     : value
+  //  -- rsp[0]  : return address
+  //  -- rsp[8]  : key
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ IncrementCounter(&Counters::keyed_store_field, 1);
+
+  // Get the name from the stack.
+  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+  // Check that the name has not changed.
+  __ Cmp(rcx, Handle<String>(name));
+  __ j(not_equal, &miss);
+
+  // Get the object from the stack.
+  __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+
+  // Generate store field code.  Trashes the name register.
+  GenerateStoreField(masm(),
+                     Builtins::KeyedStoreIC_ExtendStorage,
+                     object,
+                     index,
+                     transition,
+                     rbx, rcx, rdx,
+                     &miss);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_store_field, 1);
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+}
+
+
+// TODO(1241006): Avoid having lazy compile stubs specialized by the
+// number of arguments. It is not needed anymore.
+Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Push a copy of the function onto the stack.
+  __ push(rdi);
+
+  __ push(rdi);  // function is also the parameter to the runtime call
+  __ CallRuntime(Runtime::kLazyCompile, 1);
+  __ pop(rdi);
+
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
+
+  // Do a tail-call of the compiled function.
+  __ lea(rcx, FieldOperand(rax, Code::kHeaderSize));
+  __ jmp(rcx);
+
+  return GetCodeWithFlags(flags, "LazyCompileStub");
+}
+
+
+void StubCompiler::GenerateLoadInterceptor(JSObject* object,
+                                           JSObject* holder,
+                                           LookupResult* lookup,
+                                           Register receiver,
+                                           Register name_reg,
+                                           Register scratch1,
+                                           Register scratch2,
+                                           String* name,
+                                           Label* miss) {
+  LoadInterceptorCompiler compiler(name_reg);
+  CompileLoadInterceptor(&compiler,
+                         this,
+                         masm(),
+                         object,
+                         holder,
+                         name,
+                         lookup,
+                         receiver,
+                         scratch1,
+                         scratch2,
+                         miss);
+}
+
+
+void StubCompiler::GenerateLoadCallback(JSObject* object,
+                                        JSObject* holder,
+                                        Register receiver,
+                                        Register name_reg,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        AccessorInfo* callback,
+                                        String* name,
+                                        Label* miss) {
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, miss);
+
+  // Check that the maps haven't changed.
+  Register reg =
+      CheckPrototypes(object, receiver, holder,
+                      scratch1, scratch2, name, miss);
+
+  // Push the arguments on the JS stack of the caller.
+  __ pop(scratch2);  // remove return address
+  __ push(receiver);  // receiver
+  __ push(reg);  // holder
+  __ Move(reg, Handle<AccessorInfo>(callback));  // callback data
+  __ push(reg);
+  __ push(FieldOperand(reg, AccessorInfo::kDataOffset));
+  __ push(name_reg);  // name
+  __ push(scratch2);  // restore return address
+
+  // Do tail-call to the runtime system.
+  ExternalReference load_callback_property =
+      ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+  __ TailCallRuntime(load_callback_property, 5, 1);
+}
+
+
+Register StubCompiler::CheckPrototypes(JSObject* object,
+                                       Register object_reg,
+                                       JSObject* holder,
+                                       Register holder_reg,
+                                       Register scratch,
+                                       String* name,
+                                       Label* miss) {
+  // Check that the maps haven't changed.
+  Register result =
+      __ CheckMaps(object, object_reg, holder, holder_reg, scratch, miss);
+
+  // If we've skipped any global objects, it's not enough to verify
+  // that their maps haven't changed.  We also need to check that the
+  // property cell for the property is still empty (contains the hole).
+  while (object != holder) {
+    if (object->IsGlobalObject()) {
+      GlobalObject* global = GlobalObject::cast(object);
+      Object* probe = global->EnsurePropertyCell(name);
+      if (probe->IsFailure()) {
+        set_failure(Failure::cast(probe));
+        return result;
+      }
+      JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+      ASSERT(cell->value()->IsTheHole());
+      __ Move(scratch, Handle<Object>(cell));
+      __ Cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
+             Factory::the_hole_value());
+      __ j(not_equal, miss);
+    }
+    object = JSObject::cast(object->GetPrototype());
+  }
+
+  // Return the register containing the holder.
+  return result;
+}
+
+
+void StubCompiler::GenerateLoadField(JSObject* object,
+                                     JSObject* holder,
+                                     Register receiver,
+                                     Register scratch1,
+                                     Register scratch2,
+                                     int index,
+                                     String* name,
+                                     Label* miss) {
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, miss);
+
+  // Check the prototype chain.
+  Register reg =
+      CheckPrototypes(object, receiver, holder,
+                      scratch1, scratch2, name, miss);
+
+  // Get the value from the properties.
+  GenerateFastPropertyLoad(masm(), rax, reg, holder, index);
+  __ ret(0);
+}
+
+
+void StubCompiler::GenerateLoadConstant(JSObject* object,
+                                        JSObject* holder,
+                                        Register receiver,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Object* value,
+                                        String* name,
+                                        Label* miss) {
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, miss);
+
+  // Check that the maps haven't changed.
+  Register reg =
+      CheckPrototypes(object, receiver, holder,
+                      scratch1, scratch2, name, miss);
+
+  // Return the constant value.
+  __ Move(rax, Handle<Object>(value));
+  __ ret(0);
+}
+
+
+// Specialized stub for constructing objects from functions which have only
+// simple assignments of the form this.x = ...; in their body.
+Object* ConstructStubCompiler::CompileConstructStub(
+    SharedFunctionInfo* shared) {
+  // ----------- S t a t e -------------
+  //  -- rax : argc
+  //  -- rdi : constructor
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : last argument
+  // -----------------------------------
+  Label generic_stub_call;
+
+  // Use r8 for holding undefined which is used in several places below.
+  __ Move(r8, Factory::undefined_value());
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Check to see whether there are any break points in the function code. If
+  // there are jump to the generic constructor stub which calls the actual
+  // code for the function thereby hitting the break points.
+  __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ movq(rbx, FieldOperand(rbx, SharedFunctionInfo::kDebugInfoOffset));
+  __ cmpq(rbx, r8);
+  __ j(not_equal, &generic_stub_call);
+#endif
+
+  // Load the initial map and verify that it is in fact a map.
+  __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+  // The smi check below covers both a NULL value and a smi.
+  ASSERT(kSmiTag == 0);
+  __ JumpIfSmi(rbx, &generic_stub_call);
+  __ CmpObjectType(rbx, MAP_TYPE, rcx);
+  __ j(not_equal, &generic_stub_call);
+
+#ifdef DEBUG
+  // Cannot construct functions this way.
+  // rdi: constructor
+  // rbx: initial map
+  __ CmpInstanceType(rbx, JS_FUNCTION_TYPE);
+  __ Assert(not_equal, "Function constructed by construct stub.");
+#endif
+
+  // Now allocate the JSObject in new space.
+  // rdi: constructor
+  // rbx: initial map
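+  // The instance size in the map is stored in words; convert it to bytes.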
+  __ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
+  __ shl(rcx, Immediate(kPointerSizeLog2));
+  __ AllocateInNewSpace(rcx,
+                        rdx,
+                        rcx,
+                        no_reg,
+                        &generic_stub_call,
+                        NO_ALLOCATION_FLAGS);
+
+  // Allocated the JSObject, now initialize the fields and add the heap tag.
+  // rbx: initial map
+  // rdx: JSObject (untagged)
+  __ movq(Operand(rdx, JSObject::kMapOffset), rbx);
+  __ Move(rbx, Factory::empty_fixed_array());
+  __ movq(Operand(rdx, JSObject::kPropertiesOffset), rbx);
+  __ movq(Operand(rdx, JSObject::kElementsOffset), rbx);
+
+  // rax: argc
+  // rdx: JSObject (untagged)
+  // Load the address of the first in-object property into r9.
+  __ lea(r9, Operand(rdx, JSObject::kHeaderSize));
+  // Calculate the location of the first argument. The stack contains only the
+  // return address on top of the argc arguments.
+  __ lea(rcx, Operand(rsp, rax, times_pointer_size, 0));
+
+  // rax: argc
+  // rcx: first argument
+  // rdx: JSObject (untagged)
+  // r8: undefined
+  // r9: first in-object property of the JSObject
+  // Fill the initialized properties with a constant value or a passed argument
+  // depending on the this.x = ...; assignment in the function.
+  for (int i = 0; i < shared->this_property_assignments_count(); i++) {
+    if (shared->IsThisPropertyAssignmentArgument(i)) {
+      Label not_passed;
+      // Set the property to undefined.
+      __ movq(Operand(r9, i * kPointerSize), r8);
+      // Check if the argument assigned to the property is actually passed.
+      int arg_number = shared->GetThisPropertyAssignmentArgument(i);
+      __ cmpq(rax, Immediate(arg_number));
+      __ j(below_equal, &not_passed);
+      // Argument passed - find it on the stack.
+      __ movq(rbx, Operand(rcx, arg_number * -kPointerSize));
+      __ movq(Operand(r9, i * kPointerSize), rbx);
+      __ bind(&not_passed);
+    } else {
+      // Set the property to the constant value.
+      Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
+      __ Move(Operand(r9, i * kPointerSize), constant);
+    }
+  }
+
+  // Fill the unused in-object property fields with undefined.
+  for (int i = shared->this_property_assignments_count();
+       i < shared->CalculateInObjectProperties();
+       i++) {
+    __ movq(Operand(r9, i * kPointerSize), r8);
+  }
+
+  // rax: argc
+  // rdx: JSObject (untagged)
+  // Move argc to rbx and the JSObject to return to rax and tag it.
+  __ movq(rbx, rax);
+  __ movq(rax, rdx);
+  __ or_(rax, Immediate(kHeapObjectTag));
+
+  // rax: JSObject
+  // rbx: argc
+  // Remove caller arguments and receiver from the stack and return.
+  __ pop(rcx);
+  __ lea(rsp, Operand(rsp, rbx, times_pointer_size, 1 * kPointerSize));
+  __ push(rcx);
+  __ IncrementCounter(&Counters::constructed_objects, 1);
+  __ IncrementCounter(&Counters::constructed_objects_stub, 1);
+  __ ret(0);
+
+  // Jump to the generic stub in case the specialized code cannot handle the
+  // construction.
+  __ bind(&generic_stub_call);
+  Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+  Handle<Code> generic_construct_stub(code);
+  __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode();
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
new file mode 100644
index 0000000..655f4c6
--- /dev/null
+++ b/src/x64/virtual-frame-x64.cc
@@ -0,0 +1,1061 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "register-allocator-inl.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm())
+
+// -------------------------------------------------------------------------
+// VirtualFrame implementation.
+
+// On entry to a function, the virtual frame already contains the receiver,
+// the parameters, and a return address.  All frame elements are in memory.
+VirtualFrame::VirtualFrame()
+    : elements_(parameter_count() + local_count() + kPreallocatedElements),
+      stack_pointer_(parameter_count() + 1) {  // 0-based index of TOS.
+  for (int i = 0; i <= stack_pointer_; i++) {
+    elements_.Add(FrameElement::MemoryElement());
+  }
+  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+    register_locations_[i] = kIllegalIndex;
+  }
+}
+
+
+void VirtualFrame::Enter() {
+  // Registers live on entry to a JS frame:
+  //   rsp: stack pointer, points to return address from this function.
+  //   rbp: base pointer, points to previous JS, ArgumentsAdaptor, or
+  //        Trampoline frame.
+  //   rsi: context of this function call.
+  //   rdi: pointer to this function object.
+  Comment cmnt(masm(), "[ Enter JS frame");
+
+#ifdef DEBUG
+  // Verify that rdi contains a JS function.  The following code
+  // relies on rax being available for use.
+  Condition not_smi = masm()->CheckNotSmi(rdi);
+  __ Check(not_smi,
+           "VirtualFrame::Enter - rdi is not a function (smi check).");
+  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
+  __ Check(equal,
+           "VirtualFrame::Enter - rdi is not a function (map check).");
+#endif
+
+  EmitPush(rbp);
+
+  __ movq(rbp, rsp);
+
+  // Store the context in the frame.  The context is kept in rsi and a
+  // copy is stored in the frame.  The external reference to rsi
+  // remains.
+  EmitPush(rsi);
+
+  // Store the function in the frame.  The frame owns the register
+  // reference now (i.e., it can keep it in rdi or spill it later).
+  Push(rdi);
+  SyncElementAt(element_count() - 1);
+  cgen()->allocator()->Unuse(rdi);
+}
+
+
+void VirtualFrame::Exit() {
+  Comment cmnt(masm(), "[ Exit JS frame");
+  // Record the location of the JS exit code for patching when setting
+  // break point.
+  __ RecordJSReturn();
+
+  // Avoid using the leave instruction here, because it is too
+  // short. We need the return sequence to be at least the size of a
+  // call instruction to support patching the exit code in the
+  // debugger. See GenerateReturnSequence for the full return sequence.
+  // TODO(X64): A patched call will be very long now.  Make sure we
+  // have enough room.
+  __ movq(rsp, rbp);
+  stack_pointer_ = frame_pointer();
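+  // Drop the virtual frame elements above the frame pointer and free any
+  // registers they occupy; the machine stack was already cut back above.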
+  for (int i = element_count() - 1; i > stack_pointer_; i--) {
+    FrameElement last = elements_.RemoveLast();
+    if (last.is_register()) {
+      Unuse(last.reg());
+    }
+  }
+
+  EmitPop(rbp);
+}
+
+
+void VirtualFrame::AllocateStackSlots() {
+  int count = local_count();
+  if (count > 0) {
+    Comment cmnt(masm(), "[ Allocate space for locals");
+    // The locals are initialized to a constant (the undefined value), but
+    // we sync them with the actual frame to allocate space for spilling
+    // them later.  First sync everything above the stack pointer so we can
+    // use pushes to allocate and initialize the locals.
+    SyncRange(stack_pointer_ + 1, element_count() - 1);
+    Handle<Object> undefined = Factory::undefined_value();
+    FrameElement initial_value =
+        FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
+    __ movq(kScratchRegister, undefined, RelocInfo::EMBEDDED_OBJECT);
+    for (int i = 0; i < count; i++) {
+      elements_.Add(initial_value);
+      stack_pointer_++;
+      __ push(kScratchRegister);
+    }
+  }
+}
+
+
+void VirtualFrame::SaveContextRegister() {
+  ASSERT(elements_[context_index()].is_memory());
+  __ movq(Operand(rbp, fp_relative(context_index())), rsi);
+}
+
+
+void VirtualFrame::RestoreContextRegister() {
+  ASSERT(elements_[context_index()].is_memory());
+  __ movq(rsi, Operand(rbp, fp_relative(context_index())));
+}
+
+
+void VirtualFrame::PushReceiverSlotAddress() {
+  Result temp = cgen()->allocator()->Allocate();
+  ASSERT(temp.is_valid());
+  __ lea(temp.reg(), ParameterAt(-1));
+  Push(&temp);
+}
+
+
+void VirtualFrame::EmitPop(Register reg) {
+  ASSERT(stack_pointer_ == element_count() - 1);
+  stack_pointer_--;
+  elements_.RemoveLast();
+  __ pop(reg);
+}
+
+
+void VirtualFrame::EmitPop(const Operand& operand) {
+  ASSERT(stack_pointer_ == element_count() - 1);
+  stack_pointer_--;
+  elements_.RemoveLast();
+  __ pop(operand);
+}
+
+
+void VirtualFrame::EmitPush(Register reg) {
+  ASSERT(stack_pointer_ == element_count() - 1);
+  elements_.Add(FrameElement::MemoryElement());
+  stack_pointer_++;
+  __ push(reg);
+}
+
+
+void VirtualFrame::EmitPush(const Operand& operand) {
+  ASSERT(stack_pointer_ == element_count() - 1);
+  elements_.Add(FrameElement::MemoryElement());
+  stack_pointer_++;
+  __ push(operand);
+}
+
+
+void VirtualFrame::EmitPush(Immediate immediate) {
+  ASSERT(stack_pointer_ == element_count() - 1);
+  elements_.Add(FrameElement::MemoryElement());
+  stack_pointer_++;
+  __ push(immediate);
+}
+
+
+void VirtualFrame::EmitPush(Handle<Object> value) {
+  ASSERT(stack_pointer_ == element_count() - 1);
+  elements_.Add(FrameElement::MemoryElement());
+  stack_pointer_++;
+  __ Push(value);
+}
+
+
+void VirtualFrame::EmitPush(Heap::RootListIndex index) {
+  ASSERT(stack_pointer_ == element_count() - 1);
+  elements_.Add(FrameElement::MemoryElement());
+  stack_pointer_++;
+  __ PushRoot(index);
+}
+
+
+void VirtualFrame::Drop(int count) {
+  ASSERT(count >= 0);
+  ASSERT(height() >= count);
+  int num_virtual_elements = (element_count() - 1) - stack_pointer_;
+
+  // Emit code to lower the stack pointer if necessary.
+  if (num_virtual_elements < count) {
+    int num_dropped = count - num_virtual_elements;
+    stack_pointer_ -= num_dropped;
+    __ addq(rsp, Immediate(num_dropped * kPointerSize));
+  }
+
+  // Discard elements from the virtual frame and free any registers.
+  for (int i = 0; i < count; i++) {
+    FrameElement dropped = elements_.RemoveLast();
+    if (dropped.is_register()) {
+      Unuse(dropped.reg());
+    }
+  }
+}
+
+
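+// Invalidate the frame element at the given index.  If it is the backing
+// store of any copies, move the value to a new backing element and return
+// its index; otherwise return kIllegalIndex.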
+int VirtualFrame::InvalidateFrameSlotAt(int index) {
+  FrameElement original = elements_[index];
+
+  // Is this element the backing store of any copies?
+  int new_backing_index = kIllegalIndex;
+  if (original.is_copied()) {
+    // Verify it is copied, and find first copy.
+    for (int i = index + 1; i < element_count(); i++) {
+      if (elements_[i].is_copy() && elements_[i].index() == index) {
+        new_backing_index = i;
+        break;
+      }
+    }
+  }
+
+  if (new_backing_index == kIllegalIndex) {
+    // No copies found, return kIllegalIndex.
+    if (original.is_register()) {
+      Unuse(original.reg());
+    }
+    elements_[index] = FrameElement::InvalidElement();
+    return kIllegalIndex;
+  }
+
+  // This is the backing store of copies.
+  Register backing_reg;
+  if (original.is_memory()) {
+    Result fresh = cgen()->allocator()->Allocate();
+    ASSERT(fresh.is_valid());
+    Use(fresh.reg(), new_backing_index);
+    backing_reg = fresh.reg();
+    __ movq(backing_reg, Operand(rbp, fp_relative(index)));
+  } else {
+    // The original was in a register.
+    backing_reg = original.reg();
+    set_register_location(backing_reg, new_backing_index);
+  }
+  // Invalidate the element at index.
+  elements_[index] = FrameElement::InvalidElement();
+  // Set the new backing element.
+  if (elements_[new_backing_index].is_synced()) {
+    elements_[new_backing_index] =
+        FrameElement::RegisterElement(backing_reg, FrameElement::SYNCED);
+  } else {
+    elements_[new_backing_index] =
+        FrameElement::RegisterElement(backing_reg, FrameElement::NOT_SYNCED);
+  }
+  // Update the other copies.
+  for (int i = new_backing_index + 1; i < element_count(); i++) {
+    if (elements_[i].is_copy() && elements_[i].index() == index) {
+      elements_[i].set_index(new_backing_index);
+      elements_[new_backing_index].set_copied();
+    }
+  }
+  return new_backing_index;
+}
+
+
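+// Push the value in the frame slot at the given index on top of the frame
+// and invalidate the original slot.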
+void VirtualFrame::TakeFrameSlotAt(int index) {
+  ASSERT(index >= 0);
+  ASSERT(index <= element_count());
+  FrameElement original = elements_[index];
+  int new_backing_store_index = InvalidateFrameSlotAt(index);
+  if (new_backing_store_index != kIllegalIndex) {
+    elements_.Add(CopyElementAt(new_backing_store_index));
+    return;
+  }
+
+  switch (original.type()) {
+    case FrameElement::MEMORY: {
+      // Emit code to load the original element's data into a register.
+      // Push that register as a FrameElement on top of the frame.
+      Result fresh = cgen()->allocator()->Allocate();
+      ASSERT(fresh.is_valid());
+      FrameElement new_element =
+          FrameElement::RegisterElement(fresh.reg(),
+                                        FrameElement::NOT_SYNCED);
+      Use(fresh.reg(), element_count());
+      elements_.Add(new_element);
+      __ movq(fresh.reg(), Operand(rbp, fp_relative(index)));
+      break;
+    }
+    case FrameElement::REGISTER:
+      Use(original.reg(), element_count());
+      // Fall through.
+    case FrameElement::CONSTANT:
+    case FrameElement::COPY:
+      original.clear_sync();
+      elements_.Add(original);
+      break;
+    case FrameElement::INVALID:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void VirtualFrame::StoreToFrameSlotAt(int index) {
+  // Store the value on top of the frame to the virtual frame slot at
+  // a given index.  The value on top of the frame is left in place.
+  // This is a duplicating operation, so it can create copies.
+  ASSERT(index >= 0);
+  ASSERT(index < element_count());
+
+  int top_index = element_count() - 1;
+  FrameElement top = elements_[top_index];
+  FrameElement original = elements_[index];
+  if (top.is_copy() && top.index() == index) return;
+  ASSERT(top.is_valid());
+
+  InvalidateFrameSlotAt(index);
+
+  // InvalidateFrameSlotAt can potentially change any frame element, due
+  // to spilling registers to allocate temporaries in order to preserve
+  // the copy-on-write semantics of aliased elements.  Reload top from
+  // the frame.
+  top = elements_[top_index];
+
+  if (top.is_copy()) {
+    // There are two cases based on the relative positions of the
+    // stored-to slot and the backing slot of the top element.
+    int backing_index = top.index();
+    ASSERT(backing_index != index);
+    if (backing_index < index) {
+      // 1. The top element is a copy of a slot below the stored-to
+      // slot.  The stored-to slot becomes an unsynced copy of that
+      // same backing slot.
+      elements_[index] = CopyElementAt(backing_index);
+    } else {
+      // 2. The top element is a copy of a slot above the stored-to
+      // slot.  The stored-to slot becomes the new (unsynced) backing
+      // slot and both the top element and the element at the former
+      // backing slot become copies of it.  The sync state of the top
+      // and former backing elements is preserved.
+      FrameElement backing_element = elements_[backing_index];
+      ASSERT(backing_element.is_memory() || backing_element.is_register());
+      if (backing_element.is_memory()) {
+        // Because sets of copies are canonicalized to be backed by
+        // their lowest frame element, and because memory frame
+        // elements are backed by the corresponding stack address, we
+        // have to move the actual value down in the stack.
+        //
+        // TODO(209): consider allocating the stored-to slot to the
+        // temp register.  Alternatively, allow copies to appear in
+        // any order in the frame and lazily move the value down to
+        // the slot.
+        __ movq(kScratchRegister, Operand(rbp, fp_relative(backing_index)));
+        __ movq(Operand(rbp, fp_relative(index)), kScratchRegister);
+      } else {
+        set_register_location(backing_element.reg(), index);
+        if (backing_element.is_synced()) {
+          // If the element is a register, we will not actually move
+          // anything on the stack but only update the virtual frame
+          // element.
+          backing_element.clear_sync();
+        }
+      }
+      elements_[index] = backing_element;
+
+      // The old backing element becomes a copy of the new backing
+      // element.
+      FrameElement new_element = CopyElementAt(index);
+      elements_[backing_index] = new_element;
+      if (backing_element.is_synced()) {
+        elements_[backing_index].set_sync();
+      }
+
+      // All the copies of the old backing element (including the top
+      // element) become copies of the new backing element.
+      for (int i = backing_index + 1; i < element_count(); i++) {
+        if (elements_[i].is_copy() && elements_[i].index() == backing_index) {
+          elements_[i].set_index(index);
+        }
+      }
+    }
+    return;
+  }
+
+  // Move the top element to the stored-to slot and replace it (the
+  // top element) with a copy.
+  elements_[index] = top;
+  if (top.is_memory()) {
+    // TODO(209): consider allocating the stored-to slot to the temp
+    // register.  Alternatively, allow copies to appear in any order
+    // in the frame and lazily move the value down to the slot.
+    FrameElement new_top = CopyElementAt(index);
+    new_top.set_sync();
+    elements_[top_index] = new_top;
+
+    // The sync state of the former top element is correct (synced).
+    // Emit code to move the value down in the frame.
+    __ movq(kScratchRegister, Operand(rsp, 0));
+    __ movq(Operand(rbp, fp_relative(index)), kScratchRegister);
+  } else if (top.is_register()) {
+    set_register_location(top.reg(), index);
+    // The stored-to slot has the (unsynced) register reference and
+    // the top element becomes a copy.  The sync state of the top is
+    // preserved.
+    FrameElement new_top = CopyElementAt(index);
+    if (top.is_synced()) {
+      new_top.set_sync();
+      elements_[index].clear_sync();
+    }
+    elements_[top_index] = new_top;
+  } else {
+    // The stored-to slot holds the same value as the top but
+    // unsynced.  (We do not have copies of constants yet.)
+    ASSERT(top.is_constant());
+    elements_[index].clear_sync();
+  }
+}
+
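+// Worked example for StoreToFrameSlotAt above (hypothetical frame state,
+// assuming the stored-to slot is not itself the backing store of other
+// copies):
+//
+//   before: elements_[1] = MEMORY, top = REGISTER(rbx)
+//   StoreToFrameSlotAt(1);
+//   after:  elements_[1] = REGISTER(rbx), unsynced; top = COPY(1)
+//
+// The stored-to slot takes over the register and the former top element
+// becomes a copy of it (its sync state is preserved), so no memory traffic
+// is emitted in this case.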
+
+void VirtualFrame::MakeMergable() {
+  for (int i = 0; i < element_count(); i++) {
+    FrameElement element = elements_[i];
+
+    if (element.is_constant() || element.is_copy()) {
+      if (element.is_synced()) {
+        // Just spill.
+        elements_[i] = FrameElement::MemoryElement();
+      } else {
+        // Allocate to a register.
+        FrameElement backing_element;  // Invalid if not a copy.
+        if (element.is_copy()) {
+          backing_element = elements_[element.index()];
+        }
+        Result fresh = cgen()->allocator()->Allocate();
+        ASSERT(fresh.is_valid());  // A register was spilled if all were in use.
+        elements_[i] =
+            FrameElement::RegisterElement(fresh.reg(),
+                                          FrameElement::NOT_SYNCED);
+        Use(fresh.reg(), i);
+
+        // Emit a move.
+        if (element.is_constant()) {
+          __ Move(fresh.reg(), element.handle());
+        } else {
+          ASSERT(element.is_copy());
+          // Copies are only backed by register or memory locations.
+          if (backing_element.is_register()) {
+            // The backing store may have been spilled by allocating,
+            // but that's OK.  If it was, the value is right where we
+            // want it.
+            if (!fresh.reg().is(backing_element.reg())) {
+              __ movq(fresh.reg(), backing_element.reg());
+            }
+          } else {
+            ASSERT(backing_element.is_memory());
+            __ movq(fresh.reg(), Operand(rbp, fp_relative(element.index())));
+          }
+        }
+      }
+      // No need to set the copied flag --- there are no copies.
+    } else {
+      // Clear the copy flag of non-constant, non-copy elements.
+      // They cannot be the backing store of copies, since copies are
+      // not allowed in a mergable frame.
+      // The copy flag is not relied on before the end of this loop,
+      // including when registers are spilled.
+      elements_[i].clear_copied();
+    }
+  }
+}
+
+
+void VirtualFrame::MergeTo(VirtualFrame* expected) {
+  Comment cmnt(masm(), "[ Merge frame");
+  // We should always be merging the code generator's current frame to an
+  // expected frame.
+  ASSERT(cgen()->frame() == this);
+
+  // Adjust the stack pointer upward (toward the top of the virtual
+  // frame) if necessary.
+  if (stack_pointer_ < expected->stack_pointer_) {
+    int difference = expected->stack_pointer_ - stack_pointer_;
+    stack_pointer_ = expected->stack_pointer_;
+    __ subq(rsp, Immediate(difference * kPointerSize));
+  }
+
+  MergeMoveRegistersToMemory(expected);
+  MergeMoveRegistersToRegisters(expected);
+  MergeMoveMemoryToRegisters(expected);
+
+  // Adjust the stack pointer downward if necessary.
+  if (stack_pointer_ > expected->stack_pointer_) {
+    int difference = stack_pointer_ - expected->stack_pointer_;
+    stack_pointer_ = expected->stack_pointer_;
+    __ addq(rsp, Immediate(difference * kPointerSize));
+  }
+
+  // At this point, the frames should be identical.
+  ASSERT(Equals(expected));
+}
+
+
+void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
+  ASSERT(stack_pointer_ >= expected->stack_pointer_);
+
+  // Move registers, constants, and copies to memory.  Perform moves
+  // from the top downward in the frame in order to leave the backing
+  // stores of copies in registers.
+  for (int i = element_count() - 1; i >= 0; i--) {
+    FrameElement target = expected->elements_[i];
+    if (target.is_register()) continue;  // Handle registers later.
+    if (target.is_memory()) {
+      FrameElement source = elements_[i];
+      switch (source.type()) {
+        case FrameElement::INVALID:
+          // Not a legal merge move.
+          UNREACHABLE();
+          break;
+
+        case FrameElement::MEMORY:
+          // Already in place.
+          break;
+
+        case FrameElement::REGISTER:
+          Unuse(source.reg());
+          if (!source.is_synced()) {
+            __ movq(Operand(rbp, fp_relative(i)), source.reg());
+          }
+          break;
+
+        case FrameElement::CONSTANT:
+          if (!source.is_synced()) {
+            __ Move(Operand(rbp, fp_relative(i)), source.handle());
+          }
+          break;
+
+        case FrameElement::COPY:
+          if (!source.is_synced()) {
+            int backing_index = source.index();
+            FrameElement backing_element = elements_[backing_index];
+            if (backing_element.is_memory()) {
+              __ movq(kScratchRegister,
+                       Operand(rbp, fp_relative(backing_index)));
+              __ movq(Operand(rbp, fp_relative(i)), kScratchRegister);
+            } else {
+              ASSERT(backing_element.is_register());
+              __ movq(Operand(rbp, fp_relative(i)), backing_element.reg());
+            }
+          }
+          break;
+      }
+    }
+    elements_[i] = target;
+  }
+}
+
+
+void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
+  // We have already done X-to-memory moves.
+  ASSERT(stack_pointer_ >= expected->stack_pointer_);
+
+  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+    // Move the right value into register i if it is currently in a register.
+    int index = expected->register_location(i);
+    int use_index = register_location(i);
+    // Skip if register i is unused in the target or else if source is
+    // not a register (this is not a register-to-register move).
+    if (index == kIllegalIndex || !elements_[index].is_register()) continue;
+
+    Register target = RegisterAllocator::ToRegister(i);
+    Register source = elements_[index].reg();
+    if (index != use_index) {
+      if (use_index == kIllegalIndex) {  // Target is currently unused.
+        // Copy the contents of the source register to the target register.
+        // Set frame element register to target.
+        Use(target, index);
+        Unuse(source);
+        __ movq(target, source);
+      } else {
+        // Exchange contents of registers source and target.
+        // Nothing except the register backing use_index has changed.
+        elements_[use_index].set_reg(source);
+        set_register_location(target, index);
+        set_register_location(source, use_index);
+        __ xchg(source, target);
+      }
+    }
+
+    if (!elements_[index].is_synced() &&
+        expected->elements_[index].is_synced()) {
+      __ movq(Operand(rbp, fp_relative(index)), target);
+    }
+    elements_[index] = expected->elements_[index];
+  }
+}
+
+
+void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
+  // Move memory, constants, and copies to registers.  This is the
+  // final step and since it is not done from the bottom up, but in
+  // register code order, we have special code to ensure that the backing
+  // elements of copies are in their correct locations when we
+  // encounter the copies.
+  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+    int index = expected->register_location(i);
+    if (index != kIllegalIndex) {
+      FrameElement source = elements_[index];
+      FrameElement target = expected->elements_[index];
+      Register target_reg = RegisterAllocator::ToRegister(i);
+      ASSERT(target.reg().is(target_reg));
+      switch (source.type()) {
+        case FrameElement::INVALID:
+          UNREACHABLE();
+          break;
+        case FrameElement::REGISTER:
+          ASSERT(source.Equals(target));
+          // Go to next iteration.  Skips Use(target_reg) and syncing
+          // below.  It is safe to skip syncing because a target
+          // register frame element would only be synced if all source
+          // elements were.
+          continue;
+          break;
+        case FrameElement::MEMORY:
+          ASSERT(index <= stack_pointer_);
+          __ movq(target_reg, Operand(rbp, fp_relative(index)));
+          break;
+
+        case FrameElement::CONSTANT:
+          __ Move(target_reg, source.handle());
+          break;
+
+        case FrameElement::COPY: {
+          int backing_index = source.index();
+          FrameElement backing = elements_[backing_index];
+          ASSERT(backing.is_memory() || backing.is_register());
+          if (backing.is_memory()) {
+            ASSERT(backing_index <= stack_pointer_);
+            // Optimization: if the backing store should also end up in a
+            // register, move it to its target register first and copy
+            // the value from there.
+            if (expected->elements_[backing_index].is_register()) {
+              FrameElement new_backing = expected->elements_[backing_index];
+              Register new_backing_reg = new_backing.reg();
+              ASSERT(!is_used(new_backing_reg));
+              elements_[backing_index] = new_backing;
+              Use(new_backing_reg, backing_index);
+              __ movq(new_backing_reg,
+                      Operand(rbp, fp_relative(backing_index)));
+              __ movq(target_reg, new_backing_reg);
+            } else {
+              __ movq(target_reg, Operand(rbp, fp_relative(backing_index)));
+            }
+          } else {
+            __ movq(target_reg, backing.reg());
+          }
+        }
+      }
+      // Ensure the proper sync state.
+      if (target.is_synced() && !source.is_synced()) {
+        __ movq(Operand(rbp, fp_relative(index)), target_reg);
+      }
+      Use(target_reg, index);
+      elements_[index] = target;
+    }
+  }
+}
+
+
+Result VirtualFrame::Pop() {
+  FrameElement element = elements_.RemoveLast();
+  int index = element_count();
+  ASSERT(element.is_valid());
+
+  bool pop_needed = (stack_pointer_ == index);
+  if (pop_needed) {
+    stack_pointer_--;
+    if (element.is_memory()) {
+      Result temp = cgen()->allocator()->Allocate();
+      ASSERT(temp.is_valid());
+      __ pop(temp.reg());
+      return temp;
+    }
+
+    __ addq(rsp, Immediate(kPointerSize));
+  }
+  ASSERT(!element.is_memory());
+
+  // The top element is a register, constant, or a copy.  Unuse
+  // registers and follow copies to their backing store.
+  if (element.is_register()) {
+    Unuse(element.reg());
+  } else if (element.is_copy()) {
+    ASSERT(element.index() < index);
+    index = element.index();
+    element = elements_[index];
+  }
+  ASSERT(!element.is_copy());
+
+  // The element is memory, a register, or a constant.
+  if (element.is_memory()) {
+    // Memory elements could only be the backing store of a copy.
+    // Allocate the original to a register.
+    ASSERT(index <= stack_pointer_);
+    Result temp = cgen()->allocator()->Allocate();
+    ASSERT(temp.is_valid());
+    Use(temp.reg(), index);
+    FrameElement new_element =
+        FrameElement::RegisterElement(temp.reg(), FrameElement::SYNCED);
+    // Preserve the copy flag on the element.
+    if (element.is_copied()) new_element.set_copied();
+    elements_[index] = new_element;
+    __ movq(temp.reg(), Operand(rbp, fp_relative(index)));
+    return Result(temp.reg());
+  } else if (element.is_register()) {
+    return Result(element.reg());
+  } else {
+    ASSERT(element.is_constant());
+    return Result(element.handle());
+  }
+}
+
+
+Result VirtualFrame::RawCallStub(CodeStub* stub) {
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ CallStub(stub);
+  Result result = cgen()->allocator()->Allocate(rax);
+  ASSERT(result.is_valid());
+  return result;
+}
+
+
+Result VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
+  PrepareForCall(0, 0);
+  arg->ToRegister(rax);
+  arg->Unuse();
+  return RawCallStub(stub);
+}
+
+
+Result VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
+  PrepareForCall(0, 0);
+
+  if (arg0->is_register() && arg0->reg().is(rax)) {
+    if (arg1->is_register() && arg1->reg().is(rdx)) {
+      // Wrong registers.
+      __ xchg(rax, rdx);
+    } else {
+      // Register rdx is free for arg0, which frees rax for arg1.
+      arg0->ToRegister(rdx);
+      arg1->ToRegister(rax);
+    }
+  } else {
+    // Register rax is free for arg1, which guarantees rdx is free for
+    // arg0.
+    arg1->ToRegister(rax);
+    arg0->ToRegister(rdx);
+  }
+
+  arg0->Unuse();
+  arg1->Unuse();
+  return RawCallStub(stub);
+}
+
+
+void VirtualFrame::SyncElementBelowStackPointer(int index) {
+  // Emit code to write elements below the stack pointer to their
+  // (already allocated) stack address.
+  ASSERT(index <= stack_pointer_);
+  FrameElement element = elements_[index];
+  ASSERT(!element.is_synced());
+  switch (element.type()) {
+    case FrameElement::INVALID:
+      break;
+
+    case FrameElement::MEMORY:
+      // This function should not be called with synced elements.
+      // (memory elements are always synced).
+      UNREACHABLE();
+      break;
+
+    case FrameElement::REGISTER:
+      __ movq(Operand(rbp, fp_relative(index)), element.reg());
+      break;
+
+    case FrameElement::CONSTANT:
+      __ Move(Operand(rbp, fp_relative(index)), element.handle());
+      break;
+
+    case FrameElement::COPY: {
+      int backing_index = element.index();
+      FrameElement backing_element = elements_[backing_index];
+      if (backing_element.is_memory()) {
+        __ movq(kScratchRegister, Operand(rbp, fp_relative(backing_index)));
+        __ movq(Operand(rbp, fp_relative(index)), kScratchRegister);
+      } else {
+        ASSERT(backing_element.is_register());
+        __ movq(Operand(rbp, fp_relative(index)), backing_element.reg());
+      }
+      break;
+    }
+  }
+  elements_[index].set_sync();
+}
+
+
+void VirtualFrame::SyncElementByPushing(int index) {
+  // Sync an element of the frame that is just above the stack pointer
+  // by pushing it.
+  ASSERT(index == stack_pointer_ + 1);
+  stack_pointer_++;
+  FrameElement element = elements_[index];
+
+  switch (element.type()) {
+    case FrameElement::INVALID:
+      __ push(Immediate(Smi::FromInt(0)));
+      break;
+
+    case FrameElement::MEMORY:
+      // No memory elements exist above the stack pointer.
+      UNREACHABLE();
+      break;
+
+    case FrameElement::REGISTER:
+      __ push(element.reg());
+      break;
+
+    case FrameElement::CONSTANT:
+      __ Move(kScratchRegister, element.handle());
+      __ push(kScratchRegister);
+      break;
+
+    case FrameElement::COPY: {
+      int backing_index = element.index();
+      FrameElement backing = elements_[backing_index];
+      ASSERT(backing.is_memory() || backing.is_register());
+      if (backing.is_memory()) {
+        __ push(Operand(rbp, fp_relative(backing_index)));
+      } else {
+        __ push(backing.reg());
+      }
+      break;
+    }
+  }
+  elements_[index].set_sync();
+}
+
+
+// Clear the dirty bits for the range of elements in
+// [min(stack_pointer_ + 1, begin), end].
+void VirtualFrame::SyncRange(int begin, int end) {
+  ASSERT(begin >= 0);
+  ASSERT(end < element_count());
+  // Sync elements below the range if they have not been materialized
+  // on the stack.
+  int start = Min(begin, stack_pointer_ + 1);
+
+  // If positive we have to adjust the stack pointer.
+  int delta = end - stack_pointer_;
+  if (delta > 0) {
+    stack_pointer_ = end;
+    __ subq(rsp, Immediate(delta * kPointerSize));
+  }
+
+  for (int i = start; i <= end; i++) {
+    if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i);
+  }
+}
+
+
+Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
+                                   InvokeFlag flag,
+                                   int arg_count) {
+  PrepareForCall(arg_count, arg_count);
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ InvokeBuiltin(id, flag);
+  Result result = cgen()->allocator()->Allocate(rax);
+  ASSERT(result.is_valid());
+  return result;
+}
+
+
+//------------------------------------------------------------------------------
+// Virtual frame stub and IC calling functions.
+
+Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
+                                       RelocInfo::Mode rmode) {
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ Call(code, rmode);
+  Result result = cgen()->allocator()->Allocate(rax);
+  ASSERT(result.is_valid());
+  return result;
+}
+
+
+Result VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
+  PrepareForCall(arg_count, arg_count);
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ CallRuntime(f, arg_count);
+  Result result = cgen()->allocator()->Allocate(rax);
+  ASSERT(result.is_valid());
+  return result;
+}
+
+
+Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
+  PrepareForCall(arg_count, arg_count);
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ CallRuntime(id, arg_count);
+  Result result = cgen()->allocator()->Allocate(rax);
+  ASSERT(result.is_valid());
+  return result;
+}
+
+
+Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
+  // Name and receiver are on the top of the frame.  The IC expects
+  // name in rcx and receiver on the stack.  It does not drop the
+  // receiver.
+  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+  Result name = Pop();
+  PrepareForCall(1, 0);  // One stack arg, not callee-dropped.
+  name.ToRegister(rcx);
+  name.Unuse();
+  return RawCallCodeObject(ic, mode);
+}
+
+
+Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
+  // Key and receiver are on top of the frame.  The IC expects them on
+  // the stack.  It does not drop them.
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+  PrepareForCall(2, 0);  // Two stack args, neither callee-dropped.
+  return RawCallCodeObject(ic, mode);
+}
+
+
+Result VirtualFrame::CallKeyedStoreIC() {
+  // Value, key, and receiver are on the top of the frame.  The IC
+  // expects value in rax and key and receiver on the stack.  It does
+  // not drop the key and receiver.
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  // TODO(1222589): Make the IC grab the values from the stack.
+  Result value = Pop();
+  PrepareForCall(2, 0);  // Two stack args, neither callee-dropped.
+  value.ToRegister(rax);
+  value.Unuse();
+  return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
+}
+
+
+Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
+                                int arg_count,
+                                int loop_nesting) {
+  // Arguments, receiver, and function name are on top of the frame.
+  // The IC expects them on the stack.  It does not drop the function
+  // name slot (but it does drop the rest).
+  InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
+  Handle<Code> ic = cgen()->ComputeCallInitialize(arg_count, in_loop);
+  // Spill args, receiver, and function.  The call will drop args and
+  // receiver.
+  PrepareForCall(arg_count + 2, arg_count + 1);
+  return RawCallCodeObject(ic, mode);
+}
+
+
+Result VirtualFrame::CallConstructor(int arg_count) {
+  // Arguments, receiver, and function are on top of the frame.  The
+  // IC expects arg count in rax, function in rdi, and the arguments
+  // and receiver on the stack.
+  Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
+  // Duplicate the function before preparing the frame.
+  PushElementAt(arg_count + 1);
+  Result function = Pop();
+  PrepareForCall(arg_count + 1, arg_count + 1);  // Spill args and receiver.
+  function.ToRegister(rdi);
+
+  // Constructors are called with the number of arguments in register
+  // rax for now.  Another option would be to have separate construct
+  // call trampolines for each argument count encountered.
+  Result num_args = cgen()->allocator()->Allocate(rax);
+  ASSERT(num_args.is_valid());
+  __ movq(num_args.reg(), Immediate(arg_count));
+
+  function.Unuse();
+  num_args.Unuse();
+  return RawCallCodeObject(ic, RelocInfo::CONSTRUCT_CALL);
+}
+
+
+Result VirtualFrame::CallStoreIC() {
+  // Name, value, and receiver are on top of the frame.  The IC
+  // expects name in rcx, value in rax, and receiver on the stack.  It
+  // does not drop the receiver.
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+  Result name = Pop();
+  Result value = Pop();
+  PrepareForCall(1, 0);  // One stack arg, not callee-dropped.
+
+  if (value.is_register() && value.reg().is(rcx)) {
+    if (name.is_register() && name.reg().is(rax)) {
+      // Wrong registers.
+      __ xchg(rax, rcx);
+    } else {
+      // Register rax is free for value, which frees rcx for name.
+      value.ToRegister(rax);
+      name.ToRegister(rcx);
+    }
+  } else {
+    // Register rcx is free for name, which guarantees rax is free for
+    // value.
+    name.ToRegister(rcx);
+    value.ToRegister(rax);
+  }
+
+  name.Unuse();
+  value.Unuse();
+  return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
+}
+
+
+void VirtualFrame::PushTryHandler(HandlerType type) {
+  ASSERT(cgen()->HasValidEntryRegisters());
+  // Grow the expression stack by handler size less one (the return
+  // address is already pushed by a call instruction).
+  Adjust(kHandlerSize - 1);
+  __ PushTryHandler(IN_JAVASCRIPT, type);
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h
new file mode 100644
index 0000000..006148d
--- /dev/null
+++ b/src/x64/virtual-frame-x64.h
@@ -0,0 +1,556 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_X64_VIRTUAL_FRAME_X64_H_
+#define V8_X64_VIRTUAL_FRAME_X64_H_
+
+#include "register-allocator.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// Virtual frames
+//
+// The virtual frame is an abstraction of the physical stack frame.  It
+// encapsulates the parameters, frame-allocated locals, and the expression
+// stack.  It supports push/pop operations on the expression stack, as well
+// as random access to the expression stack elements, locals, and
+// parameters.
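+//
+// A hedged usage sketch (the pushed values and the enclosing context are
+// illustrative only):
+//
+//   VirtualFrame* frame = cgen()->frame();   // inside the code generator
+//   frame->Push(rax);                        // register-backed element
+//   frame->Push(Smi::FromInt(0));            // constant element
+//   Result top = frame->Pop();               // register or constant Result
+//   frame->Drop(1);                          // discard an element
+//
+// Many of these operations emit no instructions; code is generated lazily,
+// typically when frames are merged (MergeTo) or a call is prepared
+// (PrepareForCall).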
+
+class VirtualFrame : public ZoneObject {
+ public:
+  // A utility class to introduce a scope where the virtual frame is
+  // expected to remain spilled.  The constructor spills the code
+  // generator's current frame, but no attempt is made to require it
+  // to stay spilled.  It is intended as documentation while the code
+  // generator is being transformed.
+  class SpilledScope BASE_EMBEDDED {
+   public:
+    SpilledScope() : previous_state_(cgen()->in_spilled_code()) {
+      ASSERT(cgen()->has_valid_frame());
+      cgen()->frame()->SpillAll();
+      cgen()->set_in_spilled_code(true);
+    }
+
+    ~SpilledScope() {
+      cgen()->set_in_spilled_code(previous_state_);
+    }
+
+   private:
+    bool previous_state_;
+
+    CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+  };
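+
+  // A hedged usage sketch (the enclosing method is hypothetical):
+  //
+  //   void CodeGenerator::VisitSomething(Something* node) {
+  //     VirtualFrame::SpilledScope spilled_scope;
+  //     // The current frame is spilled here, and the code generator is
+  //     // marked as in spilled code until the scope is destroyed.
+  //   }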
+
+  // An illegal index into the virtual frame.
+  static const int kIllegalIndex = -1;
+
+  // Construct an initial virtual frame on entry to a JS function.
+  VirtualFrame();
+
+  // Construct a virtual frame as a clone of an existing one.
+  explicit VirtualFrame(VirtualFrame* original);
+
+  CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+  MacroAssembler* masm() { return cgen()->masm(); }
+
+  // Create a duplicate of an existing valid frame element.
+  FrameElement CopyElementAt(int index);
+
+  // The number of elements on the virtual frame.
+  int element_count() { return elements_.length(); }
+
+  // The height of the virtual expression stack.
+  int height() {
+    return element_count() - expression_base_index();
+  }
+
+  int register_location(int num) {
+    ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+    return register_locations_[num];
+  }
+
+  int register_location(Register reg) {
+    return register_locations_[RegisterAllocator::ToNumber(reg)];
+  }
+
+  void set_register_location(Register reg, int index) {
+    register_locations_[RegisterAllocator::ToNumber(reg)] = index;
+  }
+
+  bool is_used(int num) {
+    ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+    return register_locations_[num] != kIllegalIndex;
+  }
+
+  bool is_used(Register reg) {
+    return register_locations_[RegisterAllocator::ToNumber(reg)]
+        != kIllegalIndex;
+  }
+
+  // Add extra in-memory elements to the top of the frame to match an actual
+  // frame (e.g., the frame after an exception handler is pushed).  No code is
+  // emitted.
+  void Adjust(int count);
+
+  // Forget count elements from the top of the frame, all in memory
+  // (including synced), and adjust the stack pointer downward to
+  // match an external frame effect (examples include a call removing
+  // its arguments, and exiting a try/catch removing an exception
+  // handler).  No code will be emitted.
+  void Forget(int count) {
+    ASSERT(count >= 0);
+    ASSERT(stack_pointer_ == element_count() - 1);
+    stack_pointer_ -= count;
+    ForgetElements(count);
+  }
+
+  // Forget count elements from the top of the frame without adjusting
+  // the stack pointer downward.  This is used, for example, before
+  // merging frames at break, continue, and return targets.
+  void ForgetElements(int count);
+
+  // Spill all values from the frame to memory.
+  void SpillAll();
+
+  // Spill all occurrences of a specific register from the frame.
+  void Spill(Register reg) {
+    if (is_used(reg)) SpillElementAt(register_location(reg));
+  }
+
+  // Spill all occurrences of an arbitrary register if possible.  Return the
+  // register spilled or no_reg if it was not possible to free any register
+  // (i.e., they all have frame-external references).
+  Register SpillAnyRegister();
+
+  // Sync the range of elements in [begin, end] with memory.
+  void SyncRange(int begin, int end);
+
+  // Make this frame mergable, so that an arbitrary frame of the same
+  // height can be merged to it.  Copies and constants are removed from
+  // the frame.
+  void MakeMergable();
+
+  // Prepare this virtual frame for merging to an expected frame by
+  // performing some state changes that do not require generating
+  // code.  It is guaranteed that no code will be generated.
+  void PrepareMergeTo(VirtualFrame* expected);
+
+  // Make this virtual frame have a state identical to an expected virtual
+  // frame.  As a side effect, code may be emitted to make this frame match
+  // the expected one.
+  void MergeTo(VirtualFrame* expected);
+
+  // Detach a frame from its code generator, perhaps temporarily.  This
+  // tells the register allocator that it is free to use frame-internal
+  // registers.  Used when the code generator's frame is switched from this
+  // one to NULL by an unconditional jump.
+  void DetachFromCodeGenerator() {
+    RegisterAllocator* cgen_allocator = cgen()->allocator();
+    for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+      if (is_used(i)) cgen_allocator->Unuse(i);
+    }
+  }
+
+  // (Re)attach a frame to its code generator.  This informs the register
+  // allocator that the frame-internal register references are active again.
+  // Used when a code generator's frame is switched from NULL to this one by
+  // binding a label.
+  void AttachToCodeGenerator() {
+    RegisterAllocator* cgen_allocator = cgen()->allocator();
+    for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+      if (is_used(i)) cgen_allocator->Use(i);
+    }
+  }
+
+  // Emit code for the physical JS entry and exit frame sequences.  After
+  // calling Enter, the virtual frame is ready for use; and after calling
+  // Exit it should not be used.  Note that Enter does not allocate space in
+  // the physical frame for storing frame-allocated locals.
+  void Enter();
+  void Exit();
+
+  // Prepare for returning from the frame by spilling locals.  This
+  // avoids generating unnecessary merge code when jumping to the
+  // shared return site.  Emits code for spills.
+  void PrepareForReturn();
+
+  // Allocate and initialize the frame-allocated locals.
+  void AllocateStackSlots();
+
+  // An element of the expression stack as an assembly operand.
+  Operand ElementAt(int index) const {
+    return Operand(rsp, index * kPointerSize);
+  }
+
+  // Random-access store to a frame-top relative frame element.  The result
+  // becomes owned by the frame and is invalidated.
+  void SetElementAt(int index, Result* value);
+
+  // Set a frame element to a constant.  The index is frame-top relative.
+  void SetElementAt(int index, Handle<Object> value) {
+    Result temp(value);
+    SetElementAt(index, &temp);
+  }
+
+  void PushElementAt(int index) {
+    PushFrameSlotAt(element_count() - index - 1);
+  }
+
+  void StoreToElementAt(int index) {
+    StoreToFrameSlotAt(element_count() - index - 1);
+  }
+
+  // A frame-allocated local as an assembly operand.
+  Operand LocalAt(int index) {
+    ASSERT(0 <= index);
+    ASSERT(index < local_count());
+    return Operand(rbp, kLocal0Offset - index * kPointerSize);
+  }
+
+  // Push a copy of the value of a local frame slot on top of the frame.
+  void PushLocalAt(int index) {
+    PushFrameSlotAt(local0_index() + index);
+  }
+
+  // Push the value of a local frame slot on top of the frame and invalidate
+  // the local slot.  The slot should be written to before trying to read
+  // from it again.
+  void TakeLocalAt(int index) {
+    TakeFrameSlotAt(local0_index() + index);
+  }
+
+  // Store the top value on the virtual frame into a local frame slot.  The
+  // value is left in place on top of the frame.
+  void StoreToLocalAt(int index) {
+    StoreToFrameSlotAt(local0_index() + index);
+  }
+
+  // Push the address of the receiver slot on the frame.
+  void PushReceiverSlotAddress();
+
+  // Push the function on top of the frame.
+  void PushFunction() { PushFrameSlotAt(function_index()); }
+
+  // Save the value of the rsi register to the context frame slot.
+  void SaveContextRegister();
+
+  // Restore the rsi register from the value of the context frame
+  // slot.
+  void RestoreContextRegister();
+
+  // A parameter as an assembly operand.
+  Operand ParameterAt(int index) {
+    ASSERT(-1 <= index);  // -1 is the receiver.
+    ASSERT(index < parameter_count());
+    return Operand(rbp, (1 + parameter_count() - index) * kPointerSize);
+  }
+
+  // Push a copy of the value of a parameter frame slot on top of the frame.
+  void PushParameterAt(int index) {
+    PushFrameSlotAt(param0_index() + index);
+  }
+
+  // Push the value of a parameter frame slot on top of the frame and
+  // invalidate the parameter slot.  The slot should be written to before
+  // trying to read from it again.
+  void TakeParameterAt(int index) {
+    TakeFrameSlotAt(param0_index() + index);
+  }
+
+  // Store the top value on the virtual frame into a parameter frame slot.
+  // The value is left in place on top of the frame.
+  void StoreToParameterAt(int index) {
+    StoreToFrameSlotAt(param0_index() + index);
+  }
+
+  // The receiver frame slot.
+  Operand Receiver() { return ParameterAt(-1); }
+
+  // Push a try-catch or try-finally handler on top of the virtual frame.
+  void PushTryHandler(HandlerType type);
+
+  // Call stub given the number of arguments it expects on (and
+  // removes from) the stack.
+  Result CallStub(CodeStub* stub, int arg_count) {
+    PrepareForCall(arg_count, arg_count);
+    return RawCallStub(stub);
+  }
+
+  // Call stub that takes a single argument passed in rax.  The
+  // argument is given as a result which does not have to be rax or
+  // even a register.  The argument is consumed by the call.
+  Result CallStub(CodeStub* stub, Result* arg);
+
+  // Call stub that takes a pair of arguments passed in rdx (arg0) and
+  // rax (arg1).  The arguments are given as results which do not have
+  // to be in the proper registers or even in registers.  The
+  // arguments are consumed by the call.
+  Result CallStub(CodeStub* stub, Result* arg0, Result* arg1);
+
+  // Call runtime given the number of arguments expected on (and
+  // removed from) the stack.
+  Result CallRuntime(Runtime::Function* f, int arg_count);
+  Result CallRuntime(Runtime::FunctionId id, int arg_count);
+
+  // Invoke builtin given the number of arguments it expects on (and
+  // removes from) the stack.
+  Result InvokeBuiltin(Builtins::JavaScript id,
+                       InvokeFlag flag,
+                       int arg_count);
+
+  // Call load IC.  Name and receiver are found on top of the frame.
+  // Receiver is not dropped.
+  Result CallLoadIC(RelocInfo::Mode mode);
+
+  // Call keyed load IC.  Key and receiver are found on top of the
+  // frame.  They are not dropped.
+  Result CallKeyedLoadIC(RelocInfo::Mode mode);
+
+  // Call store IC.  Name, value, and receiver are found on top of the
+  // frame.  Receiver is not dropped.
+  Result CallStoreIC();
+
+  // Call keyed store IC.  Value, key, and receiver are found on top
+  // of the frame.  Key and receiver are not dropped.
+  Result CallKeyedStoreIC();
+
+  // Call call IC.  Arguments, receiver, and function name are found
+  // on top of the frame.  Function name slot is not dropped.  The
+  // argument count does not include the receiver.
+  Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
+
+  // Allocate and call JS function as constructor.  Arguments,
+  // receiver (global object), and function are found on top of the
+  // frame.  Function is not dropped.  The argument count does not
+  // include the receiver.
+  Result CallConstructor(int arg_count);
+
+  // Drop a number of elements from the top of the expression stack.  May
+  // emit code to affect the physical frame.  Does not clobber any
+  // registers except possibly the stack pointer.
+  void Drop(int count);
+
+  // Drop one element.
+  void Drop() { Drop(1); }
+
+  // Duplicate the top element of the frame.
+  void Dup() { PushFrameSlotAt(element_count() - 1); }
+
+  // Pop an element from the top of the expression stack.  Returns a
+  // Result, which may be a constant or a register.
+  Result Pop();
+
+  // Pop and save an element from the top of the expression stack and
+  // emit a corresponding pop instruction.
+  void EmitPop(Register reg);
+  void EmitPop(const Operand& operand);
+
+  // Push an element on top of the expression stack and emit a
+  // corresponding push instruction.
+  void EmitPush(Register reg);
+  void EmitPush(const Operand& operand);
+  void EmitPush(Heap::RootListIndex index);
+  void EmitPush(Immediate immediate);
+  // Uses kScratchRegister, emits appropriate relocation info.
+  void EmitPush(Handle<Object> value);
+
+  // Push an element on the virtual frame.
+  void Push(Register reg);
+  void Push(Handle<Object> value);
+  void Push(Smi* value) { Push(Handle<Object>(value)); }
+
+  // Pushing a result invalidates it (its contents become owned by the
+  // frame).
+  void Push(Result* result) {
+    if (result->is_register()) {
+      Push(result->reg());
+    } else {
+      ASSERT(result->is_constant());
+      Push(result->handle());
+    }
+    result->Unuse();
+  }
+
+  // Nip removes zero or more elements from immediately below the top
+  // of the frame, leaving the previous top-of-frame value on top of
+  // the frame.  Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
+  void Nip(int num_dropped);
+
+ private:
+  static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
+  static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
+  static const int kContextOffset = StandardFrameConstants::kContextOffset;
+
+  static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
+  static const int kPreallocatedElements = 5 + 8;  // 8 expression stack slots.
+
+  ZoneList<FrameElement> elements_;
+
+  // The index of the element that is at the processor's stack pointer
+  // (the rsp register).
+  int stack_pointer_;
+
+  // The index of the register frame element using each register, or
+  // kIllegalIndex if a register is not on the frame.
+  int register_locations_[RegisterAllocator::kNumRegisters];
+
+  // The number of frame-allocated locals and parameters respectively.
+  int parameter_count() { return cgen()->scope()->num_parameters(); }
+  int local_count() { return cgen()->scope()->num_stack_slots(); }
+
+  // The index of the element that is at the processor's frame pointer
+  // (the rbp register).  The parameters, receiver, and return address
+  // are below the frame pointer.
+  int frame_pointer() { return parameter_count() + 2; }
+
+  // The index of the first parameter.  The receiver lies below the first
+  // parameter.
+  int param0_index() { return 1; }
+
+  // The index of the context slot in the frame.  It is immediately
+  // above the frame pointer.
+  int context_index() { return frame_pointer() + 1; }
+
+  // The index of the function slot in the frame.  It is above the frame
+  // pointer and the context slot.
+  int function_index() { return frame_pointer() + 2; }
+
+  // The index of the first local.  Between the frame pointer and the
+  // locals lie the context and the function.
+  int local0_index() { return frame_pointer() + 3; }
+
+  // The index of the base of the expression stack.
+  int expression_base_index() { return local0_index() + local_count(); }
+
+  // Convert a frame index into a frame pointer relative offset into the
+  // actual stack.
+  int fp_relative(int index) {
+    ASSERT(index < element_count());
+    ASSERT(frame_pointer() < element_count());  // FP is on the frame.
+    return (frame_pointer() - index) * kPointerSize;
+  }
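+
+  // Worked example: with two parameters, frame_pointer() is 2 + 2 = 4, so
+  // the first parameter (index param0_index() == 1) is at
+  // (4 - 1) * kPointerSize above rbp, while the first local
+  // (index local0_index() == 7) is at (4 - 7) * kPointerSize, i.e. three
+  // slots below rbp.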
+
+  // Record an occurrence of a register in the virtual frame.  This has the
+  // effect of incrementing the register's external reference count and
+  // of updating the index of the register's location in the frame.
+  void Use(Register reg, int index) {
+    ASSERT(!is_used(reg));
+    set_register_location(reg, index);
+    cgen()->allocator()->Use(reg);
+  }
+
+  // Record that a register reference has been dropped from the frame.  This
+  // decrements the register's external reference count and invalidates the
+  // index of the register's location in the frame.
+  void Unuse(Register reg) {
+    ASSERT(is_used(reg));
+    set_register_location(reg, kIllegalIndex);
+    cgen()->allocator()->Unuse(reg);
+  }
+
+  // Spill the element at a particular index: write it to memory if
+  // necessary, free any associated register, and forget its value if
+  // constant.
+  void SpillElementAt(int index);
+
+  // Sync the element at a particular index.  If it is a register or
+  // constant that disagrees with the value on the stack, write it to memory.
+  // Keep the element type as register or constant, and clear the dirty bit.
+  void SyncElementAt(int index);
+
+  // Sync a single unsynced element that lies beneath or at the stack pointer.
+  void SyncElementBelowStackPointer(int index);
+
+  // Sync a single unsynced element that lies just above the stack pointer.
+  void SyncElementByPushing(int index);
+
+  // Push a copy of a frame slot (typically a local or parameter) on top of
+  // the frame.
+  void PushFrameSlotAt(int index);
+
+  // Push the value of a frame slot (typically a local or parameter) on
+  // top of the frame and invalidate the slot.
+  void TakeFrameSlotAt(int index);
+
+  // Store the value on top of the frame to a frame slot (typically a local
+  // or parameter).
+  void StoreToFrameSlotAt(int index);
+
+  // Spill all elements in registers. Spill the top spilled_args elements
+  // on the frame.  Sync all other frame elements.
+  // Then drop dropped_args elements from the virtual frame, to match
+  // the effect of an upcoming call that will drop them from the stack.
+  void PrepareForCall(int spilled_args, int dropped_args);
+
+  // Move frame elements currently in registers or constants, that
+  // should be in memory in the expected frame, to memory.
+  void MergeMoveRegistersToMemory(VirtualFrame* expected);
+
+  // Make the register-to-register moves necessary to
+  // merge this frame with the expected frame.
+  // Register to memory moves must already have been made,
+  // and memory to register moves must follow this call.
+  // This is because some new memory-to-register moves are
+  // created in order to break cycles of register moves.
+  // Used in the implementation of MergeTo().
+  void MergeMoveRegistersToRegisters(VirtualFrame* expected);
+
+  // Make the memory-to-register and constant-to-register moves
+  // needed to make this frame equal the expected frame.
+  // Called after all register-to-memory and register-to-register
+  // moves have been made.  After this function returns, the frames
+  // should be equal.
+  void MergeMoveMemoryToRegisters(VirtualFrame* expected);
+
+  // Invalidates a frame slot (puts an invalid frame element in it).
+  // Copies on the frame are correctly handled, and if this slot was
+  // the backing store of copies, the index of the new backing store
+  // is returned.  Otherwise, returns kIllegalIndex.
+  // Register counts are correctly updated.
+  int InvalidateFrameSlotAt(int index);
+
+  // Call a code stub that has already been prepared for calling (via
+  // PrepareForCall).
+  Result RawCallStub(CodeStub* stub);
+
+  // Calls a code object which has already been prepared for calling
+  // (via PrepareForCall).
+  Result RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
+
+  bool Equals(VirtualFrame* other);
+
+  // Classes that need raw access to the elements_ array.
+  friend class DeferredCode;
+  friend class JumpTarget;
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_X64_VIRTUAL_FRAME_X64_H_
diff --git a/src/zone-inl.h b/src/zone-inl.h
new file mode 100644
index 0000000..121ba19
--- /dev/null
+++ b/src/zone-inl.h
@@ -0,0 +1,297 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ZONE_INL_H_
+#define V8_ZONE_INL_H_
+
+#include "zone.h"
+#include "v8-counters.h"
+
+namespace v8 {
+namespace internal {
+
+
+inline void* Zone::New(int size) {
+  ASSERT(AssertNoZoneAllocation::allow_allocation());
+  ASSERT(ZoneScope::nesting() > 0);
+  // Round up the requested size to fit the alignment.
+  size = RoundUp(size, kAlignment);
+
+  // Check if the requested size is available without expanding.
+  Address result = position_;
+  if ((position_ += size) > limit_) result = NewExpand(size);
+
+  // Check that the result has the proper alignment and return it.
+  ASSERT(IsAddressAligned(result, kAlignment, 0));
+  return reinterpret_cast<void*>(result);
+}
+
+
+template <typename T>
+T* Zone::NewArray(int length) {
+  return static_cast<T*>(Zone::New(length * sizeof(T)));
+}
+
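+// A hedged usage sketch (the element type and sizes are illustrative):
+//
+//   int* counts = Zone::NewArray<int>(16);  // 16 ints from the zone
+//   void* block = Zone::New(64);            // size is rounded up to kAlignment
+//
+// Zone memory is not freed individually; it is reclaimed in bulk when the
+// enclosing ZoneScope(s) exit.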
+
+bool Zone::excess_allocation() {
+  return segment_bytes_allocated_ > zone_excess_limit_;
+}
+
+
+void Zone::adjust_segment_bytes_allocated(int delta) {
+  segment_bytes_allocated_ += delta;
+  Counters::zone_segment_bytes.Set(segment_bytes_allocated_);
+}
+
+
+template <typename C>
+bool ZoneSplayTree<C>::Insert(const Key& key, Locator* locator) {
+  if (is_empty()) {
+    // If the tree is empty, insert the new node.
+    root_ = new Node(key, C::kNoValue);
+  } else {
+    // Splay on the key to move the last node on the search path
+    // for the key to the root of the tree.
+    Splay(key);
+    // Ignore repeated insertions with the same key.
+    int cmp = C::Compare(key, root_->key_);
+    if (cmp == 0) {
+      locator->bind(root_);
+      return false;
+    }
+    // Insert the new node.
+    Node* node = new Node(key, C::kNoValue);
+    if (cmp > 0) {
+      node->left_ = root_;
+      node->right_ = root_->right_;
+      root_->right_ = NULL;
+    } else {
+      node->right_ = root_;
+      node->left_ = root_->left_;
+      root_->left_ = NULL;
+    }
+    root_ = node;
+  }
+  locator->bind(root_);
+  return true;
+}
+
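+// A hedged usage sketch (the Config type is hypothetical; the Locator
+// interface is assumed from zone.h):
+//
+//   ZoneSplayTree<MyConfig> tree;
+//   ZoneSplayTree<MyConfig>::Locator loc;
+//   if (tree.Insert(key, &loc)) loc.set_value(value);  // newly inserted key
+//   if (tree.Find(key, &loc)) Process(loc.value());    // key already present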
+
+template <typename C>
+bool ZoneSplayTree<C>::Find(const Key& key, Locator* locator) {
+  if (is_empty())
+    return false;
+  Splay(key);
+  if (C::Compare(key, root_->key_) == 0) {
+    locator->bind(root_);
+    return true;
+  } else {
+    return false;
+  }
+}
+
+
+template <typename C>
+bool ZoneSplayTree<C>::FindGreatestLessThan(const Key& key,
+                                            Locator* locator) {
+  if (is_empty())
+    return false;
+  // Splay on the key to move the node with the given key or the last
+  // node on the search path to the top of the tree.
+  Splay(key);
+  // Now the result is either the root node or the greatest node in
+  // the left subtree.
+  int cmp = C::Compare(root_->key_, key);
+  if (cmp <= 0) {
+    locator->bind(root_);
+    return true;
+  } else {
+    Node* temp = root_;
+    root_ = root_->left_;
+    bool result = FindGreatest(locator);
+    root_ = temp;
+    return result;
+  }
+}
+
+
+template <typename C>
+bool ZoneSplayTree<C>::FindLeastGreaterThan(const Key& key,
+                                            Locator* locator) {
+  if (is_empty())
+    return false;
+  // Splay on the key to move the node with the given key or the last
+  // node on the search path to the top of the tree.
+  Splay(key);
+  // Now the result is either the root node or the least node in
+  // the right subtree.
+  int cmp = C::Compare(root_->key_, key);
+  if (cmp >= 0) {
+    locator->bind(root_);
+    return true;
+  } else {
+    Node* temp = root_;
+    root_ = root_->right_;
+    bool result = FindLeast(locator);
+    root_ = temp;
+    return result;
+  }
+}
+
+
+template <typename C>
+bool ZoneSplayTree<C>::FindGreatest(Locator* locator) {
+  if (is_empty())
+    return false;
+  Node* current = root_;
+  while (current->right_ != NULL)
+    current = current->right_;
+  locator->bind(current);
+  return true;
+}
+
+
+template <typename C>
+bool ZoneSplayTree<C>::FindLeast(Locator* locator) {
+  if (is_empty())
+    return false;
+  Node* current = root_;
+  while (current->left_ != NULL)
+    current = current->left_;
+  locator->bind(current);
+  return true;
+}
+
+
+template <typename C>
+bool ZoneSplayTree<C>::Remove(const Key& key) {
+  // Bail if the tree is empty
+  if (is_empty())
+    return false;
+  // Splay on the key to move the node with the given key to the top.
+  Splay(key);
+  // Bail if the key is not in the tree
+  if (C::Compare(key, root_->key_) != 0)
+    return false;
+  if (root_->left_ == NULL) {
+    // No left child, so the new tree is just the right child.
+    root_ = root_->right_;
+  } else {
+    // Left child exists.
+    Node* right = root_->right_;
+    // Make the original left child the new root.
+    root_ = root_->left_;
+    // Splay to make sure that the new root has an empty right child.
+    Splay(key);
+    // Insert the original right child as the right child of the new
+    // root.
+    root_->right_ = right;
+  }
+  return true;
+}
+
+
+template <typename C>
+void ZoneSplayTree<C>::Splay(const Key& key) {
+  if (is_empty())
+    return;
+  Node dummy_node(C::kNoKey, C::kNoValue);
+  // Create a dummy node.  The use of the dummy node is a bit
+  // counter-intuitive: The right child of the dummy node will hold
+  // the L tree of the algorithm.  The left child of the dummy node
+  // will hold the R tree of the algorithm.  Using a dummy node, left
+  // and right will always be nodes and we avoid special cases.
+  Node* dummy = &dummy_node;
+  Node* left = dummy;
+  Node* right = dummy;
+  Node* current = root_;
+  while (true) {
+    int cmp = C::Compare(key, current->key_);
+    if (cmp < 0) {
+      if (current->left_ == NULL)
+        break;
+      if (C::Compare(key, current->left_->key_) < 0) {
+        // Rotate right.
+        Node* temp = current->left_;
+        current->left_ = temp->right_;
+        temp->right_ = current;
+        current = temp;
+        if (current->left_ == NULL)
+          break;
+      }
+      // Link right.
+      right->left_ = current;
+      right = current;
+      current = current->left_;
+    } else if (cmp > 0) {
+      if (current->right_ == NULL)
+        break;
+      if (C::Compare(key, current->right_->key_) > 0) {
+        // Rotate left.
+        Node* temp = current->right_;
+        current->right_ = temp->left_;
+        temp->left_ = current;
+        current = temp;
+        if (current->right_ == NULL)
+          break;
+      }
+      // Link left.
+      left->right_ = current;
+      left = current;
+      current = current->right_;
+    } else {
+      break;
+    }
+  }
+  // Assemble.
+  left->right_ = current->left_;
+  right->left_ = current->right_;
+  current->left_ = dummy->right_;
+  current->right_ = dummy->left_;
+  root_ = current;
+}
+
+
+template <typename Config> template <class Callback>
+void ZoneSplayTree<Config>::ForEach(Callback* callback) {
+  // Pre-allocate some space for tiny trees.
+  ZoneList<Node*> nodes_to_visit(10);
+  nodes_to_visit.Add(root_);
+  int pos = 0;
+  while (pos < nodes_to_visit.length()) {
+    Node* node = nodes_to_visit[pos++];
+    if (node == NULL) continue;
+    callback->Call(node->key(), node->value());
+    nodes_to_visit.Add(node->left());
+    nodes_to_visit.Add(node->right());
+  }
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_ZONE_INL_H_
diff --git a/src/zone.cc b/src/zone.cc
new file mode 100644
index 0000000..33fe557
--- /dev/null
+++ b/src/zone.cc
@@ -0,0 +1,193 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "zone-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+Address Zone::position_ = 0;
+Address Zone::limit_ = 0;
+int Zone::zone_excess_limit_ = 256 * MB;
+int Zone::segment_bytes_allocated_ = 0;
+
+bool AssertNoZoneAllocation::allow_allocation_ = true;
+
+int ZoneScope::nesting_ = 0;
+
+// Segments represent chunks of memory: They have starting address
+// (encoded in the this pointer) and a size in bytes. Segments are
+// chained together forming a LIFO structure with the newest segment
+// available as Segment::head(). Segments are allocated using malloc()
+// and de-allocated using free().
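+//
+// For example, a segment obtained from Segment::New(8 * KB) occupies
+// 8192 bytes in total; its usable payload is capacity() bytes, i.e.
+// 8192 - sizeof(Segment), starting at start() and ending at end().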
+
+class Segment {
+ public:
+  Segment* next() const { return next_; }
+  void clear_next() { next_ = NULL; }
+
+  int size() const { return size_; }
+  int capacity() const { return size_ - sizeof(Segment); }
+
+  Address start() const { return address(sizeof(Segment)); }
+  Address end() const { return address(size_); }
+
+  static Segment* head() { return head_; }
+  static void set_head(Segment* head) { head_ = head; }
+
+  // Creates a new segment, sets its size, and pushes it to the front
+  // of the segment chain. Returns the new segment.
+  static Segment* New(int size) {
+    Segment* result = reinterpret_cast<Segment*>(Malloced::New(size));
+    Zone::adjust_segment_bytes_allocated(size);
+    if (result != NULL) {
+      result->next_ = head_;
+      result->size_ = size;
+      head_ = result;
+    }
+    return result;
+  }
+
+  // Deletes the given segment. Does not touch the segment chain.
+  static void Delete(Segment* segment, int size) {
+    Zone::adjust_segment_bytes_allocated(-size);
+    Malloced::Delete(segment);
+  }
+
+  static int bytes_allocated() { return bytes_allocated_; }
+
+ private:
+  // Computes the address of the nth byte in this segment.
+  Address address(int n) const {
+    return Address(this) + n;
+  }
+
+  static Segment* head_;
+  static int bytes_allocated_;
+  Segment* next_;
+  int size_;
+};
+
+
+Segment* Segment::head_ = NULL;
+int Segment::bytes_allocated_ = 0;
+
+
+void Zone::DeleteAll() {
+#ifdef DEBUG
+  // Constant byte value used for zapping dead memory in debug mode.
+  static const unsigned char kZapDeadByte = 0xcd;
+#endif
+
+  // Find a segment with a suitable size to keep around.
+  Segment* keep = Segment::head();
+  while (keep != NULL && keep->size() > kMaximumKeptSegmentSize) {
+    keep = keep->next();
+  }
+
+  // Traverse the chained list of segments, zapping (in debug mode)
+  // and freeing every segment except the one we wish to keep.
+  Segment* current = Segment::head();
+  while (current != NULL) {
+    Segment* next = current->next();
+    if (current == keep) {
+      // Unlink the segment we wish to keep from the list.
+      current->clear_next();
+    } else {
+      int size = current->size();
+#ifdef DEBUG
+      // Zap the entire current segment (including the header).
+      memset(current, kZapDeadByte, size);
+#endif
+      Segment::Delete(current, size);
+    }
+    current = next;
+  }
+
+  // If we have found a segment we want to keep, we must recompute the
+  // variables 'position' and 'limit' to prepare for future allocation
+  // attempts. Otherwise, we must clear the position and limit to
+  // force a new segment to be allocated on demand.
+  if (keep != NULL) {
+    Address start = keep->start();
+    position_ = RoundUp(start, kAlignment);
+    limit_ = keep->end();
+#ifdef DEBUG
+    // Zap the contents of the kept segment (but not the header).
+    memset(start, kZapDeadByte, keep->capacity());
+#endif
+  } else {
+    position_ = limit_ = 0;
+  }
+
+  // Update the head segment to be the kept segment (if any).
+  Segment::set_head(keep);
+}
+
+
+Address Zone::NewExpand(int size) {
+  // Make sure the requested size is already properly aligned and that
+  // there isn't enough room in the Zone to satisfy the request.
+  ASSERT(size == RoundDown(size, kAlignment));
+  ASSERT(position_ + size > limit_);
+
+  // Compute the new segment size. We use a 'high water mark'
+  // strategy, where we increase the segment size every time we expand,
+  // while only keeping segments up to a maximum size around when the
+  // Zone is deleted. This is to avoid excessive malloc() and free()
+  // overhead.
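+  //
+  // For example (ignoring the small per-segment overhead), repeated
+  // expansions with small requests grow segments roughly geometrically,
+  // 8 KB -> 16 KB -> 32 KB -> ..., since new_size is roughly twice
+  // old_size, until the kMaximumSegmentSize cap of 1 MB is hit.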
+  Segment* head = Segment::head();
+  int old_size = (head == NULL) ? 0 : head->size();
+  static const int kSegmentOverhead = sizeof(Segment) + kAlignment;
+  int new_size = kSegmentOverhead + size + (old_size << 1);
+  if (new_size < kMinimumSegmentSize) {
+    new_size = kMinimumSegmentSize;
+  } else if (new_size > kMaximumSegmentSize) {
+    // Limit the size of new segments to avoid growing the segment size
+    // exponentially, which puts pressure on contiguous virtual address
+    // space, while still making sure the segment is large enough to hold
+    // the requested size.
+    new_size = Max(kSegmentOverhead + size, kMaximumSegmentSize);
+  }
+  Segment* segment = Segment::New(new_size);
+  if (segment == NULL) {
+    V8::FatalProcessOutOfMemory("Zone");
+    return NULL;
+  }
+
+  // Recompute 'position' and 'limit' based on the new segment.
+  Address result = RoundUp(segment->start(), kAlignment);
+  position_ = result + size;
+  limit_ = segment->end();
+  ASSERT(position_ <= limit_);
+  return result;
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/zone.h b/src/zone.h
new file mode 100644
index 0000000..4e4f1d7
--- /dev/null
+++ b/src/zone.h
@@ -0,0 +1,305 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ZONE_H_
+#define V8_ZONE_H_
+
+namespace v8 {
+namespace internal {
+
+
+// Zone scopes are in one of two modes.  Either they delete the zone
+// on exit or they do not.
+enum ZoneScopeMode {
+  DELETE_ON_EXIT,
+  DONT_DELETE_ON_EXIT
+};
+
+
+// The Zone supports very fast allocation of small chunks of
+// memory. The chunks cannot be deallocated individually, but instead
+// the Zone supports deallocating all chunks in one fast
+// operation. The Zone is used to hold temporary data structures like
+// the abstract syntax tree, which is deallocated after compilation.
+
+// Note: There is no need to initialize the Zone; the first time an
+// allocation is attempted, a segment of memory will be requested
+// through a call to malloc().
+
+// Note: The implementation is inherently not thread safe. Do not use
+// from multi-threaded code.
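+
+// A minimal usage sketch (illustrative only; Example and Temp are
+// hypothetical names, not defined in V8):
+//
+//   class Temp : public ZoneObject { ... };
+//
+//   void Example() {
+//     ZoneScope scope(DELETE_ON_EXIT);        // delete the Zone on exit
+//     int* counts = Zone::NewArray<int>(16);  // raw array in the Zone
+//     Temp* t = new Temp();                   // allocated via Zone::New()
+//     ...
+//   }  // Zone memory is reclaimed here (outermost DELETE_ON_EXIT scope)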
+
+class Zone {
+ public:
+  // Allocate 'size' bytes of memory in the Zone; expands the Zone by
+  // allocating new segments of memory on demand using malloc().
+  static inline void* New(int size);
+
+  template <typename T>
+  static inline T* NewArray(int length);
+
+  // Delete all objects and free all memory allocated in the Zone.
+  static void DeleteAll();
+
+  // Returns true if more memory has been allocated in zones than
+  // the limit allows.
+  static inline bool excess_allocation();
+
+  static inline void adjust_segment_bytes_allocated(int delta);
+
+ private:
+
+  // All pointers returned from New() have this alignment.
+  static const int kAlignment = kPointerSize;
+
+  // Never allocate segments smaller than this size in bytes.
+  static const int kMinimumSegmentSize = 8 * KB;
+
+  // Never allocate segments larger than this size in bytes.
+  static const int kMaximumSegmentSize = 1 * MB;
+
+  // Never keep segments larger than this size in bytes around.
+  static const int kMaximumKeptSegmentSize = 64 * KB;
+
+  // Report zone excess when allocation exceeds this limit.
+  static int zone_excess_limit_;
+
+  // The number of bytes allocated in segments.  Note that this number
+  // includes memory allocated from the OS but not yet allocated from
+  // the zone.
+  static int segment_bytes_allocated_;
+
+  // The Zone is intentionally a singleton; you should not try to
+  // allocate instances of the class.
+  Zone() { UNREACHABLE(); }
+
+
+  // Expand the Zone to hold at least 'size' more bytes and allocate
+  // the bytes. Returns the address of the newly allocated chunk of
+  // memory in the Zone. Should only be called if there isn't enough
+  // room in the Zone already.
+  static Address NewExpand(int size);
+
+
+  // The free region in the current (front) segment is represented as
+  // the half-open interval [position, limit). The 'position' variable
+  // is guaranteed to be aligned as dictated by kAlignment.
+  static Address position_;
+  static Address limit_;
+};
+
+
+// ZoneObject is an abstraction that helps define classes of objects
+// allocated in the Zone. Use it as a base class; see ast.h.
+class ZoneObject {
+ public:
+  // Allocate a new ZoneObject of 'size' bytes in the Zone.
+  void* operator new(size_t size) { return Zone::New(size); }
+
+  // Ideally, the delete operator should be private instead of
+  // public, but unfortunately the compiler sometimes synthesizes
+  // (unused) destructors for classes derived from ZoneObject, which
+  // require the operator to be visible. MSVC requires the delete
+  // operator to be public.
+
+  // ZoneObjects should never be deleted individually; use
+  // Zone::DeleteAll() to delete all zone objects in one go.
+  void operator delete(void*, size_t) { UNREACHABLE(); }
+};
+
+
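+// While an AssertNoZoneAllocation instance is live, allow_allocation()
+// returns false; the previous value is restored when the instance goes
+// out of scope. Zone allocation code can check this flag to catch
+// allocations in regions where none are expected.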
+class AssertNoZoneAllocation {
+ public:
+  AssertNoZoneAllocation() : prev_(allow_allocation_) {
+    allow_allocation_ = false;
+  }
+  ~AssertNoZoneAllocation() { allow_allocation_ = prev_; }
+  static bool allow_allocation() { return allow_allocation_; }
+ private:
+  bool prev_;
+  static bool allow_allocation_;
+};
+
+
+// The ZoneListAllocationPolicy is used to specialize the GenericList
+// implementation to allocate ZoneLists and their elements in the
+// Zone.
+class ZoneListAllocationPolicy {
+ public:
+  // Allocate 'size' bytes of memory in the zone.
+  static void* New(int size) { return Zone::New(size); }
+
+  // De-allocation attempts are silently ignored.
+  static void Delete(void* p) { }
+};
+
+
+// ZoneLists are growable lists with constant-time access to the
+// elements. The list itself and all its elements are allocated in the
+// Zone. ZoneLists cannot be deleted individually; you can delete all
+// objects in the Zone by calling Zone::DeleteAll().
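+//
+// A short usage sketch (illustrative only; 'samples' is a made-up name):
+//
+//   ZoneList<int> samples(4);   // backing store is allocated in the Zone
+//   samples.Add(42);            // grows in the Zone if needed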
+template<typename T>
+class ZoneList: public List<T, ZoneListAllocationPolicy> {
+ public:
+  // Construct a new ZoneList with the given capacity; the length is
+  // always zero. The capacity must be non-negative.
+  explicit ZoneList(int capacity)
+      : List<T, ZoneListAllocationPolicy>(capacity) { }
+};
+
+
+// ZoneScopes keep track of the current parsing and compilation
+// nesting and clean up generated ASTs in the Zone when exiting the
+// outermost scope.
+class ZoneScope BASE_EMBEDDED {
+ public:
+  explicit ZoneScope(ZoneScopeMode mode) : mode_(mode) {
+    nesting_++;
+  }
+
+  virtual ~ZoneScope() {
+    if (ShouldDeleteOnExit()) Zone::DeleteAll();
+    --nesting_;
+  }
+
+  bool ShouldDeleteOnExit() {
+    return nesting_ == 1 && mode_ == DELETE_ON_EXIT;
+  }
+
+  // For ZoneScopes that do not delete on exit by default, call this
+  // method to request deletion on exit.
+  void DeleteOnExit() {
+    mode_ = DELETE_ON_EXIT;
+  }
+
+  static int nesting() { return nesting_; }
+
+ private:
+  ZoneScopeMode mode_;
+  static int nesting_;
+};
+
+
+// A zone splay tree.  The config type parameter encapsulates the
+// different configurations of a concrete splay tree:
+//
+//   typedef Key: the key type
+//   typedef Value: the value type
+//   static const kNoKey: the dummy key used when no key is set
+//   static const kNoValue: the dummy value used to initialize nodes
+//   int (Compare)(Key& a, Key& b) -> {-1, 0, 1}: comparison function
+//
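+// For example, a tree mapping integers to integers could use a config
+// like the following (an illustrative sketch; IntKeyConfig is a
+// hypothetical name, not defined in V8):
+//
+//   struct IntKeyConfig {
+//     typedef int Key;
+//     typedef int Value;
+//     static const int kNoKey = -1;
+//     static const int kNoValue = 0;
+//     static int Compare(int a, int b) {
+//       if (a < b) return -1;
+//       if (a > b) return 1;
+//       return 0;
+//     }
+//   };
+//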
+template <typename Config>
+class ZoneSplayTree : public ZoneObject {
+ public:
+  typedef typename Config::Key Key;
+  typedef typename Config::Value Value;
+
+  class Locator;
+
+  ZoneSplayTree() : root_(NULL) { }
+
+  // Inserts the given key in this tree with the given value.  Returns
+  // true if a node was inserted, otherwise false.  If found, the locator
+  // is enabled and provides access to the mapping for the key.
+  bool Insert(const Key& key, Locator* locator);
+
+  // Looks up the key in this tree and returns true if it was found,
+  // otherwise false.  If the node is found, the locator is enabled and
+  // provides access to the mapping for the key.
+  bool Find(const Key& key, Locator* locator);
+
+  // Finds the mapping with the greatest key less than or equal to the
+  // given key.
+  bool FindGreatestLessThan(const Key& key, Locator* locator);
+
+  // Finds the mapping with the greatest key in this tree.
+  bool FindGreatest(Locator* locator);
+
+  // Finds the mapping with the least key greater than or equal to the
+  // given key.
+  bool FindLeastGreaterThan(const Key& key, Locator* locator);
+
+  // Finds the mapping with the least key in this tree.
+  bool FindLeast(Locator* locator);
+
+  // Removes the node with the given key from the tree.
+  bool Remove(const Key& key);
+
+  bool is_empty() { return root_ == NULL; }
+
+  // Performs the splay operation for the given key. Moves the node with
+  // the given key to the top of the tree.  If no node has the given
+  // key, the last node on the search path is moved to the top of the
+  // tree.
+  void Splay(const Key& key);
+
+  class Node : public ZoneObject {
+   public:
+    Node(const Key& key, const Value& value)
+        : key_(key),
+          value_(value),
+          left_(NULL),
+          right_(NULL) { }
+    Key key() { return key_; }
+    Value value() { return value_; }
+    Node* left() { return left_; }
+    Node* right() { return right_; }
+   private:
+    friend class ZoneSplayTree;
+    friend class Locator;
+    Key key_;
+    Value value_;
+    Node* left_;
+    Node* right_;
+  };
+
+  // A locator provides access to a node in the tree without actually
+  // exposing the node.
+  class Locator {
+   public:
+    explicit Locator(Node* node) : node_(node) { }
+    Locator() : node_(NULL) { }
+    const Key& key() { return node_->key_; }
+    Value& value() { return node_->value_; }
+    void set_value(const Value& value) { node_->value_ = value; }
+    inline void bind(Node* node) { node_ = node; }
+   private:
+    Node* node_;
+  };
+
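+  // Calls callback->Call(key, value) for each node in the tree; the
+  // nodes are not visited in sorted key order.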
+  template <class Callback>
+  void ForEach(Callback* callback);
+
+ private:
+  Node* root_;
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_ZONE_H_